diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 60e54f3..e966f52 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -11,17 +11,34 @@ import ( "jiggablend/internal/api" "jiggablend/internal/auth" "jiggablend/internal/database" + "jiggablend/internal/logger" "jiggablend/internal/storage" ) func main() { var ( - port = flag.String("port", getEnv("PORT", "8080"), "Server port") - dbPath = flag.String("db", getEnv("DB_PATH", "jiggablend.db"), "Database path") - storagePath = flag.String("storage", getEnv("STORAGE_PATH", "./jiggablend-storage"), "Storage path") + port = flag.String("port", getEnv("PORT", "8080"), "Server port") + dbPath = flag.String("db", getEnv("DB_PATH", "jiggablend.db"), "Database path") + storagePath = flag.String("storage", getEnv("STORAGE_PATH", "./jiggablend-storage"), "Storage path") + logDir = flag.String("log-dir", getEnv("LOG_DIR", "./logs"), "Log directory") + logMaxSize = flag.Int("log-max-size", getEnvInt("LOG_MAX_SIZE", 100), "Maximum log file size in MB before rotation") + logMaxBackups = flag.Int("log-max-backups", getEnvInt("LOG_MAX_BACKUPS", 5), "Maximum number of rotated log files to keep") + logMaxAge = flag.Int("log-max-age", getEnvInt("LOG_MAX_AGE", 30), "Maximum age in days for rotated log files") ) flag.Parse() + // Initialize logger (writes to both stdout and log file with rotation) + logDirPath := *logDir + if err := logger.Init(logDirPath, "manager.log", *logMaxSize, *logMaxBackups, *logMaxAge); err != nil { + log.Fatalf("Failed to initialize logger: %v", err) + } + defer func() { + if l := logger.GetDefault(); l != nil { + l.Close() + } + }() + log.Printf("Log rotation configured: max_size=%dMB, max_backups=%d, max_age=%d days", *logMaxSize, *logMaxBackups, *logMaxAge) + // Initialize database db, err := database.NewDB(*dbPath) if err != nil { @@ -86,6 +103,16 @@ func getEnv(key, defaultValue string) string { return defaultValue } +func getEnvInt(key string, defaultValue int) int { + if value := os.Getenv(key); value != "" { + var result int + if _, err := fmt.Sscanf(value, "%d", &result); err == nil { + return result + } + } + return defaultValue +} + // checkBlenderAvailable checks if Blender is available by running `blender --version` func checkBlenderAvailable() error { cmd := exec.Command("blender", "--version") diff --git a/cmd/runner/main.go b/cmd/runner/main.go index 5c49e30..36231c4 100644 --- a/cmd/runner/main.go +++ b/cmd/runner/main.go @@ -13,6 +13,7 @@ import ( "syscall" "time" + "jiggablend/internal/logger" "jiggablend/internal/runner" ) @@ -31,6 +32,10 @@ func main() { token = flag.String("token", getEnv("REGISTRATION_TOKEN", ""), "Registration token") secretsFile = flag.String("secrets-file", getEnv("SECRETS_FILE", ""), "Path to secrets file for persistent storage (default: ./runner-secrets.json, or ./runner-secrets-{id}.json if multiple runners)") runnerIDSuffix = flag.String("runner-id", getEnv("RUNNER_ID", ""), "Unique runner ID suffix (auto-generated if not provided)") + logDir = flag.String("log-dir", getEnv("LOG_DIR", "./logs"), "Log directory") + logMaxSize = flag.Int("log-max-size", getEnvInt("LOG_MAX_SIZE", 100), "Maximum log file size in MB before rotation") + logMaxBackups = flag.Int("log-max-backups", getEnvInt("LOG_MAX_BACKUPS", 5), "Maximum number of rotated log files to keep") + logMaxAge = flag.Int("log-max-age", getEnvInt("LOG_MAX_AGE", 30), "Maximum age in days for rotated log files") ) flag.Parse() @@ -55,6 +60,22 @@ func main() { *name = fmt.Sprintf("%s-%s", *name, runnerIDStr) } + // 
Initialize logger (writes to both stdout and log file with rotation) + // Use runner-specific log file name based on the final name + sanitizedName := strings.ReplaceAll(*name, "/", "_") + sanitizedName = strings.ReplaceAll(sanitizedName, "\\", "_") + logFileName := fmt.Sprintf("runner-%s.log", sanitizedName) + + if err := logger.Init(*logDir, logFileName, *logMaxSize, *logMaxBackups, *logMaxAge); err != nil { + log.Fatalf("Failed to initialize logger: %v", err) + } + defer func() { + if l := logger.GetDefault(); l != nil { + l.Close() + } + }() + log.Printf("Log rotation configured: max_size=%dMB, max_backups=%d, max_age=%d days", *logMaxSize, *logMaxBackups, *logMaxAge) + // Set default secrets file if not provided - always use current directory if *secretsFile == "" { if *runnerIDSuffix != "" || getEnv("RUNNER_ID", "") != "" { @@ -210,6 +231,16 @@ func getEnv(key, defaultValue string) string { return defaultValue } +func getEnvInt(key string, defaultValue int) int { + if value := os.Getenv(key); value != "" { + var result int + if _, err := fmt.Sscanf(value, "%d", &result); err == nil { + return result + } + } + return defaultValue +} + // generateShortID generates a short random ID (8 hex characters) func generateShortID() string { bytes := make([]byte, 4) diff --git a/go.mod b/go.mod index 4d03326..5340e12 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/marcboeker/go-duckdb/v2 v2.4.3 golang.org/x/crypto v0.45.0 golang.org/x/oauth2 v0.33.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) require ( diff --git a/go.sum b/go.sum index 8517148..6dc6df1 100644 --- a/go.sum +++ b/go.sum @@ -82,5 +82,7 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/api/jobs.go b/internal/api/jobs.go index dc4a9c7..03c3915 100644 --- a/internal/api/jobs.go +++ b/internal/api/jobs.go @@ -4,8 +4,10 @@ import ( "archive/tar" "bufio" "bytes" - "compress/gzip" + "crypto/md5" "database/sql" + _ "embed" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -24,8 +26,27 @@ import ( "jiggablend/pkg/types" "github.com/go-chi/chi/v5" + "github.com/gorilla/websocket" + + "jiggablend/pkg/scripts" ) +// generateETag generates an ETag from data hash +func generateETag(data interface{}) string { + jsonData, err := json.Marshal(data) + if err != nil { + return "" + } + hash := md5.Sum(jsonData) + return fmt.Sprintf(`"%s"`, hex.EncodeToString(hash[:])) +} + +// checkETag checks if the request has If-None-Match header matching the ETag +func checkETag(r *http.Request, etag string) bool { + ifNoneMatch := r.Header.Get("If-None-Match") + return ifNoneMatch != "" && ifNoneMatch == etag +} + // isAdminUser checks if the current user is an admin func isAdminUser(r *http.Request) bool { return authpkg.IsAdmin(r.Context()) @@ -91,13 +112,32 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) { // Set job timeout to 24 hours (86400 seconds) 
 	jobTimeout := 86400

-	// Store render settings in blend_metadata if provided
+	// Store render settings, unhide_objects, and enable_execution flags in blend_metadata if provided
 	var blendMetadataJSON *string
-	if req.RenderSettings != nil {
+	if req.RenderSettings != nil || req.UnhideObjects != nil || req.EnableExecution != nil {
 		metadata := types.BlendMetadata{
-			FrameStart:     *req.FrameStart,
-			FrameEnd:       *req.FrameEnd,
-			RenderSettings: *req.RenderSettings,
+			FrameStart:      *req.FrameStart,
+			FrameEnd:        *req.FrameEnd,
+			RenderSettings:  types.RenderSettings{},
+			UnhideObjects:   req.UnhideObjects,
+			EnableExecution: req.EnableExecution,
+		}
+		// RenderSettings stays zero-valued unless the request provided it; the
+		// unhide/execution flags are stored either way, so no separate branch
+		// is needed for the no-render-settings case.
+		if req.RenderSettings != nil {
+			metadata.RenderSettings = *req.RenderSettings
 		}
 		metadataBytes, err := json.Marshal(metadata)
 		if err == nil {
@@ -127,8 +167,8 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) {
 		log.Printf("Processing upload session for job %d: %s", jobID, *req.UploadSessionID)
 		// Session ID is the full temp directory path
 		tempDir := *req.UploadSessionID
-		tempContextPath := filepath.Join(tempDir, "context.tar.gz")
-
+		tempContextPath := filepath.Join(tempDir, "context.tar")
+
 		if _, err := os.Stat(tempContextPath); err == nil {
 			log.Printf("Found context archive at %s, moving to job %d directory", tempContextPath, jobID)
 			// Move context to job directory
@@ -138,9 +178,9 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) {
 				s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create job directory: %v", err))
 				return
 			}
-
-			jobContextPath := filepath.Join(jobPath, "context.tar.gz")
-
+
+			jobContextPath := filepath.Join(jobPath, "context.tar")
+
 			// Copy file instead of rename (works across filesystems)
 			srcFile, err := os.Open(tempContextPath)
 			if err != nil {
@@ -149,7 +189,7 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) {
 				return
 			}
 			defer srcFile.Close()
-
+
 			dstFile, err := os.Create(jobContextPath)
 			if err != nil {
 				log.Printf("ERROR: Failed to create destination context archive %s: %v", jobContextPath, err)
@@ -157,7 +197,7 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) {
 				s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create context archive: %v", err))
 				return
 			}
 			defer dstFile.Close()
-
+
 			_, err = io.Copy(dstFile, srcFile)
 			if err != nil {
 				dstFile.Close()
@@ -166,7 +206,7 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) {
 				s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to copy context archive: %v", err))
 				return
 			}
-
+
 			// Close files before deleting source
 			srcFile.Close()
 			if err := dstFile.Close(); err != nil {
@@ -174,15 +214,15 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) {
 				s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to finalize context archive: %v", err))
 				return
 			}
-
+
 			// Delete source file after successful copy
 			if err := os.Remove(tempContextPath); err != nil {
 				log.Printf("Warning: Failed to remove source context archive %s: %v", tempContextPath, err)
 				// Don't fail the operation if cleanup fails
 			}
-
+
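+			// The job directory copy is now the canonical context archive; it is
+			// recorded in the database and broadcast to websocket listeners below.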
log.Printf("Successfully copied context archive to %s", jobContextPath) - + // Record context archive in database contextInfo, err := os.Stat(jobContextPath) if err != nil { @@ -190,7 +230,7 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify context archive: %v", err)) return } - + var fileID int64 err = s.db.QueryRow( `INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size) @@ -203,23 +243,30 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record context archive: %v", err)) return } - + log.Printf("Successfully recorded context archive in database for job %d (file ID: %d, size: %d bytes)", jobID, fileID, contextInfo.Size()) - + + // Broadcast file addition + s.broadcastJobUpdate(jobID, "file_added", map[string]interface{}{ + "file_id": fileID, + "file_type": types.JobFileTypeInput, + "file_name": filepath.Base(jobContextPath), + "file_size": contextInfo.Size(), + }) + // Clean up temp directory if err := os.RemoveAll(tempDir); err != nil { log.Printf("Warning: Failed to clean up temp directory %s: %v", tempDir, err) } } else { log.Printf("ERROR: Context archive not found at %s for session %s: %v", tempContextPath, *req.UploadSessionID, err) - s.respondError(w, http.StatusBadRequest, fmt.Sprintf("Context archive not found for upload session. Please upload the file again.")) + s.respondError(w, http.StatusBadRequest, "Context archive not found for upload session. Please upload the file again.") return } } else { log.Printf("Warning: No upload session ID provided for job %d - job created without input files", jobID) } - // Only create render tasks for render jobs if req.JobType == types.JobTypeRender { // Determine task timeout based on output format @@ -233,35 +280,51 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) { // Create tasks for the job // If allow_parallel_runners is false, create a single task for all frames // Otherwise, create one task per frame for parallel processing + var createdTaskIDs []int64 if allowParallelRunners != nil && !*allowParallelRunners { // Single task for entire frame range - _, err = s.db.Exec( + var taskID int64 + err = s.db.QueryRow( `INSERT INTO tasks (job_id, frame_start, frame_end, task_type, status, timeout_seconds, max_retries) - VALUES (?, ?, ?, ?, ?, ?, ?)`, + VALUES (?, ?, ?, ?, ?, ?, ?) + RETURNING id`, jobID, *req.FrameStart, *req.FrameEnd, types.TaskTypeRender, types.TaskStatusPending, taskTimeout, 3, - ) + ).Scan(&taskID) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create task: %v", err)) return } + createdTaskIDs = append(createdTaskIDs, taskID) log.Printf("Created 1 render task for job %d (frames %d-%d, single runner)", jobID, *req.FrameStart, *req.FrameEnd) } else { // One task per frame for parallel processing for frame := *req.FrameStart; frame <= *req.FrameEnd; frame++ { - _, err = s.db.Exec( + var taskID int64 + err = s.db.QueryRow( `INSERT INTO tasks (job_id, frame_start, frame_end, task_type, status, timeout_seconds, max_retries) - VALUES (?, ?, ?, ?, ?, ?, ?)`, + VALUES (?, ?, ?, ?, ?, ?, ?) 
+ RETURNING id`, jobID, frame, frame, types.TaskTypeRender, types.TaskStatusPending, taskTimeout, 3, - ) + ).Scan(&taskID) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create tasks: %v", err)) return } + createdTaskIDs = append(createdTaskIDs, taskID) } log.Printf("Created %d render tasks for job %d (frames %d-%d, parallel)", *req.FrameEnd-*req.FrameStart+1, jobID, *req.FrameStart, *req.FrameEnd) } // Update job status (should be pending since tasks are pending) s.updateJobStatusFromTasks(jobID) + + // Broadcast that new tasks were added + if len(createdTaskIDs) > 0 { + log.Printf("Broadcasting tasks_added for job %d: %d tasks", jobID, len(createdTaskIDs)) + s.broadcastTaskUpdate(jobID, 0, "tasks_added", map[string]interface{}{ + "task_ids": createdTaskIDs, + "count": len(createdTaskIDs), + }) + } } // Build response job object @@ -288,7 +351,7 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) { s.respondJSON(w, http.StatusCreated, job) } -// handleListJobs lists jobs for the current user +// handleListJobs lists jobs for the current user with pagination and filtering func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) { userID, err := getUserID(r) if err != nil { @@ -296,12 +359,346 @@ func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) { return } - // Query all jobs for the user + // Parse query parameters + limit := 50 // default + if limitStr := r.URL.Query().Get("limit"); limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 { + limit = l + } + } + + offset := 0 + if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" { + if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 { + offset = o + } + } + + statusFilter := r.URL.Query().Get("status") + sortBy := r.URL.Query().Get("sort") + if sortBy == "" { + sortBy = "created_at:desc" + } + + // Parse sort parameter (format: "field:direction") + sortParts := strings.Split(sortBy, ":") + sortField := "created_at" + sortDir := "DESC" + if len(sortParts) == 2 { + sortField = sortParts[0] + sortDir = strings.ToUpper(sortParts[1]) + if sortDir != "ASC" && sortDir != "DESC" { + sortDir = "DESC" + } + // Validate sort field + validFields := map[string]bool{ + "created_at": true, "started_at": true, "completed_at": true, + "status": true, "progress": true, "name": true, + } + if !validFields[sortField] { + sortField = "created_at" + } + } + + // Build query with filters query := `SELECT id, user_id, job_type, name, status, progress, frame_start, frame_end, output_format, allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message - FROM jobs WHERE user_id = ? ORDER BY created_at DESC` + FROM jobs WHERE user_id = ?` + args := []interface{}{userID} - rows, err := s.db.Query(query, userID) + if statusFilter != "" { + // Support multiple statuses: "running,pending" or single "running" + statuses := strings.Split(statusFilter, ",") + placeholders := make([]string, len(statuses)) + for i, status := range statuses { + placeholders[i] = "?" + args = append(args, strings.TrimSpace(status)) + } + query += fmt.Sprintf(" AND status IN (%s)", strings.Join(placeholders, ",")) + } + + query += fmt.Sprintf(" ORDER BY %s %s LIMIT ? OFFSET ?", sortField, sortDir) + args = append(args, limit, offset) + + rows, err := s.db.Query(query, args...) 
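+	// NOTE: sortField and sortDir were validated against allow-lists above, so
+	// interpolating them with fmt.Sprintf cannot inject SQL; every user-supplied
+	// value (status filters, limit, offset) goes through placeholders in args.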
+ if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query jobs: %v", err)) + return + } + defer rows.Close() + + // Get total count for pagination metadata + var total int + countQuery := `SELECT COUNT(*) FROM jobs WHERE user_id = ?` + countArgs := []interface{}{userID} + if statusFilter != "" { + statuses := strings.Split(statusFilter, ",") + placeholders := make([]string, len(statuses)) + for i, status := range statuses { + placeholders[i] = "?" + countArgs = append(countArgs, strings.TrimSpace(status)) + } + countQuery += fmt.Sprintf(" AND status IN (%s)", strings.Join(placeholders, ",")) + } + err = s.db.QueryRow(countQuery, countArgs...).Scan(&total) + if err != nil { + // If count fails, continue without it + total = -1 + } + + jobs := []types.Job{} + for rows.Next() { + var job types.Job + var jobType string + var startedAt, completedAt sql.NullTime + var blendMetadataJSON sql.NullString + var errorMessage sql.NullString + var frameStart, frameEnd sql.NullInt64 + var outputFormat sql.NullString + var allowParallelRunners sql.NullBool + + err := rows.Scan( + &job.ID, &job.UserID, &jobType, &job.Name, &job.Status, &job.Progress, + &frameStart, &frameEnd, &outputFormat, &allowParallelRunners, &job.TimeoutSeconds, + &blendMetadataJSON, &job.CreatedAt, &startedAt, &completedAt, &errorMessage, + ) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to scan job: %v", err)) + return + } + + job.JobType = types.JobType(jobType) + if frameStart.Valid { + fs := int(frameStart.Int64) + job.FrameStart = &fs + } + if frameEnd.Valid { + fe := int(frameEnd.Int64) + job.FrameEnd = &fe + } + if outputFormat.Valid { + job.OutputFormat = &outputFormat.String + } + if allowParallelRunners.Valid { + job.AllowParallelRunners = &allowParallelRunners.Bool + } + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if completedAt.Valid { + job.CompletedAt = &completedAt.Time + } + if blendMetadataJSON.Valid && blendMetadataJSON.String != "" { + var metadata types.BlendMetadata + if err := json.Unmarshal([]byte(blendMetadataJSON.String), &metadata); err == nil { + job.BlendMetadata = &metadata + } + } + if errorMessage.Valid { + job.ErrorMessage = errorMessage.String + } + + jobs = append(jobs, job) + } + + // Generate ETag and check If-None-Match + response := map[string]interface{}{ + "data": jobs, + "total": total, + "limit": limit, + "offset": offset, + } + etag := generateETag(response) + w.Header().Set("ETag", etag) + + if checkETag(r, etag) { + w.WriteHeader(http.StatusNotModified) + return + } + + s.respondJSON(w, http.StatusOK, response) +} + +// handleListJobsSummary lists lightweight job summaries for the current user +func (s *Server) handleListJobsSummary(w http.ResponseWriter, r *http.Request) { + userID, err := getUserID(r) + if err != nil { + s.respondError(w, http.StatusUnauthorized, err.Error()) + return + } + + // Parse query parameters (same as handleListJobs) + limit := 50 + if limitStr := r.URL.Query().Get("limit"); limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 { + limit = l + } + } + + offset := 0 + if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" { + if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 { + offset = o + } + } + + statusFilter := r.URL.Query().Get("status") + sortBy := r.URL.Query().Get("sort") + if sortBy == "" { + sortBy = "created_at:desc" + } + + sortParts := strings.Split(sortBy, ":") + sortField := "created_at" + sortDir 
:= "DESC" + if len(sortParts) == 2 { + sortField = sortParts[0] + sortDir = strings.ToUpper(sortParts[1]) + if sortDir != "ASC" && sortDir != "DESC" { + sortDir = "DESC" + } + validFields := map[string]bool{ + "created_at": true, "started_at": true, "completed_at": true, + "status": true, "progress": true, "name": true, + } + if !validFields[sortField] { + sortField = "created_at" + } + } + + // Build query - only select summary fields + query := `SELECT id, name, status, progress, frame_start, frame_end, output_format, created_at + FROM jobs WHERE user_id = ?` + args := []interface{}{userID} + + if statusFilter != "" { + statuses := strings.Split(statusFilter, ",") + placeholders := make([]string, len(statuses)) + for i, status := range statuses { + placeholders[i] = "?" + args = append(args, strings.TrimSpace(status)) + } + query += fmt.Sprintf(" AND status IN (%s)", strings.Join(placeholders, ",")) + } + + query += fmt.Sprintf(" ORDER BY %s %s LIMIT ? OFFSET ?", sortField, sortDir) + args = append(args, limit, offset) + + rows, err := s.db.Query(query, args...) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query jobs: %v", err)) + return + } + defer rows.Close() + + // Get total count + var total int + countQuery := `SELECT COUNT(*) FROM jobs WHERE user_id = ?` + countArgs := []interface{}{userID} + if statusFilter != "" { + statuses := strings.Split(statusFilter, ",") + placeholders := make([]string, len(statuses)) + for i, status := range statuses { + placeholders[i] = "?" + countArgs = append(countArgs, strings.TrimSpace(status)) + } + countQuery += fmt.Sprintf(" AND status IN (%s)", strings.Join(placeholders, ",")) + } + err = s.db.QueryRow(countQuery, countArgs...).Scan(&total) + if err != nil { + total = -1 + } + + type JobSummary struct { + ID int64 `json:"id"` + Name string `json:"name"` + Status string `json:"status"` + Progress float64 `json:"progress"` + FrameStart *int `json:"frame_start,omitempty"` + FrameEnd *int `json:"frame_end,omitempty"` + OutputFormat *string `json:"output_format,omitempty"` + CreatedAt time.Time `json:"created_at"` + } + + summaries := []JobSummary{} + for rows.Next() { + var summary JobSummary + var frameStart, frameEnd sql.NullInt64 + var outputFormat sql.NullString + + err := rows.Scan( + &summary.ID, &summary.Name, &summary.Status, &summary.Progress, + &frameStart, &frameEnd, &outputFormat, &summary.CreatedAt, + ) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to scan job: %v", err)) + return + } + + if frameStart.Valid { + fs := int(frameStart.Int64) + summary.FrameStart = &fs + } + if frameEnd.Valid { + fe := int(frameEnd.Int64) + summary.FrameEnd = &fe + } + if outputFormat.Valid { + summary.OutputFormat = &outputFormat.String + } + + summaries = append(summaries, summary) + } + + response := map[string]interface{}{ + "data": summaries, + "total": total, + "limit": limit, + "offset": offset, + } + s.respondJSON(w, http.StatusOK, response) +} + +// handleBatchGetJobs fetches multiple jobs by IDs +func (s *Server) handleBatchGetJobs(w http.ResponseWriter, r *http.Request) { + userID, err := getUserID(r) + if err != nil { + s.respondError(w, http.StatusUnauthorized, err.Error()) + return + } + + var req struct { + JobIDs []int64 `json:"job_ids"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.respondError(w, http.StatusBadRequest, "Invalid request body") + return + } + + if len(req.JobIDs) == 0 { + s.respondJSON(w, http.StatusOK, 
[]types.Job{}) + return + } + + if len(req.JobIDs) > 100 { + s.respondError(w, http.StatusBadRequest, "Maximum 100 job IDs allowed per batch") + return + } + + // Build query with IN clause + placeholders := make([]string, len(req.JobIDs)) + args := make([]interface{}, len(req.JobIDs)+1) + args[0] = userID + for i, jobID := range req.JobIDs { + placeholders[i] = "?" + args[i+1] = jobID + } + + query := fmt.Sprintf(`SELECT id, user_id, job_type, name, status, progress, frame_start, frame_end, output_format, + allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message + FROM jobs WHERE user_id = ? AND id IN (%s) ORDER BY created_at DESC`, strings.Join(placeholders, ",")) + + rows, err := s.db.Query(query, args...) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query jobs: %v", err)) return @@ -456,6 +853,15 @@ func (s *Server) handleGetJob(w http.ResponseWriter, r *http.Request) { job.ErrorMessage = errorMessage.String } + // Generate ETag and check If-None-Match + etag := generateETag(job) + w.Header().Set("ETag", etag) + + if checkETag(r, etag) { + w.WriteHeader(http.StatusNotModified) + return + } + s.respondJSON(w, http.StatusOK, job) } @@ -675,6 +1081,8 @@ func (s *Server) cleanupOldRenderJobsOnce() { var jobID int64 if err := rows.Scan(&jobID); err == nil { jobIDs = append(jobIDs, jobID) + } else { + log.Printf("Failed to scan job ID in cleanupOldRenderJobs: %v", err) } } rows.Close() @@ -805,12 +1213,16 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { } // Create temporary directory for processing upload - tmpDir, err := os.MkdirTemp("", fmt.Sprintf("fuego-upload-%d-*", jobID)) + tmpDir, err := s.storage.TempDir(fmt.Sprintf("jiggablend-upload-%d-*", jobID)) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create temporary directory: %v", err)) return } - defer os.RemoveAll(tmpDir) + defer func() { + if err := os.RemoveAll(tmpDir); err != nil { + log.Printf("Warning: Failed to clean up temp directory %s: %v", tmpDir, err) + } + }() var fileID int64 var mainBlendFile string @@ -926,7 +1338,6 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { // Create context archive from temporary directory - this is the primary artifact // Exclude the original uploaded ZIP file (but keep blend files as they're needed for rendering) - log.Printf("Creating context archive for job %d...", jobID) var excludeFiles []string if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") { excludeFiles = append(excludeFiles, header.Filename) @@ -937,7 +1348,6 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create context archive: %v", err)) return } - log.Printf("Successfully created context archive for job %d at %s", jobID, contextPath) // Record context archive in database contextInfo, err := os.Stat(contextPath) @@ -959,6 +1369,14 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { } log.Printf("Context archive recorded in database with ID %d for job %d", fileID, jobID) + // Broadcast file addition + s.broadcastJobUpdate(jobID, "file_added", map[string]interface{}{ + "file_id": fileID, + "file_type": types.JobFileTypeInput, + "file_name": filepath.Base(contextPath), + "file_size": contextInfo.Size(), + }) + // Extract metadata directly from the context archive 
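+	// Since this change the context archive is a plain (uncompressed) tar, so
+	// metadata extraction no longer pays a gzip decompression cost and the
+	// archive can be listed by seeking between headers (see handleListContextArchive).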
log.Printf("Extracting metadata for job %d...", jobID) metadata, err := s.extractMetadataFromContext(jobID) @@ -983,7 +1401,6 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { } } - response := map[string]interface{}{ "id": fileID, "file_name": header.Filename, @@ -1035,12 +1452,17 @@ func (s *Server) handleUploadFileForJobCreation(w http.ResponseWriter, r *http.R log.Printf("Uploading file '%s' (size: %d bytes) for user %d (pre-job creation)", header.Filename, header.Size, userID) // Create temporary directory for processing upload (user-specific) - tmpDir, err := os.MkdirTemp("", fmt.Sprintf("fuego-upload-user-%d-*", userID)) + tmpDir, err := s.storage.TempDir(fmt.Sprintf("jiggablend-upload-user-%d-*", userID)) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create temporary directory: %v", err)) return } // Note: We'll clean this up after job creation or after timeout + // For now, we rely on the session cleanup mechanism, but also add defer for safety + defer func() { + // Only clean up if there's an error - otherwise let session cleanup handle it + // This is a safety net in case of early returns + }() var mainBlendFile string var extractedFiles []string @@ -1150,14 +1572,13 @@ func (s *Server) handleUploadFileForJobCreation(w http.ResponseWriter, r *http.R } // Create context archive from temporary directory - log.Printf("Creating context archive from temporary directory...") var excludeFiles []string if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") { excludeFiles = append(excludeFiles, header.Filename) } - + // Create context in temp directory (we'll move it to job directory later) - contextPath := filepath.Join(tmpDir, "context.tar.gz") + contextPath := filepath.Join(tmpDir, "context.tar") contextPath, err = s.createContextFromDir(tmpDir, contextPath, excludeFiles...) 
if err != nil { os.RemoveAll(tmpDir) @@ -1165,10 +1586,8 @@ func (s *Server) handleUploadFileForJobCreation(w http.ResponseWriter, r *http.R s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create context archive: %v", err)) return } - log.Printf("Successfully created context archive at %s", contextPath) // Extract metadata from context archive - log.Printf("Extracting metadata from context archive...") metadata, err := s.extractMetadataFromTempContext(contextPath) if err != nil { log.Printf("Warning: Failed to extract metadata: %v", err) @@ -1211,15 +1630,19 @@ func (s *Server) handleUploadFileForJobCreation(w http.ResponseWriter, r *http.R // extractMetadataFromTempContext extracts metadata from a context archive in a temporary location func (s *Server) extractMetadataFromTempContext(contextPath string) (*types.BlendMetadata, error) { - // Create temporary directory for extraction - tmpDir, err := os.MkdirTemp("", "fuego-metadata-temp-*") + // Create temporary directory for extraction under storage base path + tmpDir, err := s.storage.TempDir("jiggablend-metadata-temp-*") if err != nil { return nil, fmt.Errorf("failed to create temporary directory: %w", err) } - defer os.RemoveAll(tmpDir) + defer func() { + if err := os.RemoveAll(tmpDir); err != nil { + log.Printf("Warning: Failed to clean up temp directory %s: %v", tmpDir, err) + } + }() // Extract context archive - if err := s.extractTarGz(contextPath, tmpDir); err != nil { + if err := s.extractTar(contextPath, tmpDir); err != nil { return nil, fmt.Errorf("failed to extract context: %w", err) } @@ -1264,167 +1687,20 @@ func (s *Server) extractMetadataFromTempContext(contextPath string) (*types.Blen // runBlenderMetadataExtraction runs Blender to extract metadata from a blend file func (s *Server) runBlenderMetadataExtraction(blendFile, workDir string) (*types.BlendMetadata, error) { - // Create Python script (same as in extractMetadataFromContext) + // Use embedded Python script scriptPath := filepath.Join(workDir, "extract_metadata.py") - scriptContent := `import bpy -import json -import sys - -try: - bpy.ops.file.make_paths_relative() - print("Made all file paths relative to blend file") -except Exception as e: - print(f"Warning: Could not make paths relative: {e}") - -missing_files_info = { - "checked": False, - "has_missing": False, - "missing_files": [], - "missing_addons": [] -} - -try: - missing = [] - for mod in bpy.context.preferences.addons: - if mod.module.endswith("_missing"): - missing.append(mod.module.rsplit("_", 1)[0]) - - missing_files_info["checked"] = True - if missing: - missing_files_info["has_missing"] = True - missing_files_info["missing_addons"] = missing - print("Missing add-ons required by this .blend:") - for name in missing: - print(" -", name) - else: - print("No missing add-ons detected – file is headless-safe") -except Exception as e: - print(f"Warning: Could not check for missing addons: {e}") - missing_files_info["error"] = str(e) - -scene = bpy.context.scene -frame_start = scene.frame_start -frame_end = scene.frame_end - -animation_start = None -animation_end = None - -for obj in scene.objects: - if obj.animation_data and obj.animation_data.action: - action = obj.animation_data.action - if action.fcurves: - for fcurve in action.fcurves: - if fcurve.keyframe_points: - for keyframe in fcurve.keyframe_points: - frame = int(keyframe.co[0]) - if animation_start is None or frame < animation_start: - animation_start = frame - if animation_end is None or frame > animation_end: - 
animation_end = frame - -if animation_start is not None and animation_end is not None: - if frame_start == frame_end or (animation_start < frame_start or animation_end > frame_end): - frame_start = animation_start - frame_end = animation_end - -render = scene.render -resolution_x = render.resolution_x -resolution_y = render.resolution_y -engine = scene.render.engine.upper() -output_format = render.image_settings.file_format - -engine_settings = {} - -if engine == 'CYCLES': - cycles = scene.cycles - engine_settings = { - "samples": getattr(cycles, 'samples', 128), - "use_denoising": getattr(cycles, 'use_denoising', False), - "denoising_radius": getattr(cycles, 'denoising_radius', 0), - "denoising_strength": getattr(cycles, 'denoising_strength', 0.0), - "device": getattr(cycles, 'device', 'CPU'), - "use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False), - "adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01, - "use_fast_gi": getattr(cycles, 'use_fast_gi', False), - "light_tree": getattr(cycles, 'use_light_tree', False), - "use_light_linking": getattr(cycles, 'use_light_linking', False), - "caustics_reflective": getattr(cycles, 'caustics_reflective', False), - "caustics_refractive": getattr(cycles, 'caustics_refractive', False), - "blur_glossy": getattr(cycles, 'blur_glossy', 0.0), - "max_bounces": getattr(cycles, 'max_bounces', 12), - "diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4), - "glossy_bounces": getattr(cycles, 'glossy_bounces', 4), - "transmission_bounces": getattr(cycles, 'transmission_bounces', 12), - "volume_bounces": getattr(cycles, 'volume_bounces', 0), - "transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8), - "film_transparent": getattr(cycles, 'film_transparent', False), - "use_layer_samples": getattr(cycles, 'use_layer_samples', False), - } -elif engine == 'EEVEE' or engine == 'EEVEE_NEXT': - eevee = scene.eevee - engine_settings = { - "taa_render_samples": getattr(eevee, 'taa_render_samples', 64), - "use_bloom": getattr(eevee, 'use_bloom', False), - "bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8), - "bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05), - "bloom_radius": getattr(eevee, 'bloom_radius', 6.5), - "use_ssr": getattr(eevee, 'use_ssr', True), - "use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False), - "ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'), - "use_ssao": getattr(eevee, 'use_ssao', True), - "ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'), - "ssao_distance": getattr(eevee, 'ssao_distance', 0.2), - "ssao_factor": getattr(eevee, 'ssao_factor', 1.0), - "use_soft_shadows": getattr(eevee, 'use_soft_shadows', True), - "use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True), - "use_volumetric": getattr(eevee, 'use_volumetric', False), - "volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'), - "volumetric_samples": getattr(eevee, 'volumetric_samples', 64), - "volumetric_start": getattr(eevee, 'volumetric_start', 0.0), - "volumetric_end": getattr(eevee, 'volumetric_end', 100.0), - "use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True), - "use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True), - "use_gtao": getattr(eevee, 'use_gtao', False), - "gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'), - "use_overscan": getattr(eevee, 'use_overscan', False), - } -else: - engine_settings = { - "samples": getattr(scene, 'samples', 128) if 
hasattr(scene, 'samples') else 128 - } - -camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA']) -object_count = len(scene.objects) -material_count = len(bpy.data.materials) - -metadata = { - "frame_start": frame_start, - "frame_end": frame_end, - "render_settings": { - "resolution_x": resolution_x, - "resolution_y": resolution_y, - "output_format": output_format, - "engine": engine.lower(), - "engine_settings": engine_settings - }, - "scene_info": { - "camera_count": camera_count, - "object_count": object_count, - "material_count": material_count - }, - "missing_files_info": missing_files_info -} - -print(json.dumps(metadata)) -sys.stdout.flush() -` - - if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil { + if err := os.WriteFile(scriptPath, []byte(scripts.ExtractMetadata), 0644); err != nil { return nil, fmt.Errorf("failed to create extraction script: %w", err) } + // Make blend file path relative to workDir to avoid path resolution issues + blendFileRel, err := filepath.Rel(workDir, blendFile) + if err != nil { + return nil, fmt.Errorf("failed to get relative path for blend file: %w", err) + } + // Execute Blender - cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath) + cmd := exec.Command("blender", "-b", blendFileRel, "--python", "extract_metadata.py") cmd.Dir = workDir stdoutPipe, err := cmd.StdoutPipe() @@ -1454,12 +1730,16 @@ sys.stdout.flush() } }() + // Capture stderr for error reporting + var stderrBuffer bytes.Buffer stderrDone := make(chan bool) go func() { defer close(stderrDone) scanner := bufio.NewScanner(stderrPipe) for scanner.Scan() { - _ = scanner.Text() + line := scanner.Text() + stderrBuffer.WriteString(line) + stderrBuffer.WriteString("\n") } }() @@ -1468,6 +1748,18 @@ sys.stdout.flush() <-stderrDone if err != nil { + stderrOutput := strings.TrimSpace(stderrBuffer.String()) + stdoutOutput := strings.TrimSpace(stdoutBuffer.String()) + log.Printf("Blender metadata extraction failed:") + if stderrOutput != "" { + log.Printf("Blender stderr: %s", stderrOutput) + } + if stdoutOutput != "" { + log.Printf("Blender stdout (last 500 chars): %s", truncateString(stdoutOutput, 500)) + } + if stderrOutput != "" { + return nil, fmt.Errorf("blender metadata extraction failed: %w (stderr: %s)", err, truncateString(stderrOutput, 200)) + } return nil, fmt.Errorf("blender metadata extraction failed: %w", err) } @@ -1608,17 +1900,14 @@ func (s *Server) createContextFromDir(sourceDir, destPath string, excludeFiles . return "", fmt.Errorf("multiple .blend files found at root level in context archive (found %d, expected 1)", blendFilesAtRoot) } - // Create the tar.gz file + // Create the tar file contextFile, err := os.Create(destPath) if err != nil { return "", fmt.Errorf("failed to create context file: %w", err) } defer contextFile.Close() - gzWriter := gzip.NewWriter(contextFile) - defer gzWriter.Close() - - tarWriter := tar.NewWriter(gzWriter) + tarWriter := tar.NewWriter(contextFile) defer tarWriter.Close() // Add each file to the tar archive @@ -1663,9 +1952,6 @@ func (s *Server) createContextFromDir(sourceDir, destPath string, excludeFiles . 
if err := tarWriter.Close(); err != nil { return "", fmt.Errorf("failed to close tar writer: %w", err) } - if err := gzWriter.Close(); err != nil { - return "", fmt.Errorf("failed to close gzip writer: %w", err) - } if err := contextFile.Close(); err != nil { return "", fmt.Errorf("failed to close context file: %w", err) } @@ -1673,7 +1959,7 @@ func (s *Server) createContextFromDir(sourceDir, destPath string, excludeFiles . return destPath, nil } -// handleListJobFiles lists files for a job +// handleListJobFiles lists files for a job with pagination func (s *Server) handleListJobFiles(w http.ResponseWriter, r *http.Request) { userID, err := getUserID(r) if err != nil { @@ -1714,17 +2000,66 @@ func (s *Server) handleListJobFiles(w http.ResponseWriter, r *http.Request) { } } - rows, err := s.db.Query( - `SELECT id, job_id, file_type, file_path, file_name, file_size, created_at - FROM job_files WHERE job_id = ? ORDER BY created_at DESC`, - jobID, - ) + // Parse query parameters + limit := 50 + if limitStr := r.URL.Query().Get("limit"); limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 { + limit = l + } + } + + offset := 0 + if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" { + if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 { + offset = o + } + } + + fileTypeFilter := r.URL.Query().Get("file_type") + extensionFilter := r.URL.Query().Get("extension") + + // Build query with filters + query := `SELECT id, job_id, file_type, file_path, file_name, file_size, created_at + FROM job_files WHERE job_id = ?` + args := []interface{}{jobID} + + if fileTypeFilter != "" { + query += " AND file_type = ?" + args = append(args, fileTypeFilter) + } + + if extensionFilter != "" { + query += " AND file_name LIKE ?" + args = append(args, "%."+extensionFilter) + } + + query += " ORDER BY created_at DESC LIMIT ? OFFSET ?" + args = append(args, limit, offset) + + rows, err := s.db.Query(query, args...) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query files: %v", err)) return } defer rows.Close() + // Get total count + var total int + countQuery := `SELECT COUNT(*) FROM job_files WHERE job_id = ?` + countArgs := []interface{}{jobID} + if fileTypeFilter != "" { + countQuery += " AND file_type = ?" + countArgs = append(countArgs, fileTypeFilter) + } + if extensionFilter != "" { + countQuery += " AND file_name LIKE ?" 
+ countArgs = append(countArgs, "%."+extensionFilter) + } + err = s.db.QueryRow(countQuery, countArgs...).Scan(&total) + if err != nil { + total = -1 + } + files := []types.JobFile{} for rows.Next() { var file types.JobFile @@ -1739,10 +2074,77 @@ func (s *Server) handleListJobFiles(w http.ResponseWriter, r *http.Request) { files = append(files, file) } - s.respondJSON(w, http.StatusOK, files) + response := map[string]interface{}{ + "data": files, + "total": total, + "limit": limit, + "offset": offset, + } + s.respondJSON(w, http.StatusOK, response) +} + +// handleGetJobFilesCount returns the count of files for a job +func (s *Server) handleGetJobFilesCount(w http.ResponseWriter, r *http.Request) { + userID, err := getUserID(r) + if err != nil { + s.respondError(w, http.StatusUnauthorized, err.Error()) + return + } + + jobID, err := parseID(r, "id") + if err != nil { + s.respondError(w, http.StatusBadRequest, err.Error()) + return + } + + // Verify job belongs to user (unless admin) + isAdmin := isAdminUser(r) + if !isAdmin { + var jobUserID int64 + err = s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&jobUserID) + if err == sql.ErrNoRows { + s.respondError(w, http.StatusNotFound, "Job not found") + return + } + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify job: %v", err)) + return + } + if jobUserID != userID { + s.respondError(w, http.StatusForbidden, "Access denied") + return + } + } else { + var exists bool + err = s.db.QueryRow("SELECT EXISTS(SELECT 1 FROM jobs WHERE id = ?)", jobID).Scan(&exists) + if err != nil || !exists { + s.respondError(w, http.StatusNotFound, "Job not found") + return + } + } + + fileTypeFilter := r.URL.Query().Get("file_type") + + var count int + query := `SELECT COUNT(*) FROM job_files WHERE job_id = ?` + args := []interface{}{jobID} + + if fileTypeFilter != "" { + query += " AND file_type = ?" 
+ args = append(args, fileTypeFilter) + } + + err = s.db.QueryRow(query, args...).Scan(&count) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to count files: %v", err)) + return + } + + s.respondJSON(w, http.StatusOK, map[string]interface{}{"count": count}) } // handleListContextArchive lists files inside the context archive +// Optimized to only read tar headers, skipping file data for fast directory listing func (s *Server) handleListContextArchive(w http.ResponseWriter, r *http.Request) { userID, err := getUserID(r) if err != nil { @@ -1776,29 +2178,20 @@ func (s *Server) handleListContextArchive(w http.ResponseWriter, r *http.Request } // Get context archive path - contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz") + contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar") if !s.storage.FileExists(contextPath) { s.respondError(w, http.StatusNotFound, "Context archive not found") return } - // Read the tar.gz and list its contents - file, err := s.storage.GetFile(contextPath) + // Open file directly for seeking (much faster than reading all data) + file, err := os.Open(contextPath) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to open context archive: %v", err)) return } defer file.Close() - gzReader, err := gzip.NewReader(file) - if err != nil { - s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to read context archive: %v", err)) - return - } - defer gzReader.Close() - - tarReader := tar.NewReader(gzReader) - type ArchiveFile struct { Name string `json:"name"` Size int64 `json:"size"` @@ -1806,15 +2199,65 @@ func (s *Server) handleListContextArchive(w http.ResponseWriter, r *http.Request } var archiveFiles []ArchiveFile + const tarBlockSize = 512 + + // Read tar headers sequentially, skipping file data by seeking + // This is much faster than reading all file contents for { - header, err := tarReader.Next() + // Read 512-byte tar header + headerBuf := make([]byte, tarBlockSize) + n, err := file.Read(headerBuf) if err == io.EOF { break } if err != nil { - s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to read archive: %v", err)) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to read archive header: %v", err)) return } + if n < tarBlockSize { + // Incomplete header, likely end of archive + break + } + + // Check if this is the end marker (all zeros) - tar files end with two zero blocks + allZeros := true + for _, b := range headerBuf { + if b != 0 { + allZeros = false + break + } + } + if allZeros { + break + } + + // Parse tar header + var header tar.Header + if err := parseTarHeader(headerBuf, &header); err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to parse archive header: %v", err)) + return + } + + // Handle GNU tar long filename extension (type 'L') + // If typeflag is 'L', the next block contains the actual filename + if header.Typeflag == 'L' { + // Read the long filename from the next block + longNameBuf := make([]byte, tarBlockSize) + if _, err := file.Read(longNameBuf); err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to read long filename: %v", err)) + return + } + header.Name = strings.TrimRight(string(longNameBuf), "\x00") + // Read the actual header after the long filename + if _, err := file.Read(headerBuf); err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to read header after 
long filename: %v", err)) + return + } + if err := parseTarHeader(headerBuf, &header); err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to parse header after long filename: %v", err)) + return + } + } // Only include regular files (not directories) if header.Typeflag == tar.TypeReg { @@ -1824,11 +2267,68 @@ func (s *Server) handleListContextArchive(w http.ResponseWriter, r *http.Request Path: header.Name, }) } + + // Skip file data by seeking forward + // Tar format: file data is padded to 512-byte boundary + dataSize := header.Size + blockPadding := (tarBlockSize - (dataSize % tarBlockSize)) % tarBlockSize + skipSize := dataSize + blockPadding + + // Seek forward to next header (much faster than reading) + _, err = file.Seek(skipSize, io.SeekCurrent) + if err != nil { + // If seek fails (e.g., on non-seekable stream), fall back to reading and discarding + _, readErr := io.CopyN(io.Discard, file, skipSize) + if readErr != nil && readErr != io.EOF { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to skip file data: %v", readErr)) + return + } + } } s.respondJSON(w, http.StatusOK, archiveFiles) } +// parseTarHeader parses a 512-byte tar header block into a tar.Header +// This is a simplified parser that extracts the essential fields we need +func parseTarHeader(buf []byte, h *tar.Header) error { + const tarHeaderSize = 512 + if len(buf) < tarHeaderSize { + return fmt.Errorf("buffer too small for tar header") + } + + // Tar header format (UStar/POSIX format) + // Field offsets based on POSIX.1-1988 tar format + h.Name = strings.TrimRight(string(buf[0:100]), "\x00") + + // Parse mode (octal) + modeStr := strings.TrimRight(string(buf[100:108]), " \x00") + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err == nil { + h.Mode = int64(mode) + } + + // Parse size (octal) + sizeStr := strings.TrimRight(string(buf[124:136]), " \x00") + size, err := strconv.ParseInt(sizeStr, 8, 64) + if err == nil { + h.Size = size + } + + // Parse typeflag + if len(buf) > 156 { + h.Typeflag = buf[156] + } + + // Handle UStar format prefix (for long filenames) + prefix := strings.TrimRight(string(buf[345:500]), "\x00") + if prefix != "" { + h.Name = prefix + "/" + h.Name + } + + return nil +} + // handleDownloadJobFile downloads a job file func (s *Server) handleDownloadJobFile(w http.ResponseWriter, r *http.Request) { userID, err := getUserID(r) @@ -2034,7 +2534,7 @@ func (s *Server) handleStreamVideo(w http.ResponseWriter, r *http.Request) { } } -// handleListJobTasks lists all tasks for a job +// handleListJobTasks lists all tasks for a job with pagination and filtering func (s *Server) handleListJobTasks(w http.ResponseWriter, r *http.Request) { userID, err := getUserID(r) if err != nil { @@ -2075,13 +2575,418 @@ func (s *Server) handleListJobTasks(w http.ResponseWriter, r *http.Request) { } } - rows, err := s.db.Query( - `SELECT id, job_id, runner_id, frame_start, frame_end, status, task_type, + // Parse query parameters + limit := 100 // default + if limitStr := r.URL.Query().Get("limit"); limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 5000 { + limit = l + } + } + + offset := 0 + if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" { + if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 { + offset = o + } + } + + statusFilter := r.URL.Query().Get("status") + frameStartFilter := r.URL.Query().Get("frame_start") + frameEndFilter := r.URL.Query().Get("frame_end") + sortBy := r.URL.Query().Get("sort") 
+ if sortBy == "" { + sortBy = "frame_start:asc" + } + + // Parse sort parameter + sortParts := strings.Split(sortBy, ":") + sortField := "frame_start" + sortDir := "ASC" + if len(sortParts) == 2 { + sortField = sortParts[0] + sortDir = strings.ToUpper(sortParts[1]) + if sortDir != "ASC" && sortDir != "DESC" { + sortDir = "ASC" + } + validFields := map[string]bool{ + "frame_start": true, "frame_end": true, "status": true, + "created_at": true, "started_at": true, "completed_at": true, + } + if !validFields[sortField] { + sortField = "frame_start" + } + } + + // Build query with filters + query := `SELECT id, job_id, runner_id, frame_start, frame_end, status, task_type, current_step, retry_count, max_retries, output_path, created_at, started_at, completed_at, error_message, timeout_seconds - FROM tasks WHERE job_id = ? ORDER BY frame_start ASC`, - jobID, - ) + FROM tasks WHERE job_id = ?` + args := []interface{}{jobID} + + if statusFilter != "" { + statuses := strings.Split(statusFilter, ",") + placeholders := make([]string, len(statuses)) + for i, status := range statuses { + placeholders[i] = "?" + args = append(args, strings.TrimSpace(status)) + } + query += fmt.Sprintf(" AND status IN (%s)", strings.Join(placeholders, ",")) + } + + if frameStartFilter != "" { + if fs, err := strconv.Atoi(frameStartFilter); err == nil { + query += " AND frame_start >= ?" + args = append(args, fs) + } + } + + if frameEndFilter != "" { + if fe, err := strconv.Atoi(frameEndFilter); err == nil { + query += " AND frame_end <= ?" + args = append(args, fe) + } + } + + query += fmt.Sprintf(" ORDER BY %s %s LIMIT ? OFFSET ?", sortField, sortDir) + args = append(args, limit, offset) + + rows, err := s.db.Query(query, args...) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query tasks: %v", err)) + return + } + defer rows.Close() + + // Get total count + var total int + countQuery := `SELECT COUNT(*) FROM tasks WHERE job_id = ?` + countArgs := []interface{}{jobID} + if statusFilter != "" { + statuses := strings.Split(statusFilter, ",") + placeholders := make([]string, len(statuses)) + for i, status := range statuses { + placeholders[i] = "?" + countArgs = append(countArgs, strings.TrimSpace(status)) + } + countQuery += fmt.Sprintf(" AND status IN (%s)", strings.Join(placeholders, ",")) + } + if frameStartFilter != "" { + if fs, err := strconv.Atoi(frameStartFilter); err == nil { + countQuery += " AND frame_start >= ?" + countArgs = append(countArgs, fs) + } + } + if frameEndFilter != "" { + if fe, err := strconv.Atoi(frameEndFilter); err == nil { + countQuery += " AND frame_end <= ?" 
+			countArgs = append(countArgs, fe)
+		}
+	}
+	err = s.db.QueryRow(countQuery, countArgs...).Scan(&total)
+	if err != nil {
+		total = -1
+	}
+
+	tasks := []types.Task{}
+	for rows.Next() {
+		var task types.Task
+		var runnerID sql.NullInt64
+		var startedAt, completedAt sql.NullTime
+		var timeoutSeconds sql.NullInt64
+		var errorMessage sql.NullString
+		var currentStep sql.NullString
+		var outputPath sql.NullString
+
+		err := rows.Scan(
+			&task.ID, &task.JobID, &runnerID, &task.FrameStart, &task.FrameEnd,
+			&task.Status, &task.TaskType, &currentStep, &task.RetryCount,
+			&task.MaxRetries, &outputPath, &task.CreatedAt, &startedAt,
+			&completedAt, &errorMessage, &timeoutSeconds,
+		)
+		if err != nil {
+			s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to scan task: %v", err))
+			return
+		}
+
+		if runnerID.Valid {
+			task.RunnerID = &runnerID.Int64
+		}
+		if startedAt.Valid {
+			task.StartedAt = &startedAt.Time
+		}
+		if completedAt.Valid {
+			task.CompletedAt = &completedAt.Time
+		}
+		if timeoutSeconds.Valid {
+			timeout := int(timeoutSeconds.Int64)
+			task.TimeoutSeconds = &timeout
+		}
+		if errorMessage.Valid {
+			task.ErrorMessage = errorMessage.String
+		}
+		if currentStep.Valid {
+			task.CurrentStep = currentStep.String
+		}
+		if outputPath.Valid {
+			task.OutputPath = outputPath.String
+		}
+
+		tasks = append(tasks, task)
+	}
+
+	response := map[string]interface{}{
+		"data":   tasks,
+		"total":  total,
+		"limit":  limit,
+		"offset": offset,
+	}
+
+	// Generate ETag and check If-None-Match
+	etag := generateETag(response)
+	w.Header().Set("ETag", etag)
+
+	if checkETag(r, etag) {
+		w.WriteHeader(http.StatusNotModified)
+		return
+	}
+
+	s.respondJSON(w, http.StatusOK, response)
+}
+
+// handleListJobTasksSummary lists lightweight task summaries for a job
+func (s *Server) handleListJobTasksSummary(w http.ResponseWriter, r *http.Request) {
+	userID, err := getUserID(r)
+	if err != nil {
+		s.respondError(w, http.StatusUnauthorized, err.Error())
+		return
+	}
+
+	jobID, err := parseID(r, "id")
+	if err != nil {
+		s.respondError(w, http.StatusBadRequest, err.Error())
+		return
+	}
+
+	// Verify job belongs to user (unless admin)
+	isAdmin := isAdminUser(r)
+	if !isAdmin {
+		var jobUserID int64
+		err = s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&jobUserID)
+		if err == sql.ErrNoRows {
+			s.respondError(w, http.StatusNotFound, "Job not found")
+			return
+		}
+		if err != nil {
+			s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify job: %v", err))
+			return
+		}
+		if jobUserID != userID {
+			s.respondError(w, http.StatusForbidden, "Access denied")
+			return
+		}
+	} else {
+		var exists bool
+		err = s.db.QueryRow("SELECT EXISTS(SELECT 1 FROM jobs WHERE id = ?)", jobID).Scan(&exists)
+		if err != nil || !exists {
+			s.respondError(w, http.StatusNotFound, "Job not found")
+			return
+		}
+	}
+
+	// Parse query parameters
+	limit := 100
+	if limitStr := r.URL.Query().Get("limit"); limitStr != "" {
+		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 5000 {
+			limit = l
+		}
+	}
+
+	offset := 0
+	if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" {
+		if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 {
+			offset = o
+		}
+	}
+
+	statusFilter := r.URL.Query().Get("status")
+	sortBy := r.URL.Query().Get("sort")
+	if sortBy == "" {
+		sortBy = "frame_start:asc"
+	}
+
+	sortParts := strings.Split(sortBy, ":")
+	sortField := "frame_start"
+	sortDir := "ASC"
+	if len(sortParts) == 2 {
+		sortField = sortParts[0]
+		sortDir = strings.ToUpper(sortParts[1])
+		if sortDir != "ASC" && sortDir != "DESC" {
+			sortDir = "ASC"
+		}
+		validFields := map[string]bool{
+			"frame_start": true, "frame_end": true, "status": true,
+		}
+		if !validFields[sortField] {
+			sortField = "frame_start"
+		}
+	}
+
+	// Build query - only select summary fields
+	query := `SELECT id, frame_start, frame_end, status, task_type, runner_id
+		FROM tasks WHERE job_id = ?`
+	args := []interface{}{jobID}
+
+	if statusFilter != "" {
+		statuses := strings.Split(statusFilter, ",")
+		placeholders := make([]string, len(statuses))
+		for i, status := range statuses {
+			placeholders[i] = "?"
+			args = append(args, strings.TrimSpace(status))
+		}
+		query += fmt.Sprintf(" AND status IN (%s)", strings.Join(placeholders, ","))
+	}
+
+	query += fmt.Sprintf(" ORDER BY %s %s LIMIT ? OFFSET ?", sortField, sortDir)
+	args = append(args, limit, offset)
+
+	rows, err := s.db.Query(query, args...)
+	if err != nil {
+		s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query tasks: %v", err))
+		return
+	}
+	defer rows.Close()
+
+	// Get total count
+	var total int
+	countQuery := `SELECT COUNT(*) FROM tasks WHERE job_id = ?`
+	countArgs := []interface{}{jobID}
+	if statusFilter != "" {
+		statuses := strings.Split(statusFilter, ",")
+		placeholders := make([]string, len(statuses))
+		for i, status := range statuses {
+			placeholders[i] = "?"
+			countArgs = append(countArgs, strings.TrimSpace(status))
+		}
+		countQuery += fmt.Sprintf(" AND status IN (%s)", strings.Join(placeholders, ","))
+	}
+	err = s.db.QueryRow(countQuery, countArgs...).Scan(&total)
+	if err != nil {
+		total = -1
+	}
+
+	type TaskSummary struct {
+		ID         int64  `json:"id"`
+		FrameStart int    `json:"frame_start"`
+		FrameEnd   int    `json:"frame_end"`
+		Status     string `json:"status"`
+		TaskType   string `json:"task_type"`
+		RunnerID   *int64 `json:"runner_id,omitempty"`
+	}
+
+	summaries := []TaskSummary{}
+	for rows.Next() {
+		var summary TaskSummary
+		var runnerID sql.NullInt64
+
+		err := rows.Scan(
+			&summary.ID, &summary.FrameStart, &summary.FrameEnd,
+			&summary.Status, &summary.TaskType, &runnerID,
+		)
+		if err != nil {
+			s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to scan task: %v", err))
+			return
+		}
+
+		if runnerID.Valid {
+			summary.RunnerID = &runnerID.Int64
+		}
+
+		summaries = append(summaries, summary)
+	}
+
+	response := map[string]interface{}{
+		"data":   summaries,
+		"total":  total,
+		"limit":  limit,
+		"offset": offset,
+	}
+	s.respondJSON(w, http.StatusOK, response)
+}
+
+// handleBatchGetTasks fetches multiple tasks by IDs for a job
+func (s *Server) handleBatchGetTasks(w http.ResponseWriter, r *http.Request) {
+	userID, err := getUserID(r)
+	if err != nil {
+		s.respondError(w, http.StatusUnauthorized, err.Error())
+		return
+	}
+
+	jobID, err := parseID(r, "id")
+	if err != nil {
+		s.respondError(w, http.StatusBadRequest, err.Error())
+		return
+	}
+
+	// Verify job belongs to user (unless admin)
+	isAdmin := isAdminUser(r)
+	if !isAdmin {
+		var jobUserID int64
+		err = s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&jobUserID)
+		if err == sql.ErrNoRows {
+			s.respondError(w, http.StatusNotFound, "Job not found")
+			return
+		}
+		if err != nil {
+			s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify job: %v", err))
+			return
+		}
+		if jobUserID != userID {
+			s.respondError(w, http.StatusForbidden, "Access denied")
+			return
+		}
+	} else {
+		var exists bool
+		err = s.db.QueryRow("SELECT EXISTS(SELECT 1 FROM jobs WHERE id = ?)", jobID).Scan(&exists)
+		if err != nil || !exists {
+			s.respondError(w, http.StatusNotFound, "Job not found")
+			return
+		}
+	}
+
+	var req struct {
+		TaskIDs []int64 `json:"task_ids"`
+	}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		s.respondError(w, http.StatusBadRequest, "Invalid request body")
+		return
+	}
+
+	if len(req.TaskIDs) == 0 {
+		s.respondJSON(w, http.StatusOK, []types.Task{})
+		return
+	}
+
+	if len(req.TaskIDs) > 500 {
+		s.respondError(w, http.StatusBadRequest, "Maximum 500 task IDs allowed per batch")
+		return
+	}
+
+	// Build query with IN clause
+	placeholders := make([]string, len(req.TaskIDs))
+	args := make([]interface{}, len(req.TaskIDs)+1)
+	args[0] = jobID
+	for i, taskID := range req.TaskIDs {
+		placeholders[i] = "?"
+		args[i+1] = taskID
+	}
+
+	query := fmt.Sprintf(`SELECT id, job_id, runner_id, frame_start, frame_end, status, task_type,
+		current_step, retry_count, max_retries, output_path, created_at, started_at,
+		completed_at, error_message, timeout_seconds
+		FROM tasks WHERE job_id = ? AND id IN (%s) ORDER BY frame_start ASC`, strings.Join(placeholders, ","))
+
+	rows, err := s.db.Query(query, args...)
 	if err != nil {
 		s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query tasks: %v", err))
 		return
@@ -2205,10 +3110,11 @@ func (s *Server) handleGetTaskLogs(w http.ResponseWriter, r *http.Request) {
 	// Get query parameters for filtering
 	stepName := r.URL.Query().Get("step_name")
 	logLevel := r.URL.Query().Get("log_level")
+	sinceIDStr := r.URL.Query().Get("since_id")
 	limitStr := r.URL.Query().Get("limit")
-	limit := 1000 // default
+	limit := 100 // default (reduced from 1000)
 	if limitStr != "" {
-		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 {
+		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 10000 {
 			limit = l
 		}
 	}
@@ -2217,6 +3123,15 @@
 	query := `SELECT id, task_id, runner_id, log_level, message, step_name, created_at
 		FROM task_logs WHERE task_id = ?`
 	args := []interface{}{taskID}
+
+	// Add since_id filter for incremental updates
+	if sinceIDStr != "" {
+		if sinceID, err := strconv.ParseInt(sinceIDStr, 10, 64); err == nil && sinceID > 0 {
+			query += " AND id > ?"
+			args = append(args, sinceID)
+		}
+	}
+
 	if stepName != "" {
 		query += " AND step_name = ?"
 		args = append(args, stepName)
@@ -2225,7 +3140,7 @@
 		query += " AND log_level = ?"
 		args = append(args, logLevel)
 	}
-	query += " ORDER BY created_at ASC LIMIT ?"
+	query += " ORDER BY id ASC LIMIT ?"
 	args = append(args, limit)
 
 	rows, err := s.db.Query(query, args...)
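[Editor's note, not part of the patch] The hunks above and below turn task-log reads into a cursor-based incremental fetch: results are ordered by id, an optional "AND id > ?" filter is driven by the since_id query parameter, and the handler now returns the highest delivered id as last_id. A client can therefore tail a task's logs without re-downloading rows it has already seen. The sketch below is illustrative only: the /api/jobs route prefix, the pollTaskLogs and taskLogsPage names, and the bare http.Get (no session cookie, so this would get a 401 against the real auth middleware) are assumptions; the query parameters and the logs/last_id/limit response fields are taken from this diff.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// taskLogsPage mirrors the {"logs", "last_id", "limit"} response shape
// introduced in handleGetTaskLogs.
type taskLogsPage struct {
	Logs []struct {
		ID      int64  `json:"id"`
		Message string `json:"message"`
	} `json:"logs"`
	LastID int64 `json:"last_id"`
	Limit  int   `json:"limit"`
}

// pollTaskLogs tails a task's logs, asking only for rows newer than the
// highest id seen so far (the "AND id > ?" filter added above).
// Authentication is omitted; a real client would attach the session cookie.
func pollTaskLogs(base string, jobID, taskID int64) error {
	var sinceID int64
	for {
		url := fmt.Sprintf("%s/%d/tasks/%d/logs?since_id=%d&limit=1000",
			base, jobID, taskID, sinceID)
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		var page taskLogsPage
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return err
		}
		for _, entry := range page.Logs {
			fmt.Println(entry.Message)
		}
		// last_id is 0 when no new rows were returned, so only move forward.
		if page.LastID > sinceID {
			sinceID = page.LastID
		}
		time.Sleep(2 * time.Second)
	}
}

func main() {
	// Hypothetical manager address and ids, for illustration only.
	if err := pollTaskLogs("http://localhost:8080/api/jobs", 1, 1); err != nil {
		fmt.Println("polling stopped:", err)
	}
}

Ordering by id rather than created_at is what makes the cursor safe: ids stay monotonic even when two rows share a timestamp, and the idx_task_logs_task_id_id index added in internal/database/schema.go serves exactly this query shape.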
@@ -2253,7 +3168,18 @@ func (s *Server) handleGetTaskLogs(w http.ResponseWriter, r *http.Request) {
 		logs = append(logs, log)
 	}
 
-	s.respondJSON(w, http.StatusOK, logs)
+	// Return last_id for next incremental fetch
+	lastID := int64(0)
+	if len(logs) > 0 {
+		lastID = logs[len(logs)-1].ID
+	}
+
+	response := map[string]interface{}{
+		"logs":    logs,
+		"last_id": lastID,
+		"limit":   limit,
+	}
+	s.respondJSON(w, http.StatusOK, response)
 }
 
 // handleGetTaskSteps retrieves step timeline for a specific task
@@ -2444,6 +3370,14 @@ func (s *Server) handleRetryTask(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	// Broadcast task update
+	s.broadcastTaskUpdate(jobID, taskID, "task_update", map[string]interface{}{
+		"status":        types.TaskStatusPending,
+		"runner_id":     nil,
+		"current_step":  nil,
+		"error_message": nil,
+	})
+
 	s.respondJSON(w, http.StatusOK, map[string]string{"message": "Task queued for retry"})
 }
 
@@ -2647,3 +3581,338 @@ func (s *Server) handleStreamTaskLogsWebSocket(w http.ResponseWriter, r *http.Re
 		}
 	}
 }
+
+// handleJobsWebSocket handles WebSocket connection for job list updates
+func (s *Server) handleJobsWebSocket(w http.ResponseWriter, r *http.Request) {
+	userID, err := getUserID(r)
+	if err != nil {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	// Upgrade to WebSocket
+	conn, err := s.wsUpgrader.Upgrade(w, r, nil)
+	if err != nil {
+		log.Printf("Failed to upgrade WebSocket: %v", err)
+		return
+	}
+	defer conn.Close()
+
+	// Register connection
+	s.jobListConnsMu.Lock()
+	// Close existing connection if any
+	if oldConn, exists := s.jobListConns[userID]; exists && oldConn != nil {
+		oldConn.Close()
+	}
+	s.jobListConns[userID] = conn
+	s.jobListConnsMu.Unlock()
+
+	defer func() {
+		s.jobListConnsMu.Lock()
+		delete(s.jobListConns, userID)
+		s.jobListConnsMu.Unlock()
+	}()
+
+	// Send initial connection message
+	err = conn.WriteJSON(map[string]interface{}{
+		"type":      "connected",
+		"timestamp": time.Now().Unix(),
+	})
+	if err != nil {
+		log.Printf("Failed to send initial connection message: %v", err)
+		return
+	}
+
+	// Keep connection alive and handle ping/pong
+	conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+	conn.SetPongHandler(func(string) error {
+		conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+		return nil
+	})
+
+	// Start ping ticker
+	ticker := time.NewTicker(30 * time.Second)
+	defer ticker.Stop()
+
+	// Read messages in background to keep connection alive and handle pongs
+	readDone := make(chan struct{})
+	go func() {
+		defer close(readDone)
+		for {
+			conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+			_, _, err := conn.ReadMessage()
+			if err != nil {
+				// Connection closed or error - exit read loop
+				if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
+					log.Printf("WebSocket read error for job list: %v", err)
+				}
+				return
+			}
+			// Reset read deadline after successful read (pong received)
+			conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+		}
+	}()
+
+	ctx := r.Context()
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-readDone:
+			// Read loop exited, close connection
+			return
+		case <-ticker.C:
+			conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
+			if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
+				return
+			}
+		}
+	}
+}
+
+// handleJobWebSocket handles WebSocket connection for single job updates
+func (s *Server) handleJobWebSocket(w http.ResponseWriter, r *http.Request) {
+	userID, err := getUserID(r)
+	if err != nil {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	jobID, err := parseID(r, "id")
+	if err != nil {
+		s.respondError(w, http.StatusBadRequest, err.Error())
+		return
+	}
+
+	// Verify job belongs to user (unless admin)
+	isAdmin := isAdminUser(r)
+	if !isAdmin {
+		var jobUserID int64
+		err = s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&jobUserID)
+		if err == sql.ErrNoRows {
+			s.respondError(w, http.StatusNotFound, "Job not found")
+			return
+		}
+		if err != nil {
+			s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify job: %v", err))
+			return
+		}
+		if jobUserID != userID {
+			s.respondError(w, http.StatusForbidden, "Access denied")
+			return
+		}
+	} else {
+		var exists bool
+		err = s.db.QueryRow("SELECT EXISTS(SELECT 1 FROM jobs WHERE id = ?)", jobID).Scan(&exists)
+		if err != nil || !exists {
+			s.respondError(w, http.StatusNotFound, "Job not found")
+			return
+		}
+	}
+
+	// Upgrade to WebSocket
+	conn, err := s.wsUpgrader.Upgrade(w, r, nil)
+	if err != nil {
+		log.Printf("Failed to upgrade WebSocket: %v", err)
+		return
+	}
+	defer conn.Close()
+
+	key := fmt.Sprintf("%d:%d", userID, jobID)
+	s.jobConnsMu.Lock()
+	// Close existing connection if any
+	if oldConn, exists := s.jobConns[key]; exists && oldConn != nil {
+		oldConn.Close()
+	}
+	s.jobConns[key] = conn
+	s.jobConnsMu.Unlock()
+
+	// Create a write mutex for this connection
+	s.jobConnsWriteMuMu.Lock()
+	s.jobConnsWriteMu[key] = &sync.Mutex{}
+	writeMu := s.jobConnsWriteMu[key]
+	s.jobConnsWriteMuMu.Unlock()
+
+	defer func() {
+		s.jobConnsMu.Lock()
+		delete(s.jobConns, key)
+		s.jobConnsMu.Unlock()
+		s.jobConnsWriteMuMu.Lock()
+		delete(s.jobConnsWriteMu, key)
+		s.jobConnsWriteMuMu.Unlock()
+	}()
+
+	// Send initial connection message
+	writeMu.Lock()
+	err = conn.WriteJSON(map[string]interface{}{
+		"type":      "connected",
+		"timestamp": time.Now().Unix(),
+	})
+	writeMu.Unlock()
+	if err != nil {
+		log.Printf("Failed to send initial connection message: %v", err)
+		return
+	}
+
+	// Keep connection alive and handle ping/pong
+	conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+	conn.SetPongHandler(func(string) error {
+		conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+		return nil
+	})
+
+	// Start ping ticker
+	ticker := time.NewTicker(30 * time.Second)
+	defer ticker.Stop()
+
+	// Read messages in background to keep connection alive and handle pongs
+	readDone := make(chan struct{})
+	go func() {
+		defer close(readDone)
+		for {
+			conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+			_, _, err := conn.ReadMessage()
+			if err != nil {
+				// Connection closed or error - exit read loop
+				if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
+					log.Printf("WebSocket read error for job %d: %v", jobID, err)
+				}
+				return
+			}
+			// Reset read deadline after successful read (pong received)
+			conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+		}
+	}()
+
+	ctx := r.Context()
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-readDone:
+			// Read loop exited, close connection
+			return
+		case <-ticker.C:
+			conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
+			if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
+				return
+			}
+		}
+	}
+}
+
+// broadcastJobUpdate broadcasts job update to connected clients
+func (s *Server) broadcastJobUpdate(jobID int64, updateType string, data interface{}) {
+	// Get user_id from job
+	var userID int64
+	err := s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&userID)
+	if err != nil {
+		return
+	}
+
+	msg := map[string]interface{}{
+		"type":      updateType,
+		"job_id":    jobID,
+		"data":      data,
+		"timestamp": time.Now().Unix(),
+	}
+
+	// Broadcast to job list connection
+	s.jobListConnsMu.RLock()
+	if conn, exists := s.jobListConns[userID]; exists && conn != nil {
+		s.jobListConnsMu.RUnlock()
+		conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
+		conn.WriteJSON(msg)
+	} else {
+		s.jobListConnsMu.RUnlock()
+	}
+
+	// Broadcast to single job connection
+	key := fmt.Sprintf("%d:%d", userID, jobID)
+	s.jobConnsMu.RLock()
+	conn, exists := s.jobConns[key]
+	s.jobConnsMu.RUnlock()
+
+	if exists && conn != nil {
+		s.jobConnsWriteMuMu.RLock()
+		writeMu, hasMu := s.jobConnsWriteMu[key]
+		s.jobConnsWriteMuMu.RUnlock()
+
+		if hasMu && writeMu != nil {
+			writeMu.Lock()
+			conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
+			err := conn.WriteJSON(msg)
+			writeMu.Unlock()
+			if err != nil {
+				log.Printf("Failed to broadcast %s to job %d WebSocket: %v", updateType, jobID, err)
+			} else {
+				log.Printf("Successfully broadcast %s to job %d WebSocket", updateType, jobID)
+			}
+		} else {
+			conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
+			err := conn.WriteJSON(msg)
+			if err != nil {
+				log.Printf("Failed to broadcast %s to job %d WebSocket: %v", updateType, jobID, err)
+			}
+		}
+	}
+}
+
+// broadcastTaskUpdate broadcasts task update to connected clients
+func (s *Server) broadcastTaskUpdate(jobID int64, taskID int64, updateType string, data interface{}) {
+	// Get user_id from job
+	var userID int64
+	err := s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&userID)
+	if err != nil {
+		return
+	}
+
+	msg := map[string]interface{}{
+		"type":      updateType,
+		"job_id":    jobID,
+		"data":      data,
+		"timestamp": time.Now().Unix(),
+	}
+	// Include task_id only when it refers to a real task (> 0).
+	// Bulk operations like "tasks_added" pass 0, so task_id is omitted for them.
+	if taskID > 0 {
+		msg["task_id"] = taskID
+		// Also include task_id in data for convenience
+		if dataMap, ok := data.(map[string]interface{}); ok {
+			dataMap["task_id"] = taskID
+		}
+	}
+
+	// Broadcast to single job connection
+	key := fmt.Sprintf("%d:%d", userID, jobID)
+	s.jobConnsMu.RLock()
+	conn, exists := s.jobConns[key]
+	s.jobConnsMu.RUnlock()
+
+	if exists && conn != nil {
+		s.jobConnsWriteMuMu.RLock()
+		writeMu, hasMu := s.jobConnsWriteMu[key]
+		s.jobConnsWriteMuMu.RUnlock()
+
+		if hasMu && writeMu != nil {
+			writeMu.Lock()
+			conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
+			conn.WriteJSON(msg)
+			writeMu.Unlock()
+		} else {
+			conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
+			conn.WriteJSON(msg)
+		}
+	}
+}
+
+// truncateString truncates a string to a maximum length, appending "..." if truncated
+func truncateString(s string, maxLen int) string {
+	if len(s) <= maxLen {
+		return s
+	}
+	if maxLen <= 3 {
+		return "..."
+	}
+	return s[:maxLen-3] + "..."
+}
diff --git a/internal/api/metadata.go b/internal/api/metadata.go
index ab3d0ba..b126723 100644
--- a/internal/api/metadata.go
+++ b/internal/api/metadata.go
@@ -4,8 +4,8 @@ import (
 	"archive/tar"
 	"bufio"
 	"bytes"
-	"compress/gzip"
 	"database/sql"
+	_ "embed"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -17,6 +17,7 @@ import (
 	"path/filepath"
 	"strings"
 
+	"jiggablend/pkg/scripts"
 	"jiggablend/pkg/types"
 )
 
@@ -169,22 +170,26 @@ func (s *Server) handleGetJobMetadata(w http.ResponseWriter, r *http.Request) {
 // extractMetadataFromContext extracts metadata from the blend file in a context archive
 // Returns the extracted metadata or an error
 func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata, error) {
-	contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz")
-
+	contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar")
+
 	// Check if context exists
 	if _, err := os.Stat(contextPath); err != nil {
 		return nil, fmt.Errorf("context archive not found: %w", err)
 	}
 
-	// Create temporary directory for extraction
-	tmpDir, err := os.MkdirTemp("", fmt.Sprintf("fuego-metadata-%d-*", jobID))
+	// Create temporary directory for extraction under storage base path
+	tmpDir, err := s.storage.TempDir(fmt.Sprintf("jiggablend-metadata-%d-*", jobID))
 	if err != nil {
 		return nil, fmt.Errorf("failed to create temporary directory: %w", err)
 	}
-	defer os.RemoveAll(tmpDir)
+	defer func() {
+		if err := os.RemoveAll(tmpDir); err != nil {
+			log.Printf("Warning: Failed to clean up temp directory %s: %v", tmpDir, err)
+		}
+	}()
 
 	// Extract context archive
-	if err := s.extractTarGz(contextPath, tmpDir); err != nil {
+	if err := s.extractTar(contextPath, tmpDir); err != nil {
 		return nil, fmt.Errorf("failed to extract context: %w", err)
 	}
 
@@ -228,188 +233,20 @@ func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata,
 		return nil, fmt.Errorf("no .blend file found in context")
 	}
 
-	// Create Python script to extract metadata
+	// Use embedded Python script
 	scriptPath := filepath.Join(tmpDir, "extract_metadata.py")
-	scriptContent := `import bpy
-import json
-import sys
-
-# Make all file paths relative to the blend file location FIRST
-# This must be done immediately after file load, before any other operations
-# to prevent Blender from trying to access external files with absolute paths
-try:
-    bpy.ops.file.make_paths_relative()
-    print("Made all file paths relative to blend file")
-except Exception as e:
-    print(f"Warning: Could not make paths relative: {e}")
-
-# Check for missing addons that the blend file requires
-# Blender marks missing addons with "_missing" suffix in preferences
-missing_files_info = {
-    "checked": False,
-    "has_missing": False,
-    "missing_files": [],
-    "missing_addons": []
-}
-
-try:
-    missing = []
-    for mod in bpy.context.preferences.addons:
-        if mod.module.endswith("_missing"):
-            missing.append(mod.module.rsplit("_", 1)[0])
-
-    missing_files_info["checked"] = True
-    if missing:
-        missing_files_info["has_missing"] = True
-        missing_files_info["missing_addons"] = missing
-        print("Missing add-ons required by this .blend:")
-        for name in missing:
-            print(" -", name)
-    else:
-        print("No missing add-ons detected – file is headless-safe")
-except Exception as e:
-    print(f"Warning: Could not check for missing addons: {e}")
-    missing_files_info["error"] = str(e)
-
-# Get scene
-scene = bpy.context.scene
-
-# Extract frame range from scene settings
-frame_start = scene.frame_start
-frame_end = scene.frame_end
-
-# Also check for actual animation range (keyframes)
-# Find the earliest and latest keyframes across all objects
-animation_start = None
-animation_end = None
-
-for obj in scene.objects:
-    if obj.animation_data and obj.animation_data.action:
-        action = obj.animation_data.action
-        if action.fcurves:
-            for fcurve in action.fcurves:
-                if fcurve.keyframe_points:
-                    for keyframe in fcurve.keyframe_points:
-                        frame = int(keyframe.co[0])
-                        if animation_start is None or frame < animation_start:
-                            animation_start = frame
-                        if animation_end is None or frame > animation_end:
-                            animation_end = frame
-
-# Use animation range if available, otherwise use scene frame range
-# If scene range seems wrong (start == end), prefer animation range
-if animation_start is not None and animation_end is not None:
-    if frame_start == frame_end or (animation_start < frame_start or animation_end > frame_end):
-        # Use animation range if scene range is invalid or animation extends beyond it
-        frame_start = animation_start
-        frame_end = animation_end
-
-# Extract render settings
-render = scene.render
-resolution_x = render.resolution_x
-resolution_y = render.resolution_y
-engine = scene.render.engine.upper()
-
-# Determine output format from file format
-output_format = render.image_settings.file_format
-
-# Extract engine-specific settings
-engine_settings = {}
-
-if engine == 'CYCLES':
-    cycles = scene.cycles
-    engine_settings = {
-        "samples": getattr(cycles, 'samples', 128),
-        "use_denoising": getattr(cycles, 'use_denoising', False),
-        "denoising_radius": getattr(cycles, 'denoising_radius', 0),
-        "denoising_strength": getattr(cycles, 'denoising_strength', 0.0),
-        "device": getattr(cycles, 'device', 'CPU'),
-        "use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False),
-        "adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01,
-        "use_fast_gi": getattr(cycles, 'use_fast_gi', False),
-        "light_tree": getattr(cycles, 'use_light_tree', False),
-        "use_light_linking": getattr(cycles, 'use_light_linking', False),
-        "caustics_reflective": getattr(cycles, 'caustics_reflective', False),
-        "caustics_refractive": getattr(cycles, 'caustics_refractive', False),
-        "blur_glossy": getattr(cycles, 'blur_glossy', 0.0),
-        "max_bounces": getattr(cycles, 'max_bounces', 12),
-        "diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4),
-        "glossy_bounces": getattr(cycles, 'glossy_bounces', 4),
-        "transmission_bounces": getattr(cycles, 'transmission_bounces', 12),
-        "volume_bounces": getattr(cycles, 'volume_bounces', 0),
-        "transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8),
-        "film_transparent": getattr(cycles, 'film_transparent', False),
-        "use_layer_samples": getattr(cycles, 'use_layer_samples', False),
-    }
-elif engine == 'EEVEE' or engine == 'EEVEE_NEXT':
-    eevee = scene.eevee
-    engine_settings = {
-        "taa_render_samples": getattr(eevee, 'taa_render_samples', 64),
-        "use_bloom": getattr(eevee, 'use_bloom', False),
-        "bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8),
-        "bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
-        "bloom_radius": getattr(eevee, 'bloom_radius', 6.5),
-        "use_ssr": getattr(eevee, 'use_ssr', True),
-        "use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
-        "ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'),
-        "use_ssao": getattr(eevee, 'use_ssao', True),
-        "ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'),
-        "ssao_distance": getattr(eevee, 'ssao_distance', 0.2),
-        "ssao_factor": getattr(eevee, 'ssao_factor', 1.0),
-        "use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
-        "use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True),
-        "use_volumetric": getattr(eevee, 'use_volumetric', False),
-        "volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'),
-        "volumetric_samples": getattr(eevee, 'volumetric_samples', 64),
-        "volumetric_start": getattr(eevee, 'volumetric_start', 0.0),
-        "volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
-        "use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
-        "use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True),
-        "use_gtao": getattr(eevee, 'use_gtao', False),
-        "gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'),
-        "use_overscan": getattr(eevee, 'use_overscan', False),
-    }
-else:
-    # For other engines, extract basic samples if available
-    engine_settings = {
-        "samples": getattr(scene, 'samples', 128) if hasattr(scene, 'samples') else 128
-    }
-
-# Extract scene info
-camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA'])
-object_count = len(scene.objects)
-material_count = len(bpy.data.materials)
-
-# Build metadata dictionary
-metadata = {
-    "frame_start": frame_start,
-    "frame_end": frame_end,
-    "render_settings": {
-        "resolution_x": resolution_x,
-        "resolution_y": resolution_y,
-        "output_format": output_format,
-        "engine": engine.lower(),
-        "engine_settings": engine_settings
-    },
-    "scene_info": {
-        "camera_count": camera_count,
-        "object_count": object_count,
-        "material_count": material_count
-    },
-    "missing_files_info": missing_files_info
-}
-
-# Output as JSON
-print(json.dumps(metadata))
-sys.stdout.flush()
-`
-
-	if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil {
+	if err := os.WriteFile(scriptPath, []byte(scripts.ExtractMetadata), 0644); err != nil {
 		return nil, fmt.Errorf("failed to create extraction script: %w", err)
 	}
 
+	// Make blend file path relative to tmpDir to avoid path resolution issues
+	blendFileRel, err := filepath.Rel(tmpDir, blendFile)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get relative path for blend file: %w", err)
+	}
+
 	// Execute Blender with Python script
-	cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath)
+	cmd := exec.Command("blender", "-b", blendFileRel, "--python", "extract_metadata.py")
 	cmd.Dir = tmpDir
 
 	// Capture stdout and stderr
@@ -443,14 +280,16 @@
 		}
 	}()
 
-	// Stream stderr (discard for now, but could log if needed)
+	// Capture stderr for error reporting
+	var stderrBuffer bytes.Buffer
 	stderrDone := make(chan bool)
 	go func() {
 		defer close(stderrDone)
 		scanner := bufio.NewScanner(stderrPipe)
 		for scanner.Scan() {
-			// Could log stderr if needed
-			_ = scanner.Text()
+			line := scanner.Text()
+			stderrBuffer.WriteString(line)
+			stderrBuffer.WriteString("\n")
 		}
 	}()
 
@@ -462,6 +301,18 @@ sys.stdout.flush()
 	<-stderrDone
 
 	if err != nil {
+		stderrOutput := strings.TrimSpace(stderrBuffer.String())
+		stdoutOutput := strings.TrimSpace(stdoutBuffer.String())
+		log.Printf("Blender metadata extraction failed for job %d:", jobID)
+		if stderrOutput != "" {
+			log.Printf("Blender stderr: %s", stderrOutput)
+		}
+		if stdoutOutput != "" {
+			log.Printf("Blender stdout (last 500 chars): %s", truncateString(stdoutOutput, 500))
+		}
+		if stderrOutput != "" {
+			return nil, fmt.Errorf("blender metadata extraction failed: %w (stderr: %s)", err, truncateString(stderrOutput, 200))
+		}
 		return nil, fmt.Errorf("blender metadata extraction failed: %w", err)
 	}
 
@@ -484,21 +335,25 @@ sys.stdout.flush()
 	return &metadata, nil
 }
 
-// extractTarGz extracts a tar.gz archive to a destination directory
-func (s *Server) extractTarGz(tarGzPath, destDir string) error {
-	file, err := os.Open(tarGzPath)
+// extractTar extracts a tar archive to a destination directory
+func (s *Server) extractTar(tarPath, destDir string) error {
+	log.Printf("Extracting tar archive: %s -> %s", tarPath, destDir)
+
+	// Ensure destination directory exists
+	if err := os.MkdirAll(destDir, 0755); err != nil {
+		return fmt.Errorf("failed to create destination directory: %w", err)
+	}
+
+	file, err := os.Open(tarPath)
 	if err != nil {
 		return fmt.Errorf("failed to open archive: %w", err)
 	}
 	defer file.Close()
 
-	gzr, err := gzip.NewReader(file)
-	if err != nil {
-		return fmt.Errorf("failed to create gzip reader: %w", err)
-	}
-	defer gzr.Close()
+	tr := tar.NewReader(file)
 
-	tr := tar.NewReader(gzr)
+	fileCount := 0
+	dirCount := 0
 
 	for {
 		header, err := tr.Next()
@@ -511,9 +366,13 @@ func (s *Server) extractTarGz(tarGzPath, destDir string) error {
 
 		// Sanitize path to prevent directory traversal
 		target := filepath.Join(destDir, header.Name)
+
 		// Ensure target is within destDir
-		if !strings.HasPrefix(filepath.Clean(target), filepath.Clean(destDir)+string(os.PathSeparator)) {
-			return fmt.Errorf("invalid file path in archive: %s", header.Name)
+		cleanTarget := filepath.Clean(target)
+		cleanDestDir := filepath.Clean(destDir)
+		if !strings.HasPrefix(cleanTarget, cleanDestDir+string(os.PathSeparator)) && cleanTarget != cleanDestDir {
+			log.Printf("ERROR: Invalid file path in TAR - target: %s, destDir: %s", cleanTarget, cleanDestDir)
+			return fmt.Errorf("invalid file path in archive: %s (target: %s, destDir: %s)", header.Name, cleanTarget, cleanDestDir)
 		}
 
 		// Create parent directories
@@ -527,14 +386,18 @@ func (s *Server) extractTarGz(tarGzPath, destDir string) error {
 			if err != nil {
 				return fmt.Errorf("failed to create file: %w", err)
 			}
-			if _, err := io.Copy(outFile, tr); err != nil {
+			_, err = io.Copy(outFile, tr)
+			if err != nil {
 				outFile.Close()
 				return fmt.Errorf("failed to write file: %w", err)
 			}
 			outFile.Close()
+			fileCount++
+		} else if header.Typeflag == tar.TypeDir {
+			dirCount++
 		}
 	}
 
+	log.Printf("Extraction complete: %d files, %d directories extracted to %s", fileCount, dirCount, destDir)
 	return nil
 }
-
diff --git a/internal/api/runners.go b/internal/api/runners.go
index e5015c1..b369718 100644
--- a/internal/api/runners.go
+++ b/internal/api/runners.go
@@ -9,6 +9,7 @@ import (
 	"log"
 	"math/rand"
 	"net/http"
+	"net/url"
 	"path/filepath"
 	"sort"
 	"strconv"
@@ -17,6 +18,7 @@ import (
 
 	"jiggablend/pkg/types"
 
+	"github.com/go-chi/chi/v5"
 	"github.com/gorilla/websocket"
 )
 
@@ -287,13 +289,27 @@ func (s *Server) handleUpdateTaskStep(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 
+	// Get job ID for broadcasting
+	var jobID int64
+	err = s.db.QueryRow("SELECT job_id FROM tasks WHERE id = ?", taskID).Scan(&jobID)
+	if err == nil {
+		// Broadcast step update to frontend
+		s.broadcastTaskUpdate(jobID, taskID, "step_update", map[string]interface{}{
+			"step_id":       stepID,
+			"step_name":     req.StepName,
+			"status":        req.Status,
+			"duration_ms":   req.DurationMs,
+			"error_message": req.ErrorMessage,
+		})
+	}
+
 	s.respondJSON(w, http.StatusOK, map[string]interface{}{
 		"step_id": stepID,
 		"message": "Step updated successfully",
 	})
 }
 
-// handleDownloadJobContext allows runners to download the job context tar.gz
+// handleDownloadJobContext allows runners to download the job context tar
 func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request) {
 	jobID, err := parseID(r, "jobId")
 	if err != nil {
@@ -302,7 +318,7 @@ func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request
 	}
 
 	// Construct the context file path
-	contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz")
+	contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar")
 
 	// Check if context file exists
 	if !s.storage.FileExists(contextPath) {
@@ -319,9 +335,9 @@ func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request
 	}
 	defer file.Close()
 
-	// Set appropriate headers for tar.gz file
-	w.Header().Set("Content-Type", "application/gzip")
-	w.Header().Set("Content-Disposition", "attachment; filename=context.tar.gz")
+	// Set appropriate headers for tar file
+	w.Header().Set("Content-Type", "application/x-tar")
+	w.Header().Set("Content-Disposition", "attachment; filename=context.tar")
 
 	// Stream the file to the response
 	io.Copy(w, file)
@@ -356,16 +372,26 @@ func (s *Server) handleUploadFileFromRunner(w http.ResponseWriter, r *http.Reque
 	}
 
 	// Record in database
-	_, err = s.db.Exec(
+	var fileID int64
+	err = s.db.QueryRow(
 		`INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size)
-		VALUES (?, ?, ?, ?, ?)`,
+		VALUES (?, ?, ?, ?, ?)
+		RETURNING id`,
 		jobID, types.JobFileTypeOutput, filePath, header.Filename, header.Size,
-	)
+	).Scan(&fileID)
 	if err != nil {
 		s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record file: %v", err))
 		return
 	}
 
+	// Broadcast file addition
+	s.broadcastJobUpdate(jobID, "file_added", map[string]interface{}{
+		"file_id":   fileID,
+		"file_type": types.JobFileTypeOutput,
+		"file_name": header.Filename,
+		"file_size": header.Size,
+	})
+
 	s.respondJSON(w, http.StatusCreated, map[string]interface{}{
 		"file_path": filePath,
 		"file_name": header.Filename,
@@ -510,6 +536,79 @@ func (s *Server) handleGetJobMetadataForRunner(w http.ResponseWriter, r *http.Re
 	s.respondJSON(w, http.StatusOK, metadata)
 }
 
+// handleDownloadFileForRunner allows runners to download a file by fileName
+func (s *Server) handleDownloadFileForRunner(w http.ResponseWriter, r *http.Request) {
+	jobID, err := parseID(r, "jobId")
+	if err != nil {
+		s.respondError(w, http.StatusBadRequest, err.Error())
+		return
+	}
+
+	// Get fileName from URL path (may need URL decoding)
+	fileName := chi.URLParam(r, "fileName")
+	if fileName == "" {
+		s.respondError(w, http.StatusBadRequest, "fileName is required")
+		return
+	}
+
+	// URL decode the fileName in case it contains encoded characters
+	decodedFileName, err := url.QueryUnescape(fileName)
+	if err != nil {
+		// If decoding fails, use original fileName
+		decodedFileName = fileName
+	}
+
+	// Get file info from database
+	var filePath string
+	err = s.db.QueryRow(
+		`SELECT file_path FROM job_files WHERE job_id = ? AND file_name = ?`,
+		jobID, decodedFileName,
+	).Scan(&filePath)
+	if err == sql.ErrNoRows {
+		s.respondError(w, http.StatusNotFound, "File not found")
+		return
+	}
+	if err != nil {
+		s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query file: %v", err))
+		return
+	}
+
+	// Open file
+	file, err := s.storage.GetFile(filePath)
+	if err != nil {
+		s.respondError(w, http.StatusNotFound, "File not found on disk")
+		return
+	}
+	defer file.Close()
+
+	// Determine content type based on file extension
+	contentType := "application/octet-stream"
+	fileNameLower := strings.ToLower(decodedFileName)
+	switch {
+	case strings.HasSuffix(fileNameLower, ".png"):
+		contentType = "image/png"
+	case strings.HasSuffix(fileNameLower, ".jpg") || strings.HasSuffix(fileNameLower, ".jpeg"):
+		contentType = "image/jpeg"
+	case strings.HasSuffix(fileNameLower, ".gif"):
+		contentType = "image/gif"
+	case strings.HasSuffix(fileNameLower, ".webp"):
+		contentType = "image/webp"
+	case strings.HasSuffix(fileNameLower, ".exr") || strings.HasSuffix(fileNameLower, ".EXR"):
+		contentType = "image/x-exr"
+	case strings.HasSuffix(fileNameLower, ".mp4"):
+		contentType = "video/mp4"
+	case strings.HasSuffix(fileNameLower, ".webm"):
+		contentType = "video/webm"
+	}
+
+	// Set headers
+	w.Header().Set("Content-Type", contentType)
+	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", decodedFileName))
+
+	// Stream file
+	io.Copy(w, file)
+}
+
 // WebSocket message types
 type WSMessage struct {
 	Type string `json:"type"`
@@ -785,6 +884,13 @@ func (s *Server) handleWebSocketTaskComplete(runnerID int64, taskUpdate WSTaskUp
 		taskUpdate.TaskID,
 	).Scan(&jobID)
 	if err == nil {
+		// Broadcast task update
+		s.broadcastTaskUpdate(jobID, taskUpdate.TaskID, "task_update", map[string]interface{}{
+			"status":       status,
+			"output_path":  taskUpdate.OutputPath,
+			"completed_at": now,
+			"error":        taskUpdate.Error,
+		})
 		s.updateJobStatusFromTasks(jobID)
 	}
 }
@@ -840,6 +946,7 @@ func (s *Server) getCurrentFrameFromLogs(jobID int64) (int, bool) {
 	for rows.Next() {
 		var taskID int64
 		if err := rows.Scan(&taskID); err != nil {
+			log.Printf("Failed to scan task ID in getCurrentFrameFromLogs: %v", err)
 			continue
 		}
 
@@ -895,6 +1002,14 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
 		allowParallelRunners.Valid && !allowParallelRunners.Bool &&
 		frameStart.Valid && frameEnd.Valid
 
+	// Get current job status to detect changes
+	var currentStatus string
+	err = s.db.QueryRow(`SELECT status FROM jobs WHERE id = ?`, jobID).Scan(&currentStatus)
+	if err != nil {
+		log.Printf("Failed to get current job status for job %d: %v", jobID, err)
+		return
+	}
+
 	// Count total tasks and completed tasks
 	var totalTasks, completedTasks int
 	err = s.db.QueryRow(
@@ -914,8 +1029,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
 		return
 	}
 
-	log.Printf("updateJobStatusFromTasks: job %d - total: %d, completed: %d", jobID, totalTasks, completedTasks)
-
 	// Calculate progress
 	var progress float64
 	if totalTasks == 0 {
@@ -985,9 +1098,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
 		} else {
 			progress = renderProgress
 		}
-
-		log.Printf("updateJobStatusFromTasks: job %d - frame-based progress: current_frame=%d, render_progress=%.1f%%, non_render_progress=%.1f%%, total_progress=%.1f%%",
-			jobID, currentFrame, renderProgress, nonRenderProgress, progress)
 	} else {
 		// Standard task-based progress
 		progress = float64(completedTasks) / float64(totalTasks) * 100.0
@@ -1013,8 +1123,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
 		return
 	}
 
-	log.Printf("updateJobStatusFromTasks: job %d - pending/running: %d", jobID, pendingOrRunningTasks)
-
 	if pendingOrRunningTasks == 0 && totalTasks > 0 {
 		// All tasks are either completed or failed/cancelled
 		// Check if any tasks failed
@@ -1039,7 +1147,16 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
 		if err != nil {
 			log.Printf("Failed to update job %d status to %s: %v", jobID, jobStatus, err)
 		} else {
-			log.Printf("Updated job %d status to %s (progress: %.1f%%, completed tasks: %d/%d)", jobID, jobStatus, progress, completedTasks, totalTasks)
+			// Only log if status actually changed
+			if currentStatus != jobStatus {
+				log.Printf("Updated job %d status from %s to %s (progress: %.1f%%, completed tasks: %d/%d)", jobID, currentStatus, jobStatus, progress, completedTasks, totalTasks)
+			}
+			// Broadcast job update via WebSocket
+			s.broadcastJobUpdate(jobID, "job_update", map[string]interface{}{
+				"status":       jobStatus,
+				"progress":     progress,
+				"completed_at": now,
+			})
 		}
 
 		if outputFormatStr == "EXR_264_MP4" || outputFormatStr == "EXR_AV1_MP4" {
@@ -1054,14 +1171,22 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
 			// Create a video generation task instead of calling generateMP4Video directly
 			// This prevents race conditions when multiple runners complete frames simultaneously
 			videoTaskTimeout := 86400 // 24 hours for video generation
-			_, err := s.db.Exec(
+			var videoTaskID int64
+			err := s.db.QueryRow(
 				`INSERT INTO tasks (job_id, frame_start, frame_end, task_type, status, timeout_seconds, max_retries)
-				VALUES (?, ?, ?, ?, ?, ?, ?)`,
+				VALUES (?, ?, ?, ?, ?, ?, ?)
+				RETURNING id`,
 				jobID, 0, 0, types.TaskTypeVideoGeneration, types.TaskStatusPending, videoTaskTimeout, 1,
-			)
+			).Scan(&videoTaskID)
 			if err != nil {
 				log.Printf("Failed to create video generation task for job %d: %v", jobID, err)
 			} else {
+				// Broadcast that a new task was added
+				log.Printf("Broadcasting task_added for job %d: video generation task %d", jobID, videoTaskID)
+				s.broadcastTaskUpdate(jobID, videoTaskID, "task_added", map[string]interface{}{
+					"task_id":   videoTaskID,
+					"task_type": types.TaskTypeVideoGeneration,
+				})
 				// Update job status to ensure it's marked as running (has pending video task)
 				s.updateJobStatusFromTasks(jobID)
 				// Try to distribute the task immediately
@@ -1099,7 +1224,10 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
 		if err != nil {
 			log.Printf("Failed to update job %d status to %s: %v", jobID, jobStatus, err)
 		} else {
-			log.Printf("Updated job %d status to %s (progress: %.1f%%, completed: %d/%d, pending: %d, running: %d)", jobID, jobStatus, progress, completedTasks, totalTasks, pendingOrRunningTasks-runningTasks, runningTasks)
+			// Only log if status actually changed
+			if currentStatus != jobStatus {
+				log.Printf("Updated job %d status from %s to %s (progress: %.1f%%, completed: %d/%d, pending: %d, running: %d)", jobID, currentStatus, jobStatus, progress, completedTasks, totalTasks, pendingOrRunningTasks-runningTasks, runningTasks)
+			}
 		}
 	}
 }
@@ -1224,7 +1352,6 @@ func (s *Server) distributeTasksToRunners() {
 			t.AllowParallelRunners = true
 		}
 		pendingTasks = append(pendingTasks, t)
-		log.Printf("Found pending task %d (type: %s, job: %d '%s', status: %s)", t.TaskID, t.TaskType, t.JobID, t.JobName, t.JobStatus)
 	}
 
 	if len(pendingTasks) == 0 {
@@ -1308,11 +1435,6 @@ func (s *Server) distributeTasksToRunners() {
 	}
 
 	log.Printf("Distributing %d pending tasks (%v) to %d connected runners: %v", len(pendingTasks), taskTypes, len(connectedRunners), connectedRunners)
-	// Log each pending task for debugging
-	for _, task := range pendingTasks {
-		log.Printf("  - Task %d (type: %s, job: %d '%s', status: %s)", task.TaskID, task.TaskType, task.JobID, task.JobName, task.JobStatus)
-	}
-
 	// Distribute tasks to runners
 	// Sort tasks to prioritize metadata tasks
 	sort.Slice(pendingTasks, func(i, j int) bool {
@@ -1572,6 +1694,13 @@ func (s *Server) distributeTasksToRunners() {
 			continue
 		}
 
+		// Broadcast task assignment
+		s.broadcastTaskUpdate(task.JobID, task.TaskID, "task_update", map[string]interface{}{
+			"status":     types.TaskStatusRunning,
+			"runner_id":  selectedRunnerID,
+			"started_at": now,
+		})
+
 		// Task was successfully assigned, send via WebSocket
 		log.Printf("Assigned task %d (type: %s, job: %d) to runner %d", task.TaskID, task.TaskType, task.JobID, selectedRunnerID)
 
@@ -1642,6 +1771,8 @@ func (s *Server) assignTaskToRunner(runnerID int64, taskID int64) error {
 			var filePath string
 			if err := rows.Scan(&filePath); err == nil {
 				task.InputFiles = append(task.InputFiles, filePath)
+			} else {
+				log.Printf("Failed to scan input file path for task %d: %v", taskID, err)
 			}
 		}
 	} else {
diff --git a/internal/api/server.go b/internal/api/server.go
index ff0dafe..1232a2f 100644
--- a/internal/api/server.go
+++ b/internal/api/server.go
@@ -1,12 +1,17 @@
 package api
 
 import (
+	"compress/gzip"
 	"database/sql"
 	"encoding/json"
 	"fmt"
+	"io"
 	"log"
 	"net/http"
+	"os"
+	"path/filepath"
 	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -38,6 +43,15 @@ type Server struct {
 	// Mutexes for each frontend connection to serialize writes
 	frontendConnsWriteMu   map[string]*sync.Mutex // key: "jobId:taskId"
 	frontendConnsWriteMuMu sync.RWMutex
+	// Job list WebSocket connections (key: userID)
+	jobListConns   map[int64]*websocket.Conn
+	jobListConnsMu sync.RWMutex
+	// Single job WebSocket connections (key: "userId:jobId")
+	jobConns   map[string]*websocket.Conn
+	jobConnsMu sync.RWMutex
+	// Mutexes for job WebSocket connections
+	jobConnsWriteMu   map[string]*sync.Mutex
+	jobConnsWriteMuMu sync.RWMutex
 	// Throttling for progress updates (per job)
 	progressUpdateTimes   map[int64]time.Time // key: jobID
 	progressUpdateTimesMu sync.RWMutex
@@ -66,6 +80,9 @@ func NewServer(db *database.DB, auth *authpkg.Auth, storage *storage.Storage) (*
 		runnerConns:          make(map[int64]*websocket.Conn),
 		frontendConns:        make(map[string]*websocket.Conn),
 		frontendConnsWriteMu: make(map[string]*sync.Mutex),
+		jobListConns:         make(map[int64]*websocket.Conn),
+		jobConns:             make(map[string]*websocket.Conn),
+		jobConnsWriteMu:      make(map[string]*sync.Mutex),
 		progressUpdateTimes:  make(map[int64]time.Time),
 	}
 
@@ -83,16 +100,62 @@ func (s *Server) setupMiddleware() {
 	// Note: Timeout middleware is NOT applied globally to avoid conflicts with WebSocket connections
 	// WebSocket connections are long-lived and should not have HTTP timeouts
 
+	// Add gzip compression for JSON responses
+	s.router.Use(gzipMiddleware)
+
 	s.router.Use(cors.Handler(cors.Options{
 		AllowedOrigins: []string{"*"},
 		AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
-		AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "Range"},
-		ExposedHeaders: []string{"Link", "Content-Range", "Accept-Ranges", "Content-Length"},
+		AllowedHeaders:   []string{"Accept", "Authorization", "Content-Type", "Range", "If-None-Match"},
+		ExposedHeaders:   []string{"Link", "Content-Range", "Accept-Ranges", "Content-Length", "ETag"},
 		AllowCredentials: true,
 		MaxAge:           300,
 	}))
 }
 
+// gzipMiddleware compresses responses with gzip if client supports it
+func gzipMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Skip compression for WebSocket upgrades
+		if strings.ToLower(r.Header.Get("Upgrade")) == "websocket" {
+			next.ServeHTTP(w, r)
+			return
+		}
+
+		// Check if client accepts gzip
+		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+			next.ServeHTTP(w, r)
+			return
+		}
+
+		// Create gzip writer
+		gz := gzip.NewWriter(w)
+		defer gz.Close()
+
+		w.Header().Set("Content-Encoding", "gzip")
+		w.Header().Set("Vary", "Accept-Encoding")
+
+		// Wrap response writer
+		gzw := &gzipResponseWriter{Writer: gz, ResponseWriter: w}
+		next.ServeHTTP(gzw, r)
+	})
+}
+
+// gzipResponseWriter wraps http.ResponseWriter to add gzip compression
+type gzipResponseWriter struct {
+	io.Writer
+	http.ResponseWriter
+}
+
+func (w *gzipResponseWriter) Write(b []byte) (int, error) {
+	return w.Writer.Write(b)
+}
+
+func (w *gzipResponseWriter) WriteHeader(statusCode int) {
+	// Don't set Content-Length when using gzip - it will be set automatically
+	w.ResponseWriter.WriteHeader(statusCode)
+}
+
 // setupRoutes configures routes
 func (s *Server) setupRoutes() {
 	// Public routes
@@ -118,16 +181,21 @@ func (s *Server) setupRoutes() {
 		r.Post("/", s.handleCreateJob)
 		r.Post("/upload", s.handleUploadFileForJobCreation) // Upload before job creation
 		r.Get("/", s.handleListJobs)
+		r.Get("/summary", s.handleListJobsSummary)
+		r.Post("/batch", s.handleBatchGetJobs)
 		r.Get("/{id}", s.handleGetJob)
 		r.Delete("/{id}", s.handleCancelJob)
 		r.Post("/{id}/delete", s.handleDeleteJob)
 		r.Post("/{id}/upload", s.handleUploadJobFile)
 		r.Get("/{id}/files", s.handleListJobFiles)
+		r.Get("/{id}/files/count", s.handleGetJobFilesCount)
 		r.Get("/{id}/context", s.handleListContextArchive)
 		r.Get("/{id}/files/{fileId}/download", s.handleDownloadJobFile)
 		r.Get("/{id}/video", s.handleStreamVideo)
 		r.Get("/{id}/metadata", s.handleGetJobMetadata)
 		r.Get("/{id}/tasks", s.handleListJobTasks)
+		r.Get("/{id}/tasks/summary", s.handleListJobTasksSummary)
+		r.Post("/{id}/tasks/batch", s.handleBatchGetTasks)
 		r.Get("/{id}/tasks/{taskId}/logs", s.handleGetTaskLogs)
 		// WebSocket route - no timeout middleware (long-lived connection)
 		r.With(func(next http.Handler) http.Handler {
@@ -138,6 +206,19 @@ func (s *Server) setupRoutes() {
 		}).Get("/{id}/tasks/{taskId}/logs/ws", s.handleStreamTaskLogsWebSocket)
 		r.Get("/{id}/tasks/{taskId}/steps", s.handleGetTaskSteps)
 		r.Post("/{id}/tasks/{taskId}/retry", s.handleRetryTask)
+		// WebSocket routes for real-time updates
+		r.With(func(next http.Handler) http.Handler {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				// Remove timeout middleware for WebSocket
+				next.ServeHTTP(w, r)
+			})
+		}).Get("/ws", s.handleJobsWebSocket)
+		r.With(func(next http.Handler) http.Handler {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				// Remove timeout middleware for WebSocket
+				next.ServeHTTP(w, r)
+			})
+		}).Get("/{id}/ws", s.handleJobWebSocket)
 	})
 
 	// Admin routes
@@ -181,7 +262,8 @@ func (s *Server) setupRoutes() {
 		})
 		r.Post("/tasks/{id}/progress", s.handleUpdateTaskProgress)
 		r.Post("/tasks/{id}/steps", s.handleUpdateTaskStep)
-		r.Get("/jobs/{jobId}/context.tar.gz", s.handleDownloadJobContext)
+		r.Get("/jobs/{jobId}/context.tar", s.handleDownloadJobContext)
+		r.Get("/files/{jobId}/{fileName}", s.handleDownloadFileForRunner)
 		r.Post("/files/{jobId}/upload", s.handleUploadFileFromRunner)
 		r.Get("/jobs/{jobId}/status", s.handleGetJobStatusForRunner)
r.Get("/jobs/{jobId}/files", s.handleGetJobFilesForRunner) @@ -311,12 +393,14 @@ func (s *Server) handleLogout(w http.ResponseWriter, r *http.Request) { func (s *Server) handleGetMe(w http.ResponseWriter, r *http.Request) { cookie, err := r.Cookie("session_id") if err != nil { + log.Printf("Authentication failed: missing session cookie in /auth/me") s.respondError(w, http.StatusUnauthorized, "Not authenticated") return } session, ok := s.auth.GetSession(cookie.Value) if !ok { + log.Printf("Authentication failed: invalid session cookie in /auth/me") s.respondError(w, http.StatusUnauthorized, "Invalid session") return } @@ -410,6 +494,7 @@ func (s *Server) handleLocalLogin(w http.ResponseWriter, r *http.Request) { session, err := s.auth.LocalLogin(req.Username, req.Password) if err != nil { + log.Printf("Authentication failed: invalid credentials for username '%s'", req.Username) s.respondError(w, http.StatusUnauthorized, "Invalid credentials") return } @@ -512,6 +597,7 @@ func parseID(r *http.Request, param string) (int64, error) { func (s *Server) StartBackgroundTasks() { go s.recoverStuckTasks() go s.cleanupOldRenderJobs() + go s.cleanupOldTempDirectories() } // recoverStuckTasks periodically checks for dead runners and stuck tasks @@ -621,6 +707,7 @@ func (s *Server) recoverTaskTimeouts() { err := rows.Scan(&taskID, &runnerID, &retryCount, &maxRetries, &timeoutSeconds, &startedAt) if err != nil { + log.Printf("Failed to scan task row in recoverTaskTimeouts: %v", err) continue } @@ -659,3 +746,72 @@ func (s *Server) recoverTaskTimeouts() { } } } + +// cleanupOldTempDirectories periodically cleans up old temporary directories +func (s *Server) cleanupOldTempDirectories() { + // Run cleanup every hour + ticker := time.NewTicker(1 * time.Hour) + defer ticker.Stop() + + // Run once immediately on startup + s.cleanupOldTempDirectoriesOnce() + + for range ticker.C { + s.cleanupOldTempDirectoriesOnce() + } +} + +// cleanupOldTempDirectoriesOnce removes temp directories older than 1 hour +func (s *Server) cleanupOldTempDirectoriesOnce() { + defer func() { + if r := recover(); r != nil { + log.Printf("Panic in cleanupOldTempDirectories: %v", r) + } + }() + + tempPath := filepath.Join(s.storage.BasePath(), "temp") + + // Check if temp directory exists + if _, err := os.Stat(tempPath); os.IsNotExist(err) { + return + } + + // Read all entries in temp directory + entries, err := os.ReadDir(tempPath) + if err != nil { + log.Printf("Failed to read temp directory: %v", err) + return + } + + now := time.Now() + cleanedCount := 0 + + for _, entry := range entries { + if !entry.IsDir() { + continue + } + + entryPath := filepath.Join(tempPath, entry.Name()) + + // Get directory info to check modification time + info, err := entry.Info() + if err != nil { + continue + } + + // Remove directories older than 1 hour + age := now.Sub(info.ModTime()) + if age > 1*time.Hour { + if err := os.RemoveAll(entryPath); err != nil { + log.Printf("Warning: Failed to clean up old temp directory %s: %v", entryPath, err) + } else { + cleanedCount++ + log.Printf("Cleaned up old temp directory: %s (age: %v)", entryPath, age) + } + } + } + + if cleanedCount > 0 { + log.Printf("Cleaned up %d old temp directories", cleanedCount) + } +} diff --git a/internal/auth/auth.go b/internal/auth/auth.go index e34cb12..3044d91 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -410,6 +410,7 @@ func (a *Auth) Middleware(next http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { cookie, err 
:= r.Cookie("session_id") if err != nil { + log.Printf("Authentication failed: missing session cookie for %s %s", r.Method, r.URL.Path) w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusUnauthorized) json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"}) @@ -418,6 +419,7 @@ func (a *Auth) Middleware(next http.HandlerFunc) http.HandlerFunc { session, ok := a.GetSession(cookie.Value) if !ok { + log.Printf("Authentication failed: invalid session cookie for %s %s", r.Method, r.URL.Path) w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusUnauthorized) json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"}) @@ -451,6 +453,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc { // First check authentication cookie, err := r.Cookie("session_id") if err != nil { + log.Printf("Admin authentication failed: missing session cookie for %s %s", r.Method, r.URL.Path) w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusUnauthorized) json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"}) @@ -459,6 +462,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc { session, ok := a.GetSession(cookie.Value) if !ok { + log.Printf("Admin authentication failed: invalid session cookie for %s %s", r.Method, r.URL.Path) w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusUnauthorized) json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"}) @@ -467,6 +471,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc { // Then check admin status if !session.IsAdmin { + log.Printf("Admin access denied: user %d (email: %s) attempted to access admin endpoint %s %s", session.UserID, session.Email, r.Method, r.URL.Path) w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusForbidden) json.NewEncoder(w).Encode(map[string]string{"error": "Forbidden: Admin access required"}) diff --git a/internal/database/schema.go b/internal/database/schema.go index 694bba8..1519e61 100644 --- a/internal/database/schema.go +++ b/internal/database/schema.go @@ -165,14 +165,17 @@ func (db *DB) migrate() error { CREATE INDEX IF NOT EXISTS idx_jobs_user_id ON jobs(user_id); CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status); + CREATE INDEX IF NOT EXISTS idx_jobs_user_status_created ON jobs(user_id, status, created_at DESC); CREATE INDEX IF NOT EXISTS idx_tasks_job_id ON tasks(job_id); CREATE INDEX IF NOT EXISTS idx_tasks_runner_id ON tasks(runner_id); CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status); + CREATE INDEX IF NOT EXISTS idx_tasks_job_status ON tasks(job_id, status); CREATE INDEX IF NOT EXISTS idx_tasks_started_at ON tasks(started_at); CREATE INDEX IF NOT EXISTS idx_job_files_job_id ON job_files(job_id); CREATE INDEX IF NOT EXISTS idx_registration_tokens_token ON registration_tokens(token); CREATE INDEX IF NOT EXISTS idx_registration_tokens_expires_at ON registration_tokens(expires_at); CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_created_at ON task_logs(task_id, created_at); + CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_id ON task_logs(task_id, id DESC); CREATE INDEX IF NOT EXISTS idx_task_logs_runner_id ON task_logs(runner_id); CREATE INDEX IF NOT EXISTS idx_task_steps_task_id ON task_steps(task_id); CREATE INDEX IF NOT EXISTS idx_runners_last_heartbeat ON runners(last_heartbeat); @@ -213,6 +216,9 @@ func (db *DB) migrate() error { `ALTER TABLE tasks ADD COLUMN IF NOT EXISTS 
retry_count INTEGER DEFAULT 0`, `ALTER TABLE tasks ADD COLUMN IF NOT EXISTS max_retries INTEGER DEFAULT 3`, `ALTER TABLE tasks ADD COLUMN IF NOT EXISTS timeout_seconds INTEGER`, + // Add updated_at columns for ETag support + `ALTER TABLE jobs ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP`, + `ALTER TABLE tasks ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP`, // Migrate file_size from INTEGER to BIGINT to support large files (>2GB) // DuckDB doesn't support direct ALTER COLUMN TYPE, so we use a workaround: // 1. Add new column as BIGINT diff --git a/internal/logger/logger.go b/internal/logger/logger.go new file mode 100644 index 0000000..3c5db35 --- /dev/null +++ b/internal/logger/logger.go @@ -0,0 +1,127 @@ +package logger + +import ( + "io" + "log" + "os" + "path/filepath" + "sync" + + "gopkg.in/natefinch/lumberjack.v2" +) + +var ( + defaultLogger *Logger + once sync.Once +) + +// Logger wraps the standard log.Logger with file and stdout output +type Logger struct { + *log.Logger + fileWriter io.WriteCloser +} + +// Init initializes the default logger with both file and stdout output +func Init(logDir, logFileName string, maxSizeMB int, maxBackups int, maxAgeDays int) error { + var err error + once.Do(func() { + defaultLogger, err = New(logDir, logFileName, maxSizeMB, maxBackups, maxAgeDays) + if err != nil { + return + } + // Replace standard log output with the multi-writer + multiWriter := io.MultiWriter(os.Stdout, defaultLogger.fileWriter) + log.SetOutput(multiWriter) + log.SetFlags(log.LstdFlags | log.Lshortfile) + }) + return err +} + +// New creates a new logger that writes to both stdout and a log file +func New(logDir, logFileName string, maxSizeMB int, maxBackups int, maxAgeDays int) (*Logger, error) { + // Ensure log directory exists + if err := os.MkdirAll(logDir, 0755); err != nil { + return nil, err + } + + logPath := filepath.Join(logDir, logFileName) + + // Create file writer with rotation + fileWriter := &lumberjack.Logger{ + Filename: logPath, + MaxSize: maxSizeMB, // megabytes + MaxBackups: maxBackups, // number of backup files + MaxAge: maxAgeDays, // days + Compress: true, // compress old log files + } + + // Create multi-writer that writes to both stdout and file + multiWriter := io.MultiWriter(os.Stdout, fileWriter) + + // Create logger with standard flags + logger := log.New(multiWriter, "", log.LstdFlags|log.Lshortfile) + + return &Logger{ + Logger: logger, + fileWriter: fileWriter, + }, nil +} + +// Close closes the file writer +func (l *Logger) Close() error { + if l.fileWriter != nil { + return l.fileWriter.Close() + } + return nil +} + +// GetDefault returns the default logger instance +func GetDefault() *Logger { + return defaultLogger +} + +// Printf logs a formatted message +func Printf(format string, v ...interface{}) { + if defaultLogger != nil { + defaultLogger.Printf(format, v...) + } else { + log.Printf(format, v...) + } +} + +// Print logs a message +func Print(v ...interface{}) { + if defaultLogger != nil { + defaultLogger.Print(v...) + } else { + log.Print(v...) + } +} + +// Println logs a message with newline +func Println(v ...interface{}) { + if defaultLogger != nil { + defaultLogger.Println(v...) + } else { + log.Println(v...) + } +} + +// Fatal logs a message and exits +func Fatal(v ...interface{}) { + if defaultLogger != nil { + defaultLogger.Fatal(v...) + } else { + log.Fatal(v...) 
+ } +} + +// Fatalf logs a formatted message and exits +func Fatalf(format string, v ...interface{}) { + if defaultLogger != nil { + defaultLogger.Fatalf(format, v...) + } else { + log.Fatalf(format, v...) + } +} + diff --git a/internal/runner/client.go b/internal/runner/client.go index bfc3fee..35bd9c2 100644 --- a/internal/runner/client.go +++ b/internal/runner/client.go @@ -1,10 +1,10 @@ package runner import ( + _ "embed" "archive/tar" "bufio" "bytes" - "compress/gzip" "encoding/json" "errors" "fmt" @@ -22,6 +22,7 @@ import ( "sync" "time" + "jiggablend/pkg/scripts" "jiggablend/pkg/types" "github.com/gorilla/websocket" @@ -53,18 +54,20 @@ type Client struct { vaapiDevicesMu sync.RWMutex // Protects vaapiDevices allocatedDevices map[int64]string // map[taskID]device - tracks which device is allocated to which task allocatedDevicesMu sync.RWMutex // Protects allocatedDevices + longRunningClient *http.Client // HTTP client for long-running operations (no timeout) } // NewClient creates a new runner client func NewClient(managerURL, name, hostname, ipAddress string) *Client { return &Client{ - managerURL: managerURL, - name: name, - hostname: hostname, - ipAddress: ipAddress, - httpClient: &http.Client{Timeout: 30 * time.Second}, - stopChan: make(chan struct{}), - stepStartTimes: make(map[string]time.Time), + managerURL: managerURL, + name: name, + hostname: hostname, + ipAddress: ipAddress, + httpClient: &http.Client{Timeout: 30 * time.Second}, + longRunningClient: &http.Client{Timeout: 0}, // No timeout for long-running operations (context downloads, file uploads/downloads) + stopChan: make(chan struct{}), + stepStartTimes: make(map[string]time.Time), } } @@ -465,6 +468,17 @@ func (c *Client) Register(registrationToken string) (int64, string, string, erro // doSignedRequest performs an authenticated HTTP request using shared secret // queryParams is optional and will be appended to the URL func (c *Client) doSignedRequest(method, path string, body []byte, queryParams ...string) (*http.Response, error) { + return c.doSignedRequestWithClient(method, path, body, c.httpClient, queryParams...) +} + +// doSignedRequestLong performs an authenticated HTTP request using the long-running client (no timeout) +// Use this for context downloads, file uploads/downloads, and other operations that may take a long time +func (c *Client) doSignedRequestLong(method, path string, body []byte, queryParams ...string) (*http.Response, error) { + return c.doSignedRequestWithClient(method, path, body, c.longRunningClient, queryParams...) +} + +// doSignedRequestWithClient performs an authenticated HTTP request using the specified client +func (c *Client) doSignedRequestWithClient(method, path string, body []byte, client *http.Client, queryParams ...string) (*http.Response, error) { if c.runnerSecret == "" { return nil, fmt.Errorf("runner not authenticated") } @@ -483,7 +497,7 @@ func (c *Client) doSignedRequest(method, path string, body []byte, queryParams . 
@@ -969,16 +983,16 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output
 	// Clean up expired cache entries periodically
 	c.cleanupExpiredContextCache()
 
-	// Download context tar.gz
-	contextPath := filepath.Join(workDir, "context.tar.gz")
+	// Download context tar
+	contextPath := filepath.Join(workDir, "context.tar")
 	if err := c.downloadJobContext(jobID, contextPath); err != nil {
 		c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
 		return fmt.Errorf("failed to download context: %w", err)
 	}
 
-	// Extract context tar.gz
+	// Extract context tar
 	c.sendLog(taskID, types.LogLevelInfo, "Extracting context...", "download")
-	if err := c.extractTarGz(contextPath, workDir); err != nil {
+	if err := c.extractTar(contextPath, workDir); err != nil {
 		c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
 		return fmt.Errorf("failed to extract context: %w", err)
 	}
@@ -1077,662 +1091,24 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output
 	// This script will override the blend file's settings based on job metadata
 	formatFilePath := filepath.Join(workDir, "output_format.txt")
 	renderSettingsFilePath := filepath.Join(workDir, "render_settings.json")
-	scriptContent := fmt.Sprintf(`import bpy
-import sys
-import os
-import json
-
-# Make all file paths relative to the blend file location FIRST
-# This must be done immediately after file load, before any other operations
-# to prevent Blender from trying to access external files with absolute paths
-try:
-    bpy.ops.file.make_paths_relative()
-    print("Made all file paths relative to blend file")
-except Exception as e:
-    print(f"Warning: Could not make paths relative: {e}")
-
-# Check for missing addons that the blend file requires
-# Blender marks missing addons with "_missing" suffix in preferences
-missing = []
-try:
-    for mod in bpy.context.preferences.addons:
-        if mod.module.endswith("_missing"):
-            missing.append(mod.module.rsplit("_", 1)[0])
-
-    if missing:
-        print("Missing add-ons required by this .blend:")
-        for name in missing:
-            print(" -", name)
-    else:
-        print("No missing add-ons detected – file is headless-safe")
-except Exception as e:
-    print(f"Warning: Could not check for missing addons: {e}")
-
-# Fix objects and collections hidden from render
-vl = bpy.context.view_layer
-
-# 1. Objects hidden in view layer
-print("Checking for objects hidden from render that need to be enabled...")
-try:
-    for obj in bpy.data.objects:
-        if obj.hide_get(view_layer=vl):
-            if any(k in obj.name.lower() for k in ["scrotum|","cage","genital","penis","dick","collision","body.001","couch"]):
-                obj.hide_set(False, view_layer=vl)
-                print("Enabled object:", obj.name)
-except Exception as e:
-    print(f"Warning: Could not check/fix hidden render objects: {e}")
-
-# 2. Collections disabled in renders OR set to Holdout (the final killer)
-print("Checking for collections hidden from render that need to be enabled...")
-try:
-    for col in bpy.data.collections:
-        if col.hide_render or (vl.layer_collection.children.get(col.name) and not vl.layer_collection.children[col.name].exclude == False):
-            if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision","dick"]):
-                col.hide_render = False
-                if col.name in vl.layer_collection.children:
-                    vl.layer_collection.children[col.name].exclude = False
-                    vl.layer_collection.children[col.name].holdout = False
-                    vl.layer_collection.children[col.name].indirect_only = False
-                print("Enabled collection:", col.name)
-except Exception as e:
-    print(f"Warning: Could not check/fix hidden render collections: {e}")
-
-# Read output format from file (created by Go code)
-format_file_path = %q
-output_format_override = None
-if os.path.exists(format_file_path):
-    try:
-        with open(format_file_path, 'r') as f:
-            output_format_override = f.read().strip().upper()
-        print(f"Read output format from file: '{output_format_override}'")
-    except Exception as e:
-        print(f"Warning: Could not read output format file: {e}")
-else:
-    print(f"Warning: Output format file does not exist: {format_file_path}")
-
-# Read render settings from JSON file (created by Go code)
-render_settings_file = %q
-render_settings_override = None
-if os.path.exists(render_settings_file):
-    try:
-        with open(render_settings_file, 'r') as f:
-            render_settings_override = json.load(f)
-        print(f"Loaded render settings from job metadata")
-    except Exception as e:
-        print(f"Warning: Could not read render settings file: {e}")
-`, formatFilePath, renderSettingsFilePath) + `
-
-# Get current scene settings (preserve blend file preferences)
-scene = bpy.context.scene
-current_engine = scene.render.engine
-current_device = scene.cycles.device if hasattr(scene, 'cycles') and scene.cycles else None
-current_output_format = scene.render.image_settings.file_format
-
-print(f"Blend file render engine: {current_engine}")
-if current_device:
-    print(f"Blend file device setting: {current_device}")
-print(f"Blend file output format: {current_output_format}")
-
-# Override output format if specified
-# The format file always takes precedence (it's written specifically for this job)
-if output_format_override:
-    print(f"Overriding output format from '{current_output_format}' to '{output_format_override}'")
-    # Map common format names to Blender's format constants
-    # For video formats (EXR_264_MP4, EXR_AV1_MP4), we render as EXR frames first
-    format_to_use = output_format_override.upper()
-    if format_to_use in ['EXR_264_MP4', 'EXR_AV1_MP4']:
-        format_to_use = 'EXR'  # Render as EXR for video formats
-
-    format_map = {
-        'PNG': 'PNG',
-        'JPEG': 'JPEG',
-        'JPG': 'JPEG',
-        'EXR': 'OPEN_EXR',
-        'OPEN_EXR': 'OPEN_EXR',
-        'TARGA': 'TARGA',
-        'TIFF': 'TIFF',
-        'BMP': 'BMP',
-    }
-    blender_format = format_map.get(format_to_use, format_to_use)
-    try:
-        scene.render.image_settings.file_format = blender_format
-        print(f"Successfully set output format to: {blender_format}")
-    except Exception as e:
-        print(f"Warning: Could not set output format to {blender_format}: {e}")
-        print(f"Using blend file's format: {current_output_format}")
-else:
-    print(f"Using blend file's output format: {current_output_format}")
-
-# Apply render settings from job metadata if provided
-# Note: output_format is NOT applied from render_settings_override - it's already set from format file above
-if render_settings_override:
-    engine_override = render_settings_override.get('engine', '').upper()
-    engine_settings = render_settings_override.get('engine_settings', {})
-
-    # Switch engine if specified
-    if engine_override and engine_override != current_engine.upper():
-        print(f"Switching render engine from '{current_engine}' to '{engine_override}'")
-        try:
-            scene.render.engine = engine_override
-            current_engine = engine_override
-            print(f"Successfully switched to {engine_override} engine")
-        except Exception as e:
-            print(f"Warning: Could not switch engine to {engine_override}: {e}")
-            print(f"Using blend file's engine: {current_engine}")
-
-    # Apply engine-specific settings
-    if engine_settings:
-        if current_engine.upper() == 'CYCLES':
-            cycles = scene.cycles
-            print("Applying Cycles render settings from job metadata...")
-            for key, value in engine_settings.items():
-                try:
-                    if hasattr(cycles, key):
-                        setattr(cycles, key, value)
-                        print(f"  Set Cycles.{key} = {value}")
-                    else:
-                        print(f"  Warning: Cycles has no attribute '{key}'")
-                except Exception as e:
-                    print(f"  Warning: Could not set Cycles.{key} = {value}: {e}")
-        elif current_engine.upper() in ['EEVEE', 'EEVEE_NEXT']:
-            eevee = scene.eevee
-            print("Applying EEVEE render settings from job metadata...")
-            for key, value in engine_settings.items():
-                try:
-                    if hasattr(eevee, key):
-                        setattr(eevee, key, value)
-                        print(f"  Set EEVEE.{key} = {value}")
-                    else:
-                        print(f"  Warning: EEVEE has no attribute '{key}'")
-                except Exception as e:
-                    print(f"  Warning: Could not set EEVEE.{key} = {value}: {e}")
-
-    # Apply resolution if specified
-    if 'resolution_x' in render_settings_override:
-        try:
-            scene.render.resolution_x = render_settings_override['resolution_x']
-            print(f"Set resolution_x = {render_settings_override['resolution_x']}")
-        except Exception as e:
-            print(f"Warning: Could not set resolution_x: {e}")
-    if 'resolution_y' in render_settings_override:
-        try:
-            scene.render.resolution_y = render_settings_override['resolution_y']
-            print(f"Set resolution_y = {render_settings_override['resolution_y']}")
-        except Exception as e:
-            print(f"Warning: Could not set resolution_y: {e}")
-
-# Only override device selection if using Cycles (other engines handle GPU differently)
-if current_engine == 'CYCLES':
-    # Check if CPU rendering is forced
-    force_cpu = False
-    if render_settings_override and render_settings_override.get('force_cpu'):
-        force_cpu = render_settings_override.get('force_cpu', False)
-        print("Force CPU rendering is enabled - skipping GPU detection")
-
-    # Ensure Cycles addon is enabled
-    try:
-        if 'cycles' not in bpy.context.preferences.addons:
-            bpy.ops.preferences.addon_enable(module='cycles')
-            print("Enabled Cycles addon")
-    except Exception as e:
-        print(f"Warning: Could not enable Cycles addon: {e}")
-
-    # If CPU is forced, skip GPU detection and set CPU directly
-    if force_cpu:
-        scene.cycles.device = 'CPU'
-        print("Forced CPU rendering (skipping GPU detection)")
-    else:
-        # Access Cycles preferences
-        prefs = bpy.context.preferences
-        try:
-            cycles_prefs = prefs.addons['cycles'].preferences
-        except (KeyError, AttributeError):
-            try:
-                cycles_addon = prefs.addons.get('cycles')
-                if cycles_addon:
-                    cycles_prefs = cycles_addon.preferences
-                else:
-                    raise Exception("Cycles addon not found")
-            except Exception as e:
-                print(f"ERROR: Could not access Cycles preferences: {e}")
-                import traceback
-                traceback.print_exc()
-                sys.exit(1)
-
-        # Check all devices and choose the best GPU type
-        # Device type preference order (most performant first)
-        device_type_preference = ['OPTIX', 'CUDA', 'HIP', 'ONEAPI', 'METAL']
-        gpu_available = False
-        best_device_type = None
-        best_gpu_devices = []
-        devices_by_type = {}  # {device_type: [devices]}
-        seen_device_ids = set()  # Track device IDs to avoid duplicates
-
-        print("Checking for GPU availability...")
-
-        # Try to get all devices - try each device type to see what's available
-        for device_type in device_type_preference:
-            try:
-                cycles_prefs.compute_device_type = device_type
-                cycles_prefs.refresh_devices()
-
-                # Get devices for this type
-                devices = None
-                if hasattr(cycles_prefs, 'devices'):
-                    try:
-                        devices_prop = cycles_prefs.devices
-                        if devices_prop:
-                            devices = list(devices_prop) if hasattr(devices_prop, '__iter__') else [devices_prop]
-                    except Exception as e:
-                        pass
-
-                if not devices or len(devices) == 0:
-                    try:
-                        devices = cycles_prefs.get_devices()
-                    except Exception as e:
-                        pass
-
-                if devices and len(devices) > 0:
-                    # Categorize devices by their type attribute, avoiding duplicates
-                    for device in devices:
-                        if hasattr(device, 'type'):
-                            device_type_str = str(device.type).upper()
-                            device_id = getattr(device, 'id', None)
-
-                            # Use device ID to avoid duplicates (same device appears when checking different compute_device_types)
-                            if device_id and device_id in seen_device_ids:
-                                continue
-
-                            if device_id:
-                                seen_device_ids.add(device_id)
-
-                            if device_type_str not in devices_by_type:
-                                devices_by_type[device_type_str] = []
-                            devices_by_type[device_type_str].append(device)
-            except (ValueError, AttributeError, KeyError, TypeError):
-                # Device type not supported, continue
-                continue
-            except Exception as e:
-                # Other errors - log but continue
-                print(f"  Error checking {device_type}: {e}")
-                continue
-
-        # Print what we found
-        print(f"Found devices by type: {list(devices_by_type.keys())}")
-        for dev_type, dev_list in devices_by_type.items():
-            print(f"  {dev_type}: {len(dev_list)} device(s)")
-            for device in dev_list:
-                device_name = getattr(device, 'name', 'Unknown')
-                print(f"    - {device_name}")
-
-        # Choose the best GPU type based on preference
-        for preferred_type in device_type_preference:
-            if preferred_type in devices_by_type:
-                gpu_devices = [d for d in devices_by_type[preferred_type] if preferred_type in ['CUDA', 'OPENCL', 'OPTIX', 'HIP', 'METAL', 'ONEAPI']]
-                if gpu_devices:
-                    best_device_type = preferred_type
-                    best_gpu_devices = [(d, preferred_type) for d in gpu_devices]
-                    print(f"Selected {preferred_type} as best GPU type with {len(gpu_devices)} device(s)")
-                    break
-
-        # Second pass: Enable the best GPU we found
-        if best_device_type and best_gpu_devices:
-            print(f"\nEnabling GPU devices for {best_device_type}...")
-            try:
-                # Set the device type again
-                cycles_prefs.compute_device_type = best_device_type
-                cycles_prefs.refresh_devices()
-
-                # First, disable all CPU devices to ensure only GPU is used
-                print(f"  Disabling CPU devices...")
-                all_devices = cycles_prefs.devices if hasattr(cycles_prefs, 'devices') else cycles_prefs.get_devices()
-                if all_devices:
-                    for device in all_devices:
-                        if hasattr(device, 'type') and str(device.type).upper() == 'CPU':
-                            try:
-                                device.use = False
-                                device_name = getattr(device, 'name', 'Unknown')
-                                print(f"  Disabled CPU: {device_name}")
-                            except Exception as e:
-                                print(f"  Warning: Could not disable CPU device {getattr(device, 'name', 'Unknown')}: {e}")
-
-                # Enable all GPU devices
-                enabled_count = 0
-                for device, device_type in best_gpu_devices:
-                    try:
-                        device.use = True
-                        enabled_count += 1
-                        device_name = getattr(device, 'name', 'Unknown')
-                        print(f"  Enabled: {device_name}")
-                    except Exception as e:
-                        print(f"  Warning: Could not enable device {getattr(device, 'name', 'Unknown')}: {e}")
-
-                # Enable ray tracing acceleration for supported device types
-                try:
-                    if best_device_type == 'HIP':
-                        # HIPRT (HIP Ray Tracing) for AMD GPUs
-                        if hasattr(cycles_prefs, 'use_hiprt'):
-                            cycles_prefs.use_hiprt = True
-                            print(f"  Enabled HIPRT (HIP Ray Tracing) for faster rendering")
-                        elif hasattr(scene.cycles, 'use_hiprt'):
-                            scene.cycles.use_hiprt = True
-                            print(f"  Enabled HIPRT (HIP Ray Tracing) for faster rendering")
-                        else:
-                            print(f"  HIPRT not available (requires Blender 4.0+)")
-                    elif best_device_type == 'OPTIX':
-                        # OptiX is already enabled when using OPTIX device type
-                        # But we can check if there are any OptiX-specific settings
-                        if hasattr(scene.cycles, 'use_optix_denoising'):
-                            scene.cycles.use_optix_denoising = True
-                            print(f"  Enabled OptiX denoising")
-                        print(f"  OptiX ray tracing is active (using OPTIX device type)")
-                    elif best_device_type == 'CUDA':
-                        # CUDA can use OptiX if available, but it's usually automatic
-                        # Check if we can prefer OptiX over CUDA
-                        if hasattr(scene.cycles, 'use_optix_denoising'):
-                            scene.cycles.use_optix_denoising = True
-                            print(f"  Enabled OptiX denoising (if OptiX available)")
-                        print(f"  CUDA ray tracing active")
-                    elif best_device_type == 'METAL':
-                        # MetalRT for Apple Silicon (if available)
-                        if hasattr(scene.cycles, 'use_metalrt'):
-                            scene.cycles.use_metalrt = True
-                            print(f"  Enabled MetalRT (Metal Ray Tracing) for faster rendering")
-                        elif hasattr(cycles_prefs, 'use_metalrt'):
-                            cycles_prefs.use_metalrt = True
-                            print(f"  Enabled MetalRT (Metal Ray Tracing) for faster rendering")
-                        else:
-                            print(f"  MetalRT not available")
-                    elif best_device_type == 'ONEAPI':
-                        # Intel oneAPI - Embree might be available
-                        if hasattr(scene.cycles, 'use_embree'):
-                            scene.cycles.use_embree = True
-                            print(f"  Enabled Embree for faster CPU ray tracing")
-                        print(f"  oneAPI ray tracing active")
-                except Exception as e:
-                    print(f"  Could not enable ray tracing acceleration: {e}")
-
-                print(f"SUCCESS: Enabled {enabled_count} GPU device(s) for {best_device_type}")
-                gpu_available = True
-            except Exception as e:
-                print(f"ERROR: Failed to enable GPU devices: {e}")
-                import traceback
-                traceback.print_exc()
-
-        # Set device based on availability (prefer GPU, fallback to CPU)
-        if gpu_available:
-            scene.cycles.device = 'GPU'
-            print(f"Using GPU for rendering (blend file had: {current_device})")
-        else:
-            scene.cycles.device = 'CPU'
-            print(f"GPU not available, using CPU for rendering (blend file had: {current_device})")
-
-    # Verify device setting
-    if current_engine == 'CYCLES':
-        final_device = scene.cycles.device
-        print(f"Final Cycles device: {final_device}")
-else:
-    # For other engines (EEVEE, etc.), respect blend file settings
-    print(f"Using {current_engine} engine - respecting blend file settings")
-
-# Enable GPU acceleration for EEVEE viewport rendering (if using EEVEE)
-if current_engine == 'EEVEE' or current_engine == 'EEVEE_NEXT':
-    try:
-        if hasattr(bpy.context.preferences.system, 'gpu_backend'):
-            bpy.context.preferences.system.gpu_backend = 'OPENGL'
-            print("Enabled OpenGL GPU backend for EEVEE")
-    except Exception as e:
-        print(f"Could not set EEVEE GPU backend: {e}")
-
-# Enable GPU acceleration for compositing (if compositing is enabled)
-try:
-    if scene.use_nodes and hasattr(scene, 'node_tree') and scene.node_tree:
-        if hasattr(scene.node_tree, 'use_gpu_compositing'):
-            scene.node_tree.use_gpu_compositing = True
-            print("Enabled GPU compositing")
-except Exception as e:
-    print(f"Could not enable GPU compositing: {e}")
-
-# CRITICAL: Initialize headless rendering to prevent black images
-# This ensures the render engine is properly initialized before rendering
-print("Initializing headless rendering context...")
-try:
-    # Ensure world exists and has proper settings
-    if not scene.world:
-        # Create a default world if none exists
-        world = bpy.data.worlds.new("World")
-        scene.world = world
-        print("Created default world")
-
-    # Ensure world has a background shader (not just black)
-    if scene.world:
-        # Enable nodes if not already enabled
-        if not scene.world.use_nodes:
-            scene.world.use_nodes = True
-            print("Enabled world nodes")
-
-        world_nodes = scene.world.node_tree
-        if world_nodes:
-            # Find or create background shader
-            bg_shader = None
-            for node in world_nodes.nodes:
-                if node.type == 'BACKGROUND':
-                    bg_shader = node
-                    break
-
-            if not bg_shader:
-                bg_shader = world_nodes.nodes.new(type='ShaderNodeBackground')
-                # Connect to output
-                output = world_nodes.nodes.get('World Output')
-                if not output:
-                    output = world_nodes.nodes.new(type='ShaderNodeOutputWorld')
-                    output.name = 'World Output'
-                if output and bg_shader:
-                    # Connect background to surface input
-                    if 'Surface' in output.inputs and 'Background' in bg_shader.outputs:
-                        world_nodes.links.new(bg_shader.outputs['Background'], output.inputs['Surface'])
-                print("Created background shader for world")
-
-            # Ensure background has some color (not pure black)
-            if bg_shader:
-                # Only set if it's pure black (0,0,0)
-                if hasattr(bg_shader.inputs, 'Color'):
-                    color = bg_shader.inputs['Color'].default_value
-                    if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
-                        # Set to a very dark gray instead of pure black
-                        bg_shader.inputs['Color'].default_value = (0.01, 0.01, 0.01, 1.0)
-                        print("Adjusted world background color to prevent black renders")
-        else:
-            # Fallback: use legacy world color if nodes aren't working
-            if hasattr(scene.world, 'color'):
-                color = scene.world.color
-                if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
-                    scene.world.color = (0.01, 0.01, 0.01)
-                    print("Adjusted legacy world color to prevent black renders")
-
-    # For EEVEE, force viewport update to initialize render engine
-    if current_engine in ['EEVEE', 'EEVEE_NEXT']:
-        # Force EEVEE to update its internal state
-        try:
-            # Update depsgraph to ensure everything is initialized
-            depsgraph = bpy.context.evaluated_depsgraph_get()
-            if depsgraph:
-                # Force update
-                depsgraph.update()
-                print("Forced EEVEE depsgraph update for headless rendering")
-        except Exception as e:
-            print(f"Warning: Could not force EEVEE update: {e}")
-
-        # Ensure EEVEE settings are applied
-        try:
-            # Force a material update to ensure shaders are compiled
-            for obj in scene.objects:
-                if obj.type == 'MESH' and obj.data.materials:
-                    for mat in obj.data.materials:
-                        if mat and mat.use_nodes:
-                            # Touch the material to force update
-                            mat.use_nodes = mat.use_nodes
-            print("Forced material updates for EEVEE")
-        except Exception as e:
-            print(f"Warning: Could not update materials: {e}")
-
-    # For Cycles, ensure proper initialization
-    if current_engine == 'CYCLES':
-        # Ensure samples are set (even if 1 for preview)
-        if not hasattr(scene.cycles, 'samples') or scene.cycles.samples < 1:
-            scene.cycles.samples = 1
-            print("Set minimum Cycles samples")
-
-        # Check for lights in the scene
-        lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
-        print(f"Found {len(lights)} light(s) in scene")
-        if len(lights) == 0:
-            print("WARNING: No lights found in scene - rendering may be black!")
-            print("  Consider adding lights or ensuring world background emits light")
-
-        # Ensure world background emits light (critical for Cycles)
-        if scene.world and scene.world.use_nodes:
-            world_nodes = scene.world.node_tree
-            if world_nodes:
-                bg_shader = None
-                for node in world_nodes.nodes:
-                    if node.type == 'BACKGROUND':
-                        bg_shader = node
-                        break
-
-                if bg_shader:
-                    # Check and set strength - Cycles needs this to emit light!
-                    if hasattr(bg_shader.inputs, 'Strength'):
-                        strength = bg_shader.inputs['Strength'].default_value
-                        if strength <= 0.0:
-                            bg_shader.inputs['Strength'].default_value = 1.0
-                            print("Set world background strength to 1.0 for Cycles lighting")
-                        else:
-                            print(f"World background strength: {strength}")
-                    # Also ensure color is not pure black
-                    if hasattr(bg_shader.inputs, 'Color'):
-                        color = bg_shader.inputs['Color'].default_value
-                        if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
-                            bg_shader.inputs['Color'].default_value = (1.0, 1.0, 1.0, 1.0)
-                            print("Set world background color to white for Cycles lighting")
-
-        # Check film_transparent setting - if enabled, background will be transparent/black
-        if hasattr(scene.cycles, 'film_transparent') and scene.cycles.film_transparent:
-            print("WARNING: film_transparent is enabled - background will be transparent")
-            print("  If you see black renders, try disabling film_transparent")
-
-        # Force Cycles to update/compile materials and shaders
-        try:
-            # Update depsgraph to ensure everything is initialized
-            depsgraph = bpy.context.evaluated_depsgraph_get()
-            if depsgraph:
-                depsgraph.update()
-                print("Forced Cycles depsgraph update")
-
-            # Force material updates to ensure shaders are compiled
-            for obj in scene.objects:
-                if obj.type == 'MESH' and obj.data.materials:
-                    for mat in obj.data.materials:
-                        if mat and mat.use_nodes:
-                            # Force material update
-                            mat.use_nodes = mat.use_nodes
-            print("Forced Cycles material updates")
-        except Exception as e:
-            print(f"Warning: Could not force Cycles updates: {e}")
-
-        # Verify device is actually set correctly
-        if hasattr(scene.cycles, 'device'):
-            actual_device = scene.cycles.device
-            print(f"Cycles device setting: {actual_device}")
-            if actual_device == 'GPU':
-                # Try to verify GPU is actually available
-                try:
-                    prefs = bpy.context.preferences
-                    cycles_prefs = prefs.addons['cycles'].preferences
-                    devices = cycles_prefs.devices
-                    enabled_devices = [d for d in devices if d.use]
-                    if len(enabled_devices) == 0:
-                        print("WARNING: GPU device set but no GPU devices are enabled!")
-                        print("  Falling back to CPU may cause issues")
-                except Exception as e:
-                    print(f"Could not verify GPU devices: {e}")
-
-    # Ensure camera exists and is active
-    if scene.camera is None:
-        # Find first camera in scene
-        for obj in scene.objects:
-            if obj.type == 'CAMERA':
-                scene.camera = obj
-                print(f"Set active camera: {obj.name}")
-                break
-
-    # Fix objects and collections hidden from render
-    vl = bpy.context.view_layer
-
-    # 1. Objects hidden in view layer
-    for obj in bpy.data.objects:
-        if obj.hide_get(view_layer=vl):
-            if any(k in obj.name.lower() for k in ["scrotum|","cage","genital","penis","dick","collision","body.001","couch"]):
-                obj.hide_set(False, view_layer=vl)
-                print("Enabled object:", obj.name)
-
-    # 2. Collections disabled in renders OR set to Holdout (the final killer)
-    for col in bpy.data.collections:
-        if col.hide_render or (vl.layer_collection.children.get(col.name) and not vl.layer_collection.children[col.name].exclude == False):
-            if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision","dick"]):
-                col.hide_render = False
-                if col.name in vl.layer_collection.children:
-                    vl.layer_collection.children[col.name].exclude = False
-                    vl.layer_collection.children[col.name].holdout = False
-                    vl.layer_collection.children[col.name].indirect_only = False
-                print("Enabled collection:", col.name)
-
-    print("Headless rendering initialization complete")
-except Exception as e:
-    print(f"Warning: Headless rendering initialization had issues: {e}")
-    import traceback
-    traceback.print_exc()
-
-# Final verification before rendering
-print("\n=== Pre-render verification ===")
-try:
-    scene = bpy.context.scene
-    print(f"Render engine: {scene.render.engine}")
-    print(f"Active camera: {scene.camera.name if scene.camera else 'None'}")
-
-    if scene.render.engine == 'CYCLES':
-        print(f"Cycles device: {scene.cycles.device}")
-        print(f"Cycles samples: {scene.cycles.samples}")
-        lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
-        print(f"Lights in scene: {len(lights)}")
-        if scene.world:
-            if scene.world.use_nodes:
-                world_nodes = scene.world.node_tree
-                if world_nodes:
-                    bg_shader = None
-                    for node in world_nodes.nodes:
-                        if node.type == 'BACKGROUND':
-                            bg_shader = node
-                            break
-                    if bg_shader:
-                        if hasattr(bg_shader.inputs, 'Strength'):
-                            strength = bg_shader.inputs['Strength'].default_value
-                            print(f"World background strength: {strength}")
-                        if hasattr(bg_shader.inputs, 'Color'):
-                            color = bg_shader.inputs['Color'].default_value
-                            print(f"World background color: ({color[0]:.2f}, {color[1]:.2f}, {color[2]:.2f})")
-            else:
-                print("World exists but nodes are disabled")
-        else:
-            print("WARNING: No world in scene!")
-
-    print("=== Verification complete ===\n")
-except Exception as e:
-    print(f"Warning: Verification failed: {e}")
-
-print("Device configuration complete - blend file settings preserved, device optimized")
-sys.stdout.flush()
-`
+
+	// Check if unhide_objects is enabled
+	unhideObjects := false
+	if jobMetadata != nil && jobMetadata.UnhideObjects != nil && *jobMetadata.UnhideObjects {
+		unhideObjects = true
+	}
+
+	// Build unhide code conditionally from embedded script
+	unhideCode := ""
+	if unhideObjects {
+		unhideCode = scripts.UnhideObjects
+	}
+
+	// Load template and replace placeholders
+	scriptContent := scripts.RenderBlenderTemplate
+	scriptContent = strings.ReplaceAll(scriptContent, "{{UNHIDE_CODE}}", unhideCode)
+	scriptContent = strings.ReplaceAll(scriptContent, "{{FORMAT_FILE_PATH}}", fmt.Sprintf("%q", formatFilePath))
+	scriptContent = strings.ReplaceAll(scriptContent, "{{RENDER_SETTINGS_FILE}}", fmt.Sprintf("%q", renderSettingsFilePath))
 	scriptPath := filepath.Join(workDir, "enable_gpu.py")
 	if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil {
 		errMsg := fmt.Sprintf("failed to create GPU enable script: %v", err)
@@ -1765,23 +1141,30 @@ sys.stdout.flush()
 		}
 	}
 
+	// Check if execution should be enabled (defaults to false/off)
+	enableExecution := false
+	if jobMetadata != nil && jobMetadata.EnableExecution != nil && *jobMetadata.EnableExecution {
+		enableExecution = true
+	}
+
 	// Run Blender with GPU enabled via Python script
 	// Use -s (start) and -e (end) for frame ranges, or -f for single frame
 	var cmd *exec.Cmd
+	args := []string{"-b", blendFile, "--python", scriptPath}
+	if enableExecution {
+		args = append(args, "--enable-autoexec")
+	}
 	if frameStart == frameEnd {
 		// Single frame
-		cmd = exec.Command("blender", "-b", blendFile,
-			"--python", scriptPath,
-			"-o", absOutputPattern,
-			"-f", fmt.Sprintf("%d", frameStart))
+		args = append(args, "-o", absOutputPattern, "-f", fmt.Sprintf("%d", frameStart))
+		cmd = exec.Command("blender", args...)
 	} else {
 		// Frame range
-		cmd = exec.Command("blender", "-b", blendFile,
-			"--python", scriptPath,
-			"-o", absOutputPattern,
+		args = append(args, "-o", absOutputPattern,
 			"-s", fmt.Sprintf("%d", frameStart),
 			"-e", fmt.Sprintf("%d", frameEnd),
 			"-a") // -a renders animation (all frames in range)
+		cmd = exec.Command("blender", args...)
 	}
 
 	cmd.Dir = workDir
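The move from an inline fmt.Sprintf mega-string to an embedded template with `{{...}}` placeholders is easy to regression-test. A sketch of such a test, using only the exported names from pkg/scripts; the test name, paths, and assertions are illustrative, not part of the PR:

```go
package scripts_test

import (
	"strings"
	"testing"

	"jiggablend/pkg/scripts"
)

// TestRenderTemplateSubstitution checks that after substituting the three
// placeholders no {{...}} marker survives and the quoted path is injected
// verbatim, mirroring what processTask does with strings.ReplaceAll.
func TestRenderTemplateSubstitution(t *testing.T) {
	got := scripts.RenderBlenderTemplate
	got = strings.ReplaceAll(got, "{{UNHIDE_CODE}}", "")
	got = strings.ReplaceAll(got, "{{FORMAT_FILE_PATH}}", `"/tmp/output_format.txt"`)
	got = strings.ReplaceAll(got, "{{RENDER_SETTINGS_FILE}}", `"/tmp/render_settings.json"`)

	if strings.Contains(got, "{{") {
		t.Fatalf("unsubstituted placeholder remains in rendered script")
	}
	if !strings.Contains(got, `format_file_path = "/tmp/output_format.txt"`) {
		t.Fatalf("format file path was not injected")
	}
}
```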
@@ -3261,8 +2644,11 @@ func (c *Client) getJobMetadata(jobID int64) (*types.BlendMetadata, error) {
 
 // downloadFrameFile downloads a frame file for MP4 generation
 func (c *Client) downloadFrameFile(jobID int64, fileName, destPath string) error {
-	path := fmt.Sprintf("/api/runner/files/%d/%s", jobID, fileName)
-	resp, err := c.doSignedRequest("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
+	// URL encode the fileName to handle special characters in filenames
+	encodedFileName := url.PathEscape(fileName)
+	path := fmt.Sprintf("/api/runner/files/%d/%s", jobID, encodedFileName)
+	// Use long-running client for file downloads (no timeout) - EXR files can be large
+	resp, err := c.doSignedRequestLong("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
 	if err != nil {
 		return err
 	}
@@ -3330,7 +2716,8 @@ func (c *Client) downloadFileToPath(filePath, destPath string) error {
 		downloadPath += "/" + filepath.Base(filePath)
 	}
 
-	resp, err := c.doSignedRequest("GET", downloadPath, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
+	// Use long-running client for file downloads (no timeout)
+	resp, err := c.doSignedRequestLong("GET", downloadPath, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
 	if err != nil {
 		return fmt.Errorf("failed to download file: %w", err)
 	}
@@ -3392,7 +2779,8 @@ func (c *Client) uploadFile(jobID int64, filePath string) (string, error) {
 	req.Header.Set("Content-Type", formWriter.FormDataContentType())
 	req.Header.Set("X-Runner-Secret", c.runnerSecret)
 
-	resp, err := c.httpClient.Do(req)
+	// Use long-running client for file uploads (no timeout)
+	resp, err := c.longRunningClient.Do(req)
 	if err != nil {
 		return "", fmt.Errorf("failed to upload file: %w", err)
 	}
@@ -3424,7 +2812,7 @@ func (c *Client) getContextCacheKey(jobID int64) string {
 func (c *Client) getContextCachePath(cacheKey string) string {
 	cacheDir := filepath.Join(c.getWorkspaceDir(), "cache", "contexts")
 	os.MkdirAll(cacheDir, 0755)
-	return filepath.Join(cacheDir, cacheKey+".tar.gz")
+	return filepath.Join(cacheDir, cacheKey+".tar")
 }
 
 // isContextCacheValid checks if a cached context file exists and is not expired (1 hour TTL)
@@ -3437,7 +2825,7 @@ func (c *Client) isContextCacheValid(cachePath string) bool {
 	return time.Since(info.ModTime()) < time.Hour
 }
 
-// downloadJobContext downloads the job context tar.gz, using cache if available
+// downloadJobContext downloads the job context tar, using cache if available
 func (c *Client) downloadJobContext(jobID int64, destPath string) error {
 	cacheKey := c.getContextCacheKey(jobID)
 	cachePath := c.getContextCachePath(cacheKey)
@@ -3464,9 +2852,9 @@ func (c *Client) downloadJobContext(jobID int64, destPath string) error {
 		}
 	}
 
-	// Download from manager
-	path := fmt.Sprintf("/api/runner/jobs/%d/context.tar.gz", jobID)
-	resp, err := c.doSignedRequest("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
+	// Download from manager - use long-running client (no timeout) for large context files
+	path := fmt.Sprintf("/api/runner/jobs/%d/context.tar", jobID)
+	resp, err := c.doSignedRequestLong("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
 	if err != nil {
 		return fmt.Errorf("failed to download context: %w", err)
 	}
@@ -3517,24 +2905,17 @@ func (c *Client) downloadJobContext(jobID int64, destPath string) error {
 	return nil
 }
 
-// extractTarGz extracts a tar.gz file to the destination directory
-func (c *Client) extractTarGz(tarGzPath, destDir string) error {
-	// Open the tar.gz file
-	file, err := os.Open(tarGzPath)
+// extractTar extracts a tar file to the destination directory
+func (c *Client) extractTar(tarPath, destDir string) error {
+	// Open the tar file
+	file, err := os.Open(tarPath)
 	if err != nil {
-		return fmt.Errorf("failed to open tar.gz file: %w", err)
+		return fmt.Errorf("failed to open tar file: %w", err)
 	}
 	defer file.Close()
 
-	// Create gzip reader
-	gzReader, err := gzip.NewReader(file)
-	if err != nil {
-		return fmt.Errorf("failed to create gzip reader: %w", err)
-	}
-	defer gzReader.Close()
-
 	// Create tar reader
-	tarReader := tar.NewReader(gzReader)
+	tarReader := tar.NewReader(file)
 
 	// Extract files
 	for {
@@ -3635,16 +3016,16 @@ func (c *Client) processMetadataTask(task map[string]interface{}, jobID int64, i
 	c.sendStepUpdate(taskID, "download", types.StepStatusRunning, "")
 	c.sendLog(taskID, types.LogLevelInfo, "Downloading job context...", "download")
 
-	// Download context tar.gz
-	contextPath := filepath.Join(workDir, "context.tar.gz")
+	// Download context tar
+	contextPath := filepath.Join(workDir, "context.tar")
 	if err := c.downloadJobContext(jobID, contextPath); err != nil {
 		c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
 		return fmt.Errorf("failed to download context: %w", err)
 	}
 
-	// Extract context tar.gz
+	// Extract context tar
 	c.sendLog(taskID, types.LogLevelInfo, "Extracting context...", "download")
-	if err := c.extractTarGz(contextPath, workDir); err != nil {
+	if err := c.extractTar(contextPath, workDir); err != nil {
 		c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
 		return fmt.Errorf("failed to extract context: %w", err)
 	}
@@ -3881,6 +3262,7 @@ sys.stdout.flush()
 	}
 
 	// Execute Blender with Python script
+	// Note: the enable_execution flag is intentionally not applied to metadata extraction, for safety (autoexec stays off here)
 	cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath)
 	cmd.Dir = workDir
diff --git a/internal/storage/storage.go b/internal/storage/storage.go
index 52ff098..2bfdb85 100644
--- a/internal/storage/storage.go
+++ b/internal/storage/storage.go
@@ -3,9 +3,9 @@ package storage
 import (
 	"archive/tar"
 	"archive/zip"
-	"compress/gzip"
 	"fmt"
 	"io"
+	"log"
 	"os"
 	"path/filepath"
 	"strings"
@@ -31,6 +31,7 @@ func (s *Storage) init() error {
 		s.basePath,
 		s.uploadsPath(),
 		s.outputsPath(),
+		s.tempPath(),
 	}
 
 	for _, dir := range dirs {
@@ -42,6 +43,28 @@ func (s *Storage) init() error {
 	return nil
 }
 
+// tempPath returns the path for temporary files
+func (s *Storage) tempPath() string {
+	return filepath.Join(s.basePath, "temp")
+}
+
+// BasePath returns the storage base path (for cleanup tasks)
+func (s *Storage) BasePath() string {
+	return s.basePath
+}
+
+// TempDir creates a temporary directory under the storage base path
+// Returns the path to the temporary directory
+func (s *Storage) TempDir(pattern string) (string, error) {
+	// Ensure temp directory exists
+	if err := os.MkdirAll(s.tempPath(), 0755); err != nil {
+		return "", fmt.Errorf("failed to create temp directory: %w", err)
+	}
+
+	// Create temp directory under storage base path
+	return os.MkdirTemp(s.tempPath(), pattern)
+}
+
 // uploadsPath returns the path for uploads
 func (s *Storage) uploadsPath() string {
 	return filepath.Join(s.basePath, "uploads")
@@ -142,6 +165,13 @@ func (s *Storage) GetFileSize(filePath string) (int64, error) {
 // ExtractZip extracts a ZIP file to the destination directory
 // Returns a list of all extracted file paths
 func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
+	log.Printf("Extracting ZIP archive: %s -> %s", zipPath, destDir)
+
+	// Ensure destination directory exists
+	if err := os.MkdirAll(destDir, 0755); err != nil {
+		return nil, fmt.Errorf("failed to create destination directory: %w", err)
+	}
+
 	r, err := zip.OpenReader(zipPath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to open ZIP file: %w", err)
@@ -149,12 +179,20 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
 	defer r.Close()
 
 	var extractedFiles []string
+	fileCount := 0
+	dirCount := 0
+
+	log.Printf("ZIP contains %d entries", len(r.File))
 
 	for _, f := range r.File {
 		// Sanitize file path to prevent directory traversal
 		destPath := filepath.Join(destDir, f.Name)
-		if !strings.HasPrefix(destPath, filepath.Clean(destDir)+string(os.PathSeparator)) {
-			return nil, fmt.Errorf("invalid file path in ZIP: %s", f.Name)
+
+		cleanDestPath := filepath.Clean(destPath)
+		cleanDestDir := filepath.Clean(destDir)
+		if !strings.HasPrefix(cleanDestPath, cleanDestDir+string(os.PathSeparator)) && cleanDestPath != cleanDestDir {
+			log.Printf("ERROR: Invalid file path in ZIP - target: %s, destDir: %s", cleanDestPath, cleanDestDir)
+			return nil, fmt.Errorf("invalid file path in ZIP: %s (target: %s, destDir: %s)", f.Name, cleanDestPath, cleanDestDir)
 		}
 
 		// Create directory structure
@@ -162,6 +200,7 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
 			if err := os.MkdirAll(destPath, 0755); err != nil {
 				return nil, fmt.Errorf("failed to create directory: %w", err)
 			}
+			dirCount++
 			continue
 		}
 
@@ -191,8 +230,10 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
 		}
 
 		extractedFiles = append(extractedFiles, destPath)
+		fileCount++
 	}
 
+	log.Printf("ZIP extraction complete: %d files, %d directories extracted to %s", fileCount, dirCount, destDir)
 	return extractedFiles, nil
 }
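The Clean+HasPrefix containment check kept above is the common idiom; a frequently used alternative is filepath.Rel, which also rejects sibling directories with a shared prefix. Shown only for comparison; the PR keeps the HasPrefix form:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// insideDir reports whether target stays within dir. filepath.Rel sidesteps
// prefix edge cases such as "/data/jobs" vs "/data/jobs-evil".
func insideDir(dir, target string) (bool, error) {
	rel, err := filepath.Rel(filepath.Clean(dir), filepath.Clean(target))
	if err != nil {
		return false, err
	}
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator)), nil
}

func main() {
	ok, _ := insideDir("/data/jobs", "/data/jobs/../secrets")
	fmt.Println(ok) // false: the traversal escapes the directory
}
```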
@@ -261,15 +302,15 @@ func isBlenderSaveFile(filename string) bool {
 	return false
 }
 
-// CreateJobContext creates a tar.gz archive containing all job input files
+// CreateJobContext creates a tar archive containing all job input files
 // Filters out Blender save files (.blend1, .blend2, etc.)
 // Uses temporary directories and streaming to handle large files efficiently
 func (s *Storage) CreateJobContext(jobID int64) (string, error) {
 	jobPath := s.JobPath(jobID)
-	contextPath := filepath.Join(jobPath, "context.tar.gz")
+	contextPath := filepath.Join(jobPath, "context.tar")
 
 	// Create temporary directory for staging
-	tmpDir, err := os.MkdirTemp("", "fuego-context-*")
+	tmpDir, err := os.MkdirTemp("", "jiggablend-context-*")
 	if err != nil {
 		return "", fmt.Errorf("failed to create temporary directory: %w", err)
 	}
@@ -320,17 +361,14 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
 		return "", fmt.Errorf("no files found to include in context")
 	}
 
-	// Create the tar.gz file using streaming
+	// Create the tar file using streaming
 	contextFile, err := os.Create(contextPath)
 	if err != nil {
 		return "", fmt.Errorf("failed to create context file: %w", err)
 	}
 	defer contextFile.Close()
 
-	gzWriter := gzip.NewWriter(contextFile)
-	defer gzWriter.Close()
-
-	tarWriter := tar.NewWriter(gzWriter)
+	tarWriter := tar.NewWriter(contextFile)
 	defer tarWriter.Close()
 
 	// Add each file to the tar archive
@@ -383,9 +421,6 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
 	if err := tarWriter.Close(); err != nil {
 		return "", fmt.Errorf("failed to close tar writer: %w", err)
 	}
-	if err := gzWriter.Close(); err != nil {
-		return "", fmt.Errorf("failed to close gzip writer: %w", err)
-	}
 	if err := contextFile.Close(); err != nil {
 		return "", fmt.Errorf("failed to close context file: %w", err)
 	}
@@ -393,12 +428,12 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
 	return contextPath, nil
 }
 
-// CreateJobContextFromDir creates a context archive (tar.gz) from files in a source directory
+// CreateJobContextFromDir creates a context archive (tar) from files in a source directory
 // This is used during upload to immediately create the context archive as the primary artifact
 // excludeFiles is a set of relative paths (from sourceDir) to exclude from the context
 func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, excludeFiles ...string) (string, error) {
 	jobPath := s.JobPath(jobID)
-	contextPath := filepath.Join(jobPath, "context.tar.gz")
+	contextPath := filepath.Join(jobPath, "context.tar")
 
 	// Ensure job directory exists
 	if err := os.MkdirAll(jobPath, 0755); err != nil {
@@ -498,17 +533,14 @@ func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, exclude
 		return "", fmt.Errorf("multiple .blend files found at root level in context archive (found %d, expected 1)", blendFilesAtRoot)
 	}
 
-	// Create the tar.gz file using streaming
+	// Create the tar file using streaming
 	contextFile, err := os.Create(contextPath)
 	if err != nil {
 		return "", fmt.Errorf("failed to create context file: %w", err)
 	}
 	defer contextFile.Close()
 
-	gzWriter := gzip.NewWriter(contextFile)
-	defer gzWriter.Close()
-
-	tarWriter := tar.NewWriter(gzWriter)
+	tarWriter := tar.NewWriter(contextFile)
 	defer tarWriter.Close()
 
 	// Add each file to the tar archive
@@ -560,9 +592,6 @@ func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, exclude
 	if err := tarWriter.Close(); err != nil {
 		return "", fmt.Errorf("failed to close tar writer: %w", err)
 	}
-	if err := gzWriter.Close(); err != nil {
-		return "", fmt.Errorf("failed to close gzip writer: %w", err)
-	}
 	if err := contextFile.Close(); err != nil {
 		return "", fmt.Errorf("failed to close context file: %w", err)
 	}
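The "Add each file to the tar archive" loop is elided by the hunk context. A minimal sketch of streaming one file into an open tar.Writer, which is what "using streaming" amounts to here; the helper name and error handling are illustrative, not copied from storage.go:

```go
package main

import (
	"archive/tar"
	"io"
	"os"
)

// addFileToTar streams a file into the archive without buffering it in
// memory: a header derived from the file's FileInfo, then a direct io.Copy
// from disk into the tar stream.
func addFileToTar(tw *tar.Writer, path, nameInArchive string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return err
	}
	hdr, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return err
	}
	hdr.Name = nameInArchive // store the job-relative path, not the absolute one

	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	_, err = io.Copy(tw, f)
	return err
}
```

Dropping gzip also makes the runner-side cache cheaper to reuse: .blend files are already compressed internally, so a plain tar trades little size for much less CPU on both ends.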
diff --git a/pkg/scripts/scripts.go b/pkg/scripts/scripts.go
new file mode 100644
index 0000000..ce78d1f
--- /dev/null
+++ b/pkg/scripts/scripts.go
@@ -0,0 +1,13 @@
+package scripts
+
+import _ "embed"
+
+//go:embed scripts/extract_metadata.py
+var ExtractMetadata string
+
+//go:embed scripts/unhide_objects.py
+var UnhideObjects string
+
+//go:embed scripts/render_blender.py.template
+var RenderBlenderTemplate string
+
diff --git a/pkg/scripts/scripts/extract_metadata.py b/pkg/scripts/scripts/extract_metadata.py
new file mode 100644
index 0000000..8ac162e
--- /dev/null
+++ b/pkg/scripts/scripts/extract_metadata.py
@@ -0,0 +1,173 @@
+import bpy
+import json
+import sys
+
+# Make all file paths relative to the blend file location FIRST
+# This must be done immediately after file load, before any other operations
+# to prevent Blender from trying to access external files with absolute paths
+try:
+    bpy.ops.file.make_paths_relative()
+    print("Made all file paths relative to blend file")
+except Exception as e:
+    print(f"Warning: Could not make paths relative: {e}")
+
+# Check for missing addons that the blend file requires
+# Blender marks missing addons with "_missing" suffix in preferences
+missing_files_info = {
+    "checked": False,
+    "has_missing": False,
+    "missing_files": [],
+    "missing_addons": []
+}
+
+try:
+    missing = []
+    for mod in bpy.context.preferences.addons:
+        if mod.module.endswith("_missing"):
+            missing.append(mod.module.rsplit("_", 1)[0])
+
+    missing_files_info["checked"] = True
+    if missing:
+        missing_files_info["has_missing"] = True
+        missing_files_info["missing_addons"] = missing
+        print("Missing add-ons required by this .blend:")
+        for name in missing:
+            print(" -", name)
+    else:
+        print("No missing add-ons detected – file is headless-safe")
+except Exception as e:
+    print(f"Warning: Could not check for missing addons: {e}")
+    missing_files_info["error"] = str(e)
+
+# Get scene
+scene = bpy.context.scene
+
+# Extract frame range from scene settings
+frame_start = scene.frame_start
+frame_end = scene.frame_end
+
+# Also check for actual animation range (keyframes)
+# Find the earliest and latest keyframes across all objects
+animation_start = None
+animation_end = None
+
+for obj in scene.objects:
+    if obj.animation_data and obj.animation_data.action:
+        action = obj.animation_data.action
+        if action.fcurves:
+            for fcurve in action.fcurves:
+                if fcurve.keyframe_points:
+                    for keyframe in fcurve.keyframe_points:
+                        frame = int(keyframe.co[0])
+                        if animation_start is None or frame < animation_start:
+                            animation_start = frame
+                        if animation_end is None or frame > animation_end:
+                            animation_end = frame
+
+# Use animation range if available, otherwise use scene frame range
+# If scene range seems wrong (start == end), prefer animation range
+if animation_start is not None and animation_end is not None:
+    if frame_start == frame_end or (animation_start < frame_start or animation_end > frame_end):
+        # Use animation range if scene range is invalid or animation extends beyond it
+        frame_start = animation_start
+        frame_end = animation_end
+
+# Extract render settings
+render = scene.render
+resolution_x = render.resolution_x
+resolution_y = render.resolution_y
+engine = scene.render.engine.upper()
+
+# Determine output format from file format
+output_format = render.image_settings.file_format
+
+# Extract engine-specific settings
+engine_settings = {}
+
+if engine == 'CYCLES':
+    cycles = scene.cycles
+    engine_settings = {
+        "samples": getattr(cycles, 'samples', 128),
+        "use_denoising": getattr(cycles, 'use_denoising', False),
+        "denoising_radius": getattr(cycles, 'denoising_radius', 0),
+        "denoising_strength": getattr(cycles, 'denoising_strength', 0.0),
+        "device": getattr(cycles, 'device', 'CPU'),
+        "use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False),
+        "adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01,
+        "use_fast_gi": getattr(cycles, 'use_fast_gi', False),
+        "light_tree": getattr(cycles, 'use_light_tree', False),
+        "use_light_linking": getattr(cycles, 'use_light_linking', False),
+        "caustics_reflective": getattr(cycles, 'caustics_reflective', False),
+        "caustics_refractive": getattr(cycles, 'caustics_refractive', False),
+        "blur_glossy": getattr(cycles, 'blur_glossy', 0.0),
+        "max_bounces": getattr(cycles, 'max_bounces', 12),
+        "diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4),
+        "glossy_bounces": getattr(cycles, 'glossy_bounces', 4),
+        "transmission_bounces": getattr(cycles, 'transmission_bounces', 12),
+        "volume_bounces": getattr(cycles, 'volume_bounces', 0),
+        "transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8),
+        "film_transparent": getattr(cycles, 'film_transparent', False),
+        "use_layer_samples": getattr(cycles, 'use_layer_samples', False),
+    }
+elif engine == 'EEVEE' or engine == 'EEVEE_NEXT':
+    eevee = scene.eevee
+    engine_settings = {
+        "taa_render_samples": getattr(eevee, 'taa_render_samples', 64),
+        "use_bloom": getattr(eevee, 'use_bloom', False),
+        "bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8),
+        "bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
+        "bloom_radius": getattr(eevee, 'bloom_radius', 6.5),
+        "use_ssr": getattr(eevee, 'use_ssr', True),
+        "use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
+        "ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'),
+        "use_ssao": getattr(eevee, 'use_ssao', True),
+        "ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'),
+        "ssao_distance": getattr(eevee, 'ssao_distance', 0.2),
+        "ssao_factor": getattr(eevee, 'ssao_factor', 1.0),
+        "use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
+        "use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True),
+        "use_volumetric": getattr(eevee, 'use_volumetric', False),
+        "volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'),
+        "volumetric_samples": getattr(eevee, 'volumetric_samples', 64),
+        "volumetric_start": getattr(eevee, 'volumetric_start', 0.0),
+        "volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
+        "use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
+        "use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True),
+        "use_gtao": getattr(eevee, 'use_gtao', False),
+        "gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'),
+        "use_overscan": getattr(eevee, 'use_overscan', False),
+    }
+else:
+    # For other engines, extract basic samples if available
+    engine_settings = {
+        "samples": getattr(scene, 'samples', 128) if hasattr(scene, 'samples') else 128
+    }
+
+# Extract scene info
+camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA'])
+object_count = len(scene.objects)
+material_count = len(bpy.data.materials)
+
+# Build metadata dictionary
+metadata = {
+    "frame_start": frame_start,
+    "frame_end": frame_end,
+    "render_settings": {
+        "resolution_x": resolution_x,
+        "resolution_y": resolution_y,
+        "output_format": output_format,
+        "engine": engine.lower(),
+        "engine_settings": engine_settings
+    },
+    "scene_info": {
+        "camera_count": camera_count,
+        "object_count": object_count,
+        "material_count": material_count
+    },
+    "missing_files_info": missing_files_info
+}
+
+# Output as JSON
+print(json.dumps(metadata))
+sys.stdout.flush()
+
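The script emits a single JSON object on stdout. On the Go side, parsing is a plain json.Unmarshal; the real struct lives in jiggablend/pkg/types, so this local mirror is an illustrative assumption covering just a few of the fields above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// blendMetadata mirrors a subset of the JSON printed by extract_metadata.py.
type blendMetadata struct {
	FrameStart     int `json:"frame_start"`
	FrameEnd       int `json:"frame_end"`
	RenderSettings struct {
		ResolutionX  int    `json:"resolution_x"`
		ResolutionY  int    `json:"resolution_y"`
		OutputFormat string `json:"output_format"`
		Engine       string `json:"engine"`
	} `json:"render_settings"`
}

func main() {
	// One JSON line scraped from Blender's stdout (values are examples).
	line := `{"frame_start":1,"frame_end":250,"render_settings":{"resolution_x":1920,"resolution_y":1080,"output_format":"PNG","engine":"cycles","engine_settings":{}}}`
	var md blendMetadata
	if err := json.Unmarshal([]byte(line), &md); err != nil {
		panic(err)
	}
	fmt.Printf("frames %d-%d at %dx%d (%s)\n", md.FrameStart, md.FrameEnd,
		md.RenderSettings.ResolutionX, md.RenderSettings.ResolutionY, md.RenderSettings.Engine)
}
```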
material_count + }, + "missing_files_info": missing_files_info +} + +# Output as JSON +print(json.dumps(metadata)) +sys.stdout.flush() + diff --git a/pkg/scripts/scripts/render_blender.py.template b/pkg/scripts/scripts/render_blender.py.template new file mode 100644 index 0000000..a5af2a8 --- /dev/null +++ b/pkg/scripts/scripts/render_blender.py.template @@ -0,0 +1,589 @@ +import bpy +import sys +import os +import json + +# Make all file paths relative to the blend file location FIRST +# This must be done immediately after file load, before any other operations +# to prevent Blender from trying to access external files with absolute paths +try: + bpy.ops.file.make_paths_relative() + print("Made all file paths relative to blend file") +except Exception as e: + print(f"Warning: Could not make paths relative: {e}") + +{{UNHIDE_CODE}} +# Read output format from file (created by Go code) +format_file_path = {{FORMAT_FILE_PATH}} +output_format_override = None +if os.path.exists(format_file_path): + try: + with open(format_file_path, 'r') as f: + output_format_override = f.read().strip().upper() + print(f"Read output format from file: '{output_format_override}'") + except Exception as e: + print(f"Warning: Could not read output format file: {e}") +else: + print(f"Warning: Output format file does not exist: {format_file_path}") + +# Read render settings from JSON file (created by Go code) +render_settings_file = {{RENDER_SETTINGS_FILE}} +render_settings_override = None +if os.path.exists(render_settings_file): + try: + with open(render_settings_file, 'r') as f: + render_settings_override = json.load(f) + print(f"Loaded render settings from job metadata") + except Exception as e: + print(f"Warning: Could not read render settings file: {e}") + +# Get current scene settings (preserve blend file preferences) +scene = bpy.context.scene +current_engine = scene.render.engine +current_device = scene.cycles.device if hasattr(scene, 'cycles') and scene.cycles else None +current_output_format = scene.render.image_settings.file_format + +print(f"Blend file render engine: {current_engine}") +if current_device: + print(f"Blend file device setting: {current_device}") +print(f"Blend file output format: {current_output_format}") + +# Override output format if specified +# The format file always takes precedence (it's written specifically for this job) +if output_format_override: + print(f"Overriding output format from '{current_output_format}' to '{output_format_override}'") + # Map common format names to Blender's format constants + # For video formats (EXR_264_MP4, EXR_AV1_MP4), we render as EXR frames first + format_to_use = output_format_override.upper() + if format_to_use in ['EXR_264_MP4', 'EXR_AV1_MP4']: + format_to_use = 'EXR' # Render as EXR for video formats + + format_map = { + 'PNG': 'PNG', + 'JPEG': 'JPEG', + 'JPG': 'JPEG', + 'EXR': 'OPEN_EXR', + 'OPEN_EXR': 'OPEN_EXR', + 'TARGA': 'TARGA', + 'TIFF': 'TIFF', + 'BMP': 'BMP', + } + blender_format = format_map.get(format_to_use, format_to_use) + try: + scene.render.image_settings.file_format = blender_format + print(f"Successfully set output format to: {blender_format}") + except Exception as e: + print(f"Warning: Could not set output format to {blender_format}: {e}") + print(f"Using blend file's format: {current_output_format}") +else: + print(f"Using blend file's output format: {current_output_format}") + +# Apply render settings from job metadata if provided +# Note: output_format is NOT applied from render_settings_override - it's already set from 
format file above +if render_settings_override: + engine_override = render_settings_override.get('engine', '').upper() + engine_settings = render_settings_override.get('engine_settings', {}) + + # Switch engine if specified + if engine_override and engine_override != current_engine.upper(): + print(f"Switching render engine from '{current_engine}' to '{engine_override}'") + try: + scene.render.engine = engine_override + current_engine = engine_override + print(f"Successfully switched to {engine_override} engine") + except Exception as e: + print(f"Warning: Could not switch engine to {engine_override}: {e}") + print(f"Using blend file's engine: {current_engine}") + + # Apply engine-specific settings + if engine_settings: + if current_engine.upper() == 'CYCLES': + cycles = scene.cycles + print("Applying Cycles render settings from job metadata...") + for key, value in engine_settings.items(): + try: + if hasattr(cycles, key): + setattr(cycles, key, value) + print(f" Set Cycles.{key} = {value}") + else: + print(f" Warning: Cycles has no attribute '{key}'") + except Exception as e: + print(f" Warning: Could not set Cycles.{key} = {value}: {e}") + elif current_engine.upper() in ['EEVEE', 'EEVEE_NEXT']: + eevee = scene.eevee + print("Applying EEVEE render settings from job metadata...") + for key, value in engine_settings.items(): + try: + if hasattr(eevee, key): + setattr(eevee, key, value) + print(f" Set EEVEE.{key} = {value}") + else: + print(f" Warning: EEVEE has no attribute '{key}'") + except Exception as e: + print(f" Warning: Could not set EEVEE.{key} = {value}: {e}") + + # Apply resolution if specified + if 'resolution_x' in render_settings_override: + try: + scene.render.resolution_x = render_settings_override['resolution_x'] + print(f"Set resolution_x = {render_settings_override['resolution_x']}") + except Exception as e: + print(f"Warning: Could not set resolution_x: {e}") + if 'resolution_y' in render_settings_override: + try: + scene.render.resolution_y = render_settings_override['resolution_y'] + print(f"Set resolution_y = {render_settings_override['resolution_y']}") + except Exception as e: + print(f"Warning: Could not set resolution_y: {e}") + +# Only override device selection if using Cycles (other engines handle GPU differently) +if current_engine == 'CYCLES': + # Check if CPU rendering is forced + force_cpu = False + if render_settings_override and render_settings_override.get('force_cpu'): + force_cpu = render_settings_override.get('force_cpu', False) + print("Force CPU rendering is enabled - skipping GPU detection") + + # Ensure Cycles addon is enabled + try: + if 'cycles' not in bpy.context.preferences.addons: + bpy.ops.preferences.addon_enable(module='cycles') + print("Enabled Cycles addon") + except Exception as e: + print(f"Warning: Could not enable Cycles addon: {e}") + + # If CPU is forced, skip GPU detection and set CPU directly + if force_cpu: + scene.cycles.device = 'CPU' + print("Forced CPU rendering (skipping GPU detection)") + else: + # Access Cycles preferences + prefs = bpy.context.preferences + try: + cycles_prefs = prefs.addons['cycles'].preferences + except (KeyError, AttributeError): + try: + cycles_addon = prefs.addons.get('cycles') + if cycles_addon: + cycles_prefs = cycles_addon.preferences + else: + raise Exception("Cycles addon not found") + except Exception as e: + print(f"ERROR: Could not access Cycles preferences: {e}") + import traceback + traceback.print_exc() + sys.exit(1) + + # Check all devices and choose the best GPU type + # Device type 
+        device_type_preference = ['OPTIX', 'CUDA', 'HIP', 'ONEAPI', 'METAL']
+        gpu_available = False
+        best_device_type = None
+        best_gpu_devices = []
+        devices_by_type = {}  # {device_type: [devices]}
+        seen_device_ids = set()  # Track device IDs to avoid duplicates
+
+        print("Checking for GPU availability...")
+
+        # Try each device type to see what is available
+        for device_type in device_type_preference:
+            try:
+                cycles_prefs.compute_device_type = device_type
+                cycles_prefs.refresh_devices()
+
+                # Get devices for this type
+                devices = None
+                if hasattr(cycles_prefs, 'devices'):
+                    try:
+                        devices_prop = cycles_prefs.devices
+                        if devices_prop:
+                            devices = list(devices_prop) if hasattr(devices_prop, '__iter__') else [devices_prop]
+                    except Exception:
+                        pass
+
+                if not devices or len(devices) == 0:
+                    try:
+                        devices = cycles_prefs.get_devices()
+                    except Exception:
+                        pass
+
+                if devices and len(devices) > 0:
+                    # Categorize devices by their type attribute, avoiding duplicates
+                    for device in devices:
+                        if hasattr(device, 'type'):
+                            device_type_str = str(device.type).upper()
+                            device_id = getattr(device, 'id', None)
+
+                            # The same physical device shows up again when checking a
+                            # different compute_device_type, so dedupe by device ID
+                            if device_id and device_id in seen_device_ids:
+                                continue
+                            if device_id:
+                                seen_device_ids.add(device_id)
+
+                            if device_type_str not in devices_by_type:
+                                devices_by_type[device_type_str] = []
+                            devices_by_type[device_type_str].append(device)
+            except (ValueError, AttributeError, KeyError, TypeError):
+                # Device type not supported on this build, try the next one
+                continue
+            except Exception as e:
+                # Other errors - log but continue
+                print(f"  Error checking {device_type}: {e}")
+                continue
+
+        # Print what we found
+        print(f"Found devices by type: {list(devices_by_type.keys())}")
+        for dev_type, dev_list in devices_by_type.items():
+            print(f"  {dev_type}: {len(dev_list)} device(s)")
+            for device in dev_list:
+                device_name = getattr(device, 'name', 'Unknown')
+                print(f"    - {device_name}")
+
+        # Choose the best GPU type based on the preference order; every entry in
+        # device_type_preference is a GPU backend, so no per-device filtering is needed
+        for preferred_type in device_type_preference:
+            if preferred_type in devices_by_type:
+                gpu_devices = devices_by_type[preferred_type]
+                if gpu_devices:
+                    best_device_type = preferred_type
+                    best_gpu_devices = [(d, preferred_type) for d in gpu_devices]
+                    print(f"Selected {preferred_type} as best GPU type with {len(gpu_devices)} device(s)")
+                    break
+
+        # Second pass: enable the best GPU we found
+        if best_device_type and best_gpu_devices:
+            print(f"\nEnabling GPU devices for {best_device_type}...")
+            try:
+                # Set the device type again
+                cycles_prefs.compute_device_type = best_device_type
+                cycles_prefs.refresh_devices()
+
+                # First, disable all CPU devices to ensure only the GPU is used
+                print("  Disabling CPU devices...")
+                all_devices = cycles_prefs.devices if hasattr(cycles_prefs, 'devices') else cycles_prefs.get_devices()
+                if all_devices:
+                    for device in all_devices:
+                        if hasattr(device, 'type') and str(device.type).upper() == 'CPU':
+                            try:
+                                device.use = False
+                                device_name = getattr(device, 'name', 'Unknown')
+                                print(f"  Disabled CPU: {device_name}")
+                            except Exception as e:
+                                print(f"  Warning: Could not disable CPU device {getattr(device, 'name', 'Unknown')}: {e}")
+
+                # Enable all GPU devices
+                enabled_count = 0
+                for device, device_type in best_gpu_devices:
+                    try:
+                        device.use = True
+                        enabled_count += 1
+                        device_name = getattr(device, 'name', 'Unknown')
+                        print(f"  Enabled: {device_name}")
+                    except Exception as e:
+                        print(f"  Warning: Could not enable device {getattr(device, 'name', 'Unknown')}: {e}")
+
+                # Enable ray tracing acceleration for supported device types
+                try:
+                    if best_device_type == 'HIP':
+                        # HIPRT (HIP Ray Tracing) for AMD GPUs
+                        if hasattr(cycles_prefs, 'use_hiprt'):
+                            cycles_prefs.use_hiprt = True
+                            print("  Enabled HIPRT (HIP Ray Tracing) for faster rendering")
+                        elif hasattr(scene.cycles, 'use_hiprt'):
+                            scene.cycles.use_hiprt = True
+                            print("  Enabled HIPRT (HIP Ray Tracing) for faster rendering")
+                        else:
+                            print("  HIPRT not available (requires Blender 4.0+)")
+                    elif best_device_type == 'OPTIX':
+                        # OptiX ray tracing is implied by the OPTIX device type;
+                        # also enable OptiX denoising if this build exposes the flag
+                        if hasattr(scene.cycles, 'use_optix_denoising'):
+                            scene.cycles.use_optix_denoising = True
+                            print("  Enabled OptiX denoising")
+                        print("  OptiX ray tracing is active (using OPTIX device type)")
+                    elif best_device_type == 'CUDA':
+                        # CUDA picks up OptiX features automatically where available
+                        if hasattr(scene.cycles, 'use_optix_denoising'):
+                            scene.cycles.use_optix_denoising = True
+                            print("  Enabled OptiX denoising (if OptiX available)")
+                        print("  CUDA ray tracing active")
+                    elif best_device_type == 'METAL':
+                        # MetalRT for Apple Silicon (if available)
+                        if hasattr(scene.cycles, 'use_metalrt'):
+                            scene.cycles.use_metalrt = True
+                            print("  Enabled MetalRT (Metal Ray Tracing) for faster rendering")
+                        elif hasattr(cycles_prefs, 'use_metalrt'):
+                            cycles_prefs.use_metalrt = True
+                            print("  Enabled MetalRT (Metal Ray Tracing) for faster rendering")
+                        else:
+                            print("  MetalRT not available")
+                    elif best_device_type == 'ONEAPI':
+                        # Intel oneAPI - Embree may be exposed as a toggle
+                        if hasattr(scene.cycles, 'use_embree'):
+                            scene.cycles.use_embree = True
+                            print("  Enabled Embree ray tracing")
+                        print("  oneAPI ray tracing active")
+                except Exception as e:
+                    print(f"  Could not enable ray tracing acceleration: {e}")
+
+                print(f"SUCCESS: Enabled {enabled_count} GPU device(s) for {best_device_type}")
+                gpu_available = True
+            except Exception as e:
+                print(f"ERROR: Failed to enable GPU devices: {e}")
+                import traceback
+                traceback.print_exc()
+
+        # Set device based on availability (prefer GPU, fall back to CPU)
+        if gpu_available:
+            scene.cycles.device = 'GPU'
+            print(f"Using GPU for rendering (blend file had: {current_device})")
+        else:
+            scene.cycles.device = 'CPU'
+            print(f"GPU not available, using CPU for rendering (blend file had: {current_device})")
+
+    # Verify device setting
+    final_device = scene.cycles.device
+    print(f"Final Cycles device: {final_device}")
+else:
+    # For other engines (EEVEE, etc.), respect blend file settings
+    print(f"Using {current_engine} engine - respecting blend file settings")
+
+# Enable the OpenGL GPU backend for EEVEE rendering (if using EEVEE)
+if current_engine in ['EEVEE', 'EEVEE_NEXT']:
+    try:
+        if hasattr(bpy.context.preferences.system, 'gpu_backend'):
+            bpy.context.preferences.system.gpu_backend = 'OPENGL'
+            print("Enabled OpenGL GPU backend for EEVEE")
+    except Exception as e:
+        print(f"Could not set EEVEE GPU backend: {e}")
+
+# Enable GPU acceleration for compositing (if compositing is enabled)
+try:
+    if scene.use_nodes and hasattr(scene, 'node_tree') and scene.node_tree:
+        if hasattr(scene.node_tree, 'use_gpu_compositing'):
+            scene.node_tree.use_gpu_compositing = True
+            print("Enabled GPU compositing")
+except Exception as e:
+    print(f"Could not enable GPU compositing: {e}")
+
+# CRITICAL: Initialize headless rendering to prevent black images
+# This ensures the render engine is properly initialized before rendering
+print("Initializing headless rendering context...")
+try:
+    # Ensure a world exists and has proper settings
+    if not scene.world:
+        # Create a default world if none exists
+        world = bpy.data.worlds.new("World")
+        scene.world = world
+        print("Created default world")
+
+    # Ensure the world has a background shader (not just black)
+    if scene.world:
+        # Enable nodes if not already enabled
+        if not scene.world.use_nodes:
+            scene.world.use_nodes = True
+            print("Enabled world nodes")
+
+        world_nodes = scene.world.node_tree
+        if world_nodes:
+            # Find or create a background shader
+            bg_shader = None
+            for node in world_nodes.nodes:
+                if node.type == 'BACKGROUND':
+                    bg_shader = node
+                    break
+
+            if not bg_shader:
+                bg_shader = world_nodes.nodes.new(type='ShaderNodeBackground')
+                # Connect to output
+                output = world_nodes.nodes.get('World Output')
+                if not output:
+                    output = world_nodes.nodes.new(type='ShaderNodeOutputWorld')
+                    output.name = 'World Output'
+                if output and bg_shader:
+                    # Connect background to the surface input
+                    if 'Surface' in output.inputs and 'Background' in bg_shader.outputs:
+                        world_nodes.links.new(bg_shader.outputs['Background'], output.inputs['Surface'])
+                        print("Created background shader for world")
+
+            # Ensure the background has some color (not pure black)
+            if bg_shader:
+                # Only adjust if it's pure black (0,0,0)
+                if hasattr(bg_shader.inputs, 'Color'):
+                    color = bg_shader.inputs['Color'].default_value
+                    if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
+                        # Use a very dark gray instead of pure black
+                        bg_shader.inputs['Color'].default_value = (0.01, 0.01, 0.01, 1.0)
+                        print("Adjusted world background color to prevent black renders")
+        else:
+            # Fallback: use the legacy world color if nodes aren't working
+            if hasattr(scene.world, 'color'):
+                color = scene.world.color
+                if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
+                    scene.world.color = (0.01, 0.01, 0.01)
+                    print("Adjusted legacy world color to prevent black renders")
+
+    # For EEVEE, force a depsgraph update to initialize the render engine
+    if current_engine in ['EEVEE', 'EEVEE_NEXT']:
+        # Force EEVEE to update its internal state
+        try:
+            # Update the depsgraph to ensure everything is initialized
+            depsgraph = bpy.context.evaluated_depsgraph_get()
+            if depsgraph:
+                depsgraph.update()
+                print("Forced EEVEE depsgraph update for headless rendering")
+        except Exception as e:
+            print(f"Warning: Could not force EEVEE update: {e}")
+
+        # Ensure EEVEE settings are applied
+        try:
+            # Touch materials to force shader compilation
+            for obj in scene.objects:
+                if obj.type == 'MESH' and obj.data.materials:
+                    for mat in obj.data.materials:
+                        if mat and mat.use_nodes:
+                            # Touch the material to force an update
+                            mat.use_nodes = mat.use_nodes
+            print("Forced material updates for EEVEE")
+        except Exception as e:
+            print(f"Warning: Could not update materials: {e}")
+
+    # For Cycles, ensure proper initialization
+    if current_engine == 'CYCLES':
+        # Ensure samples are set (even if 1 for preview)
+        if not hasattr(scene.cycles, 'samples') or scene.cycles.samples < 1:
+            scene.cycles.samples = 1
+            print("Set minimum Cycles samples")
+
+        # Check for lights in the scene
+        lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
+        print(f"Found {len(lights)} light(s) in scene")
+        if len(lights) == 0:
+            print("WARNING: No lights found in scene - rendering may be black!")
+            print("         Consider adding lights or ensuring world background emits light")
+
+        # Ensure world background emits light (critical for Cycles)
+        if scene.world and scene.world.use_nodes:
+            world_nodes = scene.world.node_tree
+            if world_nodes:
+                bg_shader = None
+                for node in world_nodes.nodes:
+                    if node.type == 'BACKGROUND':
+                        bg_shader = node
+                        break
+
+                if bg_shader:
+                    # Check and set strength - Cycles needs this to emit light!
+                    if hasattr(bg_shader.inputs, 'Strength'):
+                        strength = bg_shader.inputs['Strength'].default_value
+                        if strength <= 0.0:
+                            bg_shader.inputs['Strength'].default_value = 1.0
+                            print("Set world background strength to 1.0 for Cycles lighting")
+                        else:
+                            print(f"World background strength: {strength}")
+                    # Also ensure color is not pure black
+                    if hasattr(bg_shader.inputs, 'Color'):
+                        color = bg_shader.inputs['Color'].default_value
+                        if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
+                            bg_shader.inputs['Color'].default_value = (1.0, 1.0, 1.0, 1.0)
+                            print("Set world background color to white for Cycles lighting")
+
+        # Check film_transparent - if enabled, the background renders transparent/black
+        # (the flag lives on scene.render in Blender 2.80+, on scene.cycles in 2.79 and earlier)
+        film_transparent = getattr(scene.render, 'film_transparent', False) or getattr(scene.cycles, 'film_transparent', False)
+        if film_transparent:
+            print("WARNING: film_transparent is enabled - background will be transparent")
+            print("         If you see black renders, try disabling film_transparent")
+
+        # Force Cycles to update/compile materials and shaders
+        try:
+            # Update the depsgraph to ensure everything is initialized
+            depsgraph = bpy.context.evaluated_depsgraph_get()
+            if depsgraph:
+                depsgraph.update()
+                print("Forced Cycles depsgraph update")
+
+            # Force material updates to ensure shaders are compiled
+            for obj in scene.objects:
+                if obj.type == 'MESH' and obj.data.materials:
+                    for mat in obj.data.materials:
+                        if mat and mat.use_nodes:
+                            # Force material update
+                            mat.use_nodes = mat.use_nodes
+            print("Forced Cycles material updates")
+        except Exception as e:
+            print(f"Warning: Could not force Cycles updates: {e}")
+
+        # Verify device is actually set correctly
+        if hasattr(scene.cycles, 'device'):
+            actual_device = scene.cycles.device
+            print(f"Cycles device setting: {actual_device}")
+            if actual_device == 'GPU':
+                # Try to verify GPU is actually available
+                try:
+                    prefs = bpy.context.preferences
+                    cycles_prefs = prefs.addons['cycles'].preferences
+                    devices = cycles_prefs.devices
+                    enabled_devices = [d for d in devices if d.use]
+                    if len(enabled_devices) == 0:
+                        print("WARNING: GPU device set but no GPU devices are enabled!")
+                        print("         Falling back to CPU may cause issues")
+                except Exception as e:
+                    print(f"Could not verify GPU devices: {e}")
+
+    # Ensure a camera exists and is active
+    if scene.camera is None:
+        # Find the first camera in the scene
+        for obj in scene.objects:
+            if obj.type == 'CAMERA':
+                scene.camera = obj
+                print(f"Set active camera: {obj.name}")
+                break
+
+    print("Headless rendering initialization complete")
+except Exception as e:
+    print(f"Warning: Headless rendering initialization had issues: {e}")
+    import traceback
+    traceback.print_exc()
+
+# Final verification before rendering
+print("\n=== Pre-render verification ===")
+try:
+    scene = bpy.context.scene
+    print(f"Render engine: {scene.render.engine}")
+    print(f"Active camera: {scene.camera.name if scene.camera else 'None'}")
+
+    if scene.render.engine == 'CYCLES':
+        print(f"Cycles device: {scene.cycles.device}")
+        print(f"Cycles samples: {scene.cycles.samples}")
+        lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
+        print(f"Lights in scene: {len(lights)}")
+        if scene.world:
+            if scene.world.use_nodes:
+                world_nodes = scene.world.node_tree
+                if world_nodes:
+                    bg_shader = None
+                    for node in world_nodes.nodes:
+                        if node.type == 'BACKGROUND':
+                            bg_shader = node
+                            break
+                    if bg_shader:
+                        if hasattr(bg_shader.inputs, 'Strength'):
+                            strength = bg_shader.inputs['Strength'].default_value
+                            print(f"World background strength: {strength}")
+                        if hasattr(bg_shader.inputs, 'Color'):
+                            color = bg_shader.inputs['Color'].default_value
+                            print(f"World background color: ({color[0]:.2f}, {color[1]:.2f}, {color[2]:.2f})")
+            else:
+                print("World exists but nodes are disabled")
+        else:
+            print("WARNING: No world in scene!")
+
+    print("=== Verification complete ===\n")
+except Exception as e:
+    print(f"Warning: Verification failed: {e}")
+
+print("Device configuration complete - blend file settings preserved, device optimized")
+sys.stdout.flush()
+
diff --git a/pkg/scripts/scripts/unhide_objects.py b/pkg/scripts/scripts/unhide_objects.py
new file mode 100644
index 0000000..3ad7855
--- /dev/null
+++ b/pkg/scripts/scripts/unhide_objects.py
@@ -0,0 +1,29 @@
+# Fix objects and collections hidden from render
+vl = bpy.context.view_layer
+
+# 1. Objects hidden in the view layer
+print("Checking for objects hidden from render that need to be enabled...")
+try:
+    for obj in bpy.data.objects:
+        if obj.hide_get(view_layer=vl):
+            if any(k in obj.name.lower() for k in ["scrotum", "cage", "genital", "penis", "dick", "collision", "body.001", "couch"]):
+                obj.hide_set(False, view_layer=vl)
+                print("Enabled object:", obj.name)
+except Exception as e:
+    print(f"Warning: Could not check/fix hidden render objects: {e}")
+
+# 2. Collections disabled for render, or excluded/held out in the view layer
+print("Checking for collections hidden from render that need to be enabled...")
+try:
+    for col in bpy.data.collections:
+        if col.hide_render or (vl.layer_collection.children.get(col.name) and vl.layer_collection.children[col.name].exclude):
+            if any(k in col.name.lower() for k in ["genital", "nsfw", "dick", "private", "hidden", "cage", "scrotum", "collision"]):
+                col.hide_render = False
+                if col.name in vl.layer_collection.children:
+                    vl.layer_collection.children[col.name].exclude = False
+                    vl.layer_collection.children[col.name].holdout = False
+                    vl.layer_collection.children[col.name].indirect_only = False
+                print("Enabled collection:", col.name)
+except Exception as e:
+    print(f"Warning: Could not check/fix hidden render collections: {e}")
+
diff --git a/pkg/types/types.go b/pkg/types/types.go
index c0794e0..5b88782 100644
--- a/pkg/types/types.go
+++ b/pkg/types/types.go
@@ -140,6 +140,8 @@ type CreateJobRequest struct {
 	AllowParallelRunners *bool           `json:"allow_parallel_runners,omitempty"` // Optional for render jobs, defaults to true
 	RenderSettings       *RenderSettings `json:"render_settings,omitempty"`        // Optional: Override blend file render settings
 	UploadSessionID      *string         `json:"upload_session_id,omitempty"`      // Optional: Session ID from file upload
+	UnhideObjects        *bool           `json:"unhide_objects,omitempty"`         // Optional: Enable unhide tweaks for objects/collections
+	EnableExecution      *bool           `json:"enable_execution,omitempty"`       // Optional: Enable auto-execution in Blender (adds --enable-autoexec flag, defaults to false)
 }
 
 // UpdateJobProgressRequest represents a request to update job progress
@@ -227,9 +229,11 @@ type TaskLogEntry struct {
 type BlendMetadata struct {
 	FrameStart       int               `json:"frame_start"`
 	FrameEnd         int               `json:"frame_end"`
-	RenderSettings   RenderSettings    `json:"render_settings"`
-	SceneInfo        SceneInfo         `json:"scene_info"`
-	MissingFilesInfo *MissingFilesInfo `json:"missing_files_info,omitempty"`
+	RenderSettings   RenderSettings    `json:"render_settings"`
+	SceneInfo        SceneInfo         `json:"scene_info"`
+	MissingFilesInfo *MissingFilesInfo `json:"missing_files_info,omitempty"`
+	UnhideObjects    *bool             `json:"unhide_objects,omitempty"`   // Enable unhide tweaks for objects/collections
+	EnableExecution  *bool             `json:"enable_execution,omitempty"` // Enable auto-execution in Blender (adds --enable-autoexec flag, defaults to false)
 }
 
 // MissingFilesInfo represents information about missing files/addons
diff --git a/web/src/components/FileExplorer.jsx b/web/src/components/FileExplorer.jsx
index 77991dc..99a0b74 100644
--- a/web/src/components/FileExplorer.jsx
+++ b/web/src/components/FileExplorer.jsx
@@ -1,7 +1,7 @@
 import { useState } from 'react';
 
 export default function FileExplorer({ files, onDownload, onPreview, isImageFile }) {
-  const [expandedPaths, setExpandedPaths] = useState(new Set());
+  const [expandedPaths, setExpandedPaths] = useState(new Set()); // Root folder collapsed by default
 
   // Build directory tree from file paths
   const buildTree = (files) => {
@@ -70,7 +70,7 @@ export default function FileExplorer({ files, onDownload, onPreview, isImageFile
     const file = item.file;
     const isImage = isImageFile && isImageFile(file.file_name);
     const sizeMB = (file.file_size / 1024 / 1024).toFixed(2);
-    const isArchive = file.file_name.endsWith('.tar.gz') || file.file_name.endsWith('.zip');
+    const isArchive = file.file_name.endsWith('.tar') || file.file_name.endsWith('.zip');
 
     return (
-          <p>Frames: {job.frame_start} - {job.frame_end}</p>
-          <p>Format: {job.output_format}</p>
+          {job.frame_start !== undefined && job.frame_end !== undefined && (
+            <p>Frames: {job.frame_start} - {job.frame_end}</p>
+          )}
+          {job.output_format && <p>Format: {job.output_format}</p>}
           <p>Created: {new Date(job.created_at).toLocaleString()}</p>
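
For reference, the render-settings file that render_blender.py.template reads via {{RENDER_SETTINGS_FILE}} has this shape. Below is a minimal sketch, assuming the Go side writes the file before substituting the placeholder; the key names (engine, engine_settings, resolution_x, resolution_y, force_cpu) come from the template itself, while the path and the values are illustrative assumptions only.

    # Hypothetical example: a settings file in the shape the template reads.
    # Key names match the template; the path and values are assumptions.
    import json

    render_settings = {
        "engine": "CYCLES",                   # compared against the blend file's engine
        "engine_settings": {"samples": 128},  # applied via setattr on scene.cycles
        "resolution_x": 1920,
        "resolution_y": 1080,
        "force_cpu": False,                   # True makes the template skip GPU detection
    }

    with open("render_settings.json", "w") as f:
        json.dump(render_settings, f)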
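Similarly, a sketch of a job-creation payload exercising the two new CreateJobRequest fields, where unhide_objects presumably controls whether unhide_objects.py is injected at the {{UNHIDE_CODE}} placeholder. The JSON keys match pkg/types/types.go; the endpoint URL and host are placeholders, since route registration is outside this diff.

    # Hypothetical example: field names come from CreateJobRequest in
    # pkg/types/types.go; the URL is a placeholder, not defined in this diff.
    import json
    import urllib.request

    payload = {
        "unhide_objects": True,     # apply the unhide tweaks to objects/collections
        "enable_execution": False,  # keep Blender auto-execution (--enable-autoexec) off
    }

    req = urllib.request.Request(
        "http://localhost:8080/api/jobs",  # assumed route
        data=json.dumps(payload).encode(),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    # urllib.request.urlopen(req)  # run against a live manager to create the job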