Implement job metadata extraction and task management features. Add validation for frame range limits, enhance job and task data structures, and introduce new API endpoints for metadata and task retrieval. Update client-side components to handle metadata extraction and display task statuses. Improve error handling in API responses.

2025-11-22 06:37:32 -06:00
parent 27a09aedd6
commit c9ade39ad9
10 changed files with 1078 additions and 88 deletions
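For readability of the diff below, here is a minimal sketch of the fuego/pkg/types identifiers it relies on (types.BlendMetadata, the TaskTypeMetadata and TaskStatusPending constants, and the new BlendMetadata field on Job). The field names and constant values shown are assumptions for illustration only; the real definitions live in pkg/types and are not part of this changeset.

package types

// BlendMetadata is scene information extracted from an uploaded .blend file,
// stored as JSON in the jobs.blend_metadata column. Fields are illustrative.
type BlendMetadata struct {
	FrameStart int `json:"frame_start"`
	FrameEnd   int `json:"frame_end"`
}

// Constants the upload handler uses when queuing the extraction task.
// The string values are assumed, not confirmed by this diff.
const (
	TaskTypeMetadata  = "metadata"
	TaskStatusPending = "pending"
)

// Job carries an optional pointer; only the field relevant here is shown.
type Job struct {
	BlendMetadata *BlendMetadata `json:"blend_metadata,omitempty"`
}

Keeping the field a pointer lets handleListJobs and handleGetJob omit blend_metadata from the JSON response for jobs whose metadata has not been extracted yet.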


@@ -8,6 +8,7 @@ import (
"log"
"net/http"
"strconv"
"strings"
"time"
"fuego/pkg/types"
@@ -39,6 +40,13 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) {
return
}
// Validate frame range limits (prevent abuse)
const maxFrameRange = 10000
if req.FrameEnd-req.FrameStart+1 > maxFrameRange {
s.respondError(w, http.StatusBadRequest, fmt.Sprintf("Frame range too large. Maximum allowed: %d frames", maxFrameRange))
return
}
if req.OutputFormat == "" {
req.OutputFormat = "PNG"
}
@@ -116,7 +124,7 @@ func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) {
rows, err := s.db.Query(
`SELECT id, user_id, name, status, progress, frame_start, frame_end, output_format,
-allow_parallel_runners, timeout_seconds, created_at, started_at, completed_at, error_message
+allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message
FROM jobs WHERE user_id = ? ORDER BY created_at DESC`,
userID,
)
@@ -130,11 +138,12 @@ func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) {
for rows.Next() {
var job types.Job
var startedAt, completedAt sql.NullTime
var blendMetadataJSON sql.NullString
err := rows.Scan(
&job.ID, &job.UserID, &job.Name, &job.Status, &job.Progress,
&job.FrameStart, &job.FrameEnd, &job.OutputFormat, &job.AllowParallelRunners, &job.TimeoutSeconds,
-&job.CreatedAt, &startedAt, &completedAt, &job.ErrorMessage,
+&blendMetadataJSON, &job.CreatedAt, &startedAt, &completedAt, &job.ErrorMessage,
)
if err != nil {
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to scan job: %v", err))
@@ -147,6 +156,12 @@ func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) {
if completedAt.Valid {
job.CompletedAt = &completedAt.Time
}
if blendMetadataJSON.Valid && blendMetadataJSON.String != "" {
var metadata types.BlendMetadata
if err := json.Unmarshal([]byte(blendMetadataJSON.String), &metadata); err == nil {
job.BlendMetadata = &metadata
}
}
jobs = append(jobs, job)
}
@@ -171,15 +186,16 @@ func (s *Server) handleGetJob(w http.ResponseWriter, r *http.Request) {
var job types.Job
var startedAt, completedAt sql.NullTime
var blendMetadataJSON sql.NullString
err = s.db.QueryRow(
`SELECT id, user_id, name, status, progress, frame_start, frame_end, output_format,
-allow_parallel_runners, timeout_seconds, created_at, started_at, completed_at, error_message
+allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message
FROM jobs WHERE id = ? AND user_id = ?`,
jobID, userID,
).Scan(
&job.ID, &job.UserID, &job.Name, &job.Status, &job.Progress,
&job.FrameStart, &job.FrameEnd, &job.OutputFormat, &job.AllowParallelRunners, &job.TimeoutSeconds,
-&job.CreatedAt, &startedAt, &completedAt, &job.ErrorMessage,
+&blendMetadataJSON, &job.CreatedAt, &startedAt, &completedAt, &job.ErrorMessage,
)
if err == sql.ErrNoRows {
@@ -197,6 +213,12 @@ func (s *Server) handleGetJob(w http.ResponseWriter, r *http.Request) {
if completedAt.Valid {
job.CompletedAt = &completedAt.Time
}
if blendMetadataJSON.Valid && blendMetadataJSON.String != "" {
var metadata types.BlendMetadata
if err := json.Unmarshal([]byte(blendMetadataJSON.String), &metadata); err == nil {
job.BlendMetadata = &metadata
}
}
s.respondJSON(w, http.StatusOK, job)
}
@@ -307,6 +329,25 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) {
fileID, _ := result.LastInsertId()
// If this is a blend file, create a metadata extraction task
if strings.HasSuffix(strings.ToLower(header.Filename), ".blend") {
// Create metadata extraction task
metadataTaskTimeout := 300 // 5 minutes for metadata extraction
taskResult, err := s.db.Exec(
`INSERT INTO tasks (job_id, frame_start, frame_end, task_type, status, timeout_seconds, max_retries)
VALUES (?, ?, ?, ?, ?, ?, ?)`,
jobID, 0, 0, types.TaskTypeMetadata, types.TaskStatusPending, metadataTaskTimeout, 1,
)
if err != nil {
log.Printf("Failed to create metadata extraction task: %v", err)
} else {
metadataTaskID, _ := taskResult.LastInsertId()
log.Printf("Created metadata extraction task %d for job %d", metadataTaskID, jobID)
// Try to distribute the task immediately
go s.distributeTasksToRunners()
}
}
s.respondJSON(w, http.StatusCreated, map[string]interface{}{
"id": fileID,
"file_name": header.Filename,
@@ -524,6 +565,87 @@ func (s *Server) handleStreamVideo(w http.ResponseWriter, r *http.Request) {
}
}
// handleListJobTasks lists all tasks for a job
func (s *Server) handleListJobTasks(w http.ResponseWriter, r *http.Request) {
userID, err := getUserID(r)
if err != nil {
s.respondError(w, http.StatusUnauthorized, err.Error())
return
}
jobID, err := parseID(r, "id")
if err != nil {
s.respondError(w, http.StatusBadRequest, err.Error())
return
}
// Verify job belongs to user
var jobUserID int64
err = s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&jobUserID)
if err == sql.ErrNoRows {
s.respondError(w, http.StatusNotFound, "Job not found")
return
}
if err != nil {
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify job: %v", err))
return
}
if jobUserID != userID {
s.respondError(w, http.StatusForbidden, "Access denied")
return
}
rows, err := s.db.Query(
`SELECT id, job_id, runner_id, frame_start, frame_end, status, task_type,
current_step, retry_count, max_retries, output_path, created_at, started_at,
completed_at, error_message, timeout_seconds
FROM tasks WHERE job_id = ? ORDER BY frame_start ASC`,
jobID,
)
if err != nil {
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query tasks: %v", err))
return
}
defer rows.Close()
tasks := []types.Task{}
for rows.Next() {
var task types.Task
var runnerID sql.NullInt64
var startedAt, completedAt sql.NullTime
var timeoutSeconds sql.NullInt64
err := rows.Scan(
&task.ID, &task.JobID, &runnerID, &task.FrameStart, &task.FrameEnd,
&task.Status, &task.TaskType, &task.CurrentStep, &task.RetryCount,
&task.MaxRetries, &task.OutputPath, &task.CreatedAt, &startedAt,
&completedAt, &task.ErrorMessage, &timeoutSeconds,
)
if err != nil {
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to scan task: %v", err))
return
}
if runnerID.Valid {
task.RunnerID = &runnerID.Int64
}
if startedAt.Valid {
task.StartedAt = &startedAt.Time
}
if completedAt.Valid {
task.CompletedAt = &completedAt.Time
}
if timeoutSeconds.Valid {
timeout := int(timeoutSeconds.Int64)
task.TimeoutSeconds = &timeout
}
tasks = append(tasks, task)
}
s.respondJSON(w, http.StatusOK, tasks)
}
// handleGetTaskLogs retrieves logs for a specific task
func (s *Server) handleGetTaskLogs(w http.ResponseWriter, r *http.Request) {
userID, err := getUserID(r)