@@ -1019,6 +1019,10 @@ func (s *Manager) handleGetJobStatusForRunner(w http.ResponseWriter, r *http.Req
 			&job.CreatedAt, &startedAt, &completedAt, &errorMessage,
 		)
 	})
+	if err == sql.ErrNoRows {
+		s.respondError(w, http.StatusNotFound, "Job not found")
+		return
+	}
 	if err != nil {
 		s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query job: %v", err))
 		return
@@ -1037,15 +1041,6 @@ func (s *Manager) handleGetJobStatusForRunner(w http.ResponseWriter, r *http.Req
 		job.OutputFormat = &outputFormat.String
 	}
 
-	if err == sql.ErrNoRows {
-		s.respondError(w, http.StatusNotFound, "Job not found")
-		return
-	}
-	if err != nil {
-		s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query job: %v", err))
-		return
-	}
-
 	if startedAt.Valid {
 		job.StartedAt = &startedAt.Time
 	}
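Net effect of the two hunks above: the not-found and generic error checks now run once, immediately after the query closure returns, instead of only after `job.OutputFormat` has already been read from the scanned row. In isolation, the resulting ordering looks like the sketch below; `getJobName` and the `jobs` table are illustrative names, not the handler's actual code:

```go
package example

import (
	"database/sql"
	"fmt"
)

// getJobName is an illustrative helper (not from this PR) showing the
// error-handling order the hunks establish: sql.ErrNoRows first, then the
// generic error path, and scanned values used only after both checks pass.
func getJobName(db *sql.DB, id int64) (string, error) {
	var name string
	err := db.QueryRow(`SELECT name FROM jobs WHERE id = ?`, id).Scan(&name)
	if err == sql.ErrNoRows {
		return "", fmt.Errorf("job %d not found", id) // the handler responds 404 here
	}
	if err != nil {
		return "", fmt.Errorf("failed to query job: %w", err) // the handler responds 500 here
	}
	return name, nil // name is only read on the success path
}
```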
@@ -1920,8 +1915,37 @@ func (s *Manager) evaluateTaskCondition(taskID int64, jobID int64, conditionJSON
 	}
 }
 
+// getJobStatusUpdateMutex returns the mutex for a specific jobID, creating it if needed.
+// This ensures serialized execution of updateJobStatusFromTasks per job to prevent race conditions.
+func (s *Manager) getJobStatusUpdateMutex(jobID int64) *sync.Mutex {
+	s.jobStatusUpdateMuMu.Lock()
+	defer s.jobStatusUpdateMuMu.Unlock()
+
+	mu, exists := s.jobStatusUpdateMu[jobID]
+	if !exists {
+		mu = &sync.Mutex{}
+		s.jobStatusUpdateMu[jobID] = mu
+	}
+	return mu
+}
+
+// cleanupJobStatusUpdateMutex removes the mutex for a jobID after it's no longer needed.
+// Should only be called when the job is in a final state (completed/failed) and no more updates are expected.
+func (s *Manager) cleanupJobStatusUpdateMutex(jobID int64) {
+	s.jobStatusUpdateMuMu.Lock()
+	defer s.jobStatusUpdateMuMu.Unlock()
+	delete(s.jobStatusUpdateMu, jobID)
+}
+
 // updateJobStatusFromTasks updates job status and progress based on task states
+// This function is serialized per jobID to prevent race conditions when multiple tasks
+// complete concurrently and trigger status updates simultaneously.
 func (s *Manager) updateJobStatusFromTasks(jobID int64) {
+	// Serialize updates per job to prevent race conditions
+	mu := s.getJobStatusUpdateMutex(jobID)
+	mu.Lock()
+	defer mu.Unlock()
+
 	now := time.Now()
 
 	// All jobs now use parallel runners (one task per frame), so we always use task-based progress
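Taken out of context, the helpers added here are the standard keyed-mutex pattern: an outer mutex guards a map of per-key mutexes, and each caller then serializes on its per-key mutex. A self-contained sketch of the same mechanics, under illustrative names (`keyedLocker` is not from the codebase):

```go
package main

import (
	"fmt"
	"sync"
)

// keyedLocker stands in for the Manager fields used above: an outer mutex
// (cf. jobStatusUpdateMuMu) guarding a map of per-job mutexes (cf. jobStatusUpdateMu).
type keyedLocker struct {
	mu    sync.Mutex
	locks map[int64]*sync.Mutex
}

// get lazily creates and returns the mutex for id, as getJobStatusUpdateMutex does.
// Because lookup-or-create happens under the outer lock, two concurrent first
// callers for the same id always receive the same mutex.
func (k *keyedLocker) get(id int64) *sync.Mutex {
	k.mu.Lock()
	defer k.mu.Unlock()
	m, ok := k.locks[id]
	if !ok {
		m = &sync.Mutex{}
		k.locks[id] = m
	}
	return m
}

// cleanup drops the entry once no further updates are expected.
func (k *keyedLocker) cleanup(id int64) {
	k.mu.Lock()
	defer k.mu.Unlock()
	delete(k.locks, id)
}

func main() {
	kl := &keyedLocker{locks: make(map[int64]*sync.Mutex)}
	var wg sync.WaitGroup
	counter := 0
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			m := kl.get(42) // every goroutine serializes on job 42's mutex
			m.Lock()
			counter++ // stands in for updateJobStatusFromTasks' read-modify-write
			m.Unlock()
		}()
	}
	wg.Wait()
	kl.cleanup(42)
	fmt.Println(counter) // always 10: no lost updates
}
```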
@@ -2087,6 +2111,11 @@ func (s *Manager) updateJobStatusFromTasks(jobID int64) {
 			"progress":     progress,
 			"completed_at": now,
 		})
+		// Clean up mutex for jobs in final states (completed or failed)
+		// No more status updates will occur for these jobs
+		if jobStatus == string(types.JobStatusCompleted) || jobStatus == string(types.JobStatusFailed) {
+			s.cleanupJobStatusUpdateMutex(jobID)
+		}
 	}
 }
 
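One design note on the cleanup path: deleting the map entry does not invalidate mutex references already handed out by getJobStatusUpdateMutex, so an in-flight updater keeps a working lock; it would only stop being serialized against a later, freshly created mutex for the same job. That is safe precisely under the invariant the comments state, that no further status updates occur once a job is completed or failed.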