Enhance server configuration for large file uploads and improve token handling. Increase request body size limit in the server to 20 GB, update registration token expiration logic to support infinite expiration, and adjust database schema to accommodate larger file sizes. Add detailed logging for file upload processes and error handling improvements.

This commit is contained in:
2025-11-23 16:59:36 -06:00
parent f7e1766d8b
commit f9ff4d0138
6 changed files with 99 additions and 32 deletions

View File

@@ -46,13 +46,26 @@ func main() {
log.Fatalf("Failed to create server: %v", err)
}
// Start server
// Start server with increased request body size limit for large file uploads
addr := fmt.Sprintf(":%s", *port)
log.Printf("Starting manager server on %s", addr)
log.Printf("Database: %s", *dbPath)
log.Printf("Storage: %s", *storagePath)
if err := http.ListenAndServe(addr, server); err != nil {
httpServer := &http.Server{
Addr: addr,
Handler: server,
MaxHeaderBytes: 1 << 20, // 1 MB for headers
ReadTimeout: 0, // No read timeout (for large uploads)
WriteTimeout: 0, // No write timeout (for large uploads)
}
// Note: http.Server imposes no request-body size limit by default (only
// MaxHeaderBytes caps the headers, set above). Per-request body limits come
// from http.MaxBytesReader if a handler installs one; ParseMultipartForm's
// argument is an in-memory buffering threshold, not a size cap — larger
// parts are spooled to temporary files on disk, so big uploads work as-is.
if err := httpServer.ListenAndServe(); err != nil {
log.Fatalf("Server failed: %v", err)
}
}

View File

@@ -25,8 +25,13 @@ func (s *Server) handleGenerateRegistrationToken(w http.ResponseWriter, r *http.
ExpiresInHours int `json:"expires_in_hours,omitempty"`
}
if r.Body != nil && r.ContentLength > 0 {
if err := json.NewDecoder(r.Body).Decode(&req); err == nil && req.ExpiresInHours > 0 {
expiresIn = time.Duration(req.ExpiresInHours) * time.Hour
if err := json.NewDecoder(r.Body).Decode(&req); err == nil {
if req.ExpiresInHours == 0 {
// 0 hours means infinite expiration
expiresIn = 0
} else if req.ExpiresInHours > 0 {
expiresIn = time.Duration(req.ExpiresInHours) * time.Hour
}
}
}
@@ -36,11 +41,17 @@ func (s *Server) handleGenerateRegistrationToken(w http.ResponseWriter, r *http.
return
}
s.respondJSON(w, http.StatusCreated, map[string]interface{}{
"token": token,
"expires_in": expiresIn.String(),
"expires_at": time.Now().Add(expiresIn),
})
response := map[string]interface{}{
"token": token,
}
if expiresIn == 0 {
response["expires_in"] = "infinite"
response["expires_at"] = nil
} else {
response["expires_in"] = expiresIn.String()
response["expires_at"] = time.Now().Add(expiresIn)
}
s.respondJSON(w, http.StatusCreated, response)
}
// handleListRegistrationTokens lists all registration tokens

View File

@@ -841,20 +841,25 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) {
return
}
// Parse multipart form
err = r.ParseMultipartForm(500 << 20) // 500 MB (larger for ZIP files)
// Parse multipart form with large limit for big files
// Note: ParseMultipartForm's argument is a max-memory threshold, not a hard
// body-size cap — parts beyond it are spooled to temporary files on disk.
// NOTE(review): 20 GB as maxMemory permits huge in-RAM buffering; a smaller
// threshold (e.g. 32 MB) would still accept large files via disk spooling — confirm intent.
err = r.ParseMultipartForm(20 << 30) // 20 GB (for large ZIP files and blend files)
if err != nil {
s.respondError(w, http.StatusBadRequest, "Failed to parse form")
log.Printf("Error parsing multipart form for job %d: %v", jobID, err)
s.respondError(w, http.StatusBadRequest, fmt.Sprintf("Failed to parse form: %v", err))
return
}
file, header, err := r.FormFile("file")
if err != nil {
s.respondError(w, http.StatusBadRequest, "No file provided")
log.Printf("Error getting file from form for job %d: %v", jobID, err)
s.respondError(w, http.StatusBadRequest, fmt.Sprintf("No file provided: %v", err))
return
}
defer file.Close()
log.Printf("Uploading file '%s' (size: %d bytes) for job %d", header.Filename, header.Size, jobID)
jobPath := s.storage.JobPath(jobID)
if err := os.MkdirAll(jobPath, 0755); err != nil {
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create job directory: %v", err))
@@ -867,22 +872,34 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) {
// Check if this is a ZIP file
if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") {
log.Printf("Processing ZIP file '%s' for job %d", header.Filename, jobID)
// Extract ZIP file
zipPath := filepath.Join(jobPath, header.Filename)
log.Printf("Creating ZIP file at: %s", zipPath)
zipFile, err := os.Create(zipPath)
if err != nil {
log.Printf("ERROR: Failed to create ZIP file for job %d: %v", jobID, err)
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create ZIP file: %v", err))
return
}
_, err = io.Copy(zipFile, file)
log.Printf("Copying %d bytes to ZIP file for job %d...", header.Size, jobID)
copied, err := io.Copy(zipFile, file)
zipFile.Close()
if err != nil {
log.Printf("ERROR: Failed to save ZIP file for job %d (copied %d bytes): %v", jobID, copied, err)
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to save ZIP file: %v", err))
return
}
log.Printf("Successfully copied %d bytes to ZIP file for job %d", copied, jobID)
// Record ZIP file in database
zipInfo, _ := os.Stat(zipPath)
zipInfo, err := os.Stat(zipPath)
if err != nil {
log.Printf("ERROR: Failed to stat ZIP file for job %d: %v", jobID, err)
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to stat ZIP file: %v", err))
return
}
log.Printf("Recording ZIP file in database for job %d (size: %d bytes)", jobID, zipInfo.Size())
err = s.db.QueryRow(
`INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size)
VALUES (?, ?, ?, ?, ?)
@@ -890,16 +907,21 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) {
jobID, types.JobFileTypeInput, zipPath, header.Filename, zipInfo.Size(),
).Scan(&fileID)
if err != nil {
log.Printf("ERROR: Failed to record ZIP file in database for job %d: %v", jobID, err)
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record ZIP file: %v", err))
return
}
log.Printf("ZIP file recorded in database with ID %d for job %d", fileID, jobID)
// Extract ZIP file
log.Printf("Extracting ZIP file for job %d...", jobID)
extractedFiles, err = s.storage.ExtractZip(zipPath, jobPath)
if err != nil {
log.Printf("ERROR: Failed to extract ZIP file for job %d: %v", jobID, err)
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to extract ZIP file: %v", err))
return
}
log.Printf("Successfully extracted %d files from ZIP for job %d", len(extractedFiles), jobID)
// Find main blend file (check for user selection first, then auto-detect)
mainBlendParam := r.FormValue("main_blend_file")

View File

@@ -70,6 +70,9 @@ func (s *Server) runnerAuthMiddleware(next http.HandlerFunc) http.HandlerFunc {
}
// handleRegisterRunner registers a new runner
// Note: Token expiration only affects whether the token can be used for registration.
// Once a runner is registered, it receives its own runner_secret and manager_secret
// and operates independently. The token expiration does not affect registered runners.
func (s *Server) handleRegisterRunner(w http.ResponseWriter, r *http.Request) {
var req struct {
types.RegisterRunnerRequest
@@ -90,7 +93,7 @@ func (s *Server) handleRegisterRunner(w http.ResponseWriter, r *http.Request) {
return
}
// Validate registration token
// Validate registration token (expiration only affects token usability, not registered runners)
result, err := s.secrets.ValidateRegistrationTokenDetailed(req.RegistrationToken)
if err != nil {
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to validate token: %v", err))
@@ -119,7 +122,7 @@ func (s *Server) handleRegisterRunner(w http.ResponseWriter, r *http.Request) {
return
}
// Generate runner secret
// Generate runner secret (runner will use this for all future authentication, independent of token)
runnerSecret, err := s.secrets.GenerateRunnerSecret()
if err != nil {
s.respondError(w, http.StatusInternalServerError, "Failed to generate runner secret")
@@ -347,7 +350,7 @@ func (s *Server) handleUploadFileFromRunner(w http.ResponseWriter, r *http.Reque
return
}
err = r.ParseMultipartForm(100 << 20) // 100 MB
// NOTE(review): this value is the max-memory threshold, not a body-size cap;
// excess is spooled to disk, so a much smaller value would avoid large in-RAM buffering.
err = r.ParseMultipartForm(50 << 30) // 50 GB (for large output files)
if err != nil {
s.respondError(w, http.StatusBadRequest, "Failed to parse form")
return

View File

@@ -76,13 +76,22 @@ func (s *Secrets) GetManagerSecret() (string, error) {
}
// GenerateRegistrationToken generates a new registration token
// If expiresIn is 0, the token will never expire (uses far future date)
// Note: Token expiration only affects whether the token can be used for registration.
// Once a runner registers, it operates independently using its own secrets.
func (s *Secrets) GenerateRegistrationToken(createdBy int64, expiresIn time.Duration) (string, error) {
token, err := generateSecret(32)
if err != nil {
return "", fmt.Errorf("failed to generate token: %w", err)
}
expiresAt := time.Now().Add(expiresIn)
var expiresAt time.Time
if expiresIn == 0 {
// Use far future date (year 9999) to represent infinite expiration
expiresAt = time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC)
} else {
expiresAt = time.Now().Add(expiresIn)
}
_, err = s.db.Exec(
"INSERT INTO registration_tokens (token, expires_at, created_by) VALUES (?, ?, ?)",
@@ -141,9 +150,16 @@ func (s *Secrets) ValidateRegistrationTokenDetailed(token string) (*TokenValidat
return &TokenValidationResult{Valid: false, Reason: "already_used"}, nil
}
if time.Now().After(expiresAt) {
return &TokenValidationResult{Valid: false, Reason: "expired"}, nil
// Check if token has infinite expiration (year 9999 or later)
// Tokens with infinite expiration never expire
infiniteExpirationThreshold := time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC)
if expiresAt.Before(infiniteExpirationThreshold) {
// Normal expiration check for tokens with finite expiration
if time.Now().After(expiresAt) {
return &TokenValidationResult{Valid: false, Reason: "expired"}, nil
}
}
// If expiresAt is after the threshold, treat it as infinite (never expires)
// Mark token as used
_, err = s.db.Exec("UPDATE registration_tokens SET used = 1 WHERE id = ?", id)

View File

@@ -122,7 +122,7 @@ func (db *DB) migrate() error {
file_type TEXT NOT NULL,
file_path TEXT NOT NULL,
file_name TEXT NOT NULL,
file_size INTEGER NOT NULL,
file_size BIGINT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
@@ -213,6 +213,17 @@ func (db *DB) migrate() error {
`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS retry_count INTEGER DEFAULT 0`,
`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS max_retries INTEGER DEFAULT 3`,
`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS timeout_seconds INTEGER`,
// Migrate file_size from INTEGER to BIGINT to support large files (>2GB)
// DuckDB doesn't support direct ALTER COLUMN TYPE, so we use a workaround:
// 1. Add new column as BIGINT
// 2. Copy data from old column
// 3. Drop old column
// 4. Rename new column
// Note: these statements run unconditionally on every migration pass; errors
// (e.g. the column is already BIGINT or already renamed) are ignored by the loop below.
// NOTE(review): the recreated file_size column loses its NOT NULL constraint — confirm acceptable.
`ALTER TABLE job_files ADD COLUMN IF NOT EXISTS file_size_new BIGINT`,
`UPDATE job_files SET file_size_new = CAST(file_size AS BIGINT) WHERE file_size_new IS NULL`,
`ALTER TABLE job_files DROP COLUMN IF EXISTS file_size`,
`ALTER TABLE job_files RENAME COLUMN file_size_new TO file_size`,
}
for _, migration := range migrations {
@@ -220,6 +231,7 @@ func (db *DB) migrate() error {
if _, err := db.Exec(migration); err != nil {
// Log but don't fail - column might already exist or table might not exist yet
// This is fine for migrations that run after schema creation
// For the file_size migration, if it fails (e.g., already BIGINT), that's fine
}
}
@@ -235,16 +247,6 @@ func (db *DB) migrate() error {
}
return nil
for _, migration := range migrations {
// DuckDB supports IF NOT EXISTS for ALTER TABLE, so we can safely execute
if _, err := db.Exec(migration); err != nil {
// Log but don't fail - column might already exist or table might not exist yet
// This is fine for migrations that run after schema creation
}
}
return nil
}
// Close closes the database connection