something

2025-11-27 00:46:48 -06:00
parent 11e7552b5b
commit edc8ea160c
43 changed files with 9990 additions and 3059 deletions

View File

@@ -46,6 +46,10 @@ scene = bpy.context.scene
frame_start = scene.frame_start
frame_end = scene.frame_end
# Check for negative frames (not supported)
has_negative_start = frame_start < 0
has_negative_end = frame_end < 0
# Also check for actual animation range (keyframes)
# Find the earliest and latest keyframes across all objects
animation_start = None
@@ -54,15 +58,21 @@ animation_end = None
for obj in scene.objects:
if obj.animation_data and obj.animation_data.action:
action = obj.animation_data.action
# Check if action has fcurves attribute (varies by Blender version/context)
try:
fcurves = action.fcurves if hasattr(action, 'fcurves') else None
if fcurves:
for fcurve in fcurves:
if fcurve.keyframe_points:
for keyframe in fcurve.keyframe_points:
frame = int(keyframe.co[0])
if animation_start is None or frame < animation_start:
animation_start = frame
if animation_end is None or frame > animation_end:
animation_end = frame
except (AttributeError, TypeError):
# Action doesn't have fcurves or fcurves is not iterable - skip this object
pass
# Use animation range if available, otherwise use scene frame range
# If scene range seems wrong (start == end), prefer animation range
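The comments above describe the fallback order, and the condition in the next hunk prefers the keyframe-derived range whenever the scan found one. Restated as a small pure-Python helper (an illustrative sketch, not code from this commit):

def resolve_frame_range(scene_start, scene_end, anim_start, anim_end):
    """Pick the range to render: the keyframe range if one was found, else the scene range."""
    if anim_start is not None and anim_end is not None:
        return anim_start, anim_end
    return scene_start, scene_end

# A scene left at its default 1..1 range but with keyframes spanning 0..120:
# resolve_frame_range(1, 1, 0, 120) -> (0, 120)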
@@ -72,6 +82,11 @@ if animation_start is not None and animation_end is not None:
frame_start = animation_start
frame_end = animation_end
# Check for negative frames (not supported)
has_negative_start = frame_start < 0
has_negative_end = frame_end < 0
has_negative_animation = (animation_start is not None and animation_start < 0) or (animation_end is not None and animation_end < 0)
# Extract render settings
render = scene.render
resolution_x = render.resolution_x
@@ -87,56 +102,230 @@ engine_settings = {}
if engine == 'CYCLES':
cycles = scene.cycles
# Get denoiser settings - in Blender 3.0+ it's on the view layer
denoiser = 'OPENIMAGEDENOISE' # Default
denoising_use_gpu = False
denoising_input_passes = 'RGB_ALBEDO_NORMAL' # Default: Albedo and Normal
denoising_prefilter = 'ACCURATE' # Default
denoising_quality = 'HIGH' # Default (for OpenImageDenoise)
try:
view_layer = bpy.context.view_layer
if hasattr(view_layer, 'cycles'):
vl_cycles = view_layer.cycles
denoiser = getattr(vl_cycles, 'denoiser', 'OPENIMAGEDENOISE')
denoising_use_gpu = getattr(vl_cycles, 'denoising_use_gpu', False)
denoising_input_passes = getattr(vl_cycles, 'denoising_input_passes', 'RGB_ALBEDO_NORMAL')
denoising_prefilter = getattr(vl_cycles, 'denoising_prefilter', 'ACCURATE')
# Quality is only for OpenImageDenoise in Blender 4.0+
denoising_quality = getattr(vl_cycles, 'denoising_quality', 'HIGH')
except Exception:
# View layer Cycles settings are unavailable in this context; keep the defaults above.
pass
engine_settings = {
"samples": getattr(cycles, 'samples', 128),
# Sampling settings
"samples": getattr(cycles, 'samples', 4096), # Max Samples
"adaptive_min_samples": getattr(cycles, 'adaptive_min_samples', 0), # Min Samples
"use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', True), # Noise Threshold enabled
"adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01), # Noise Threshold value
"time_limit": getattr(cycles, 'time_limit', 0.0), # Time Limit (0 = disabled)
# Denoising settings
"use_denoising": getattr(cycles, 'use_denoising', False),
"denoising_radius": getattr(cycles, 'denoising_radius', 0),
"denoising_strength": getattr(cycles, 'denoising_strength', 0.0),
"denoiser": denoiser,
"denoising_use_gpu": denoising_use_gpu,
"denoising_input_passes": denoising_input_passes,
"denoising_prefilter": denoising_prefilter,
"denoising_quality": denoising_quality,
# Path Guiding settings
"use_guiding": getattr(cycles, 'use_guiding', False),
"guiding_training_samples": getattr(cycles, 'guiding_training_samples', 128),
"use_surface_guiding": getattr(cycles, 'use_surface_guiding', True),
"use_volume_guiding": getattr(cycles, 'use_volume_guiding', True),
# Lights settings
"use_light_tree": getattr(cycles, 'use_light_tree', True),
"light_sampling_threshold": getattr(cycles, 'light_sampling_threshold', 0.01),
# Device
"device": getattr(cycles, 'device', 'CPU'),
"use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False),
"adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01,
"use_fast_gi": getattr(cycles, 'use_fast_gi', False),
"light_tree": getattr(cycles, 'use_light_tree', False),
"use_light_linking": getattr(cycles, 'use_light_linking', False),
"caustics_reflective": getattr(cycles, 'caustics_reflective', False),
"caustics_refractive": getattr(cycles, 'caustics_refractive', False),
"blur_glossy": getattr(cycles, 'blur_glossy', 0.0),
# Advanced/Seed settings
"seed": getattr(cycles, 'seed', 0),
"use_animated_seed": getattr(cycles, 'use_animated_seed', False),
"sampling_pattern": getattr(cycles, 'sampling_pattern', 'AUTOMATIC'),
"scrambling_distance": getattr(cycles, 'scrambling_distance', 1.0),
"auto_scrambling_distance_multiplier": getattr(cycles, 'auto_scrambling_distance_multiplier', 1.0),
"preview_scrambling_distance": getattr(cycles, 'preview_scrambling_distance', False),
"min_light_bounces": getattr(cycles, 'min_light_bounces', 0),
"min_transparent_bounces": getattr(cycles, 'min_transparent_bounces', 0),
# Clamping
"sample_clamp_direct": getattr(cycles, 'sample_clamp_direct', 0.0),
"sample_clamp_indirect": getattr(cycles, 'sample_clamp_indirect', 0.0),
# Light Paths / Bounces
"max_bounces": getattr(cycles, 'max_bounces', 12),
"diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4),
"glossy_bounces": getattr(cycles, 'glossy_bounces', 4),
"transmission_bounces": getattr(cycles, 'transmission_bounces', 12),
"volume_bounces": getattr(cycles, 'volume_bounces', 0),
"transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8),
# Caustics
"caustics_reflective": getattr(cycles, 'caustics_reflective', False),
"caustics_refractive": getattr(cycles, 'caustics_refractive', False),
"blur_glossy": getattr(cycles, 'blur_glossy', 0.0), # Filter Glossy
# Fast GI Approximation
"use_fast_gi": getattr(cycles, 'use_fast_gi', False),
"fast_gi_method": getattr(cycles, 'fast_gi_method', 'REPLACE'), # REPLACE or ADD
"ao_bounces": getattr(cycles, 'ao_bounces', 1), # Viewport bounces
"ao_bounces_render": getattr(cycles, 'ao_bounces_render', 1), # Render bounces
# Volumes
"volume_step_rate": getattr(cycles, 'volume_step_rate', 1.0),
"volume_preview_step_rate": getattr(cycles, 'volume_preview_step_rate', 1.0),
"volume_max_steps": getattr(cycles, 'volume_max_steps', 1024),
# Film
"film_exposure": getattr(cycles, 'film_exposure', 1.0),
"film_transparent": getattr(cycles, 'film_transparent', False),
"film_transparent_glass": getattr(cycles, 'film_transparent_glass', False),
"film_transparent_roughness": getattr(cycles, 'film_transparent_roughness', 0.1),
"filter_type": getattr(cycles, 'filter_type', 'BLACKMAN_HARRIS'), # BOX, GAUSSIAN, BLACKMAN_HARRIS
"filter_width": getattr(cycles, 'filter_width', 1.5),
"pixel_filter_type": getattr(cycles, 'pixel_filter_type', 'BLACKMAN_HARRIS'),
# Performance
"use_auto_tile": getattr(cycles, 'use_auto_tile', True),
"tile_size": getattr(cycles, 'tile_size', 2048),
"use_persistent_data": getattr(cycles, 'use_persistent_data', False),
# Hair/Curves
"use_hair": getattr(cycles, 'use_hair', True),
"hair_subdivisions": getattr(cycles, 'hair_subdivisions', 2),
"hair_shape": getattr(cycles, 'hair_shape', 'THICK'), # ROUND, RIBBONS, THICK
# Simplify (from scene.render)
"use_simplify": getattr(scene.render, 'use_simplify', False),
"simplify_subdivision_render": getattr(scene.render, 'simplify_subdivision_render', 6),
"simplify_child_particles_render": getattr(scene.render, 'simplify_child_particles_render', 1.0),
# Other
"use_light_linking": getattr(cycles, 'use_light_linking', False),
"use_layer_samples": getattr(cycles, 'use_layer_samples', False),
}
elif engine == 'EEVEE' or engine == 'EEVEE_NEXT':
# Treat EEVEE_NEXT as EEVEE (modern Blender uses EEVEE for what was EEVEE_NEXT)
eevee = scene.eevee
engine_settings = {
# Sampling
"taa_render_samples": getattr(eevee, 'taa_render_samples', 64),
"taa_samples": getattr(eevee, 'taa_samples', 16), # Viewport samples
"use_taa_reprojection": getattr(eevee, 'use_taa_reprojection', True),
# Clamping
"clamp_surface_direct": getattr(eevee, 'clamp_surface_direct', 0.0),
"clamp_surface_indirect": getattr(eevee, 'clamp_surface_indirect', 0.0),
"clamp_volume_direct": getattr(eevee, 'clamp_volume_direct', 0.0),
"clamp_volume_indirect": getattr(eevee, 'clamp_volume_indirect', 0.0),
# Shadows
"shadow_cube_size": getattr(eevee, 'shadow_cube_size', '512'),
"shadow_cascade_size": getattr(eevee, 'shadow_cascade_size', '1024'),
"use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', False),
"use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
"light_threshold": getattr(eevee, 'light_threshold', 0.01),
# Raytracing (EEVEE Next / modern EEVEE)
"use_raytracing": getattr(eevee, 'use_raytracing', False),
"ray_tracing_method": getattr(eevee, 'ray_tracing_method', 'SCREEN'), # SCREEN or PROBE
"ray_tracing_options_trace_max_roughness": getattr(eevee, 'ray_tracing_options', {}).get('trace_max_roughness', 0.5) if hasattr(getattr(eevee, 'ray_tracing_options', None), 'get') else 0.5,
# Screen Space Reflections (legacy/fallback)
"use_ssr": getattr(eevee, 'use_ssr', False),
"use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
"use_ssr_halfres": getattr(eevee, 'use_ssr_halfres', True),
"ssr_quality": getattr(eevee, 'ssr_quality', 0.25),
"ssr_max_roughness": getattr(eevee, 'ssr_max_roughness', 0.5),
"ssr_thickness": getattr(eevee, 'ssr_thickness', 0.2),
"ssr_border_fade": getattr(eevee, 'ssr_border_fade', 0.075),
"ssr_firefly_fac": getattr(eevee, 'ssr_firefly_fac', 10.0),
# Ambient Occlusion
"use_gtao": getattr(eevee, 'use_gtao', False),
"gtao_distance": getattr(eevee, 'gtao_distance', 0.2),
"gtao_factor": getattr(eevee, 'gtao_factor', 1.0),
"gtao_quality": getattr(eevee, 'gtao_quality', 0.25),
"use_gtao_bent_normals": getattr(eevee, 'use_gtao_bent_normals', True),
"use_gtao_bounce": getattr(eevee, 'use_gtao_bounce', True),
# Bloom
"use_bloom": getattr(eevee, 'use_bloom', False),
"bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8),
"bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
"bloom_knee": getattr(eevee, 'bloom_knee', 0.5),
"bloom_radius": getattr(eevee, 'bloom_radius', 6.5),
"use_ssr": getattr(eevee, 'use_ssr', True),
"use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
"ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'),
"use_ssao": getattr(eevee, 'use_ssao', True),
"ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'),
"ssao_distance": getattr(eevee, 'ssao_distance', 0.2),
"ssao_factor": getattr(eevee, 'ssao_factor', 1.0),
"use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
"use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True),
"use_volumetric": getattr(eevee, 'use_volumetric', False),
"bloom_color": list(getattr(eevee, 'bloom_color', (1.0, 1.0, 1.0))),
"bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
"bloom_clamp": getattr(eevee, 'bloom_clamp', 0.0),
# Depth of Field
"bokeh_max_size": getattr(eevee, 'bokeh_max_size', 100.0),
"bokeh_threshold": getattr(eevee, 'bokeh_threshold', 1.0),
"bokeh_neighbor_max": getattr(eevee, 'bokeh_neighbor_max', 10.0),
"bokeh_denoise_fac": getattr(eevee, 'bokeh_denoise_fac', 0.75),
"use_bokeh_high_quality_slight_defocus": getattr(eevee, 'use_bokeh_high_quality_slight_defocus', False),
"use_bokeh_jittered": getattr(eevee, 'use_bokeh_jittered', False),
"bokeh_overblur": getattr(eevee, 'bokeh_overblur', 5.0),
# Subsurface Scattering
"sss_samples": getattr(eevee, 'sss_samples', 7),
"sss_jitter_threshold": getattr(eevee, 'sss_jitter_threshold', 0.3),
# Volumetrics
"use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
"use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', False),
"volumetric_start": getattr(eevee, 'volumetric_start', 0.1),
"volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
"volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'),
"volumetric_samples": getattr(eevee, 'volumetric_samples', 64),
"volumetric_start": getattr(eevee, 'volumetric_start', 0.0),
"volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
"use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
"use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True),
"use_gtao": getattr(eevee, 'use_gtao', False),
"gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'),
"volumetric_sample_distribution": getattr(eevee, 'volumetric_sample_distribution', 0.8),
"volumetric_ray_depth": getattr(eevee, 'volumetric_ray_depth', 16),
# Motion Blur
"use_motion_blur": getattr(eevee, 'use_motion_blur', False),
"motion_blur_position": getattr(eevee, 'motion_blur_position', 'CENTER'),
"motion_blur_shutter": getattr(eevee, 'motion_blur_shutter', 0.5),
"motion_blur_depth_scale": getattr(eevee, 'motion_blur_depth_scale', 100.0),
"motion_blur_max": getattr(eevee, 'motion_blur_max', 32),
"motion_blur_steps": getattr(eevee, 'motion_blur_steps', 1),
# Film
"use_overscan": getattr(eevee, 'use_overscan', False),
"overscan_size": getattr(eevee, 'overscan_size', 3.0),
# Indirect Lighting
"gi_diffuse_bounces": getattr(eevee, 'gi_diffuse_bounces', 3),
"gi_cubemap_resolution": getattr(eevee, 'gi_cubemap_resolution', '512'),
"gi_visibility_resolution": getattr(eevee, 'gi_visibility_resolution', '32'),
"gi_irradiance_smoothing": getattr(eevee, 'gi_irradiance_smoothing', 0.1),
"gi_glossy_clamp": getattr(eevee, 'gi_glossy_clamp', 0.0),
"gi_filter_quality": getattr(eevee, 'gi_filter_quality', 3.0),
"gi_show_irradiance": getattr(eevee, 'gi_show_irradiance', False),
"gi_show_cubemaps": getattr(eevee, 'gi_show_cubemaps', False),
"gi_auto_bake": getattr(eevee, 'gi_auto_bake', False),
# Hair/Curves
"hair_type": getattr(eevee, 'hair_type', 'STRIP'), # STRIP or STRAND
# Performance
"use_shadow_jitter_viewport": getattr(eevee, 'use_shadow_jitter_viewport', True),
# Simplify (from scene.render)
"use_simplify": getattr(scene.render, 'use_simplify', False),
"simplify_subdivision_render": getattr(scene.render, 'simplify_subdivision_render', 6),
"simplify_child_particles_render": getattr(scene.render, 'simplify_child_particles_render', 1.0),
}
else:
# For other engines, extract basic samples if available
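The per-engine dictionaries above are plain key/value data read through defensive getattr calls. On a worker, the mirror-image pattern, a guarded setattr loop, is one way they could be pushed back onto a scene. A minimal sketch, assuming a parsed engine_settings dict for Cycles; the function name and error handling are illustrative, not part of this commit:

import bpy

def apply_engine_settings(scene, settings):
    """Best-effort re-application of extracted Cycles settings onto a scene."""
    cycles = scene.cycles
    for name, value in settings.items():
        # Some keys (e.g. "use_simplify") live on scene.render rather than scene.cycles,
        # and not every property exists on every Blender version, so guard each write.
        if not hasattr(cycles, name):
            continue
        try:
            setattr(cycles, name, value)
        except (AttributeError, TypeError):
            pass  # Read-only or incompatible value; keep the scene's current setting.

# apply_engine_settings(bpy.context.scene, engine_settings)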
@@ -149,10 +338,20 @@ camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA'])
object_count = len(scene.objects)
material_count = len(bpy.data.materials)
# Extract Blender version info
# bpy.app.version is the version of Blender currently running this script;
# bpy.data.version is the version the file was last saved with.
blender_version = {
"current": bpy.app.version_string, # Version of Blender running this script
"file_saved_with": ".".join(map(str, bpy.data.version)) if hasattr(bpy.data, 'version') else None, # Version file was saved with
}
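# Both are (major, minor, patch) integer triples at the API level, so a consumer that
# needs version-aware handling can compare them as tuples, e.g.
#   tuple(bpy.data.version) >= (4, 2, 0)   # file saved by Blender 4.2 or newer
# (illustrative comparison only; nothing in this script depends on it).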
# Build metadata dictionary
metadata = {
"frame_start": frame_start,
"frame_end": frame_end,
"has_negative_frames": has_negative_start or has_negative_end or has_negative_animation,
"blender_version": blender_version,
"render_settings": {
"resolution_x": resolution_x,
"resolution_y": resolution_y,

View File

@@ -338,9 +338,27 @@ if current_engine == 'CYCLES':
if gpu_available:
scene.cycles.device = 'GPU'
print(f"Using GPU for rendering (blend file had: {current_device})")
# Auto-enable GPU denoising when rendering on GPU (OpenImageDenoise runs on most modern GPUs)
try:
view_layer = bpy.context.view_layer
if hasattr(view_layer, 'cycles') and hasattr(view_layer.cycles, 'denoising_use_gpu'):
view_layer.cycles.denoising_use_gpu = True
print("Auto-enabled GPU denoising (OpenImageDenoise)")
except Exception as e:
print(f"Could not auto-enable GPU denoising: {e}")
else:
scene.cycles.device = 'CPU'
print(f"GPU not available, using CPU for rendering (blend file had: {current_device})")
# Ensure GPU denoising is disabled when using CPU
try:
view_layer = bpy.context.view_layer
if hasattr(view_layer, 'cycles') and hasattr(view_layer.cycles, 'denoising_use_gpu'):
view_layer.cycles.denoising_use_gpu = False
print("Using CPU denoising")
except Exception:
# Leave the denoising device unchanged if view layer settings are unavailable.
pass
# Verify device setting
if current_engine == 'CYCLES':
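Both branches of this hunk rely on gpu_available and current_device being computed earlier in the file, outside the excerpt. A sketch of how such a probe is commonly written against the Cycles preferences API (the helper below is illustrative, not this repository's code):

import bpy

def detect_gpu_backend():
    """Return the first Cycles compute backend exposing a non-CPU device, or None."""
    prefs = bpy.context.preferences.addons["cycles"].preferences
    for backend in ("OPTIX", "CUDA", "HIP", "METAL", "ONEAPI"):
        try:
            prefs.compute_device_type = backend
        except TypeError:
            continue  # Backend not compiled into this Blender build.
        prefs.get_devices()  # Refresh the device list for the selected backend.
        if any(device.type != "CPU" for device in prefs.devices):
            for device in prefs.devices:
                device.use = device.type != "CPU"
            return backend
    return None

gpu_available = detect_gpu_backend() is not None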