Files
jiggablend/pkg/scripts/scripts/extract_metadata.py

371 lines
17 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

import bpy
import json
import sys
# Make all file paths relative to the blend file location FIRST.
# This must be done immediately after file load, before any other operations,
# to prevent Blender from trying to access external files with absolute paths.
try:
    bpy.ops.file.make_paths_relative()
    print("Made all file paths relative to blend file")
except Exception as e:
    # Best-effort: an unsaved file or odd paths can make this fail; the
    # extraction below still works, so warn instead of aborting.
    print(f"Warning: Could not make paths relative: {e}")
# Check for missing add-ons that the blend file requires.
# Blender marks missing add-ons with a "_missing" suffix in preferences.
missing_files_info = {
    "checked": False,       # whether the scan below completed
    "has_missing": False,   # True if any "_missing" add-on was found
    "missing_files": [],    # reserved for missing external files (not scanned here)
    "missing_addons": [],   # names of add-ons the file needs but Blender lacks
}
try:
    # Strip the "_missing" suffix to recover the original add-on name.
    missing = [
        mod.module.rsplit("_", 1)[0]
        for mod in bpy.context.preferences.addons
        if mod.module.endswith("_missing")
    ]
    missing_files_info["checked"] = True
    if missing:
        missing_files_info["has_missing"] = True
        missing_files_info["missing_addons"] = missing
        print("Missing add-ons required by this .blend:")
        for name in missing:
            print(" -", name)
    else:
        print("No missing add-ons detected file is headless-safe")
except Exception as e:
    # Preferences may be unreadable in some headless setups; record the error
    # instead of failing the whole extraction.
    print(f"Warning: Could not check for missing addons: {e}")
    missing_files_info["error"] = str(e)
# Get the active scene and its configured frame range.
scene = bpy.context.scene
frame_start = scene.frame_start
frame_end = scene.frame_end

# Negative frames are not supported downstream; flag them early.
has_negative_start = frame_start < 0
has_negative_end = frame_end < 0

# Also determine the actual animation range: the earliest and latest
# keyframes across all objects. This can differ from the scene settings.
animation_start = None
animation_end = None
for obj in scene.objects:
    if obj.animation_data and obj.animation_data.action:
        action = obj.animation_data.action
        # fcurves availability varies by Blender version/context.
        try:
            fcurves = action.fcurves if hasattr(action, 'fcurves') else None
            if fcurves:
                for fcurve in fcurves:
                    for keyframe in fcurve.keyframe_points:
                        # co[0] is the frame number (x coordinate of the key).
                        frame = int(keyframe.co[0])
                        if animation_start is None or frame < animation_start:
                            animation_start = frame
                        if animation_end is None or frame > animation_end:
                            animation_end = frame
        except (AttributeError, TypeError):
            # Action doesn't have fcurves or they aren't iterable - skip object.
            pass

# Prefer the keyframe range when the scene range looks wrong (start == end)
# or the animation extends beyond it.
if animation_start is not None and animation_end is not None:
    if frame_start == frame_end or animation_start < frame_start or animation_end > frame_end:
        frame_start = animation_start
        frame_end = animation_end

# Re-check for negative frames after the possible adjustment above.
has_negative_start = frame_start < 0
has_negative_end = frame_end < 0
has_negative_animation = (animation_start is not None and animation_start < 0) or (animation_end is not None and animation_end < 0)
# Extract basic render settings.
render = scene.render
resolution_x = render.resolution_x
resolution_y = render.resolution_y
# Effective FPS is fps / fps_base (e.g. 30 / 1.001 = 29.97);
# guard against a zero fps_base to avoid ZeroDivisionError.
frame_rate = render.fps / render.fps_base if render.fps_base != 0 else render.fps
engine = render.engine.upper()
# Output format comes from the image settings (e.g. PNG, OPEN_EXR, FFMPEG).
output_format = render.image_settings.file_format
# Extract engine-specific settings. Every property is read via getattr with a
# sensible default so the script works across Blender versions where some
# properties do not exist.
engine_settings = {}
if engine == 'CYCLES':
    cycles = scene.cycles
    # Denoiser settings - in Blender 3.0+ they live on the view layer.
    denoiser = 'OPENIMAGEDENOISE'  # Default
    denoising_use_gpu = False
    denoising_input_passes = 'RGB_ALBEDO_NORMAL'  # Default: Albedo and Normal
    denoising_prefilter = 'ACCURATE'  # Default
    denoising_quality = 'HIGH'  # Default (for OpenImageDenoise)
    try:
        view_layer = bpy.context.view_layer
        if hasattr(view_layer, 'cycles'):
            vl_cycles = view_layer.cycles
            denoiser = getattr(vl_cycles, 'denoiser', 'OPENIMAGEDENOISE')
            denoising_use_gpu = getattr(vl_cycles, 'denoising_use_gpu', False)
            denoising_input_passes = getattr(vl_cycles, 'denoising_input_passes', 'RGB_ALBEDO_NORMAL')
            denoising_prefilter = getattr(vl_cycles, 'denoising_prefilter', 'ACCURATE')
            # Quality is only for OpenImageDenoise in Blender 4.0+.
            denoising_quality = getattr(vl_cycles, 'denoising_quality', 'HIGH')
    except Exception:
        # Keep the defaults when the view layer is unavailable.
        pass
    engine_settings = {
        # Sampling settings
        "samples": getattr(cycles, 'samples', 4096),  # Max Samples
        "adaptive_min_samples": getattr(cycles, 'adaptive_min_samples', 0),  # Min Samples
        "use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', True),  # Noise Threshold enabled
        "adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01),  # Noise Threshold value
        "time_limit": getattr(cycles, 'time_limit', 0.0),  # Time Limit (0 = disabled)
        # Denoising settings
        "use_denoising": getattr(cycles, 'use_denoising', False),
        "denoiser": denoiser,
        "denoising_use_gpu": denoising_use_gpu,
        "denoising_input_passes": denoising_input_passes,
        "denoising_prefilter": denoising_prefilter,
        "denoising_quality": denoising_quality,
        # Path Guiding settings
        "use_guiding": getattr(cycles, 'use_guiding', False),
        "guiding_training_samples": getattr(cycles, 'guiding_training_samples', 128),
        "use_surface_guiding": getattr(cycles, 'use_surface_guiding', True),
        "use_volume_guiding": getattr(cycles, 'use_volume_guiding', True),
        # Lights settings
        "use_light_tree": getattr(cycles, 'use_light_tree', True),
        "light_sampling_threshold": getattr(cycles, 'light_sampling_threshold', 0.01),
        # Device
        "device": getattr(cycles, 'device', 'CPU'),
        # Advanced/Seed settings
        "seed": getattr(cycles, 'seed', 0),
        "use_animated_seed": getattr(cycles, 'use_animated_seed', False),
        "sampling_pattern": getattr(cycles, 'sampling_pattern', 'AUTOMATIC'),
        "scrambling_distance": getattr(cycles, 'scrambling_distance', 1.0),
        "auto_scrambling_distance_multiplier": getattr(cycles, 'auto_scrambling_distance_multiplier', 1.0),
        "preview_scrambling_distance": getattr(cycles, 'preview_scrambling_distance', False),
        "min_light_bounces": getattr(cycles, 'min_light_bounces', 0),
        "min_transparent_bounces": getattr(cycles, 'min_transparent_bounces', 0),
        # Clamping
        "sample_clamp_direct": getattr(cycles, 'sample_clamp_direct', 0.0),
        "sample_clamp_indirect": getattr(cycles, 'sample_clamp_indirect', 0.0),
        # Light Paths / Bounces
        "max_bounces": getattr(cycles, 'max_bounces', 12),
        "diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4),
        "glossy_bounces": getattr(cycles, 'glossy_bounces', 4),
        "transmission_bounces": getattr(cycles, 'transmission_bounces', 12),
        "volume_bounces": getattr(cycles, 'volume_bounces', 0),
        "transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8),
        # Caustics
        "caustics_reflective": getattr(cycles, 'caustics_reflective', False),
        "caustics_refractive": getattr(cycles, 'caustics_refractive', False),
        "blur_glossy": getattr(cycles, 'blur_glossy', 0.0),  # Filter Glossy
        # Fast GI Approximation
        "use_fast_gi": getattr(cycles, 'use_fast_gi', False),
        "fast_gi_method": getattr(cycles, 'fast_gi_method', 'REPLACE'),  # REPLACE or ADD
        "ao_bounces": getattr(cycles, 'ao_bounces', 1),  # Viewport bounces
        "ao_bounces_render": getattr(cycles, 'ao_bounces_render', 1),  # Render bounces
        # Volumes
        "volume_step_rate": getattr(cycles, 'volume_step_rate', 1.0),
        "volume_preview_step_rate": getattr(cycles, 'volume_preview_step_rate', 1.0),
        "volume_max_steps": getattr(cycles, 'volume_max_steps', 1024),
        # Film
        "film_exposure": getattr(cycles, 'film_exposure', 1.0),
        "film_transparent": getattr(cycles, 'film_transparent', False),
        "film_transparent_glass": getattr(cycles, 'film_transparent_glass', False),
        "film_transparent_roughness": getattr(cycles, 'film_transparent_roughness', 0.1),
        "filter_type": getattr(cycles, 'filter_type', 'BLACKMAN_HARRIS'),  # BOX, GAUSSIAN, BLACKMAN_HARRIS
        "filter_width": getattr(cycles, 'filter_width', 1.5),
        "pixel_filter_type": getattr(cycles, 'pixel_filter_type', 'BLACKMAN_HARRIS'),
        # Performance
        "use_auto_tile": getattr(cycles, 'use_auto_tile', True),
        "tile_size": getattr(cycles, 'tile_size', 2048),
        "use_persistent_data": getattr(cycles, 'use_persistent_data', False),
        # Hair/Curves
        "use_hair": getattr(cycles, 'use_hair', True),
        "hair_subdivisions": getattr(cycles, 'hair_subdivisions', 2),
        "hair_shape": getattr(cycles, 'hair_shape', 'THICK'),  # ROUND, RIBBONS, THICK
        # Simplify (from scene.render)
        "use_simplify": getattr(scene.render, 'use_simplify', False),
        "simplify_subdivision_render": getattr(scene.render, 'simplify_subdivision_render', 6),
        "simplify_child_particles_render": getattr(scene.render, 'simplify_child_particles_render', 1.0),
        # Other
        "use_light_linking": getattr(cycles, 'use_light_linking', False),
        "use_layer_samples": getattr(cycles, 'use_layer_samples', False),
    }
elif engine == 'EEVEE' or engine == 'EEVEE_NEXT':
    # Treat EEVEE_NEXT as EEVEE (modern Blender uses EEVEE for what was EEVEE_NEXT).
    eevee = scene.eevee
    engine_settings = {
        # Sampling
        "taa_render_samples": getattr(eevee, 'taa_render_samples', 64),
        "taa_samples": getattr(eevee, 'taa_samples', 16),  # Viewport samples
        "use_taa_reprojection": getattr(eevee, 'use_taa_reprojection', True),
        # Clamping
        "clamp_surface_direct": getattr(eevee, 'clamp_surface_direct', 0.0),
        "clamp_surface_indirect": getattr(eevee, 'clamp_surface_indirect', 0.0),
        "clamp_volume_direct": getattr(eevee, 'clamp_volume_direct', 0.0),
        "clamp_volume_indirect": getattr(eevee, 'clamp_volume_indirect', 0.0),
        # Shadows
        "shadow_cube_size": getattr(eevee, 'shadow_cube_size', '512'),
        "shadow_cascade_size": getattr(eevee, 'shadow_cascade_size', '1024'),
        "use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', False),
        "use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
        "light_threshold": getattr(eevee, 'light_threshold', 0.01),
        # Raytracing (EEVEE Next / modern EEVEE)
        "use_raytracing": getattr(eevee, 'use_raytracing', False),
        "ray_tracing_method": getattr(eevee, 'ray_tracing_method', 'SCREEN'),  # SCREEN or PROBE
        # Read the RNA property directly via nested getattr; the previous
        # .get() call read custom ID-properties and always fell back to 0.5.
        "ray_tracing_options_trace_max_roughness": getattr(getattr(eevee, 'ray_tracing_options', None), 'trace_max_roughness', 0.5),
        # Screen Space Reflections (legacy/fallback)
        "use_ssr": getattr(eevee, 'use_ssr', False),
        "use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
        "use_ssr_halfres": getattr(eevee, 'use_ssr_halfres', True),
        "ssr_quality": getattr(eevee, 'ssr_quality', 0.25),
        "ssr_max_roughness": getattr(eevee, 'ssr_max_roughness', 0.5),
        "ssr_thickness": getattr(eevee, 'ssr_thickness', 0.2),
        "ssr_border_fade": getattr(eevee, 'ssr_border_fade', 0.075),
        "ssr_firefly_fac": getattr(eevee, 'ssr_firefly_fac', 10.0),
        # Ambient Occlusion
        "use_gtao": getattr(eevee, 'use_gtao', False),
        "gtao_distance": getattr(eevee, 'gtao_distance', 0.2),
        "gtao_factor": getattr(eevee, 'gtao_factor', 1.0),
        "gtao_quality": getattr(eevee, 'gtao_quality', 0.25),
        "use_gtao_bent_normals": getattr(eevee, 'use_gtao_bent_normals', True),
        "use_gtao_bounce": getattr(eevee, 'use_gtao_bounce', True),
        # Bloom
        "use_bloom": getattr(eevee, 'use_bloom', False),
        "bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8),
        "bloom_knee": getattr(eevee, 'bloom_knee', 0.5),
        "bloom_radius": getattr(eevee, 'bloom_radius', 6.5),
        "bloom_color": list(getattr(eevee, 'bloom_color', (1.0, 1.0, 1.0))),
        "bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
        "bloom_clamp": getattr(eevee, 'bloom_clamp', 0.0),
        # Depth of Field
        "bokeh_max_size": getattr(eevee, 'bokeh_max_size', 100.0),
        "bokeh_threshold": getattr(eevee, 'bokeh_threshold', 1.0),
        "bokeh_neighbor_max": getattr(eevee, 'bokeh_neighbor_max', 10.0),
        "bokeh_denoise_fac": getattr(eevee, 'bokeh_denoise_fac', 0.75),
        "use_bokeh_high_quality_slight_defocus": getattr(eevee, 'use_bokeh_high_quality_slight_defocus', False),
        "use_bokeh_jittered": getattr(eevee, 'use_bokeh_jittered', False),
        "bokeh_overblur": getattr(eevee, 'bokeh_overblur', 5.0),
        # Subsurface Scattering
        "sss_samples": getattr(eevee, 'sss_samples', 7),
        "sss_jitter_threshold": getattr(eevee, 'sss_jitter_threshold', 0.3),
        # Volumetrics
        "use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
        "use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', False),
        "volumetric_start": getattr(eevee, 'volumetric_start', 0.1),
        "volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
        "volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'),
        "volumetric_samples": getattr(eevee, 'volumetric_samples', 64),
        "volumetric_sample_distribution": getattr(eevee, 'volumetric_sample_distribution', 0.8),
        "volumetric_ray_depth": getattr(eevee, 'volumetric_ray_depth', 16),
        # Motion Blur
        "use_motion_blur": getattr(eevee, 'use_motion_blur', False),
        "motion_blur_position": getattr(eevee, 'motion_blur_position', 'CENTER'),
        "motion_blur_shutter": getattr(eevee, 'motion_blur_shutter', 0.5),
        "motion_blur_depth_scale": getattr(eevee, 'motion_blur_depth_scale', 100.0),
        "motion_blur_max": getattr(eevee, 'motion_blur_max', 32),
        "motion_blur_steps": getattr(eevee, 'motion_blur_steps', 1),
        # Film
        "use_overscan": getattr(eevee, 'use_overscan', False),
        "overscan_size": getattr(eevee, 'overscan_size', 3.0),
        # Indirect Lighting
        "gi_diffuse_bounces": getattr(eevee, 'gi_diffuse_bounces', 3),
        "gi_cubemap_resolution": getattr(eevee, 'gi_cubemap_resolution', '512'),
        "gi_visibility_resolution": getattr(eevee, 'gi_visibility_resolution', '32'),
        "gi_irradiance_smoothing": getattr(eevee, 'gi_irradiance_smoothing', 0.1),
        "gi_glossy_clamp": getattr(eevee, 'gi_glossy_clamp', 0.0),
        "gi_filter_quality": getattr(eevee, 'gi_filter_quality', 3.0),
        "gi_show_irradiance": getattr(eevee, 'gi_show_irradiance', False),
        "gi_show_cubemaps": getattr(eevee, 'gi_show_cubemaps', False),
        "gi_auto_bake": getattr(eevee, 'gi_auto_bake', False),
        # Hair/Curves
        "hair_type": getattr(eevee, 'hair_type', 'STRIP'),  # STRIP or STRAND
        # Performance
        "use_shadow_jitter_viewport": getattr(eevee, 'use_shadow_jitter_viewport', True),
        # Simplify (from scene.render)
        "use_simplify": getattr(scene.render, 'use_simplify', False),
        "simplify_subdivision_render": getattr(scene.render, 'simplify_subdivision_render', 6),
        "simplify_child_particles_render": getattr(scene.render, 'simplify_child_particles_render', 1.0),
    }
else:
    # For other engines, extract basic samples if available.
    engine_settings = {
        "samples": getattr(scene, 'samples', 128) if hasattr(scene, 'samples') else 128
    }
# Extract scene statistics.
camera_count = sum(1 for obj in scene.objects if obj.type == 'CAMERA')
object_count = len(scene.objects)
material_count = len(bpy.data.materials)
# Extract Blender version info.
# bpy.data.version is the version the file was saved with; fall back to
# the running Blender's version string when unavailable.
blender_version = ".".join(map(str, bpy.data.version)) if hasattr(bpy.data, 'version') else bpy.app.version_string
# Build the metadata dictionary consumed by the calling process.
metadata = {
    "frame_start": frame_start,
    "frame_end": frame_end,
    "has_negative_frames": has_negative_start or has_negative_end or has_negative_animation,
    "blender_version": blender_version,
    "render_settings": {
        "resolution_x": resolution_x,
        "resolution_y": resolution_y,
        "frame_rate": frame_rate,
        "output_format": output_format,
        "engine": engine.lower(),
        "engine_settings": engine_settings,
    },
    "scene_info": {
        "camera_count": camera_count,
        "object_count": object_count,
        "material_count": material_count,
    },
    "missing_files_info": missing_files_info,
}
# Emit as a single JSON line on stdout; flush so the parent process sees it
# even if Blender exits without draining buffers.
print(json.dumps(metadata))
sys.stdout.flush()