Initial commit

This commit is contained in:
2025-04-29 18:26:13 +02:00
commit 30b5b7ec23
2366 changed files with 6634852 additions and 0 deletions

View File

@@ -0,0 +1,291 @@
import bpy
from pathlib import Path
import time
import os
import math
# Try importing NumPy. It is optional: median calculations require it,
# mean calculations fall back to a pure-Python loop without it.
try:
    import numpy as np
    numpy_available = True
    # print("NumPy module found.") # Less verbose
except ImportError:
    print("Warning: NumPy module not found. Median calc disabled, mean uses loop.")
    numpy_available = False
# --- Configuration ---
ASSET_LIBRARY_NAME = "Nodes-Linked" # <<< Name of Asset Library in Prefs
TEMPLATE_MATERIAL_NAME = "Template_PBRMaterial" # <<< Name of template Material in current file
PLACEHOLDER_NODE_LABEL = "PBRSET_PLACEHOLDER" # <<< Label of placeholder node in template mat
ASSET_NAME_PREFIX = "PBRSET_" # <<< Prefix of Node Group assets to process
MATERIAL_NAME_PREFIX = "Mat_" # <<< Prefix for created Materials
THUMBNAIL_PROPERTY_NAME = "thumbnail_filepath" # <<< Custom property name on Node Groups
VALID_EXTENSIONS = {".jpg", ".jpeg", ".png", ".tif", ".tiff"}  # Recognized texture file types
DERIVED_MAP_EXTENSIONS = ['.png', '.jpg', '.jpeg', '.tif', '.tiff']  # Extensions probed when deriving ROUGH/METAL map paths
VIEWPORT_GAMMA = 0.4  # Gamma used when converting linear medians to viewport display values
SCALED_SIZE = (32, 32) # Downscale target size for calculations
# --- >>> SET MATERIAL CREATION LIMIT HERE <<< ---
# Max number of *new* materials created per run (0 = no limit)
MATERIAL_CREATION_LIMIT = 900
# ------------------------------------------------
# --- Helper Functions ---
def find_node_by_label(node_tree, label, node_type=None):
    """Return the first node in *node_tree* whose label equals *label*.

    If *node_type* is given, the node's ``type`` must also match.
    Returns None for a missing/empty tree or when no node matches.
    """
    if not node_tree:
        return None
    labeled = (n for n in node_tree.nodes if n.label and n.label == label)
    for candidate in labeled:
        if node_type is None or candidate.type == node_type:
            return candidate
    return None
def calculate_value_from_image(image, target_size=(64, 64), mode='color', method='median'):
    """Compute a representative value from a downscaled copy of *image*.

    mode='color'     -> (r, g, b, 1.0) tuple, gamma-adjusted for the viewport.
    mode='grayscale' -> single float clamped to [0, 1] (first channel only).
    method is 'median' (requires NumPy) or 'mean' (NumPy or pure-Python loop).
    Returns None when the image has no data or any step fails. The temporary
    downscaled copy is always removed in the ``finally`` block.
    """
    temp_img = None
    if not image:
        return None
    try:
        # Touch the pixel buffer so Blender populates it if necessary.
        if not image.has_data:
            try:
                _ = len(image.pixels); image.update()
            except Exception:
                pass
        if not image.has_data:
            return None  # Cannot proceed without pixel data
        temp_img = image.copy()
        if not temp_img:
            return None
        temp_img.scale(target_size[0], target_size[1])
        try:
            _ = len(temp_img.pixels); temp_img.update()
        except Exception:
            pass  # Ignore access error; has_data is checked below
        if not temp_img.has_data:
            return None
        width = temp_img.size[0]; height = temp_img.size[1]; channels = temp_img.channels
        if width == 0 or height == 0 or channels == 0:
            return None
        pixels = temp_img.pixels[:]
        result_value = None
        if numpy_available:  # Vectorized path (median or mean)
            np_pixels = np.array(pixels)
            num_pixels_actual = len(np_pixels) // channels
            if num_pixels_actual == 0:
                return None
            pixels_reshaped = np_pixels[:num_pixels_actual * channels].reshape((num_pixels_actual, channels))
            if mode == 'color':
                if channels < 3:
                    return None
                calc_linear = np.median(pixels_reshaped[:, :3], axis=0) if method == 'median' else np.mean(pixels_reshaped[:, :3], axis=0)
                inv_gamma = 1.0 / VIEWPORT_GAMMA
                # Clamp negatives before pow, then clamp result into [0, 1].
                calc_srgb = np.clip(np.power(np.clip(calc_linear, 0.0, None), inv_gamma), 0.0, 1.0)
                result_value = (calc_srgb[0], calc_srgb[1], calc_srgb[2], 1.0)
            elif mode == 'grayscale':
                calc_val = np.median(pixels_reshaped[:, 0]) if method == 'median' else np.mean(pixels_reshaped[:, 0])
                result_value = min(max(0.0, calc_val), 1.0)
        elif method == 'mean':  # Pure-Python fallback when NumPy is absent
            actual_len = len(pixels)
            if actual_len == 0:
                return None
            # BUGFIX: these statements previously sat after 'return None' on the
            # same line and were therefore dead code.
            num_pixels_in_buffer = actual_len // channels
            max_elements = num_pixels_in_buffer * channels
            if num_pixels_in_buffer == 0:
                return None
            if mode == 'color':
                sum_r = sum_g = sum_b = 0.0
                for i in range(0, max_elements, channels):
                    if i + 2 >= actual_len:
                        break
                    # BUGFIX: accumulation was unreachable when written after
                    # 'break' on a single line.
                    sum_r += pixels[i]; sum_g += pixels[i + 1]; sum_b += pixels[i + 2]
                avg_r_lin = sum_r / num_pixels_in_buffer
                avg_g_lin = sum_g / num_pixels_in_buffer
                avg_b_lin = sum_b / num_pixels_in_buffer
                inv_gamma = 1.0 / VIEWPORT_GAMMA

                def _to_srgb(v):
                    # Gamma-adjust one linear channel and clamp into [0, 1].
                    return min(max(0.0, pow(max(0.0, v), inv_gamma)), 1.0)

                result_value = (_to_srgb(avg_r_lin), _to_srgb(avg_g_lin), _to_srgb(avg_b_lin), 1.0)
            elif mode == 'grayscale':
                total = 0.0
                for i in range(0, max_elements, channels):
                    total += pixels[i]
                result_value = min(max(0.0, total / num_pixels_in_buffer), 1.0)
        else:
            print(" Error: NumPy required for median calculation.")
            return None
        return result_value
    except Exception as e:
        print(f" Error during value calculation for '{image.name}': {e}")
        return None
    finally:
        # Always remove the temporary downscaled image datablock.
        if temp_img:
            try:
                bpy.data.images.remove(temp_img, do_unlink=True)
            except Exception:
                pass  # Ignore cleanup errors
# --- Main Function ---
def create_materials_for_library_assets(library_name):
    """Create one material per PBRSET_* node group found in an asset library.

    For each .blend in the library named *library_name*: link matching node
    groups, copy the template material, swap the placeholder group, derive
    viewport color/roughness/metallic from thumbnail/ROUGH/METAL maps, mark
    the material as an asset, copy tags and set a custom preview. Stops early
    when MATERIAL_CREATION_LIMIT new materials were created this run.
    """
    start_time = time.time(); print(f"--- Starting Material Creation for Library '{library_name}' ---")
    print(f"Material Creation Limit per run: {'Unlimited' if MATERIAL_CREATION_LIMIT <= 0 else MATERIAL_CREATION_LIMIT}")
    # --- Prerequisite checks: template material and asset library path ---
    template_mat = bpy.data.materials.get(TEMPLATE_MATERIAL_NAME)
    if not template_mat or not template_mat.use_nodes or not find_node_by_label(template_mat.node_tree, PLACEHOLDER_NODE_LABEL, 'GROUP'): print("Template Prereq Failed."); return
    library = bpy.context.preferences.filepaths.asset_libraries.get(library_name)
    if not library or not Path(bpy.path.abspath(library.path)).exists(): print("Library Prereq Failed."); return
    print(f"Found template material and library path...")
    # --- Collect .blend files to inspect ---
    materials_created = 0; materials_skipped = 0; nodegroups_processed = 0; link_errors = 0
    files_to_process = []
    library_path_obj = Path(bpy.path.abspath(library.path))
    if library_path_obj.is_dir():
        for item in library_path_obj.iterdir():
            if item.is_file() and item.suffix.lower() == '.blend': files_to_process.append(str(item))
        if not files_to_process: print(f"Warning: No .blend files found in dir: {library_path_obj}")
    elif library_path_obj.is_file() and library_path_obj.suffix.lower() == '.blend':
        files_to_process.append(str(library_path_obj))
    else:
        print(f"Error: Library path not dir or .blend: {library_path_obj}"); return
    print(f"Found {len(files_to_process)} .blend file(s) to inspect.")
    # Counters/flag for the per-run creation limit
    created_in_this_run = 0
    limit_reached_flag = False
    for blend_file_path in files_to_process:
        print(f"\nInspecting library file: {os.path.basename(blend_file_path)}...")
        potential_nodegroups = []
        try:
            # link=False inspection pass: only read the node-group names.
            with bpy.data.libraries.load(blend_file_path, link=False) as (data_from, data_to):
                potential_nodegroups = list(data_from.node_groups)
        except Exception as e_load_inspect:
            print(f" Error inspecting file '{blend_file_path}': {e_load_inspect}"); continue
        print(f" Found {len(potential_nodegroups)} NGs. Checking for '{ASSET_NAME_PREFIX}'...")
        for asset_nodegroup_name in potential_nodegroups:
            if not asset_nodegroup_name.startswith(ASSET_NAME_PREFIX): continue
            nodegroups_processed += 1
            base_name = asset_nodegroup_name.removeprefix(ASSET_NAME_PREFIX)
            material_name = f"{MATERIAL_NAME_PREFIX}{base_name}"
            if bpy.data.materials.get(material_name): materials_skipped += 1; continue
            linked_nodegroup = None; preview_path = None
            try:  # --- Main processing block for one node group ---
                # Reuse an already-linked group from the same file when possible.
                existing_group = bpy.data.node_groups.get(asset_nodegroup_name)
                is_correctly_linked = (existing_group and existing_group.library and bpy.path.abspath(existing_group.library.filepath) == blend_file_path)
                if is_correctly_linked:
                    linked_nodegroup = existing_group
                else:  # Link it
                    with bpy.data.libraries.load(blend_file_path, link=True, relative=False) as (data_from, data_to):
                        if asset_nodegroup_name in data_from.node_groups:
                            data_to.node_groups = [asset_nodegroup_name]
                        else:
                            print(f" Error: NG '{asset_nodegroup_name}' not found during link."); continue  # Skip NG
                    linked_nodegroup = bpy.data.node_groups.get(asset_nodegroup_name)
                    if not linked_nodegroup or not linked_nodegroup.library:
                        print(f" Error: NG '{asset_nodegroup_name}' link failed."); linked_nodegroup = None; link_errors += 1
                if not linked_nodegroup:
                    print(f" Failed link NG '{asset_nodegroup_name}'. Skip."); continue  # Skip NG
                preview_path = linked_nodegroup.get(THUMBNAIL_PROPERTY_NAME)  # Path to COL-1 1K
                # Duplicate template, rename, replace the placeholder group.
                new_material = template_mat.copy()
                if not new_material: print(f" Error: Failed copy template mat. Skip."); continue
                new_material.name = material_name
                if not new_material.use_nodes or not new_material.node_tree: print(f" Error: New mat '{material_name}' no nodes."); continue
                placeholder_node = find_node_by_label(new_material.node_tree, PLACEHOLDER_NODE_LABEL, 'GROUP')
                if not placeholder_node: print(f" Error: Placeholder '{PLACEHOLDER_NODE_LABEL}' not found."); continue
                placeholder_node.node_tree = linked_nodegroup
                print(f" Created material '{material_name}' and linked NG '{linked_nodegroup.name}'.")
                # --- Load base COL-1 image once ---
                thumbnail_image = None
                if preview_path and Path(preview_path).is_file():
                    try:
                        thumbnail_image = bpy.data.images.load(preview_path, check_existing=True)
                    except Exception as e_load_base:
                        print(f" Error loading base thumbnail '{preview_path}': {e_load_base}")
                # --- Set Viewport Color (Median) ---
                median_color = None
                if thumbnail_image:
                    median_color = calculate_value_from_image(thumbnail_image, target_size=SCALED_SIZE, mode='color', method='median')
                if median_color:
                    new_material.diffuse_color = median_color; print(f" Set viewport color: {median_color[:3]}")
                else:
                    print(f" Warn: Could not set viewport color.")
                # --- Derive ROUGH/METAL map paths next to the COL-1 thumbnail ---
                roughness_path = None; metallic_path = None; metal_map_found = False
                if preview_path and "_COL-1" in preview_path:
                    try:
                        base_path_obj = Path(preview_path); directory = base_path_obj.parent; base_stem = base_path_obj.stem
                        if "_COL-1" in base_stem:
                            rough_stem = base_stem.replace("_COL-1", "_ROUGH")
                            for ext in DERIVED_MAP_EXTENSIONS:
                                potential_path = directory / f"{rough_stem}{ext}"
                                if potential_path.is_file(): roughness_path = str(potential_path); break
                            metal_stem = base_stem.replace("_COL-1", "_METAL")
                            for ext in DERIVED_MAP_EXTENSIONS:
                                potential_path = directory / f"{metal_stem}{ext}"
                                if potential_path.is_file(): metallic_path = str(potential_path); metal_map_found = True; break
                    except Exception as e_derive:
                        print(f" Error deriving paths: {e_derive}")
                if not metal_map_found: print(f" Info: No METAL map found. Assuming Spec/Gloss.")
                # --- Set Viewport Roughness (Median, inverted for Spec/Gloss sets) ---
                median_roughness = None
                if roughness_path:
                    rough_img = None  # BUGFIX: ensure bound even if load raises
                    try:
                        rough_img = bpy.data.images.load(roughness_path, check_existing=True)
                    except Exception as e_load_rough:
                        print(f" Error loading rough image: {e_load_rough}")
                    if rough_img:
                        median_roughness = calculate_value_from_image(rough_img, target_size=SCALED_SIZE, mode='grayscale', method='median')
                    else:
                        print(f" Error: load None for rough path.")
                if median_roughness is not None:
                    final_roughness_value = median_roughness
                    if not metal_map_found:
                        final_roughness_value = 1.0 - median_roughness; print(f" Inverting ROUGH->Gloss: {median_roughness:.3f} -> {final_roughness_value:.3f}")
                    new_material.roughness = min(max(0.0, final_roughness_value), 1.0); print(f" Set viewport roughness: {new_material.roughness:.3f}")
                else:
                    print(f" Warn: Could not set viewport roughness.")
                # --- Set Viewport Metallic (Median) ---
                median_metallic = None
                if metal_map_found:
                    metal_img = None  # BUGFIX: ensure bound even if load raises
                    try:
                        metal_img = bpy.data.images.load(metallic_path, check_existing=True)
                    except Exception as e_load_metal:
                        print(f" Error loading metal image: {e_load_metal}")
                    if metal_img:
                        median_metallic = calculate_value_from_image(metal_img, target_size=SCALED_SIZE, mode='grayscale', method='median')
                    else:
                        print(f" Error: load None for metal path.")
                if median_metallic is not None:
                    new_material.metallic = median_metallic; print(f" Set viewport metallic: {median_metallic:.3f}")
                else:
                    new_material.metallic = 0.0  # Default
                    if metal_map_found: print(f" Warn: Could not calc viewport metallic. Set 0.0.")
                    else: print(f" Set viewport metallic to default: 0.0")
                # --- Mark Material as Asset ---
                mat_asset_data = None
                try:
                    if not new_material.asset_data: new_material.asset_mark(); print(f" Marked material as asset.")
                    mat_asset_data = new_material.asset_data
                except Exception as e_asset:
                    print(f" Error marking mat asset: {e_asset}")
                # --- Copy Asset Tags from the node group to the material ---
                if mat_asset_data and linked_nodegroup.asset_data:
                    try:
                        source_tags = linked_nodegroup.asset_data.tags; target_tags = mat_asset_data.tags
                        tags_copied_count = 0; existing_target_tag_names = {t.name for t in target_tags}
                        for src_tag in source_tags:
                            if src_tag.name not in existing_target_tag_names:
                                target_tags.new(name=src_tag.name); tags_copied_count += 1
                        if tags_copied_count > 0: print(f" Copied {tags_copied_count} asset tags.")
                    except Exception as e_tags:
                        print(f" Error copying tags: {e_tags}")
                # --- Set Custom Preview for Material ---
                if preview_path and Path(preview_path).is_file():
                    try:
                        with bpy.context.temp_override(id=new_material):
                            bpy.ops.ed.lib_id_load_custom_preview(filepath=preview_path)
                    except RuntimeError as e_op:
                        print(f" Error running preview op for mat '{new_material.name}': {e_op}")
                    except Exception as e_prev:
                        print(f" Unexpected preview error for mat: {e_prev}")
                elif preview_path:
                    print(f" Warn: Thumb path not found for preview step: '{preview_path}'")
                # --- Increment counters & check the per-run limit ---
                materials_created += 1  # Overall counter for summary
                created_in_this_run += 1  # Counter for this run's limit
                if MATERIAL_CREATION_LIMIT > 0 and created_in_this_run >= MATERIAL_CREATION_LIMIT:
                    print(f"\n--- Material Creation Limit ({MATERIAL_CREATION_LIMIT}) Reached ---")
                    limit_reached_flag = True
                    break  # Exit inner loop
            except Exception as e:  # Catch errors for the whole NG processing block
                print(f" An unexpected error occurred processing NG '{asset_nodegroup_name}': {e}")
        # Check flag to stop outer loop
        if limit_reached_flag:
            print("Stopping library file iteration due to limit.")
            break  # Exit outer loop
    # --- Completion summary ---
    end_time = time.time(); duration = end_time - start_time; print("\n--- Material Creation Finished ---")
    print(f"Duration: {duration:.2f} seconds")
    print(f"Summary: Processed {nodegroups_processed} NGs. Created {materials_created} Mats this run. Skipped {materials_skipped}. Link Errors {link_errors}.")
    if limit_reached_flag: print(f"NOTE: Script stopped early due to creation limit ({MATERIAL_CREATION_LIMIT}). Run again to process more.")
# --- How to Run ---
# 1. Rerun Script 1 to add "thumbnail_filepath" property.
# 2. Setup Asset Library in Prefs. Set ASSET_LIBRARY_NAME below.
# 3. In current file, create "Template_PBRMaterial" with "PBRSET_PLACEHOLDER" node.
# 4. Set MATERIAL_CREATION_LIMIT in Config section above (0 for unlimited).
# 5. Paste script & Run (Alt+P).
if __name__ == "__main__":
    # Entry point: sanity-check configuration before running the main pass.
    if ASSET_LIBRARY_NAME == "My Asset Library": # Placeholder default was never changed
        print("\nERROR: Please update the 'ASSET_LIBRARY_NAME' variable in the script's Configuration section.")
        print(" Set it to the name of your asset library in Blender Preferences before running.\n")
    elif not bpy.data.materials.get(TEMPLATE_MATERIAL_NAME):
        print(f"\nERROR: Template material '{TEMPLATE_MATERIAL_NAME}' not found in current file.\n")
    else:
        create_materials_for_library_assets(ASSET_LIBRARY_NAME)

View File

@@ -0,0 +1,988 @@
# Full script - PBR Texture Importer V4 (Manifest, Auto-Save/Reload, Aspect Ratio, Asset Tags)
import bpy
import os # For auto-save rename/remove
from pathlib import Path
import time
import base64
import numpy as np # For stats calculation
import json # For manifest handling
import re # For parsing scaling string
# --- USER CONFIGURATION ---
# File Paths & Templates
texture_root_directory = r"G:\02 Content\10-19 Content\13 Textures Power of Two\13.00" # <<< CHANGE THIS PATH!
PARENT_TEMPLATE_NAME = "Template_PBRSET" # Name of the parent node group template
CHILD_TEMPLATE_NAME = "Template_PBRTYPE" # Name of the child node group template
# Processing Limits & Intervals
MAX_NEW_GROUPS_PER_RUN = 1000 # Max NEW parent groups created per run before stopping
SAVE_INTERVAL = 25 # Auto-save interval during NEW group creation (every N groups)
# Features & Behavior
AUTO_SAVE_ENABLED = True # Enable periodic auto-saving (main file + manifest) during processing?
AUTO_RELOAD_ON_FINISH = True # Save and reload the blend file upon successful script completion?
# Naming & Structure Conventions
VALID_EXTENSIONS = {".jpg", ".jpeg", ".png", ".tif", ".tiff"} # Allowed texture file types
RESOLUTION_LABELS = ["1k", "2k", "4k", "8k"] # Expected resolution labels (LOWEST FIRST for aspect/tag calc)
SG_VALUE_NODE_LABEL = "SpecularGlossy" # Label for the Specular/Glossy value node in parent template
HISTOGRAM_NODE_PREFIX = "Histogram-" # Prefix for Combine XYZ nodes storing stats (e.g., "Histogram-ROUGH")
ASPECT_RATIO_NODE_LABEL = "AspectRatioCorrection" # Label for the Value node storing the aspect ratio correction factor
# Texture Map Properties
PBR_COLOR_SPACE_MAP = { # Map PBR type (from filename) to Blender color space
    "AO": "sRGB", "COL-1": "sRGB", "COL-2": "sRGB", "COL-3": "sRGB",
    "DISP": "Non-Color", "NRM": "Non-Color", "REFL": "Non-Color", "ROUGH": "Non-Color",
    "METAL": "Non-Color", "FUZZ": "Non-Color", "MASK": "Non-Color", "SSS": "sRGB",
}
DEFAULT_COLOR_SPACE = "sRGB" # Fallback color space if PBR type not in map
# --- END USER CONFIGURATION ---
# --- Helper Functions ---
def parse_texture_filename(filename_stem):
    """Split a texture filename stem into its naming-convention fields.

    Expected convention: ``Tag_Groupname_Resolution_Scaling_PBRType``.
    Returns a dict with those five keys, or None (after printing a warning)
    when the stem does not contain exactly five underscore-separated parts.
    """
    parts = filename_stem.split('_')
    if len(parts) != 5:
        print(f" Warn: Skip '{filename_stem}' - Expected 5 parts, found {len(parts)}.")
        return None
    field_names = ("Tag", "Groupname", "Resolution", "Scaling", "PBRType")
    return dict(zip(field_names, parts))
def find_nodes_by_label(node_tree, label, node_type=None):
    """Return every node in *node_tree* whose label equals *label*.

    When *node_type* is given, the node's ``type`` must match as well.
    An empty list is returned for a missing tree or when nothing matches.
    """
    if not node_tree:
        return []
    return [
        node
        for node in node_tree.nodes
        if node.label
        and node.label == label
        and (node_type is None or node.type == node_type)
    ]
def encode_name_b64(name_str):
    """Encode *name_str* as URL-safe Base64 (ASCII) for node group names.

    On any encoding error the original value is returned unchanged so a
    caller always gets a usable name.
    """
    try:
        raw = name_str.encode('utf-8')
        return base64.urlsafe_b64encode(raw).decode('ascii')
    except Exception as e:
        print(f" Error base64 encoding '{name_str}': {e}")
        return name_str # Fallback to original name on error
def calculate_image_stats(image):
    """Return (min, max, median) of the first channel of *image*, or None.

    The whole pixel buffer is read into a float32 NumPy array; for
    multi-channel images only channel 0 is used. None is returned for a
    missing image, inconsistent dimensions, an out-of-memory condition,
    or any unexpected failure.
    """
    if not image:
        return None
    pixels_arr = None
    value_channel_arr = None
    result = None
    try:
        width = image.size[0]; height = image.size[1]; channels = image.channels
        if width == 0 or height == 0 or channels == 0:
            print(f" Warn: Invalid dims for '{image.name}'. Skip stats."); return None
        actual_len = len(image.pixels)
        if width * height * channels != actual_len:
            print(f" Warn: Pixel buffer mismatch for '{image.name}'. Skip stats."); return None
        if actual_len == 0:
            return None
        pixels_arr = np.fromiter(image.pixels, dtype=np.float32, count=actual_len)
        if channels == 1:
            value_channel_arr = pixels_arr
        elif channels >= 2:
            # Strided view: every 'channels'-th element is channel 0.
            value_channel_arr = pixels_arr[0::channels]
        else:
            return None
        if value_channel_arr is None or value_channel_arr.size == 0:
            print(f" Warn: No value channel for '{image.name}'. Skip stats."); return None
        result = (
            float(np.min(value_channel_arr)),
            float(np.max(value_channel_arr)),
            float(np.median(value_channel_arr)),
        )
    except MemoryError:
        print(f" Error: Not enough memory for stats calc on '{image.name}'.")
    except Exception as e:
        print(f" Error during stats calc for '{image.name}': {e}")
        import traceback; traceback.print_exc()
    finally:
        # Drop references to the (potentially large) arrays promptly.
        if value_channel_arr is not None:
            del value_channel_arr
        if pixels_arr is not None:
            del pixels_arr
    return result
def calculate_aspect_ratio_factor(image_width, image_height, scaling_string):
    """Return the X-axis UV correction factor for an image.

    "EVEN" scaling yields the image's own width/height ratio. "Xnnn"/"Ynnn"
    apply a non-uniform correction of nnn percent on the given axis. Any
    invalid input falls back to the image's current aspect ratio (or 1.0
    when the height is not positive).
    """
    if image_height <= 0:
        print(" Warn: Image height is zero, cannot calculate aspect ratio. Returning 1.0.")
        return 1.0 # Return 1.0 if height is invalid
    current_aspect_ratio = image_width / image_height
    # Uniform scaling: the correction factor is simply the image's own ratio.
    if scaling_string.upper() == "EVEN":
        return current_aspect_ratio
    # Non-uniform scaling: parse "Xnnn" / "Ynnn" (case-insensitive).
    match = re.match(r"([XY])(\d+)", scaling_string, re.IGNORECASE)
    if not match:
        print(f" Warn: Invalid Scaling string format '{scaling_string}'. Returning current ratio {current_aspect_ratio:.4f} as fallback.")
        return current_aspect_ratio
    axis = match.group(1).upper()
    try:
        amount = int(match.group(2))
        if amount <= 0:
            print(f" Warn: Zero or negative Amount in Scaling string '{scaling_string}'. Returning current ratio {current_aspect_ratio:.4f}.")
            return current_aspect_ratio
    except ValueError:
        print(f" Warn: Invalid Amount in Scaling string '{scaling_string}'. Returning current ratio {current_aspect_ratio:.4f}.")
        return current_aspect_ratio
    factor = current_aspect_ratio # Safe default if the math below fails
    scaling_factor_percent = amount / 100.0
    try:
        if axis == 'X':
            if scaling_factor_percent == 0:
                raise ZeroDivisionError
            factor = current_aspect_ratio / scaling_factor_percent
        elif axis == 'Y':
            factor = current_aspect_ratio * scaling_factor_percent
    except ZeroDivisionError:
        print(f" Warn: Division by zero during factor calculation. Returning current ratio {current_aspect_ratio:.4f}.")
        return current_aspect_ratio
    return factor
# --- Manifest Helper Functions ---
def get_manifest_path(context_filepath):
    """Derive the manifest JSON path that sits next to the blend file.

    Returns ``<blend dir>/<blend stem>_manifest.json`` as a Path, or None
    when *context_filepath* is empty (i.e. the blend file is unsaved).
    """
    if not context_filepath:
        return None
    blend_path = Path(context_filepath)
    return blend_path.with_name(f"{blend_path.stem}_manifest.json")
def load_manifest(manifest_path):
    """Load manifest data from a JSON file.

    Returns the parsed dict, or an empty dict when the path is missing,
    the file does not exist, is corrupted, or cannot be read.
    """
    if not manifest_path or not manifest_path.exists():
        return {}
    try:
        with open(manifest_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except json.JSONDecodeError:
        print(f"!!! ERROR: Manifest file '{manifest_path.name}' is corrupted. Starting fresh. !!!")
        return {}
    except Exception as e:
        print(f"!!! ERROR: Could not load manifest file '{manifest_path.name}': {e} !!!")
        return {}
    print(f" Loaded manifest from: {manifest_path.name}")
    return data
def save_manifest(manifest_path, data):
    """Write *data* as pretty-printed JSON to *manifest_path*.

    Returns True on success, False when the path is missing or any write
    error occurs (the error is printed, not raised).
    """
    if not manifest_path:
        return False
    try:
        with open(manifest_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)
    except Exception as e:
        print(f"!!!!!!!!!!!!!!!!!!!\n!!! Manifest save FAILED: {e} !!!\n!!!!!!!!!!!!!!!!!!!")
        return False
    return True
# --- Auto-Save Helper Function ---
def perform_safe_autosave(manifest_path, manifest_data):
    """Safely auto-save the main blend file together with its manifest.

    Strategy: delete stale .bak files, rename the current blend/manifest to
    .bak, then write fresh copies. On failure the renames are rolled back or
    the backups restored, so at least one consistent pair survives.
    Returns True when the blend file was saved (even if the manifest write
    failed), False when the save was skipped or aborted.
    """
    blend_filepath = bpy.data.filepath
    if not blend_filepath or not manifest_path:
        print(" Skipping auto-save: Blend file is not saved.")
        return False
    print(f"\n--- Attempting Auto-Save ({time.strftime('%H:%M:%S')}) ---")
    blend_path = Path(blend_filepath)
    manifest_path_obj = Path(manifest_path) # Ensure it's a Path object
    blend_bak_path = blend_path.with_suffix('.blend.bak')
    manifest_bak_path = manifest_path_obj.with_suffix('.json.bak')
    # 1. Delete old backups if they exist
    try:
        if blend_bak_path.exists():
            blend_bak_path.unlink()
        if manifest_bak_path.exists():
            manifest_bak_path.unlink()
    except OSError as e:
        print(f" Warn: Could not delete old backup file: {e}")
        # Continue anyway, renaming might still work
    # 2. Rename current files to backup
    renamed_blend = False
    renamed_manifest = False
    try:
        if blend_path.exists():
            os.rename(blend_path, blend_bak_path)
            renamed_blend = True
        if manifest_path_obj.exists():
            os.rename(manifest_path_obj, manifest_bak_path)
            renamed_manifest = True
    except OSError as e:
        print(f"!!! ERROR: Failed to rename files for backup: {e} !!!")
        # Attempt to roll back renames if only one succeeded
        if renamed_blend and not renamed_manifest and blend_bak_path.exists():
            print(f" Attempting rollback: Renaming {blend_bak_path.name} back...")
            try:
                os.rename(blend_bak_path, blend_path)
            except OSError as rb_e:
                print(f" Rollback rename of blend file FAILED: {rb_e}")
        if renamed_manifest and not renamed_blend and manifest_bak_path.exists():
            print(f" Attempting rollback: Renaming {manifest_bak_path.name} back...")
            try:
                os.rename(manifest_bak_path, manifest_path_obj)
            except OSError as rb_e:
                print(f" Rollback rename of manifest file FAILED: {rb_e}")
        print("--- Auto-Save ABORTED ---")
        return False
    # 3. Save new main blend file
    save_blend_success = False
    try:
        bpy.ops.wm.save_mainfile()
        print(f" Saved main blend file: {blend_path.name}")
        save_blend_success = True
    except Exception as e:
        print(f"!!!!!!!!!!!!!!!!!!!!!!!!!\n!!! Auto-Save FAILED (Blend File Save): {e} !!!\n!!!!!!!!!!!!!!!!!!!!!!!!!")
        # Attempt to restore from backup
        print(" Attempting to restore from backup...")
        try:
            if blend_bak_path.exists():
                os.rename(blend_bak_path, blend_path)
            if manifest_bak_path.exists():
                os.rename(manifest_bak_path, manifest_path_obj)
            print(" Restored from backup.")
        except OSError as re:
            # NOTE(review): 'as re' shadows the imported 're' module inside
            # this handler — rename the binding if regexes are ever needed here.
            print(f"!!! CRITICAL: Failed to restore from backup after save failure: {re} !!!")
            print(f"!!! Please check for '.bak' files manually in: {blend_path.parent} !!!")
        print("--- Auto-Save ABORTED ---")
        return False
    # 4. Save new manifest file (only if blend save succeeded)
    if save_blend_success:
        if save_manifest(manifest_path, manifest_data):
            print(f" Saved manifest file: {manifest_path_obj.name}")
            print("--- Auto-Save Successful ---")
            return True
        else:
            # Manifest save failed, but blend file is okay. Warn user.
            print("!!! WARNING: Auto-save completed for blend file, but manifest save FAILED. Manifest may be out of sync. !!!")
            return True # Still counts as 'completed' in terms of blend file safety
    return False # Should not be reached
# --- Asset Tagging Helper Functions ---
def add_tag_if_new(asset_data, tag_name):
    """Add *tag_name* to *asset_data* unless invalid or already present.

    Returns True only when a brand-new tag was created; False for invalid
    input, an existing tag, or a failed ``tags.new`` call.
    """
    if not asset_data or not isinstance(tag_name, str):
        return False # Invalid input
    cleaned = tag_name.strip() # Remove leading/trailing whitespace
    if not cleaned:
        return False # Don't add empty tags
    # Skip when the tag already exists on this asset.
    if cleaned in {t.name for t in asset_data.tags}:
        return False # Not added because it existed
    try:
        asset_data.tags.new(cleaned)
    except Exception as e:
        print(f" Error adding tag '{cleaned}': {e}")
        return False
    print(f" + Added Asset Tag: '{cleaned}'")
    return True
def get_supplier_tag_from_path(file_path_str, groupname):
    """
    Determine the supplier tag from the directory layout around a texture.

    Supports both ``.../Supplier/Groupname/file.ext`` (returns "Supplier")
    and ``.../Supplier/file.ext`` (returns "Supplier"). Returns None when
    the path is not an existing file, is too shallow to decide, or on any
    unexpected error.
    """
    try:
        file_path = Path(file_path_str).resolve()
        groupname_lower = groupname.lower()
        if not file_path.is_file():
            print(f" Warn (get_supplier_tag): Input path is not a file: {file_path_str}")
            return None
        current_dir = file_path.parent # Directory containing the file
        if not current_dir:
            print(f" Warn (get_supplier_tag): Cannot get parent directory for {file_path_str}")
            return None # Cannot determine without parent
        parent_dir = current_dir.parent # Directory potentially containing the 'supplier' name
        # At the filesystem root (parent == self) there is no supplier level.
        if not parent_dir or parent_dir == current_dir:
            print(f" Warn (get_supplier_tag): File path too shallow to determine supplier reliably: {file_path_str}")
            return None
        if current_dir.name.lower() == groupname_lower:
            # Layout .../Supplier/Groupname/file.ext -> take the grandparent name.
            return parent_dir.name
        # Layout .../Supplier/file.ext -> take the file's own directory name.
        return current_dir.name
    except Exception as e:
        print(f" Error getting supplier tag for {groupname} from path {file_path_str}: {e}")
        return None
def apply_asset_tags(parent_group, groupname, group_info):
    """Mark *parent_group* as an asset (if needed) and apply asset tags.

    Currently applies only the supplier tag, derived from the directory
    layout of the lowest-resolution texture path recorded in
    ``group_info["pbr_types"]``. Further tagging hooks are sketched in the
    commented examples at the end.
    """
    if not parent_group:
        return
    # 1. Ensure group is marked as an asset
    try:
        if not parent_group.asset_data:
            print(f" Marking '{parent_group.name}' as asset for tagging.")
            parent_group.asset_mark()
        # Ensure asset_data is available after marking
        if not parent_group.asset_data:
            print(f" Error: Could not access asset_data for '{parent_group.name}' after marking.")
            return
        asset_data = parent_group.asset_data
    except Exception as e_mark:
        print(f" Error marking group '{parent_group.name}' as asset: {e_mark}")
        return # Cannot proceed without asset_data
    # 2. Apply Supplier Tag (Current Requirement)
    try:
        # Find lowest resolution path (RESOLUTION_LABELS is ordered lowest first).
        lowest_res_path = None; found_res = False
        pbr_types_dict = group_info.get("pbr_types", {})
        for res_label in RESOLUTION_LABELS:
            for res_data in pbr_types_dict.values(): # Check all PBR types for this res
                if res_label in res_data:
                    lowest_res_path = res_data[res_label]
                    found_res = True
                    break # Found path for this resolution label
            if found_res:
                break # Found lowest available resolution path
        if lowest_res_path:
            supplier_tag = get_supplier_tag_from_path(lowest_res_path, groupname)
            if supplier_tag:
                add_tag_if_new(asset_data, supplier_tag) # Use helper to add if new
        else:
            print(f" Warn (apply_asset_tags): No image path found for group '{groupname}' to determine supplier tag.")
    except Exception as e_supp:
        print(f" Error during supplier tag processing for '{groupname}': {e_supp}")
    # 3. --- Future Tagging Logic Placeholder ---
    # Example: Tag based on PBR Types present
    # try:
    #     present_pbr_types = list(group_info.get("pbr_types", {}).keys())
    #     for pbr_tag in present_pbr_types:
    #         add_tag_if_new(asset_data, f"PBR_{pbr_tag}")
    # except Exception as e_pbr:
    #     print(f" Error during PBR type tagging for '{groupname}': {e_pbr}")
    # Example: Tag based on filename Tag (if not default like 'T-MR')
    # filename_tag = group_info.get("tag")  # Need to store 'Tag' in group_info during scan
    # if filename_tag and filename_tag not in ["T-MR", "T-SG"]:
    #     add_tag_if_new(asset_data, f"Tag_{filename_tag}")
    # --- End Future Tagging Logic ---
# --- Main Processing Function ---
def process_textures_to_groups(root_directory: str) -> bool:
    """Scan a texture directory tree and create/update PBR node groups.

    One call performs three phases:
      1. File scan: walks ``root_directory`` once, parses each texture
         filename via ``parse_texture_filename`` and buckets paths by
         Groupname -> PBR type -> resolution label, also recording the
         per-group scaling string, SG flag (Tag == "T-SG") and a thumbnail
         candidate (the 1k COL-1 map).
      2. Node-group build: for every Groupname, copies the parent/child
         template node groups (or updates existing ones), wires child groups
         into labeled placeholder nodes, links outputs, stores aspect-ratio,
         SG and histogram values in designated Value/CombineXYZ nodes, and
         marks/tags the parent group as an asset.
      3. Bookkeeping: records finished group/pbr/resolution combos in a
         JSON manifest (so re-runs skip them), auto-saves periodically, and
         stops creating NEW parent groups once MAX_NEW_GROUPS_PER_RUN is hit.

    Args:
        root_directory: Root folder of the texture library to scan.

    Returns:
        True on normal completion — including "no textures found" and the
        early stop at MAX_NEW_GROUPS_PER_RUN; False only on setup failure
        (missing directory or missing template node groups).
    """
    start_time = time.time()
    print(f"--- Starting Texture Processing ---")
    print(f"Scanning directory: {root_directory}")
    root_path = Path(root_directory)
    if not root_path.is_dir():
        print(f"Error: Directory not found: {root_directory}")
        return False # Indicate failure
    # --- Manifest Setup ---
    # Manifest is only usable when the .blend has been saved (needs a filepath).
    current_blend_filepath = bpy.data.filepath
    manifest_path = get_manifest_path(current_blend_filepath)
    manifest_data = {}
    manifest_enabled = False
    if manifest_path:
        manifest_data = load_manifest(manifest_path)
        manifest_enabled = True
    # Flag will be True if any change requires saving the manifest
    manifest_needs_saving = False
    # --- End Manifest Setup ---
    # --- Load Templates ---
    template_parent = bpy.data.node_groups.get(PARENT_TEMPLATE_NAME)
    template_child = bpy.data.node_groups.get(CHILD_TEMPLATE_NAME)
    if not template_parent:
        print(f"Error: Parent template '{PARENT_TEMPLATE_NAME}' not found.")
        return False
    if not template_child:
        print(f"Error: Child template '{CHILD_TEMPLATE_NAME}' not found.")
        return False
    print(f"Found templates: '{PARENT_TEMPLATE_NAME}', '{CHILD_TEMPLATE_NAME}'")
    # --- End Load Templates ---
    # --- Initialize Data Structures ---
    # Stores {"GroupName": {"pbr_types": {...}, "scaling": "...", "sg": False, "thumb": "..."}}
    texture_data = {}
    file_count, processed_files = 0, 0
    groups_created, groups_updated, child_groups_created, child_groups_updated = 0, 0, 0, 0
    nodes_updated, links_created = 0, 0
    # Cache for image datablocks loaded in THIS RUN only
    # (maps path str -> bpy Image, or None to remember a failed load)
    loaded_images_this_run = {}
    # --- End Initialize Data Structures ---
    print("Scanning files...")
    # --- File Scanning ---
    for dirpath, _, filenames in os.walk(root_directory):
        for filename in filenames:
            file_path = Path(dirpath) / filename
            # Check extension
            if file_path.suffix.lower() not in VALID_EXTENSIONS:
                continue
            file_count += 1
            filename_stem = file_path.stem
            parsed = parse_texture_filename(filename_stem)
            if not parsed:
                continue # Skip if filename doesn't match format
            # Extract parts
            groupname = parsed["Groupname"]
            pbr_type = parsed["PBRType"]
            resolution_label = parsed["Resolution"].lower()
            scaling_str = parsed["Scaling"]
            tag_str = parsed["Tag"].upper()
            file_path_str = str(file_path)
            # Validate resolution label
            # NOTE(review): the literal '(unknown)' below looks like a lost
            # placeholder (probably the offending filename) — confirm intent.
            if resolution_label not in RESOLUTION_LABELS:
                print(f"Warn: Skip '(unknown)' - Invalid Res '{resolution_label}'. Expected one of {RESOLUTION_LABELS}")
                continue
            # Ensure base structure for group exists in texture_data
            group_entry = texture_data.setdefault(groupname, {
                "pbr_types": {}, "scaling": None, "sg": False, "thumb": None
            })
            # Store texture path under the specific PBR type and resolution
            group_entry["pbr_types"].setdefault(pbr_type, {})[resolution_label] = file_path_str
            # Store scaling string ONCE per groupname (first encountered wins)
            if group_entry["scaling"] is None:
                group_entry["scaling"] = scaling_str
            elif group_entry["scaling"] != scaling_str:
                # Warn only once per group if inconsistency found
                if not group_entry.get("scaling_warning_printed", False):
                    print(f"  Warn: Inconsistent 'Scaling' string found for group '{groupname}'. "
                          f"Using first encountered: '{group_entry['scaling']}'.")
                    group_entry["scaling_warning_printed"] = True
            # Track SG status and thumbnail path
            if tag_str == "T-SG":
                group_entry["sg"] = True
            # Use 1k COL-1 as the potential thumbnail source
            if resolution_label == "1k" and pbr_type == "COL-1":
                group_entry["thumb"] = file_path_str
            processed_files += 1
    # --- End File Scanning ---
    print(f"\nFile Scan Complete. Found {file_count} files, parsed {processed_files} valid textures.")
    total_groups_found = len(texture_data)
    print(f"Total unique Groupnames found: {total_groups_found}")
    if not texture_data:
        print("No valid textures found. Exiting.")
        return True # No work needed is considered success
    print("\n--- Processing Node Groups ---")
    all_groupnames = sorted(list(texture_data.keys()))
    processing_stopped_early = False
    # --- Main Processing Loop ---
    for groupname in all_groupnames:
        group_info = texture_data[groupname] # Get pre-scanned info
        pbr_types_data = group_info.get("pbr_types", {})
        scaling_string_for_group = group_info.get("scaling")
        sg_status_for_group = group_info.get("sg", False)
        thumbnail_path_for_group = group_info.get("thumb")
        target_parent_name = f"PBRSET_{groupname}"
        print(f"\nProcessing Group: '{target_parent_name}'")
        parent_group = bpy.data.node_groups.get(target_parent_name)
        is_new_parent = False
        # --- Find or Create Parent Group ---
        if parent_group is None:
            # Check batch limit BEFORE creating
            if groups_created >= MAX_NEW_GROUPS_PER_RUN:
                print(f"\n--- Reached NEW parent group limit ({MAX_NEW_GROUPS_PER_RUN}). Stopping. ---")
                processing_stopped_early = True
                break # Exit the main groupname loop
            print(f"  Creating new parent group: '{target_parent_name}'")
            parent_group = template_parent.copy()
            if not parent_group:
                print(f"  Error: Failed copy parent template. Skip group '{groupname}'.")
                continue # Skip to next groupname
            parent_group.name = target_parent_name
            groups_created += 1
            is_new_parent = True
            # --- Auto-Save Trigger ---
            # Trigger AFTER creating the group and incrementing counter
            if AUTO_SAVE_ENABLED and groups_created > 0 and groups_created % SAVE_INTERVAL == 0:
                if perform_safe_autosave(manifest_path, manifest_data):
                    # If auto-save succeeded, manifest is up-to-date on disk
                    manifest_needs_saving = False
                else:
                    # Auto-save failed, continue but warn
                    print("!!! WARNING: Auto-save failed. Continuing processing... !!!")
            # --- End Auto-Save Trigger ---
        else: # Update Existing Parent Group
            print(f"  Updating existing parent group: '{target_parent_name}'")
            groups_updated += 1
        # --- End Find or Create Parent Group ---
        # --- Process Parent Group Internals ---
        # This block processes both newly created and existing parent groups
        try:
            # --- Calculate and Store Aspect Ratio Correction (Once per group) ---
            # Find the designated Value node in the parent template
            aspect_node_list = find_nodes_by_label(parent_group, ASPECT_RATIO_NODE_LABEL, 'VALUE')
            if aspect_node_list:
                aspect_node = aspect_node_list[0] # Assume first found is correct
                if scaling_string_for_group:
                    # Find the path to the lowest resolution image available
                    lowest_res_path = None; found_res = False
                    # Check resolution labels in configured order (e.g., "1k", "2k"...)
                    # (assumes RESOLUTION_LABELS is ordered lowest-first — TODO confirm)
                    for res_label in RESOLUTION_LABELS:
                        # Check all PBR types for this resolution
                        for res_data in pbr_types_data.values():
                            if res_label in res_data:
                                lowest_res_path = res_data[res_label]
                                found_res = True
                                break # Found path for this resolution label
                        if found_res:
                            break # Found lowest available resolution path
                    if lowest_res_path:
                        # Load the image (use cache if possible)
                        img = None; img_load_error = False
                        if lowest_res_path in loaded_images_this_run:
                            img = loaded_images_this_run[lowest_res_path]
                            img_load_error = (img is None) # Check if cached result was failure
                        else:
                            # Attempt to load if not cached
                            try:
                                img_path_obj = Path(lowest_res_path)
                                if img_path_obj.is_file():
                                    img = bpy.data.images.load(lowest_res_path, check_existing=True)
                                else:
                                    img_load_error = True
                                    print(f"    Error: Aspect source image not found: {lowest_res_path}")
                                if img is None and not img_load_error: # Check if load function returned None
                                    img_load_error = True
                                    print(f"    Error: Failed loading aspect source image: {lowest_res_path}")
                            except Exception as e_load_aspect:
                                print(f"    Error loading aspect source image: {e_load_aspect}")
                                img_load_error = True
                            # Cache the result (image object or None)
                            loaded_images_this_run[lowest_res_path] = img if not img_load_error else None
                        if not img_load_error and img:
                            # Get dimensions and calculate factor
                            img_width, img_height = img.size[0], img.size[1]
                            factor = calculate_aspect_ratio_factor(img_width, img_height, scaling_string_for_group)
                            print(f"    Calculated Aspect Ratio Factor: {factor:.4f} (from {img_width}x{img_height}, Scaling='{scaling_string_for_group}')")
                            # Store factor in node if value changed significantly
                            # (epsilon avoids dirtying the file on float round-trip noise)
                            if abs(aspect_node.outputs[0].default_value - factor) > 0.0001:
                                aspect_node.outputs[0].default_value = factor
                                print(f"    Set '{ASPECT_RATIO_NODE_LABEL}' node value to {factor:.4f}")
                        else:
                            print(f"    Warn: Could not load image '{lowest_res_path}' for aspect ratio calc.")
                    else:
                        print(f"    Warn: No suitable image found (e.g., 1k) to calculate aspect ratio for '{groupname}'.")
                else:
                    print(f"    Warn: No Scaling string found for group '{groupname}'. Cannot calculate aspect ratio.")
            # else: # Optional Warning if node is missing from template
            #     print(f"  Warn: Value node '{ASPECT_RATIO_NODE_LABEL}' not found in parent group '{parent_group.name}'. Cannot store aspect ratio.")
            # --- End Aspect Ratio Correction ---
            # Set SG Value (1.0 = specular/gloss workflow, 0.0 = metal/rough)
            sg_nodes = find_nodes_by_label(parent_group, SG_VALUE_NODE_LABEL, 'VALUE')
            if sg_nodes:
                sg_node = sg_nodes[0]
                target_val = 1.0 if sg_status_for_group else 0.0
                if abs(sg_node.outputs[0].default_value - target_val) > 0.001:
                    sg_node.outputs[0].default_value = target_val
                    print(f"    Set '{SG_VALUE_NODE_LABEL}' to: {target_val}")
            # Set Asset Info (Thumbnail Path Prop, Initial Preview & Tagging)
            # This block runs for both new and existing groups
            try:
                # 1. Set/Update Thumbnail Path Property & Mark Asset
                if not parent_group.asset_data:
                    parent_group.asset_mark()
                    print(f"    Marked '{parent_group.name}' as asset.")
                # Update thumbnail property logic
                if thumbnail_path_for_group:
                    thumb_path_obj = Path(thumbnail_path_for_group)
                    if thumb_path_obj.is_file():
                        if parent_group.get("thumbnail_filepath") != thumbnail_path_for_group:
                            parent_group["thumbnail_filepath"] = thumbnail_path_for_group
                            if not is_new_parent: print(f"    Updated thumbnail path property.") # Log update only if not new
                    elif "thumbnail_filepath" in parent_group:
                        del parent_group["thumbnail_filepath"]
                        if not is_new_parent: print(f"    Removed thumbnail path property (file not found).")
                elif "thumbnail_filepath" in parent_group:
                    del parent_group["thumbnail_filepath"]
                    if not is_new_parent: print(f"    Removed old thumbnail path property.")
                # 2. Set Initial Preview (Only if NEW parent)
                if is_new_parent and thumbnail_path_for_group and Path(thumbnail_path_for_group).is_file():
                    print(f"    Attempting initial preview from '{Path(thumbnail_path_for_group).name}'...")
                    try:
                        # temp_override targets the operator at this datablock
                        with bpy.context.temp_override(id=parent_group):
                            bpy.ops.ed.lib_id_load_custom_preview(filepath=thumbnail_path_for_group)
                        print(f"      Set initial custom preview.")
                    except Exception as e_prev:
                        print(f"      Preview Error: {e_prev}")
                # 3. Apply Asset Tags (Supplier, etc.)
                apply_asset_tags(parent_group, groupname, group_info)
            except Exception as e_asset_info:
                print(f"    Error setting asset info/tags: {e_asset_info}")
            # --- End Asset Info ---
            # --- Process Child Groups (PBR Types) ---
            for pbr_type, resolutions_data in pbr_types_data.items():
                # print(f"  Processing PBR Type: {pbr_type}") # Can be verbose
                # Find placeholder node in parent
                holder_nodes = find_nodes_by_label(parent_group, pbr_type, 'GROUP')
                if not holder_nodes:
                    print(f"    Warn: No placeholder node labeled '{pbr_type}' in parent group '{parent_group.name}'. Skipping PBR Type.")
                    continue
                holder_node = holder_nodes[0] # Assume first is correct
                # Determine child group name (Base64 encoded)
                logical_child_name = f"{groupname}_{pbr_type}"
                target_child_name_b64 = encode_name_b64(logical_child_name)
                # Find or Create Child Group
                child_group = bpy.data.node_groups.get(target_child_name_b64)
                if child_group is None:
                    # print(f"    Creating new child group for '{pbr_type}'") # Verbose
                    child_group = template_child.copy()
                    if not child_group:
                        print(f"    Error: Failed copy child template. Skip PBR Type.")
                        continue
                    child_group.name = target_child_name_b64
                    child_groups_created += 1
                else:
                    # print(f"    Updating existing child group for '{pbr_type}'") # Verbose
                    child_groups_updated += 1
                # Assign child group to placeholder if needed
                if holder_node.node_tree != child_group:
                    holder_node.node_tree = child_group
                    print(f"    Assigned child group '{child_group.name}' to placeholder '{holder_node.label}'.")
                # Connect placeholder output to parent output socket if needed
                try:
                    source_socket = holder_node.outputs[0] if holder_node.outputs else None
                    group_output_node = next((n for n in parent_group.nodes if n.type == 'GROUP_OUTPUT'), None)
                    target_socket = None
                    if group_output_node:
                        target_socket = group_output_node.inputs.get(pbr_type) # Get socket by name/label
                    if source_socket and target_socket:
                        # Check if link already exists
                        link_exists = any(link.from_socket == source_socket and link.to_socket == target_socket for link in parent_group.links)
                        if not link_exists:
                            parent_group.links.new(source_socket, target_socket)
                            links_created += 1
                            print(f"    Connected '{holder_node.label}' output to parent output socket '{pbr_type}'.")
                    # else: # Optional warning if sockets aren't found
                    #     if not source_socket: print(f"    Warn: No output socket found on placeholder '{holder_node.label}'.")
                    #     if not target_socket: print(f"    Warn: No input socket '{pbr_type}' found on parent output node.")
                except Exception as e_link:
                    print(f"    Error linking sockets for '{pbr_type}': {e_link}")
                # Ensure parent output socket type is Color
                # (interface.items_tree is the Blender 4.x node-group interface API)
                try:
                    item = parent_group.interface.items_tree.get(pbr_type)
                    if item and item.in_out == 'OUTPUT' and item.socket_type != 'NodeSocketColor':
                        item.socket_type = 'NodeSocketColor'
                        # print(f"    Set parent output socket '{pbr_type}' type to Color.") # Optional info
                except Exception as e_sock:
                    print(f"    Error updating socket type for '{pbr_type}': {e_sock}")
                # --- Process Resolutions within Child Group ---
                for resolution_label, image_path_str in resolutions_data.items():
                    # Find image texture nodes within the CHILD group
                    image_nodes = find_nodes_by_label(child_group, resolution_label, 'TEX_IMAGE')
                    if not image_nodes:
                        # print(f"    Warn: No node labeled '{resolution_label}' found in child group for '{pbr_type}'.") # Optional
                        continue
                    # --- >>> Manifest Check <<< ---
                    is_processed = False
                    if manifest_enabled: # Only check if manifest is enabled
                        # Check if this specific group/pbr/res combo is done
                        processed_resolutions = manifest_data.get(groupname, {}).get(pbr_type, [])
                        if resolution_label in processed_resolutions:
                            is_processed = True
                            # print(f"    Skipping {groupname}/{pbr_type}/{resolution_label} (Manifest)") # Verbose skip log
                    if is_processed:
                        continue # Skip to the next resolution
                    # --- >>> End Manifest Check <<< ---
                    # --- Load Image & Assign (if not skipped) ---
                    # print(f"    Processing Resolution: {resolution_label} for {pbr_type}") # Verbose
                    img = None
                    image_load_failed = False
                    # Check intra-run cache first
                    if image_path_str in loaded_images_this_run:
                        img = loaded_images_this_run[image_path_str]
                        image_load_failed = (img is None) # Respect cached failure
                    else:
                        # Not cached in this run, attempt to load
                        try:
                            image_path = Path(image_path_str)
                            if not image_path.is_file():
                                print(f"      Error: Image file not found: {image_path_str}")
                                image_load_failed = True
                            else:
                                # Use check_existing=True to potentially reuse existing datablocks
                                img = bpy.data.images.load(str(image_path), check_existing=True)
                                if not img:
                                    print(f"      Error: Failed loading image via bpy.data.images.load: {image_path_str}")
                                    image_load_failed = True
                                # else: # Success block is handled below
                                #     pass
                        except RuntimeError as e_runtime_load:
                            print(f"      Runtime Error loading image '{image_path_str}': {e_runtime_load}")
                            image_load_failed = True
                        except Exception as e_gen_load:
                            print(f"      Unexpected error loading image '{image_path_str}': {e_gen_load}")
                            image_load_failed = True
                        # Cache result (image object or None for failure)
                        loaded_images_this_run[image_path_str] = img if not image_load_failed else None
                    # --- Process image if loaded/cached successfully ---
                    if not image_load_failed and img:
                        try:
                            # Set Color Space
                            correct_color_space = PBR_COLOR_SPACE_MAP.get(pbr_type, DEFAULT_COLOR_SPACE)
                            if img.colorspace_settings.name != correct_color_space:
                                print(f"      Setting '{Path(img.filepath).name}' color space -> {correct_color_space}")
                                img.colorspace_settings.name = correct_color_space
                            # Histogram Stats Calculation (1k ROUGH/DISP only)
                            if resolution_label == "1k" and pbr_type in ["ROUGH", "DISP"]:
                                target_node_label = f"{HISTOGRAM_NODE_PREFIX}{pbr_type}"
                                target_nodes = find_nodes_by_label(parent_group, target_node_label, 'COMBXYZ')
                                if target_nodes:
                                    target_node = target_nodes[0]
                                    try:
                                        socket_x = target_node.inputs.get("X")
                                        socket_y = target_node.inputs.get("Y")
                                        socket_z = target_node.inputs.get("Z")
                                        if socket_x and socket_y and socket_z:
                                            print(f"      Calculating histogram stats for {pbr_type} 1K...")
                                            stats = calculate_image_stats(img)
                                            if stats:
                                                min_val, max_val, median_val = stats
                                                print(f"        Stats: Min={min_val:.4f}, Max={max_val:.4f}, Median={median_val:.4f}")
                                                # Store stats in the Combine XYZ node
                                                # (X=min, Y=max, Z=median by convention here)
                                                socket_x.default_value = min_val
                                                socket_y.default_value = max_val
                                                socket_z.default_value = median_val
                                                print(f"        Stored stats in '{target_node_label}'.")
                                            else:
                                                print(f"        Warn: Failed calc stats for '{Path(img.filepath).name}'.")
                                        # else: print(f"      Warn: Node '{target_node_label}' missing X/Y/Z sockets.")
                                    except Exception as e_combxyz_store:
                                        print(f"      Error processing stats in '{target_node_label}': {e_combxyz_store}")
                                # else: print(f"      Warn: No stats node '{target_node_label}' found.")
                            # Assign Image to nodes in child group
                            nodes_updated_this_res = 0
                            for image_node in image_nodes:
                                if image_node.image != img:
                                    image_node.image = img
                                    nodes_updated_this_res += 1
                            nodes_updated += nodes_updated_this_res
                            if nodes_updated_this_res > 0:
                                print(f"      Assigned image '{Path(img.filepath).name}' to {nodes_updated_this_res} node(s).")
                            # --- >>> Update Manifest <<< ---
                            if manifest_enabled:
                                # Ensure nested structure exists
                                manifest_data.setdefault(groupname, {}).setdefault(pbr_type, [])
                                # Add resolution if not already present
                                if resolution_label not in manifest_data[groupname][pbr_type]:
                                    manifest_data[groupname][pbr_type].append(resolution_label)
                                    # Keep the list sorted for consistency in the JSON file
                                    manifest_data[groupname][pbr_type].sort()
                                    manifest_needs_saving = True # Mark that we need to save later
                                    # print(f"      Marked {groupname}/{pbr_type}/{resolution_label} processed in manifest.") # Verbose
                            # --- >>> End Update Manifest <<< ---
                        except Exception as e_proc_img:
                            print(f"      Error during post-load processing for image '{image_path_str}': {e_proc_img}")
                            # Continue to next resolution even if post-load fails
                    # --- End Process image ---
                # --- End Resolution Loop ---
            # --- End PBR Type Loop ---
        except Exception as e_group:
            print(f"  !!! ERROR processing group '{groupname}': {e_group} !!!")
            import traceback; traceback.print_exc()
            continue # Continue to next groupname
    # --- End Main Processing Loop ---
    # --- Final Manifest Save ---
    # Save if manifest is enabled AND changes were made since the last save/start.
    # This happens even if the script stopped early due to MAX_NEW_GROUPS_PER_RUN.
    if manifest_enabled and manifest_needs_saving:
        print("\n--- Attempting Final Manifest Save (End of Run) ---")
        if save_manifest(manifest_path, manifest_data):
            print("  Manifest saved successfully.")
        # Error message handled within save_manifest
    # --- End Final Manifest Save ---
    # --- Final Summary ---
    end_time = time.time(); duration = end_time - start_time
    print("\n--- Script Run Finished ---")
    if processing_stopped_early:
        print(f"--- NOTE: Reached NEW parent group processing limit ({MAX_NEW_GROUPS_PER_RUN}). ---")
        print(f"--- You may need to SAVE manually, REVERT/RELOAD file, and RUN SCRIPT AGAIN. ---")
    print(f"Duration: {duration:.2f} seconds this run.")
    print(f"Summary: New Parents={groups_created}, Updated Parents={groups_updated}, New Children={child_groups_created}, Updated Children={child_groups_updated}.")
    print(f"         Images assigned={nodes_updated} times. Links created={links_created}.")
    # Add other stats if needed, e.g., number of tags added
    # --- End Final Summary ---
    return True # Indicate successful completion (or reaching limit)
# --- How to Run ---
# 1. Ensure 'numpy' is available in Blender's Python environment.
# 2. Create Node Group "Template_PBRSET": Configure placeholders, Value nodes (SG, Aspect Ratio), Stats nodes, outputs.
# 3. Create Node Group "Template_PBRTYPE": Configure Image Texture nodes labeled by resolution.
# 4. !! SAVE YOUR BLEND FILE AT LEAST ONCE !! for manifest, auto-saving, and auto-reloading to work.
# 5. Adjust variables in the '--- USER CONFIGURATION ---' section at the top as needed.
# 6. Paste into Blender's Text Editor and run (Alt+P or Run Script button). Check Window -> Toggle System Console.
# 7. If script stops due to limit: SAVE manually, REVERT/REOPEN file, RUN SCRIPT AGAIN. Manifest prevents reprocessing.
if __name__ == "__main__":
    # Entry point when run from Blender's Text Editor (not executed on import).
    print(f"Script execution started at: {time.strftime('%Y-%m-%d %H:%M:%S')}")
    # --- Pre-run validation of the CONFIG section ---
    setup_ok = True
    # The texture directory must exist and must not be the shipped placeholder.
    try:
        looks_like_placeholder = texture_root_directory == r"C:\path\to\your\texture\library"
        if looks_like_placeholder or not Path(texture_root_directory).is_dir():
            print(f"\nERROR: 'texture_root_directory' is invalid or a placeholder.")
            print(f"       Current value: '{texture_root_directory}'")
            setup_ok = False
    except Exception as path_err:
        print(f"\nERROR checking texture_root_directory: {path_err}")
        setup_ok = False
    # Both template node groups must already exist in this .blend file.
    for template_kind, template_name in (("Parent", PARENT_TEMPLATE_NAME), ("Child", CHILD_TEMPLATE_NAME)):
        if bpy.data.node_groups.get(template_name) is None:
            print(f"\nERROR: {template_kind} template node group '{template_name}' not found.")
            setup_ok = False
    # numpy is mandatory for the histogram-statistics step.
    try:
        import numpy
    except ImportError:
        print("\nCRITICAL ERROR: Python library 'numpy' not found (required for image stats).")
        print("       Please install numpy into Blender's Python environment.")
        setup_ok = False
    # --- Run the main pass only if every check passed ---
    run_succeeded = False
    if setup_ok:
        if not bpy.data.filepath:
            # Unsaved file: manifest / auto-save / auto-reload features no-op.
            print("\nWARNING: Blend file not saved. Manifest, Auto-Save, and Auto-Reload features disabled.")
        run_succeeded = process_textures_to_groups(texture_root_directory)
    else:
        print("\nScript aborted due to configuration errors.")
    # --- Optional final save & reload of the .blend file ---
    if run_succeeded and AUTO_RELOAD_ON_FINISH:
        if not bpy.data.filepath:
            print("\nSkipping final save & reload because the blend file is not saved.")
        else:
            print("\n--- Auto-saving and reloading blend file ---")
            try:
                bpy.ops.wm.save_mainfile()
                print("  Blend file saved.")
                print("  Reloading...")
                # open_mainfile restarts the session; execution effectively ends here.
                bpy.ops.wm.open_mainfile(filepath=bpy.data.filepath)
            except Exception as reload_err:
                print(f"!!! ERROR during final save/reload: {reload_err} !!!")
    # May never be reached if the reload above replaced the running session.
    print(f"Script execution finished processing at: {time.strftime('%Y-%m-%d %H:%M:%S')}")

File diff suppressed because it is too large Load Diff