{ "sourceFile": "asset_processor.py", "activeCommit": 0, "commits": [ { "activePatchIndex": 52, "patches": [ { "date": 1745225918059, "content": "Index: \n===================================================================\n--- \n+++ \n" }, { "date": 1745226173949, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -0,0 +1,1813 @@\n+# asset_processor.py\r\n+\r\n+import os\r\n+import shutil\r\n+import tempfile\r\n+import zipfile\r\n+import logging\r\n+import json\r\n+import re\r\n+import time\r\n+from pathlib import Path\r\n+from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n+\r\n+# Attempt to import image processing libraries\r\n+try:\r\n+ import cv2\r\n+ import numpy as np\r\n+except ImportError:\r\n+ print(\"ERROR: Missing required libraries. Please install opencv-python and numpy:\")\r\n+ print(\"pip install opencv-python numpy\")\r\n+ exit(1) # Exit if essential libraries are missing\r\n+\r\n+# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n+try:\r\n+ import OpenEXR\r\n+ import Imath\r\n+ _HAS_OPENEXR = True\r\n+except ImportError:\r\n+ _HAS_OPENEXR = False\r\n+ # Log this information - basic EXR might still work via OpenCV\r\n+ logging.debug(\"Optional 'OpenEXR' python package not found. 
EXR saving relies on OpenCV's built-in support.\")\r\n+\r\n+\r\n+# Assuming Configuration class is in configuration.py\r\n+try:\r\n+ from configuration import Configuration, ConfigurationError\r\n+except ImportError:\r\n+ print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n+ print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n+ exit(1)\r\n+\r\n+# Use logger defined in main.py (or configure one here if run standalone)\r\n+log = logging.getLogger(__name__)\r\n+# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\n+if not log.hasHandlers():\r\n+ logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n+\r\n+\r\n+# --- Custom Exception ---\r\n+class AssetProcessingError(Exception):\r\n+ \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n+ pass\r\n+\r\n+# --- Helper Functions ---\r\n+def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n+ \"\"\"Calculates dimensions maintaining aspect ratio, fitting within target_max_dim.\"\"\"\r\n+ if orig_w <= 0 or orig_h <= 0: return (target_max_dim, target_max_dim) # Avoid division by zero\r\n+\r\n+ ratio = orig_w / orig_h\r\n+ if ratio > 1: # Width is dominant\r\n+ target_w = target_max_dim\r\n+ target_h = max(1, round(target_w / ratio)) # Ensure height is at least 1\r\n+ else: # Height is dominant or square\r\n+ target_h = target_max_dim\r\n+ target_w = max(1, round(target_h * ratio)) # Ensure width is at least 1\r\n+ return int(target_w), int(target_h)\r\n+\r\n+def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n+ \"\"\"\r\n+ Calculates min, max, mean for a given numpy image array.\r\n+ Handles grayscale and multi-channel images. 
Converts to float64 for calculation.\r\n+ \"\"\"\r\n+ if image_data is None:\r\n+ log.warning(\"Attempted to calculate stats on None image data.\")\r\n+ return None\r\n+ try:\r\n+ # Use float64 for calculations to avoid potential overflow/precision issues\r\n+ data_float = image_data.astype(np.float64)\r\n+\r\n+ if len(data_float.shape) == 2: # Grayscale (H, W)\r\n+ min_val = float(np.min(data_float))\r\n+ max_val = float(np.max(data_float))\r\n+ mean_val = float(np.mean(data_float))\r\n+ stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n+ log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n+ elif len(data_float.shape) == 3: # Color (H, W, C)\r\n+ channels = data_float.shape[2]\r\n+ min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n+ max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n+ mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n+ # Assuming BGR(A?) order from OpenCV, report as list [B, G, R, (A)]\r\n+ stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n+ log.debug(f\"Calculated {channels}-Channel Stats: Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n+ else:\r\n+ log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n+ return None\r\n+ return stats\r\n+ except Exception as e:\r\n+ log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n+ return {\"error\": str(e)}\r\n+\r\n+\r\n+from collections import defaultdict # Added for grouping\r\n+\r\n+# --- Helper function ---\r\n+def _get_base_map_type(target_map_string: str) -> str:\r\n+ \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n+ match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n+ if match:\r\n+ return match.group(1).upper()\r\n+ return target_map_string.upper() # Fallback if no number suffix\r\n+\r\n+# --- Asset Processor Class 
---\r\n+class AssetProcessor:\r\n+ \"\"\"\r\n+ Handles the processing pipeline for a single asset (ZIP or folder).\r\n+ \"\"\"\r\n+ # Define the list of known grayscale map types (adjust as needed)\r\n+ GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n+\r\n+ def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n+ \"\"\"\r\n+ Initializes the processor for a given input asset.\r\n+\r\n+ Args:\r\n+ input_path: Path to the input ZIP file or folder.\r\n+ config: The loaded Configuration object.\r\n+ output_base_path: The base directory where processed output will be saved.\r\n+ overwrite: If True, forces reprocessing even if output exists.\r\n+ \"\"\"\r\n+ if not isinstance(input_path, Path): input_path = Path(input_path)\r\n+ if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n+ if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n+\r\n+ if not input_path.exists():\r\n+ raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n+ if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n+ raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n+\r\n+ self.input_path: Path = input_path\r\n+ self.config: Configuration = config\r\n+ self.output_base_path: Path = output_base_path\r\n+ self.overwrite: bool = overwrite # Store the overwrite flag\r\n+\r\n+ self.temp_dir: Path | None = None # Path to the temporary working directory\r\n+ self.classified_files: dict[str, list[dict]] = {\r\n+ \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n+ }\r\n+ self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n+ self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n+ self.metadata_file_path_temp: Path | None = None\r\n+\r\n+ # Initialize metadata collected during processing\r\n+ 
self.metadata: dict = {\r\n+ \"asset_name\": \"Unknown\",\r\n+ \"supplier_name\": self.config.supplier_name,\r\n+ \"asset_category\": self.config._core_settings.get('DEFAULT_ASSET_CATEGORY', \"Texture\"),\r\n+ \"archetype\": \"Unknown\",\r\n+ \"maps_present\": [],\r\n+ \"merged_maps\": [],\r\n+ \"shader_features\": [],\r\n+ \"source_files_in_extra\": [],\r\n+ \"image_stats_1k\": {},\r\n+ \"map_details\": {},\r\n+ \"aspect_ratio_change_string\": \"N/A\" # Replaces output_scaling_factors\r\n+ # Processing info added in _generate_metadata_file\r\n+ }\r\n+\r\n+ log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n+\r\n+ def process(self) -> str:\r\n+ \"\"\"\r\n+ Executes the full processing pipeline for the asset.\r\n+ Returns:\r\n+ str: Status (\"processed\", \"skipped\").\r\n+ \"\"\"\r\n+ log.info(f\"Processing asset: {self.input_path.name}\")\r\n+ try:\r\n+ self._setup_workspace()\r\n+ self._extract_input()\r\n+ self._inventory_and_classify_files()\r\n+ self._determine_base_metadata()\r\n+\r\n+ # --- Check if asset should be skipped ---\r\n+ # Ensure asset_name and supplier_name were determined before checking\r\n+ asset_name = self.metadata.get(\"asset_name\")\r\n+ supplier_name = self.metadata.get(\"supplier_name\")\r\n+\r\n+ # Only check for skipping if overwrite is False AND we have valid names\r\n+ if not self.overwrite and asset_name and asset_name != \"UnknownAssetName\" and supplier_name:\r\n+ supplier_sanitized = self._sanitize_filename(supplier_name)\r\n+ asset_name_sanitized = self._sanitize_filename(asset_name)\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ metadata_file_path = final_dir / self.config.metadata_filename\r\n+\r\n+ if final_dir.exists() and metadata_file_path.is_file():\r\n+ log.info(f\"Output directory and metadata found for '{asset_name_sanitized}' and overwrite is False. 
Skipping.\")\r\n+ # No need to call cleanup here, the finally block will handle it.\r\n+ return \"skipped\" # Return status\r\n+ elif self.overwrite:\r\n+ # Log only if asset name is known, otherwise it's redundant with the initial processing log\r\n+ known_asset_name = self.metadata.get('asset_name', self.input_path.name)\r\n+ # Avoid logging overwrite message if name is still unknown\r\n+ if known_asset_name != \"UnknownAssetName\" and known_asset_name != self.input_path.name:\r\n+ log.info(f\"Overwrite flag is set for '{known_asset_name}'. Processing will continue even if output exists.\")\r\n+ # --- End Skip Check ---\r\n+\r\n+ # Continue with processing if not skipped\r\n+ self._process_maps()\r\n+ self._merge_maps()\r\n+ self._generate_metadata_file()\r\n+ self._organize_output_files()\r\n+ log.info(f\"Asset processing completed successfully for: {self.metadata.get('asset_name', self.input_path.name)}\")\r\n+ return \"processed\" # Return status\r\n+ except Exception as e:\r\n+ # Log error with traceback if it hasn't been logged already\r\n+ if not isinstance(e, (AssetProcessingError, ConfigurationError)): # Avoid double logging known types\r\n+ log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name}: {e}\")\r\n+ # Ensure error is propagated\r\n+ if not isinstance(e, AssetProcessingError):\r\n+ raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n+ else:\r\n+ raise # Re-raise AssetProcessingError or ConfigurationError\r\n+ finally:\r\n+ # Ensure cleanup always happens\r\n+ self._cleanup_workspace()\r\n+\r\n+ def _setup_workspace(self):\r\n+ \"\"\"Creates a temporary directory for processing.\"\"\"\r\n+ try:\r\n+ self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n+ log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n+\r\n+ def 
_extract_input(self):\r\n+ \"\"\"Extracts ZIP or copies folder contents to the temporary workspace.\"\"\"\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n+\r\n+ log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n+ try:\r\n+ if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n+ log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ zip_ref.extractall(self.temp_dir)\r\n+ log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n+ elif self.input_path.is_dir():\r\n+ log.debug(f\"Copying directory contents: {self.input_path}\")\r\n+ for item in self.input_path.iterdir():\r\n+ destination = self.temp_dir / item.name\r\n+ if item.is_dir():\r\n+ # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n+ try:\r\n+ shutil.copytree(item, destination, dirs_exist_ok=True)\r\n+ except TypeError: # Fallback for older Python\r\n+ if not destination.exists():\r\n+ shutil.copytree(item, destination)\r\n+ else:\r\n+ log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n+\r\n+ else:\r\n+ shutil.copy2(item, destination)\r\n+ log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n+ except zipfile.BadZipFile:\r\n+ raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n+\r\n+ def _inventory_and_classify_files(self):\r\n+ \"\"\"\r\n+ Scans workspace, classifies files according to preset rules, handling\r\n+ 16-bit prioritization and multiple variants of the same base map type.\r\n+ \"\"\"\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n+\r\n+ log.info(\"Scanning and classifying 
files...\")\r\n+ log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n+ all_files_rel = []\r\n+ for root, _, files in os.walk(self.temp_dir):\r\n+ root_path = Path(root)\r\n+ for file in files:\r\n+ full_path = root_path / file\r\n+ relative_path = full_path.relative_to(self.temp_dir)\r\n+ all_files_rel.append(relative_path)\r\n+\r\n+ log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n+\r\n+ # --- Initialization ---\r\n+ processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n+ potential_map_candidates = [] # List to store potential map file info\r\n+ # Reset classified files (important if this method is ever called multiple times)\r\n+ self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n+\r\n+\r\n+ # --- Step 1: Identify Explicit 'Extra' Files ---\r\n+ log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n+ compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n+ log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path in processed_files: continue\r\n+ for compiled_regex in compiled_extra_regex:\r\n+ if compiled_regex.search(file_rel_path.name):\r\n+ log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n+ self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n+ processed_files.add(file_rel_path)\r\n+ log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n+ break # Stop checking extra patterns for this file\r\n+\r\n+ # --- Step 2: Identify Model Files ---\r\n+ log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n+ compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n+ log.debug(f\" 
Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path in processed_files: continue\r\n+ for compiled_regex in compiled_model_regex:\r\n+ if compiled_regex.search(file_rel_path.name):\r\n+ log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n+ self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n+ processed_files.add(file_rel_path)\r\n+ log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n+ break # Stop checking model patterns for this file\r\n+\r\n+ # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n+ log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n+ # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n+ compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n+\r\n+ for file_rel_path in all_files_rel:\r\n+ # Skip files already classified as Extra or Model\r\n+ if file_rel_path in processed_files:\r\n+ continue\r\n+\r\n+ file_stem = file_rel_path.stem\r\n+ match_found = False\r\n+\r\n+ # Iterate through base types and their associated regex tuples\r\n+ for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n+ if match_found: break # Stop checking types for this file once matched\r\n+\r\n+ # Get the original keywords list for the current rule index\r\n+ # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n+ original_rule = None\r\n+ # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n+ if regex_tuples:\r\n+ current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n+ if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n+ rule_candidate = 
self.config.map_type_mapping[current_rule_index]\r\n+ # Verify it's the correct rule by checking target_type\r\n+ if rule_candidate.get(\"target_type\") == base_map_type:\r\n+ original_rule = rule_candidate\r\n+ else:\r\n+ log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. Searching...\")\r\n+ # Fallback search if index doesn't match (shouldn't happen ideally)\r\n+ for idx, rule in enumerate(self.config.map_type_mapping):\r\n+ if rule.get(\"target_type\") == base_map_type:\r\n+ original_rule = rule\r\n+ log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n+ break\r\n+\r\n+ original_keywords_list = []\r\n+ if original_rule and 'keywords' in original_rule:\r\n+ original_keywords_list = original_rule['keywords']\r\n+ else:\r\n+ log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. Keyword indexing may fail.\")\r\n+\r\n+ for kw_regex, original_keyword, rule_index in regex_tuples:\r\n+ if kw_regex.search(file_stem):\r\n+ log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n+\r\n+ # Find the index of the matched keyword within its rule's list\r\n+ keyword_index_in_rule = -1 # Default if not found\r\n+ if original_keywords_list:\r\n+ try:\r\n+ # Use the original_keyword string directly\r\n+ keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n+ except ValueError:\r\n+ log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? 
{original_keywords_list}\")\r\n+ else:\r\n+ log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n+\r\n+ # Add candidate only if not already added\r\n+ if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n+ potential_map_candidates.append({\r\n+ 'source_path': file_rel_path,\r\n+ 'matched_keyword': original_keyword,\r\n+ 'base_map_type': base_map_type,\r\n+ 'preset_rule_index': rule_index,\r\n+ 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n+ 'is_16bit_source': False\r\n+ })\r\n+ else:\r\n+ log.warning(f\" '{file_rel_path}' was already added as a candidate? Skipping duplicate add.\")\r\n+\r\n+ match_found = True\r\n+ break # Stop checking regex tuples for this base_type once matched\r\n+\r\n+ log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n+\r\n+ # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n+ log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n+ compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n+ for file_rel_path in all_files_rel:\r\n+ # Skip if already processed or already identified as a candidate\r\n+ if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n+ continue\r\n+\r\n+ for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n+ log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n+ match = compiled_regex.search(file_rel_path.name) # Store result\r\n+ if match:\r\n+ log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n+ potential_map_candidates.append({\r\n+ 'source_path': file_rel_path,\r\n+ 'matched_keyword': 'N/A (16bit variant)', # 
Placeholder keyword\r\n+ 'base_map_type': base_type,\r\n+ 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n+ 'is_16bit_source': True # Mark as 16-bit immediately\r\n+ })\r\n+ log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n+ # Don't add to processed_files yet, let Step 4 handle filtering\r\n+ break # Stop checking bit depth patterns for this file\r\n+\r\n+ log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n+\r\n+ # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n+ log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n+ compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n+ candidates_to_keep = []\r\n+ candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n+\r\n+ # Mark 16-bit candidates\r\n+ for candidate in potential_map_candidates:\r\n+ base_type = candidate['base_map_type']\r\n+ # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n+ if base_type in compiled_bit_depth_regex:\r\n+ if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n+ candidate['is_16bit_source'] = True\r\n+ log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n+\r\n+\r\n+ # Identify base types that have a 16-bit version present\r\n+ prioritized_16bit_bases = {\r\n+ candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n+ }\r\n+ log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n+\r\n+ # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n+ for candidate in potential_map_candidates:\r\n+ if candidate['is_16bit_source']:\r\n+ candidates_to_keep.append(candidate)\r\n+ log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} 
({candidate['base_map_type']})\")\r\n+ elif candidate['base_map_type'] not in prioritized_16bit_bases:\r\n+ candidates_to_keep.append(candidate)\r\n+ log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+ else:\r\n+ # This is an 8-bit candidate whose 16-bit counterpart exists\r\n+ candidates_to_ignore.append(candidate)\r\n+ log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+\r\n+ # Add ignored 8-bit files to the main ignored list\r\n+ for ignored_candidate in candidates_to_ignore:\r\n+ self.classified_files[\"ignored\"].append({\r\n+ 'source_path': ignored_candidate['source_path'],\r\n+ 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n+ })\r\n+ processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n+\r\n+ log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n+\r\n+ # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n+ log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n+ # from collections import defaultdict # Moved import to top of file\r\n+ grouped_by_base_type = defaultdict(list)\r\n+ for candidate in candidates_to_keep:\r\n+ grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n+\r\n+ final_map_list = []\r\n+ for base_map_type, candidates in grouped_by_base_type.items():\r\n+ log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n+\r\n+ # --- NEW SORTING LOGIC ---\r\n+ # Sort candidates based on:\r\n+ # 1. The index of the rule object in the preset's map_type_mapping list.\r\n+ # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n+ # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n+ candidates.sort(key=lambda c: (\r\n+ c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n+ c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n+ str(c['source_path'])\r\n+ ))\r\n+ # --- END NEW SORTING LOGIC ---\r\n+\r\n+ # Removed diagnostic log\r\n+\r\n+ # Assign suffixes and add to the final map list\r\n+ for i, final_candidate in enumerate(candidates): # Use the directly sorted list\r\n+ # Determine final map type based on the new rule\r\n+ if base_map_type in self.config.respect_variant_map_types: # Check the new config list\r\n+ # Always assign suffix for types in the list (if more than one or only one)\r\n+ final_map_type = f\"{base_map_type}-{i + 1}\"\r\n+ else:\r\n+ # Never assign suffix for types NOT in the list\r\n+ final_map_type = base_map_type\r\n+\r\n+ final_map_list.append({\r\n+ \"map_type\": final_map_type,\r\n+ \"source_path\": final_candidate[\"source_path\"],\r\n+ \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n+ \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n+ \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n+ })\r\n+ processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n+ log.debug(f\" Final Map: Type='{final_map_type}', Source='{final_candidate['source_path']}', Keyword='{final_candidate['matched_keyword']}', 16bit={final_candidate['is_16bit_source']}\")\r\n+\r\n+ self.classified_files[\"maps\"] = final_map_list\r\n+\r\n+ # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n+ log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n+ remaining_count = 0\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path not in processed_files:\r\n+ log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n+ 
self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})\r\n+ remaining_count += 1\r\n+ # No need to add to processed_files here, it's the final step\r\n+ log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n+\r\n+ # --- Final Summary ---\r\n+ # Update metadata list of files actually moved to extra (including Unrecognised and Ignored)\r\n+ self.metadata[\"source_files_in_extra\"] = sorted([\r\n+ str(f['source_path']) for f in self.classified_files['extra'] + self.classified_files['ignored']\r\n+ ])\r\n+ log.info(f\"File classification complete.\")\r\n+ log.debug(\"--- Final Classification Summary (v2) ---\")\r\n+ map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n+ model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n+ extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n+ ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n+ log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n+ log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n+ log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n+ log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n+ log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n+\r\n+\r\n+ def _determine_base_metadata(self):\r\n+ \"\"\"Determines base_name, asset_category, and archetype.\"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ log.info(\"Determining base metadata...\")\r\n+\r\n+ # --- Determine Asset Category ---\r\n+ self.metadata[\"asset_category\"] = self.config.default_asset_category # Start with default\r\n+ if 
self.classified_files[\"models\"]:\r\n+ self.metadata[\"asset_category\"] = \"Asset\"\r\n+ log.debug(\"Category set to 'Asset' due to model file presence.\")\r\n+ else:\r\n+ decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n+ found_decal = False\r\n+ candidate_files = [f['source_path'] for f in self.classified_files['maps']] # Check map names first\r\n+ if not candidate_files: # Fallback to checking all files? Maybe too broad. Check Extra?\r\n+ candidate_files = [f['source_path'] for f in self.classified_files['extra']]\r\n+\r\n+ if decal_keywords:\r\n+ for file_path in candidate_files:\r\n+ for keyword in decal_keywords:\r\n+ if keyword.lower() in file_path.name.lower():\r\n+ self.metadata[\"asset_category\"] = \"Decal\"\r\n+ found_decal = True; break\r\n+ if found_decal: break\r\n+ if found_decal: log.debug(\"Category set to 'Decal' due to keyword match.\")\r\n+\r\n+ # --- Determine Base Name ---\r\n+ map_stems = [f['source_path'].stem for f in self.classified_files['maps']]\r\n+ model_stems = [f['source_path'].stem for f in self.classified_files['models']]\r\n+ candidate_stems = map_stems + model_stems\r\n+\r\n+ determined_base_name = \"UnknownAssetName\"\r\n+ if candidate_stems:\r\n+ separator = self.config.source_naming_separator\r\n+ base_index = self.config.source_naming_indices.get('base_name')\r\n+\r\n+ if isinstance(base_index, int):\r\n+ potential_base_names = set()\r\n+ for stem in candidate_stems:\r\n+ parts = stem.split(separator)\r\n+ if len(parts) > base_index:\r\n+ potential_base_names.add(parts[base_index])\r\n+ if len(potential_base_names) == 1:\r\n+ determined_base_name = potential_base_names.pop()\r\n+ log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n+ elif len(potential_base_names) > 1 :\r\n+ log.warning(f\"Multiple potential base names found from parts index {base_index}: {potential_base_names}. 
Falling back to common prefix.\")\r\n+ # Fallback logic if structured parts method fails or yields multiple names\r\n+ determined_base_name = os.path.commonprefix(candidate_stems)\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Clean trailing separators etc.\r\n+ # else: len is 0, means no valid parts found, use common prefix below\r\n+\r\n+ # If no index or structured parts failed, use common prefix of all relevant stems\r\n+ if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n+ determined_base_name = os.path.commonprefix(candidate_stems)\r\n+ log.debug(f\"Using common prefix: '{determined_base_name}'\")\r\n+ # Clean up common separators/underscores often left by commonprefix\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+\r\n+ # Final cleanup and fallback for base name\r\n+ determined_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n+ self.metadata[\"asset_name\"] = determined_base_name\r\n+ log.debug(f\"Final Determined Base Name: {self.metadata['asset_name']}\")\r\n+\r\n+ # --- Determine Archetype (Usage) ---\r\n+ archetype_rules = self.config.archetype_rules\r\n+ determined_archetype = \"Unknown\"\r\n+ check_stems = [f['source_path'].stem.lower() for f in self.classified_files['maps']]\r\n+ check_stems.extend([f['source_path'].stem.lower() for f in self.classified_files['models']])\r\n+ # Also check the determined base name itself?\r\n+ check_stems.append(self.metadata[\"asset_name\"].lower())\r\n+\r\n+ if check_stems:\r\n+ best_match_archetype = \"Unknown\"\r\n+ highest_match_count = 0 # Simple heuristic: prioritize rule with most keyword hits?\r\n+\r\n+ for rule in archetype_rules:\r\n+ if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n+ arch_name, rules_dict = rule\r\n+ match_any = rules_dict.get(\"match_any\", [])\r\n+ # match_all = rules_dict.get(\"match_all\", []) # Add 
logic if needed\r\n+\r\n+ current_match_count = 0\r\n+ matched_any_keyword = False\r\n+ if match_any:\r\n+ for keyword in match_any:\r\n+ kw_lower = keyword.lower()\r\n+ for stem in check_stems:\r\n+ # Using simple substring check again\r\n+ if kw_lower in stem:\r\n+ current_match_count += 1\r\n+ matched_any_keyword = True\r\n+ # Optionally break after first hit per keyword? Or count all occurrences? Count all for now.\r\n+\r\n+ # Decide if this rule matches. For now: requires at least one 'match_any' hit.\r\n+ if matched_any_keyword:\r\n+ # Simple approach: first rule that matches wins.\r\n+ # Could be enhanced by prioritizing rules or counting hits.\r\n+ if best_match_archetype == \"Unknown\": # Take the first match\r\n+ best_match_archetype = arch_name\r\n+ log.debug(f\"Tentative archetype match '{arch_name}' based on keywords: {match_any}\")\r\n+ # Break here for \"first match wins\" logic\r\n+ break\r\n+\r\n+ # --- Example: Prioritize by match count (more complex) ---\r\n+ # if current_match_count > highest_match_count:\r\n+ # highest_match_count = current_match_count\r\n+ # best_match_archetype = arch_name\r\n+ # log.debug(f\"New best archetype match '{arch_name}' with count {current_match_count}\")\r\n+ # ----------------------------------------------------------\r\n+\r\n+ determined_archetype = best_match_archetype\r\n+\r\n+ self.metadata[\"archetype\"] = determined_archetype\r\n+ log.debug(f\"Determined Archetype: {self.metadata['archetype']}\")\r\n+ log.info(\"Base metadata determination complete.\")\r\n+\r\n+\r\n+ def _process_maps(self):\r\n+ \"\"\"Loads, processes, resizes, and saves classified map files.\"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ log.info(f\"Processing identified map files for '{self.metadata['asset_name']}'...\")\r\n+ processed_map_types = set()\r\n+\r\n+ # --- Settings retrieval ---\r\n+ resolutions = self.config.image_resolutions\r\n+ stats_res_key = 
self.config.calculate_stats_resolution\r\n+ stats_target_dim = resolutions.get(stats_res_key)\r\n+ if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. Stats skipped.\")\r\n+ gloss_keywords = self.config.source_glossiness_keywords\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ base_name = self.metadata['asset_name']\r\n+\r\n+ # --- Pre-process Glossiness -> Roughness ---\r\n+ preprocessed_data = {}\r\n+ derived_from_gloss_flag = {}\r\n+ gloss_map_info_for_rough, native_rough_map_info = None, None\r\n+ for map_info in self.classified_files['maps']:\r\n+ if map_info['map_type'] == 'ROUGH':\r\n+ is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n+ if is_gloss: gloss_map_info_for_rough = map_info\r\n+ else: native_rough_map_info = map_info\r\n+\r\n+ rough_source_to_use = None\r\n+ if gloss_map_info_for_rough:\r\n+ rough_source_to_use = gloss_map_info_for_rough\r\n+ derived_from_gloss_flag['ROUGH'] = True\r\n+ if native_rough_map_info:\r\n+ log.warning(f\"Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n+ if native_rough_map_info in self.classified_files['maps']: self.classified_files['maps'].remove(native_rough_map_info)\r\n+ self.classified_files['ignored'].append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n+ elif native_rough_map_info:\r\n+ rough_source_to_use = native_rough_map_info\r\n+ derived_from_gloss_flag['ROUGH'] = False\r\n+\r\n+ if derived_from_gloss_flag.get('ROUGH'):\r\n+ source_path = self.temp_dir / rough_source_to_use['source_path']\r\n+ log.info(f\"Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n+ try:\r\n+ img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n+ if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n+ original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n+ if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n+ if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n+ elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n+ else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n+ # Store tuple: (inverted_float_data, original_dtype)\r\n+ preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n+ log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n+ except Exception as e: log.error(f\"Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n+\r\n+ # --- Main Processing Loop ---\r\n+ maps_to_process = list(self.classified_files['maps'])\r\n+ for map_info in maps_to_process:\r\n+ map_type = map_info['map_type']\r\n+ source_path_rel = map_info['source_path']\r\n+ original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n+ log.info(f\"-- 
Processing Map Type: {map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n+ img_processed, source_dtype = None, None\r\n+ map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n+\r\n+ try:\r\n+ # --- 1. Get/Load Source Data ---\r\n+ if map_type in preprocessed_data:\r\n+ log.debug(f\"Using pre-processed data for {map_type}.\")\r\n+ # Unpack tuple: (inverted_float_data, original_dtype)\r\n+ img_processed, source_dtype = preprocessed_data[map_type]\r\n+ # No longer need to read the original file just for dtype\r\n+ else:\r\n+ full_source_path = self.temp_dir / source_path_rel\r\n+ # Determine the read flag based on map type\r\n+ read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n+ log.debug(f\"Loading source {source_path_rel.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n+ if img_loaded is None:\r\n+ raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n+ img_processed, source_dtype = img_loaded.copy(), img_loaded.dtype\r\n+ log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape}\")\r\n+ map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n+\r\n+ # --- 2. 
Handle Alpha Mask ---\r\n+ if map_type == 'MASK' and img_processed is not None:\r\n+ log.debug(\"Processing as MASK type.\")\r\n+ shape = img_processed.shape\r\n+ if len(shape) == 3 and shape[2] == 4: img_processed = img_processed[:, :, 3]\r\n+ elif len(shape) == 3 and shape[2] == 3: img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n+ if img_processed.dtype != np.uint8:\r\n+ log.debug(f\"Converting mask from {img_processed.dtype} to uint8.\")\r\n+ if source_dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ else: img_processed = img_processed.astype(np.uint8)\r\n+\r\n+ if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n+ orig_h, orig_w = img_processed.shape[:2]\r\n+ self.processed_maps_details.setdefault(map_type, {})\r\n+ max_original_dimension = max(orig_w, orig_h) # Get max original dimension\r\n+\r\n+ # --- 3. 
Resize & Save Loop ---\r\n+ for res_key, target_dim in resolutions.items():\r\n+ # --- Skip Upscaling ---\r\n+ if target_dim > max_original_dimension:\r\n+ log.debug(f\"Skipping {res_key} ({target_dim}px): Target dimension is larger than original max dimension ({max_original_dimension}px).\")\r\n+ continue\r\n+ log.debug(f\"Processing {map_type} for resolution: {res_key}...\")\r\n+ if orig_w <= 0 or orig_h <= 0: log.warning(f\"Invalid original dims, skipping resize {res_key}.\"); continue\r\n+ target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n+ interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n+ try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n+ except Exception as resize_err: log.error(f\"Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n+\r\n+ # --- 3a. Calculate Stats ---\r\n+ if res_key == stats_res_key and stats_target_dim:\r\n+ log.debug(f\"Calculating stats for {map_type} using {res_key} image...\")\r\n+ stats = _calculate_image_stats(img_resized) # Use the already resized image\r\n+ if stats: self.metadata[\"image_stats_1k\"][map_type] = stats\r\n+ else: log.warning(f\"Stats calculation failed for {map_type} at {res_key}.\")\r\n+ # Calculate aspect change string on the lowest processed resolution, only once per asset\r\n+ lowest_res_key = min(resolutions, key=resolutions.get)\r\n+ if self.metadata[\"aspect_ratio_change_string\"] == \"N/A\" and res_key == lowest_res_key:\r\n+ try:\r\n+ aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n+ self.metadata[\"aspect_ratio_change_string\"] = aspect_string\r\n+ log.debug(f\"Stored aspect ratio change string using lowest res '{res_key}': '{aspect_string}'\")\r\n+ except Exception as aspect_err:\r\n+ log.error(f\"Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", 
exc_info=True)\r\n+ self.metadata[\"aspect_ratio_change_string\"] = \"Error\" # Indicate calculation failure\r\n+\r\n+ # --- 3b. Determine Output Bit Depth & Format ---\r\n+ bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n+ current_dtype = img_resized.dtype # Dtype after resize\r\n+ output_dtype_target, output_bit_depth = None, 8 # Defaults\r\n+ if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n+ elif bit_depth_rule == 'respect':\r\n+ if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16 # Respect float source as 16bit uint for non-EXR\r\n+ else: output_dtype_target, output_bit_depth = np.uint8, 8 # Default respect to 8bit\r\n+ else: output_dtype_target, output_bit_depth = np.uint8, 8 # Fallback\r\n+\r\n+ # --- 3c. Determine Output Format based on Input, Rules & Threshold ---\r\n+ output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n+ primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n+ fmt_8bit_config = self.config.get_8bit_output_format() # Usually 'png'\r\n+ threshold = self.config.resolution_threshold_for_jpg # Get threshold value\r\n+ force_lossless = map_type in self.config.force_lossless_map_types\r\n+\r\n+ if force_lossless:\r\n+ log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ # Add EXR type flags if needed, e.g., HALF for 16-bit float\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ # Add compression later if desired, e.g. 
cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_ZIP\r\n+ else: # Assume png or other lossless 16-bit format\r\n+ # Use fallback if primary isn't suitable or fails? For now, assume primary is lossless.\r\n+ # If primary_fmt_16 is not 'png', maybe default to fallback_fmt_16?\r\n+ if output_format != \"png\":\r\n+ log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless. Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16 # Ensure it's a known lossless like png/tif\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ # Add params for other lossless like TIF if supported\r\n+ else: # 8-bit lossless\r\n+ output_format = fmt_8bit_config # Usually 'png'\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else:\r\n+ log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for '{map_type}' will use PNG instead.\")\r\n+ output_format = \"png\"\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n+\r\n+ # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n+ elif output_bit_depth == 8 and target_dim >= threshold:\r\n+ output_format = 'jpg'\r\n+ output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n+ # --- Else: Apply Input/Rule-Based Logic ---\r\n+ else:\r\n+ # Apply force_8bit rule (if not overridden by threshold)\r\n+ if bit_depth_rule == 'force_8bit':\r\n+ output_format = 'png' # Force to PNG as per clarification\r\n+ output_ext = '.png'\r\n+ # output_bit_depth is already 8, output_dtype_target is already uint8\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n+ # Handle specific input extensions if not forced to 8bit PNG\r\n+ elif original_extension == '.jpg' and output_bit_depth == 8:\r\n+ output_format = 'jpg'\r\n+ output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n+ elif original_extension == '.tif':\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) # Add 
compression later\r\n+ log.debug(f\"Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n+ else: # Fallback for 16-bit from TIF\r\n+ output_format = fallback_fmt_16\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n+ else: # output_bit_depth == 8 from TIF input (and below threshold)\r\n+ output_format = 'png'\r\n+ output_ext = '.png'\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n+ # Handle other inputs (e.g., PNG) or fallbacks\r\n+ else:\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else: # Fallback for 16-bit\r\n+ output_format = fallback_fmt_16\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ 
save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n+ else: # 8-bit output (and below threshold)\r\n+ output_format = fmt_8bit_config # Use configured 8-bit format\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ elif output_format == \"jpg\":\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n+\r\n+ img_to_save = img_resized.copy() # Work on copy for dtype conversion\r\n+ # --- Apply Dtype Conversion ---\r\n+ if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n+ if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n+ if needs_float16 and img_to_save.dtype != np.float16:\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n+\r\n+ # --- 3d. 
Construct Filename & Save ---\r\n+ filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n+ output_path_temp = self.temp_dir / filename\r\n+ log.debug(f\"Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n+ saved_successfully, actual_format_saved = False, output_format\r\n+ try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n+ except Exception as save_err:\r\n+ log.error(f\"Save failed ({output_format}): {save_err}\")\r\n+ # --- Try Fallback ---\r\n+ if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n+ log.warning(f\"Attempting fallback: {fallback_fmt_16}\")\r\n+ actual_format_saved = fallback_fmt_16; output_ext = f\".{fallback_fmt_16}\"; # Adjust format/ext\r\n+ filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n+ output_path_temp = self.temp_dir / filename\r\n+ save_params_fallback = [] # Reset params for fallback\r\n+ img_fallback = None; target_fallback_dtype = np.uint16\r\n+ if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, 6])\r\n+ elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n+\r\n+ # Convert EXR source (float16) to uint16 for PNG/TIF fallback\r\n+ #if img_to_save.dtype == np.float16: img_fallback = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(target_fallback_dtype)\r\n+ if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n+ # <<< START MODIFICATION HERE >>>\r\n+ # Check for NaN/Inf before conversion\r\n+ if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n+ log.error(f\"Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. 
Skipping resolution.\")\r\n+ continue # Skip fallback if data is bad\r\n+\r\n+ # Clip *after* scaling for uint16 conversion robustness\r\n+ img_scaled = img_to_save * 65535.0\r\n+ img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n+ # <<< END MODIFICATION HERE >>>\r\n+ elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already correct type\r\n+ else: log.error(f\"Cannot convert {img_to_save.dtype} for fallback.\"); continue # Skip fallback\r\n+\r\n+ try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err: log.error(f\"Fallback save failed: {fallback_err}\", exc_info=True)\r\n+\r\n+ # --- 3e. Store Result ---\r\n+ if saved_successfully:\r\n+ self.processed_maps_details[map_type][res_key] = {\r\n+ \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n+ \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n+ \"format\": actual_format_saved\r\n+ }\r\n+ map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n+\r\n+ except Exception as map_proc_err:\r\n+ log.error(f\"Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n+ self.processed_maps_details.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n+\r\n+ self.metadata[\"map_details\"][map_type] = map_details # Store details for this map type\r\n+\r\n+ # --- Final Metadata Updates ---\r\n+ processed_map_types = set(k for k, v in self.processed_maps_details.items() if 'error' not in v) # Only count successful\r\n+ self.metadata[\"maps_present\"] = sorted(list(processed_map_types))\r\n+ features = set()\r\n+ for map_type, details in self.metadata[\"map_details\"].items():\r\n+ if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n+ if 
details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n+ res_details = self.processed_maps_details.get(map_type, {})\r\n+ if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n+ self.metadata[\"shader_features\"] = sorted(list(features))\r\n+ log.debug(f\"Determined shader features: {self.metadata['shader_features']}\")\r\n+ log.info(\"Finished processing all map files.\")\r\n+\r\n+\r\n+ #log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n+\r\n+ def _merge_maps(self):\r\n+ \"\"\"Merges channels from different maps based on rules in configuration.\"\"\"\r\n+ # ... (initial checks and getting merge_rules) ...\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Workspace not setup.\")\r\n+\r\n+\r\n+ # <<< FIX: Get merge rules from the configuration object >>>\r\n+\r\n+ merge_rules = self.config.map_merge_rules\r\n+\r\n+ # <<< END FIX >>>\r\n+ log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n+\r\n+ for rule_index, rule in enumerate(merge_rules):\r\n+ # <<< FIX: Assign variables *before* using them >>>\r\n+ output_map_type = rule.get(\"output_map_type\")\r\n+ inputs_mapping = rule.get(\"inputs\")\r\n+ defaults = rule.get(\"defaults\", {})\r\n+ rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n+\r\n+ # <<< FIX: Check if essential rule keys exist *after* assignment >>>\r\n+ if not output_map_type or not inputs_mapping:\r\n+ log.warning(f\"Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs' in rule definition. 
Rule data: {rule}\")\r\n+ continue # Skip to the next rule in merge_rules\r\n+\r\n+ # Now it's safe to use output_map_type in the log statement\r\n+ log.info(f\"-- Applying merge rule for '{output_map_type}' --\")\r\n+ # <<< END FIX >>>\r\n+\r\n+ self.merged_maps_details.setdefault(output_map_type, {})\r\n+\r\n+ # --- Determine required inputs and their common resolutions ---\r\n+ required_input_types = set(inputs_mapping.values()) # e.g., {'NRM', 'ROUGH'}\r\n+ if not required_input_types:\r\n+ log.warning(f\"Skipping merge rule '{output_map_type}': No input map types defined in 'inputs'.\")\r\n+ continue\r\n+\r\n+ possible_resolutions_per_input = []\r\n+ for input_type in required_input_types:\r\n+ if input_type in self.processed_maps_details:\r\n+ # Get resolution keys where processing didn't error\r\n+ res_keys = {res for res, details in self.processed_maps_details[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n+ if not res_keys:\r\n+ log.warning(f\"Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n+ possible_resolutions_per_input = [] # Cannot proceed if any input is missing all resolutions\r\n+ break\r\n+ possible_resolutions_per_input.append(res_keys)\r\n+ else:\r\n+ log.warning(f\"Required input map type '{input_type}' for merge rule '{output_map_type}' was not processed.\")\r\n+ possible_resolutions_per_input = [] # Cannot proceed if any input type is missing\r\n+ break\r\n+\r\n+ if not possible_resolutions_per_input:\r\n+ log.warning(f\"Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. 
Skipping rule.\")\r\n+ continue\r\n+\r\n+ # Find the intersection of resolution keys across all required inputs\r\n+ common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n+\r\n+ if not common_resolutions:\r\n+ log.warning(f\"No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n+ continue\r\n+ log.debug(f\"Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n+ # --- End Common Resolution Logic ---\r\n+\r\n+\r\n+ # <<< LOOP THROUGH COMMON RESOLUTIONS >>>\r\n+ # Use the actual common_resolutions found\r\n+ res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n+ if not res_order:\r\n+ log.warning(f\"Common resolutions {common_resolutions} do not match any target resolutions in config. Skipping merge for '{output_map_type}'.\")\r\n+ continue\r\n+\r\n+ # Sort resolutions to process (optional, but nice for logs)\r\n+ sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n+\r\n+ # Get target pattern from config for filename formatting\r\n+ target_pattern = self.config.target_filename_pattern\r\n+\r\n+ for current_res_key in sorted_res_keys:\r\n+ log.debug(f\"Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n+ try:\r\n+ loaded_inputs = {}\r\n+ input_bit_depths = set()\r\n+ input_original_extensions = {} # Store original extensions for this resolution's inputs\r\n+\r\n+ # --- Load required input maps *at this specific resolution* AND get original extensions ---\r\n+ possible_to_load = True\r\n+ base_name = self.metadata['asset_name']\r\n+ target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B']\r\n+\r\n+ for map_type in required_input_types:\r\n+ res_details = self.processed_maps_details.get(map_type, {}).get(current_res_key)\r\n+ if not res_details or 'path' not in res_details:\r\n+ log.warning(f\"Input map '{map_type}' missing 
details or path for resolution '{current_res_key}'. Cannot merge for this resolution.\")\r\n+ possible_to_load = False; break\r\n+\r\n+ # Find original extension from classified data\r\n+ original_ext = '.png' # Default\r\n+ found_original = False\r\n+ for classified_map in self.classified_files[\"maps\"]:\r\n+ # Match based on the base map type (e.g., NRM matches NRM-1)\r\n+ if classified_map['map_type'].startswith(map_type):\r\n+ # Check if the processed path matches (to handle multiple variants if needed, though less likely here)\r\n+ # This assumes processed_maps_details path is relative to temp_dir\r\n+ processed_path_str = str(res_details['path'])\r\n+ classified_path_str = str(classified_map['source_path']) # This is the original source path relative to temp_dir root\r\n+ # A more robust check might involve comparing filenames if paths differ due to processing steps\r\n+ # For now, rely on the base map type match and grab the first extension found\r\n+ original_ext = classified_map.get('original_extension', '.png')\r\n+ found_original = True\r\n+ break # Found the first match for this map_type\r\n+ if not found_original:\r\n+ log.warning(f\"Could not reliably find original extension for merge input '{map_type}'. 
Defaulting to '.png'.\")\r\n+\r\n+ input_original_extensions[map_type] = original_ext\r\n+\r\n+ # Load the image\r\n+ input_file_path = self.temp_dir / res_details['path']\r\n+ read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n+ log.debug(f\"Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ img = cv2.imread(str(input_file_path), read_flag)\r\n+ if img is None:\r\n+ raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n+ loaded_inputs[map_type] = img\r\n+ input_bit_depths.add(res_details.get('bit_depth', 8))\r\n+\r\n+ if not possible_to_load: continue # Skip this resolution if inputs missing\r\n+\r\n+ # --- Determine dimensions and target_dim for threshold check ---\r\n+ first_map_type = next(iter(required_input_types)) # Get one map type to read dimensions\r\n+ h, w = loaded_inputs[first_map_type].shape[:2]\r\n+ # Get target_dim from the details of the first loaded input for this resolution\r\n+ first_res_details = self.processed_maps_details.get(first_map_type, {}).get(current_res_key)\r\n+ target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n+ num_target_channels = len(target_channels)\r\n+\r\n+ # <<< Re-calculate output_bit_depth based on THIS resolution's inputs >>>\r\n+ max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n+ output_bit_depth = 8\r\n+ if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n+ output_bit_depth = 16\r\n+ log.debug(f\"Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n+\r\n+ # Prepare channels (float32) (same logic as before)\r\n+ merged_channels_float32 = []\r\n+ # Use the defined target_channels list\r\n+ for target_channel in 
target_channels: # Iterate R, G, B (or specified) order\r\n+ source_map_type = inputs_mapping.get(target_channel)\r\n+ channel_data_float32 = None\r\n+ if source_map_type and source_map_type in loaded_inputs:\r\n+ # ... [Extract channel data as float32 as before] ...\r\n+ img_input = loaded_inputs[source_map_type]\r\n+ if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n+ elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n+ else: img_float = img_input.astype(np.float32)\r\n+ num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n+ if num_source_channels >= 3: # BGR Source\r\n+ if target_channel == 'R': channel_data_float32 = img_float[:, :, 2]\r\n+ elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n+ elif target_channel == 'B': channel_data_float32 = img_float[:, :, 0]\r\n+ elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n+ elif num_source_channels == 1 or len(img_float.shape) == 2: # Grayscale source\r\n+ channel_data_float32 = img_float.reshape(h, w)\r\n+ if channel_data_float32 is None: # Use default if needed\r\n+ default_val = defaults.get(target_channel)\r\n+ if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n+ channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n+ merged_channels_float32.append(channel_data_float32)\r\n+\r\n+\r\n+ # Merge channels (same as before)\r\n+ if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n+ merged_image_float32 = cv2.merge(merged_channels_float32)\r\n+\r\n+ # Final Data Type Conversion (based on recalculated output_bit_depth)\r\n+ img_final_merged = None\r\n+ if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 
65535.0).astype(np.uint16)\r\n+ else: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+\r\n+ # --- Determine Output Format based on Threshold, Input Hierarchy & Rules ---\r\n+ output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n+ primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n+ fmt_8bit_config = self.config.get_8bit_output_format()\r\n+ threshold = self.config.resolution_threshold_for_jpg\r\n+ force_lossless = output_map_type in self.config.force_lossless_map_types\r\n+\r\n+ if force_lossless:\r\n+ log.debug(f\"Format forced to lossless for merged map type '{output_map_type}'.\")\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: # Assume png or other lossless 16-bit format\r\n+ if output_format != \"png\":\r\n+ log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else: # 8-bit lossless\r\n+ output_format = fmt_8bit_config # Usually 'png'\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else:\r\n+ log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n+ output_format = \"png\"\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n+\r\n+ # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n+ elif output_bit_depth == 8 and target_dim >= threshold:\r\n+ output_format = 'jpg'\r\n+ output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n+ # --- Else: Apply Hierarchy/Rule-Based Logic ---\r\n+ else:\r\n+ involved_extensions = set(input_original_extensions.values())\r\n+ log.debug(f\"Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n+ # Hierarchy: EXR > TIF > PNG > JPG\r\n+ highest_format_str = 'jpg' # Start lowest\r\n+ if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n+ elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n+ elif '.png' in involved_extensions: highest_format_str = 'png'\r\n+\r\n+ # Determine final output format based on hierarchy and target bit depth\r\n+ final_output_format = highest_format_str\r\n+\r\n+ if highest_format_str == 'tif':\r\n+ if output_bit_depth == 16:\r\n+ final_output_format = primary_fmt_16 # Use configured 16-bit pref (EXR/PNG)\r\n+ log.debug(f\"Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n+ else: # 8-bit target\r\n+ final_output_format = 'png' # Force TIF input to PNG for 8-bit merge output\r\n+ log.debug(\"Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n+ else:\r\n+ log.debug(f\"Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold 
not met)\")\r\n+\r\n+ # Set format/params based on the determined final_output_format\r\n+ output_format = final_output_format\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ elif output_format == \"jpg\": # Should only happen if highest input was JPG and below threshold\r\n+ output_ext = \".jpg\"\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ else:\r\n+ log.error(f\"Unsupported final output format '{output_format}' determined for merged map '{output_map_type}'. Skipping save.\")\r\n+ continue\r\n+\r\n+ # --- JPG 8-bit Check (applies regardless of threshold logic) ---\r\n+ if output_format == \"jpg\" and output_bit_depth == 16:\r\n+ log.warning(f\"Output format is JPG, but merge rule resulted in 16-bit target. 
Forcing merged output to 8-bit for JPG save.\")\r\n+ img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ output_bit_depth = 8 # Correct the recorded bit depth\r\n+\r\n+ # --- Save Merged Map ---\r\n+ image_to_save = img_final_merged # Already converted to target bit depth (or forced to 8bit for JPG)\r\n+\r\n+ # Apply float16 conversion if needed for EXR\r\n+ if needs_float16 and image_to_save.dtype != np.float16:\r\n+ if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n+ elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n+ else: log.warning(f\"Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n+\r\n+ merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n+ merged_output_path_temp = self.temp_dir / merged_filename\r\n+ log.debug(f\"Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n+\r\n+ # --- Add save logic with fallback here ---\r\n+ saved_successfully = False\r\n+ actual_format_saved = output_format\r\n+ try:\r\n+ cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n+ log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n+ saved_successfully = True\r\n+ except Exception as save_err:\r\n+ log.error(f\"Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n+ # Try Fallback for merged map (similar to _process_maps fallback)\r\n+ if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n+ log.warning(f\"Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n+ # ... 
[ Implement fallback save logic here, converting image_to_save if needed ] ...\r\n+ actual_format_saved = fallback_fmt_16\r\n+ output_ext = f\".{fallback_fmt_16}\"\r\n+ merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n+ merged_output_path_temp = self.temp_dir / merged_filename\r\n+ save_params_fallback = []\r\n+ img_fallback = None\r\n+ target_fallback_dtype = np.uint16\r\n+\r\n+ if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n+\r\n+ if image_to_save.dtype == np.float16:\r\n+ if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(\"NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n+ img_scaled = image_to_save * 65535.0\r\n+ img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n+ elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n+ else: log.error(f\"Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n+\r\n+ try: cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err: log.error(f\"Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n+ # --- End Fallback Logic ---\r\n+\r\n+ # Record details if save successful\r\n+ if saved_successfully:\r\n+ self.merged_maps_details[output_map_type][current_res_key] = {\r\n+ \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n+ \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n+ }\r\n+ if output_map_type not in self.metadata[\"merged_maps\"]: 
self.metadata[\"merged_maps\"].append(output_map_type)\r\n+\r\n+ except Exception as merge_res_err:\r\n+ log.error(f\"Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n+ self.merged_maps_details.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n+\r\n+ log.info(\"Finished applying map merging rules.\")\r\n+\r\n+\r\n+ def _generate_metadata_file(self):\r\n+ \"\"\"Gathers all collected metadata and writes it to a JSON file.\"\"\"\r\n+ # ... (Implementation from Response #49) ...\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\":\r\n+ log.warning(\"Asset name unknown, metadata may be incomplete.\")\r\n+\r\n+ log.info(f\"Generating metadata file for '{self.metadata['asset_name']}'...\")\r\n+ final_metadata = self.metadata.copy()\r\n+\r\n+ final_metadata[\"processed_map_resolutions\"] = {}\r\n+ for map_type, res_dict in self.processed_maps_details.items():\r\n+ keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n+ if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys) # Basic sort\r\n+\r\n+ final_metadata[\"merged_map_resolutions\"] = {}\r\n+ for map_type, res_dict in self.merged_maps_details.items():\r\n+ keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n+ if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n+\r\n+ # Add processing info\r\n+ final_metadata[\"_processing_info\"] = {\r\n+ \"preset_used\": self.config.preset_name,\r\n+ \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n+ # Optionally add core config details used, carefully\r\n+ }\r\n+\r\n+ # Sort lists\r\n+ for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n+ if key in final_metadata and 
isinstance(final_metadata[key], list): final_metadata[key].sort()\r\n+\r\n+ metadata_filename = self.config.metadata_filename\r\n+ output_path = self.temp_dir / metadata_filename\r\n+ log.debug(f\"Writing metadata to: {output_path}\")\r\n+ try:\r\n+ with open(output_path, 'w', encoding='utf-8') as f:\r\n+ json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n+ log.info(f\"Metadata file '{metadata_filename}' generated successfully.\")\r\n+ self.metadata_file_path_temp = output_path # Store path for moving\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to write metadata file {output_path}: {e}\") from e\r\n+\r\n+\r\n+ def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n+ \"\"\"\r\n+ Calculates the aspect ratio change string (e.g., \"EVEN\", \"X133\") based on original prototype logic.\r\n+ Returns the string representation.\r\n+ \"\"\"\r\n+ if original_width <= 0 or original_height <= 0:\r\n+ log.warning(\"Cannot calculate aspect ratio change with zero original dimensions.\")\r\n+ return \"InvalidInput\"\r\n+\r\n+ # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)\r\n+ if resized_width <= 0 or resized_height <= 0:\r\n+ log.warning(\"Cannot calculate aspect ratio change with zero resized dimensions.\")\r\n+ return \"InvalidResize\"\r\n+\r\n+ # Original logic from user feedback\r\n+ width_change_percentage = ((resized_width - original_width) / original_width) * 100\r\n+ height_change_percentage = ((resized_height - original_height) / original_height) * 100\r\n+\r\n+ normalized_width_change = width_change_percentage / 100\r\n+ normalized_height_change = height_change_percentage / 100\r\n+\r\n+ normalized_width_change = min(max(normalized_width_change + 1, 0), 2)\r\n+ normalized_height_change = min(max(normalized_height_change + 1, 0), 2)\r\n+\r\n+ # Handle potential zero division if one dimension change is exactly 
-100% (normalized to 0)\r\n+ # If both are 0, aspect ratio is maintained. If one is 0, the other dominates.\r\n+ if normalized_width_change == 0 and normalized_height_change == 0:\r\n+ closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1\r\n+ elif normalized_width_change == 0:\r\n+ closest_value_to_one = abs(normalized_height_change)\r\n+ elif normalized_height_change == 0:\r\n+ closest_value_to_one = abs(normalized_width_change)\r\n+ else:\r\n+ closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))\r\n+\r\n+ # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0\r\n+ epsilon = 1e-9\r\n+ scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one\r\n+\r\n+ scaled_normalized_width_change = scale_factor * normalized_width_change\r\n+ scaled_normalized_height_change = scale_factor * normalized_height_change\r\n+\r\n+ output_width = round(scaled_normalized_width_change, decimals)\r\n+ output_height = round(scaled_normalized_height_change, decimals)\r\n+\r\n+ # Convert to int if exactly 1.0 after rounding\r\n+ if abs(output_width - 1.0) < epsilon: output_width = 1\r\n+ if abs(output_height - 1.0) < epsilon: output_height = 1\r\n+\r\n+ # Determine output string\r\n+ if original_width == original_height or abs(output_width - output_height) < epsilon:\r\n+ output = \"EVEN\"\r\n+ elif output_width != 1 and output_height == 1:\r\n+ output = f\"X{str(output_width).replace('.', '')}\"\r\n+ elif output_height != 1 and output_width == 1:\r\n+ output = f\"Y{str(output_height).replace('.', '')}\"\r\n+ else:\r\n+ # Both changed relative to each other\r\n+ output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n+\r\n+ log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n+ return 
output\r\n+\r\n+ def _sanitize_filename(self, name: str) -> str:\r\n+ \"\"\"Removes or replaces characters invalid for filenames/directory names.\"\"\"\r\n+ # ... (Implementation from Response #51) ...\r\n+ if not isinstance(name, str): name = str(name)\r\n+ name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n+ name = re.sub(r'_+', '_', name)\r\n+ name = name.strip('_')\r\n+ if not name: name = \"invalid_name\"\r\n+ return name\r\n+\r\n+ def _organize_output_files(self):\r\n+ \"\"\"Moves processed files from temp dir to the final output structure.\"\"\"\r\n+ # ... (Implementation from Response #51) ...\r\n+ if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n+ if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing.\")\r\n+ if not self.metadata.get(\"supplier_name\"): raise AssetProcessingError(\"Supplier name missing.\")\r\n+\r\n+ supplier_sanitized = self._sanitize_filename(self.metadata[\"supplier_name\"])\r\n+ asset_name_sanitized = self._sanitize_filename(self.metadata[\"asset_name\"])\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ log.info(f\"Organizing output files into: {final_dir}\")\r\n+ try:\r\n+ # Check if overwriting is allowed before potentially deleting existing dir\r\n+ if final_dir.exists() and self.overwrite:\r\n+ log.warning(f\"Output directory exists and overwrite is True: {final_dir}. 
Removing existing directory.\")\r\n+ try:\r\n+ shutil.rmtree(final_dir)\r\n+ except Exception as rm_err:\r\n+ raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} during overwrite: {rm_err}\") from rm_err\r\n+ elif final_dir.exists() and not self.overwrite:\r\n+ # This case should ideally be caught by the skip logic earlier,\r\n+ # but adding a warning here as a safeguard.\r\n+ log.warning(f\"Output directory exists: {final_dir}. Overwriting (unexpected - should have been skipped).\")\r\n+\r\n+ final_dir.mkdir(parents=True, exist_ok=True) # Create after potential removal\r\n+ except Exception as e:\r\n+ # Catch potential errors during mkdir if rmtree failed partially?\r\n+ if not isinstance(e, AssetProcessingError): # Avoid wrapping already specific error\r\n+ raise AssetProcessingError(f\"Failed to create final dir {final_dir}: {e}\") from e\r\n+ else:\r\n+ raise # Re-raise the AssetProcessingError from rmtree\r\n+\r\n+ def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n+ if not src_rel_path: log.warning(f\"Missing src path for {file_desc}.\"); return\r\n+ source_abs = self.temp_dir / src_rel_path\r\n+ dest_abs = dest_dir / src_rel_path.name\r\n+ try:\r\n+ if source_abs.exists():\r\n+ log.debug(f\"Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n+ dest_dir.mkdir(parents=True, exist_ok=True) # Ensure sub-dir exists (for Extra/)\r\n+ shutil.move(str(source_abs), str(dest_abs))\r\n+ else: log.warning(f\"Source file missing for {file_desc}: {source_abs}\")\r\n+ except Exception as e: log.error(f\"Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n+\r\n+ # Move maps, merged maps, models, metadata\r\n+ for details_dict in [self.processed_maps_details, self.merged_maps_details]:\r\n+ for map_type, res_dict in details_dict.items():\r\n+ if 'error' in res_dict: continue\r\n+ for res_key, details in res_dict.items():\r\n+ if isinstance(details, dict) 
and 'path' in details: _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n+ for model_info in self.classified_files.get('models', []): _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n+ if self.metadata_file_path_temp: _safe_move(self.metadata_file_path_temp.relative_to(self.temp_dir), final_dir, \"metadata file\")\r\n+\r\n+ # Move extra/ignored files\r\n+ extra_subdir_name = self.config.extra_files_subdir\r\n+ extra_dir = final_dir / extra_subdir_name\r\n+ files_to_move_extra = self.classified_files.get('extra', []) + self.classified_files.get('ignored', [])\r\n+ if files_to_move_extra:\r\n+ log.debug(f\"Moving {len(files_to_move_extra)} files to '{extra_subdir_name}/'...\")\r\n+ try:\r\n+ extra_dir.mkdir(exist_ok=True)\r\n+ for file_info in files_to_move_extra: _safe_move(file_info.get('source_path'), extra_dir, f\"extra file ({file_info.get('reason', 'Unknown')})\")\r\n+ except Exception as e: log.error(f\"Failed creating/moving to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n+\r\n+ log.info(f\"Finished organizing output for '{asset_name_sanitized}'.\")\r\n+\r\n+\r\n+ def _cleanup_workspace(self):\r\n+ \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n+ # ... 
(Implementation from Response #45) ...\r\n+ if self.temp_dir and self.temp_dir.exists():\r\n+ try:\r\n+ log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n+ shutil.rmtree(self.temp_dir)\r\n+ self.temp_dir = None\r\n+ log.debug(\"Temporary workspace cleaned up successfully.\")\r\n+ except Exception as e:\r\n+ log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n+\r\n+ # --- Prediction Method ---\r\n+ def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n+ \"\"\"\r\n+ Predicts the final output structure (supplier, asset name) and attempts\r\n+ to predict output filenames for potential map files based on naming conventions.\r\n+ Does not perform full processing or image loading.\r\n+\r\n+ Returns:\r\n+ tuple[str | None, str | None, dict[str, str] | None]:\r\n+ (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n+ where file_predictions_dict maps input filename -> predicted output filename.\r\n+ Returns None if prediction fails critically.\r\n+ \"\"\"\r\n+ log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n+ try:\r\n+ # 1. Get Supplier Name\r\n+ supplier_name = self.config.supplier_name\r\n+ if not supplier_name:\r\n+ log.warning(\"Supplier name not found in configuration during prediction.\")\r\n+ return None\r\n+\r\n+ # 2. 
List Input Filenames/Stems\r\n+ candidate_stems = set() # Use set for unique stems\r\n+ filenames = []\r\n+ if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n+ try:\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ # Get only filenames, ignore directories\r\n+ filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n+ except zipfile.BadZipFile:\r\n+ log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n+ return None\r\n+ except Exception as zip_err:\r\n+ log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n+ return None # Cannot proceed if we can't list files\r\n+ elif self.input_path.is_dir():\r\n+ try:\r\n+ for item in self.input_path.iterdir():\r\n+ if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n+ filenames.append(item.name)\r\n+ # Note: Not walking subdirs for prediction to keep it fast\r\n+ except Exception as dir_err:\r\n+ log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n+ return None\r\n+\r\n+ if not filenames:\r\n+ log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n+ return None # Return None if no files found\r\n+\r\n+ # 3. 
Lightweight Classification for Stems and Potential Maps\r\n+ map_type_mapping = self.config.map_type_mapping\r\n+ model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n+ separator = self.config.source_naming_separator\r\n+ processed_filenames = set() # Track full filenames processed\r\n+ potential_map_files = {} # Store fname -> potential map_type\r\n+\r\n+ for fname in filenames:\r\n+ if fname in processed_filenames: continue\r\n+\r\n+ fstem = Path(fname).stem\r\n+ fstem_lower = fstem.lower()\r\n+ name_parts = fstem_lower.split(separator)\r\n+\r\n+ # Check map rules first\r\n+ map_matched = False\r\n+ for mapping_rule in map_type_mapping:\r\n+ source_keywords, standard_map_type = mapping_rule\r\n+ if standard_map_type not in self.config.standard_map_types: continue\r\n+ for keyword in source_keywords:\r\n+ kw_lower = keyword.lower().strip('*')\r\n+ if kw_lower in name_parts:\r\n+ is_exact_match = any(part == kw_lower for part in name_parts)\r\n+ if is_exact_match:\r\n+ candidate_stems.add(fstem) # Add unique stem\r\n+ potential_map_files[fname] = standard_map_type # Store potential type\r\n+ processed_filenames.add(fname)\r\n+ map_matched = True\r\n+ break # Found keyword match for this rule\r\n+ if map_matched: break # Found a rule match for this file\r\n+ if map_matched: continue # Move to next filename if identified as map\r\n+\r\n+ # Check model patterns if not a map\r\n+ for pattern in model_patterns:\r\n+ if fnmatch(fname.lower(), pattern.lower()):\r\n+ candidate_stems.add(fstem) # Still add stem for base name determination\r\n+ processed_filenames.add(fname)\r\n+ # Don't add models to potential_map_files\r\n+ break # Found model match\r\n+\r\n+ # Note: Files matching neither maps nor models are ignored for prediction details\r\n+\r\n+ candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n+ if not candidate_stems_list:\r\n+ log.warning(f\"Prediction: No relevant map/model stems found in 
{self.input_path.name}. Using input name as fallback.\")\r\n+ # Fallback: Use the input path's name itself if no stems found\r\n+ base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ determined_base_name = base_name_fallback\r\n+ else:\r\n+ # 4. Replicate _determine_base_metadata logic for base name\r\n+ determined_base_name = \"UnknownAssetName\"\r\n+ base_index = self.config.source_naming_indices.get('base_name')\r\n+\r\n+ if isinstance(base_index, int):\r\n+ potential_base_names = set()\r\n+ for stem in candidate_stems_list: # Iterate over the list\r\n+ parts = stem.split(separator)\r\n+ if len(parts) > base_index:\r\n+ potential_base_names.add(parts[base_index])\r\n+ if len(potential_base_names) == 1:\r\n+ determined_base_name = potential_base_names.pop()\r\n+ elif len(potential_base_names) > 1:\r\n+ log.debug(f\"Prediction: Multiple potential base names from index {base_index}, using common prefix.\")\r\n+ determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+ # else: Use common prefix below\r\n+\r\n+ if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n+ determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+\r\n+ # 5. Sanitize Names\r\n+ final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n+ final_supplier_name = self._sanitize_filename(supplier_name)\r\n+\r\n+ # 6. 
Predict Output Filenames\r\n+ file_predictions = {}\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ # Use highest resolution key as a placeholder for prediction\r\n+ highest_res_key = \"Res?\" # Fallback\r\n+ if self.config.image_resolutions:\r\n+ highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n+\r\n+ for input_fname, map_type in potential_map_files.items():\r\n+ # Assume PNG for prediction, extension might change based on bit depth rules later\r\n+ # but this gives a good idea of the renaming.\r\n+ # A more complex prediction could check bit depth rules.\r\n+ predicted_ext = \"png\" # Simple assumption for preview\r\n+ try:\r\n+ predicted_fname = target_pattern.format(\r\n+ base_name=final_base_name,\r\n+ map_type=map_type,\r\n+ resolution=highest_res_key, # Use placeholder resolution\r\n+ ext=predicted_ext\r\n+ )\r\n+ file_predictions[input_fname] = predicted_fname\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n+ file_predictions[input_fname] = \"[Filename Format Error]\"\r\n+\r\n+\r\n+ log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n+ return final_supplier_name, final_base_name, file_predictions\r\n+\r\n+ except Exception as e:\r\n+ log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n+ return None\r\n+\r\n+\r\n+ # --- New Detailed Prediction Method ---\r\n+ def get_detailed_file_predictions(self) -> list[dict] | None:\r\n+ \"\"\"\r\n+ Performs extraction and classification to provide a detailed list of all\r\n+ files found within the asset and their predicted status/output name.\r\n+ Does not perform image processing or file moving.\r\n+\r\n+ Returns:\r\n+ list[dict] | None: A list of dictionaries, each representing a file:\r\n+ {'original_path': str, 'predicted_name': str | None, 'status': str, 'details': str | None}\r\n+ Returns None if a critical error occurs during setup/classification.\r\n+ \"\"\"\r\n+ log.info(f\"Getting detailed file predictions for: {self.input_path.name}\")\r\n+ results = []\r\n+ asset_base_name = \"UnknownAssetName\" # Fallback\r\n+\r\n+ try:\r\n+ # --- Perform necessary setup and classification ---\r\n+ self._setup_workspace()\r\n+ self._extract_input()\r\n+ self._inventory_and_classify_files()\r\n+ self._determine_base_metadata() # Needed for base name prediction\r\n+ asset_base_name = self.metadata.get(\"asset_name\", asset_base_name)\r\n+\r\n+ # --- Prepare for filename prediction ---\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n+ if self.config.image_resolutions:\r\n+ highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n+\r\n+ # --- Process classified files ---\r\n+ # Maps\r\n+ for map_info in self.classified_files.get(\"maps\", []):\r\n+ 
original_path_str = str(map_info.get(\"source_path\", \"UnknownPath\"))\r\n+ map_type = map_info.get(\"map_type\", \"UnknownType\")\r\n+ # Predicted name for maps should just be the base asset name\r\n+ predicted_name_display = asset_base_name\r\n+ # Concise details\r\n+ details = f\"[{map_type}]\"\r\n+ if map_info.get(\"is_16bit_source\"):\r\n+ details += \" (16-bit)\"\r\n+\r\n+ # Still try to format the full name internally for error checking, but don't display it\r\n+ try:\r\n+ predicted_ext = \"png\" # Assumption for format check\r\n+ _ = target_pattern.format(\r\n+ base_name=asset_base_name,\r\n+ map_type=map_type,\r\n+ resolution=highest_res_key,\r\n+ ext=predicted_ext\r\n+ )\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction format error for {original_path_str}: {fmt_err}\")\r\n+ predicted_name_display = \"[Format Error]\" # Show error in name field\r\n+ details += f\" (Format Key Error: {fmt_err})\"\r\n+ except Exception as pred_err:\r\n+ log.warning(f\"Prediction error for {original_path_str}: {pred_err}\")\r\n+ predicted_name_display = \"[Prediction Error]\" # Show error in name field\r\n+ details += f\" (Error: {pred_err})\"\r\n+\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_name\": predicted_name_display, # Use the base name or error\r\n+ \"status\": \"Mapped\",\r\n+ \"details\": details # Use concise details\r\n+ })\r\n+\r\n+ # Models\r\n+ for model_info in self.classified_files.get(\"models\", []):\r\n+ original_path_str = str(model_info.get(\"source_path\", \"UnknownPath\"))\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_name\": Path(original_path_str).name, # Models usually keep original name\r\n+ \"status\": \"Model\",\r\n+ \"details\": \"[Model]\" # Concise detail\r\n+ })\r\n+\r\n+ # Extra\r\n+ for extra_info in self.classified_files.get(\"extra\", []):\r\n+ original_path_str = str(extra_info.get(\"source_path\", \"UnknownPath\"))\r\n+ reason = 
extra_info.get('reason', 'Unknown reason')\r\n+ # Determine status and details based on the reason\r\n+ if reason == 'Unrecognised': # Corrected string check\r\n+ status = \"Unrecognised\"\r\n+ details = \"[Unrecognised]\"\r\n+ else:\r\n+ status = \"Extra\"\r\n+ details = f\"Extra ({reason})\" # Show the pattern match reason\r\n+\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_name\": Path(original_path_str).name, # Extra/Unrecognised files keep original name\r\n+ \"status\": status,\r\n+ \"details\": details\r\n+ })\r\n+\r\n+ # Ignored\r\n+ for ignored_info in self.classified_files.get(\"ignored\", []):\r\n+ original_path_str = str(ignored_info.get(\"source_path\", \"UnknownPath\"))\r\n+ reason = ignored_info.get('reason', 'Unknown reason')\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_name\": None, # Ignored files have no output name\r\n+ \"status\": \"Ignored\",\r\n+ \"details\": f\"Ignored ({reason})\" # Keep reason for ignored files\r\n+ })\r\n+\r\n+ log.info(f\"Detailed prediction complete for {self.input_path.name}. 
Found {len(results)} files.\")\r\n+ return results\r\n+\r\n+ except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n+ # Log critical errors during the prediction process\r\n+ log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n+ # Optionally add a single error entry to results?\r\n+ # results.append({\"original_path\": self.input_path.name, \"predicted_name\": None, \"status\": \"Error\", \"details\": str(e)})\r\n+ # return results # Or return None to indicate failure\r\n+ return None # Indicate critical failure\r\n+ finally:\r\n+ # Ensure cleanup always happens\r\n+ self._cleanup_workspace()\r\n+\r\n+\r\n+# --- End of AssetProcessor Class ---\n\\ No newline at end of file\n" }, { "date": 1745226183898, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -0,0 +1,1813 @@\n+# asset_processor.py\r\n+\r\n+import os\r\n+import shutil\r\n+import tempfile\r\n+import zipfile\r\n+import logging\r\n+import json\r\n+import re\r\n+import time\r\n+from pathlib import Path\r\n+from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n+\r\n+# Attempt to import image processing libraries\r\n+try:\r\n+ import cv2\r\n+ import numpy as np\r\n+except ImportError:\r\n+ print(\"ERROR: Missing required libraries. Please install opencv-python and numpy:\")\r\n+ print(\"pip install opencv-python numpy\")\r\n+ exit(1) # Exit if essential libraries are missing\r\n+\r\n+# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n+try:\r\n+ import OpenEXR\r\n+ import Imath\r\n+ _HAS_OPENEXR = True\r\n+except ImportError:\r\n+ _HAS_OPENEXR = False\r\n+ # Log this information - basic EXR might still work via OpenCV\r\n+ logging.debug(\"Optional 'OpenEXR' python package not found. 
EXR saving relies on OpenCV's built-in support.\")\r\n+\r\n+\r\n+# Assuming Configuration class is in configuration.py\r\n+try:\r\n+ from configuration import Configuration, ConfigurationError\r\n+except ImportError:\r\n+ print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n+ print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n+ exit(1)\r\n+\r\n+# Use logger defined in main.py (or configure one here if run standalone)\r\n+log = logging.getLogger(__name__)\r\n+# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\n+if not log.hasHandlers():\r\n+ logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n+\r\n+\r\n+# --- Custom Exception ---\r\n+class AssetProcessingError(Exception):\r\n+ \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n+ pass\r\n+\r\n+# --- Helper Functions ---\r\n+def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n+ \"\"\"Calculates dimensions maintaining aspect ratio, fitting within target_max_dim.\"\"\"\r\n+ if orig_w <= 0 or orig_h <= 0: return (target_max_dim, target_max_dim) # Avoid division by zero\r\n+\r\n+ ratio = orig_w / orig_h\r\n+ if ratio > 1: # Width is dominant\r\n+ target_w = target_max_dim\r\n+ target_h = max(1, round(target_w / ratio)) # Ensure height is at least 1\r\n+ else: # Height is dominant or square\r\n+ target_h = target_max_dim\r\n+ target_w = max(1, round(target_h * ratio)) # Ensure width is at least 1\r\n+ return int(target_w), int(target_h)\r\n+\r\n+def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n+ \"\"\"\r\n+ Calculates min, max, mean for a given numpy image array.\r\n+ Handles grayscale and multi-channel images. 
Converts to float64 for calculation.\r\n+ \"\"\"\r\n+ if image_data is None:\r\n+ log.warning(\"Attempted to calculate stats on None image data.\")\r\n+ return None\r\n+ try:\r\n+ # Use float64 for calculations to avoid potential overflow/precision issues\r\n+ data_float = image_data.astype(np.float64)\r\n+\r\n+ if len(data_float.shape) == 2: # Grayscale (H, W)\r\n+ min_val = float(np.min(data_float))\r\n+ max_val = float(np.max(data_float))\r\n+ mean_val = float(np.mean(data_float))\r\n+ stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n+ log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n+ elif len(data_float.shape) == 3: # Color (H, W, C)\r\n+ channels = data_float.shape[2]\r\n+ min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n+ max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n+ mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n+ # Assuming BGR(A?) order from OpenCV, report as list [B, G, R, (A)]\r\n+ stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n+ log.debug(f\"Calculated {channels}-Channel Stats: Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n+ else:\r\n+ log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n+ return None\r\n+ return stats\r\n+ except Exception as e:\r\n+ log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n+ return {\"error\": str(e)}\r\n+\r\n+\r\n+from collections import defaultdict # Added for grouping\r\n+\r\n+# --- Helper function ---\r\n+def _get_base_map_type(target_map_string: str) -> str:\r\n+ \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n+ match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n+ if match:\r\n+ return match.group(1).upper()\r\n+ return target_map_string.upper() # Fallback if no number suffix\r\n+\r\n+# --- Asset Processor Class 
---\r\n+class AssetProcessor:\r\n+ \"\"\"\r\n+ Handles the processing pipeline for a single asset (ZIP or folder).\r\n+ \"\"\"\r\n+ # Define the list of known grayscale map types (adjust as needed)\r\n+ GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n+\r\n+ def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n+ \"\"\"\r\n+ Initializes the processor for a given input asset.\r\n+\r\n+ Args:\r\n+ input_path: Path to the input ZIP file or folder.\r\n+ config: The loaded Configuration object.\r\n+ output_base_path: The base directory where processed output will be saved.\r\n+ overwrite: If True, forces reprocessing even if output exists.\r\n+ \"\"\"\r\n+ if not isinstance(input_path, Path): input_path = Path(input_path)\r\n+ if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n+ if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n+\r\n+ if not input_path.exists():\r\n+ raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n+ if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n+ raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n+\r\n+ self.input_path: Path = input_path\r\n+ self.config: Configuration = config\r\n+ self.output_base_path: Path = output_base_path\r\n+ self.overwrite: bool = overwrite # Store the overwrite flag\r\n+\r\n+ self.temp_dir: Path | None = None # Path to the temporary working directory\r\n+ self.classified_files: dict[str, list[dict]] = {\r\n+ \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n+ }\r\n+ self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n+ self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n+ self.metadata_file_path_temp: Path | None = None\r\n+\r\n+ # Initialize metadata collected during processing\r\n+ 
self.metadata: dict = {\r\n+ \"asset_name\": \"Unknown\",\r\n+ \"supplier_name\": self.config.supplier_name,\r\n+ \"asset_category\": self.config._core_settings.get('DEFAULT_ASSET_CATEGORY', \"Texture\"),\r\n+ \"archetype\": \"Unknown\",\r\n+ \"maps_present\": [],\r\n+ \"merged_maps\": [],\r\n+ \"shader_features\": [],\r\n+ \"source_files_in_extra\": [],\r\n+ \"image_stats_1k\": {},\r\n+ \"map_details\": {},\r\n+ \"aspect_ratio_change_string\": \"N/A\" # Replaces output_scaling_factors\r\n+ # Processing info added in _generate_metadata_file\r\n+ }\r\n+\r\n+ log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n+\r\n+ def process(self) -> str:\r\n+ \"\"\"\r\n+ Executes the full processing pipeline for the asset.\r\n+ Returns:\r\n+ str: Status (\"processed\", \"skipped\").\r\n+ \"\"\"\r\n+ log.info(f\"Processing asset: {self.input_path.name}\")\r\n+ try:\r\n+ self._setup_workspace()\r\n+ self._extract_input()\r\n+ self._inventory_and_classify_files()\r\n+ self._determine_base_metadata()\r\n+\r\n+ # --- Check if asset should be skipped ---\r\n+ # Ensure asset_name and supplier_name were determined before checking\r\n+ asset_name = self.metadata.get(\"asset_name\")\r\n+ supplier_name = self.metadata.get(\"supplier_name\")\r\n+\r\n+ # Only check for skipping if overwrite is False AND we have valid names\r\n+ if not self.overwrite and asset_name and asset_name != \"UnknownAssetName\" and supplier_name:\r\n+ supplier_sanitized = self._sanitize_filename(supplier_name)\r\n+ asset_name_sanitized = self._sanitize_filename(asset_name)\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ metadata_file_path = final_dir / self.config.metadata_filename\r\n+\r\n+ if final_dir.exists() and metadata_file_path.is_file():\r\n+ log.info(f\"Output directory and metadata found for '{asset_name_sanitized}' and overwrite is False. 
Skipping.\")\r\n+ # No need to call cleanup here, the finally block will handle it.\r\n+ return \"skipped\" # Return status\r\n+ elif self.overwrite:\r\n+ # Log only if asset name is known, otherwise it's redundant with the initial processing log\r\n+ known_asset_name = self.metadata.get('asset_name', self.input_path.name)\r\n+ # Avoid logging overwrite message if name is still unknown\r\n+ if known_asset_name != \"UnknownAssetName\" and known_asset_name != self.input_path.name:\r\n+ log.info(f\"Overwrite flag is set for '{known_asset_name}'. Processing will continue even if output exists.\")\r\n+ # --- End Skip Check ---\r\n+\r\n+ # Continue with processing if not skipped\r\n+ self._process_maps()\r\n+ self._merge_maps()\r\n+ self._generate_metadata_file()\r\n+ self._organize_output_files()\r\n+ log.info(f\"Asset processing completed successfully for: {self.metadata.get('asset_name', self.input_path.name)}\")\r\n+ return \"processed\" # Return status\r\n+ except Exception as e:\r\n+ # Log error with traceback if it hasn't been logged already\r\n+ if not isinstance(e, (AssetProcessingError, ConfigurationError)): # Avoid double logging known types\r\n+ log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name}: {e}\")\r\n+ # Ensure error is propagated\r\n+ if not isinstance(e, AssetProcessingError):\r\n+ raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n+ else:\r\n+ raise # Re-raise AssetProcessingError or ConfigurationError\r\n+ finally:\r\n+ # Ensure cleanup always happens\r\n+ self._cleanup_workspace()\r\n+\r\n+ def _setup_workspace(self):\r\n+ \"\"\"Creates a temporary directory for processing.\"\"\"\r\n+ try:\r\n+ self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n+ log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n+\r\n+ def 
_extract_input(self):\r\n+ \"\"\"Extracts ZIP or copies folder contents to the temporary workspace.\"\"\"\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n+\r\n+ log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n+ try:\r\n+ if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n+ log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ zip_ref.extractall(self.temp_dir)\r\n+ log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n+ elif self.input_path.is_dir():\r\n+ log.debug(f\"Copying directory contents: {self.input_path}\")\r\n+ for item in self.input_path.iterdir():\r\n+ destination = self.temp_dir / item.name\r\n+ if item.is_dir():\r\n+ # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n+ try:\r\n+ shutil.copytree(item, destination, dirs_exist_ok=True)\r\n+ except TypeError: # Fallback for older Python\r\n+ if not destination.exists():\r\n+ shutil.copytree(item, destination)\r\n+ else:\r\n+ log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n+\r\n+ else:\r\n+ shutil.copy2(item, destination)\r\n+ log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n+ except zipfile.BadZipFile:\r\n+ raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n+\r\n+ def _inventory_and_classify_files(self):\r\n+ \"\"\"\r\n+ Scans workspace, classifies files according to preset rules, handling\r\n+ 16-bit prioritization and multiple variants of the same base map type.\r\n+ \"\"\"\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n+\r\n+ log.info(\"Scanning and classifying 
files...\")\r\n+ log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n+ all_files_rel = []\r\n+ for root, _, files in os.walk(self.temp_dir):\r\n+ root_path = Path(root)\r\n+ for file in files:\r\n+ full_path = root_path / file\r\n+ relative_path = full_path.relative_to(self.temp_dir)\r\n+ all_files_rel.append(relative_path)\r\n+\r\n+ log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n+\r\n+ # --- Initialization ---\r\n+ processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n+ potential_map_candidates = [] # List to store potential map file info\r\n+ # Reset classified files (important if this method is ever called multiple times)\r\n+ self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n+\r\n+\r\n+ # --- Step 1: Identify Explicit 'Extra' Files ---\r\n+ log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n+ compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n+ log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path in processed_files: continue\r\n+ for compiled_regex in compiled_extra_regex:\r\n+ if compiled_regex.search(file_rel_path.name):\r\n+ log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n+ self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n+ processed_files.add(file_rel_path)\r\n+ log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n+ break # Stop checking extra patterns for this file\r\n+\r\n+ # --- Step 2: Identify Model Files ---\r\n+ log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n+ compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n+ log.debug(f\" 
Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path in processed_files: continue\r\n+ for compiled_regex in compiled_model_regex:\r\n+ if compiled_regex.search(file_rel_path.name):\r\n+ log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n+ self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n+ processed_files.add(file_rel_path)\r\n+ log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n+ break # Stop checking model patterns for this file\r\n+\r\n+ # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n+ log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n+ # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n+ compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n+\r\n+ for file_rel_path in all_files_rel:\r\n+ # Skip files already classified as Extra or Model\r\n+ if file_rel_path in processed_files:\r\n+ continue\r\n+\r\n+ file_stem = file_rel_path.stem\r\n+ match_found = False\r\n+\r\n+ # Iterate through base types and their associated regex tuples\r\n+ for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n+ if match_found: break # Stop checking types for this file once matched\r\n+\r\n+ # Get the original keywords list for the current rule index\r\n+ # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n+ original_rule = None\r\n+ # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n+ if regex_tuples:\r\n+ current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n+ if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n+ rule_candidate = 
self.config.map_type_mapping[current_rule_index]\r\n+ # Verify it's the correct rule by checking target_type\r\n+ if rule_candidate.get(\"target_type\") == base_map_type:\r\n+ original_rule = rule_candidate\r\n+ else:\r\n+ log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. Searching...\")\r\n+ # Fallback search if index doesn't match (shouldn't happen ideally)\r\n+ for idx, rule in enumerate(self.config.map_type_mapping):\r\n+ if rule.get(\"target_type\") == base_map_type:\r\n+ original_rule = rule\r\n+ log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n+ break\r\n+\r\n+ original_keywords_list = []\r\n+ if original_rule and 'keywords' in original_rule:\r\n+ original_keywords_list = original_rule['keywords']\r\n+ else:\r\n+ log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. Keyword indexing may fail.\")\r\n+\r\n+ for kw_regex, original_keyword, rule_index in regex_tuples:\r\n+ if kw_regex.search(file_stem):\r\n+ log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n+\r\n+ # Find the index of the matched keyword within its rule's list\r\n+ keyword_index_in_rule = -1 # Default if not found\r\n+ if original_keywords_list:\r\n+ try:\r\n+ # Use the original_keyword string directly\r\n+ keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n+ except ValueError:\r\n+ log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? 
{original_keywords_list}\")\r\n+ else:\r\n+ log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n+\r\n+ # Add candidate only if not already added\r\n+ if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n+ potential_map_candidates.append({\r\n+ 'source_path': file_rel_path,\r\n+ 'matched_keyword': original_keyword,\r\n+ 'base_map_type': base_map_type,\r\n+ 'preset_rule_index': rule_index,\r\n+ 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n+ 'is_16bit_source': False\r\n+ })\r\n+ else:\r\n+ log.warning(f\" '{file_rel_path}' was already added as a candidate? Skipping duplicate add.\")\r\n+\r\n+ match_found = True\r\n+ break # Stop checking regex tuples for this base_type once matched\r\n+\r\n+ log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n+\r\n+ # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n+ log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n+ compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n+ for file_rel_path in all_files_rel:\r\n+ # Skip if already processed or already identified as a candidate\r\n+ if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n+ continue\r\n+\r\n+ for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n+ log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n+ match = compiled_regex.search(file_rel_path.name) # Store result\r\n+ if match:\r\n+ log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n+ potential_map_candidates.append({\r\n+ 'source_path': file_rel_path,\r\n+ 'matched_keyword': 'N/A (16bit variant)', # 
Placeholder keyword\r\n+ 'base_map_type': base_type,\r\n+ 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n+ 'is_16bit_source': True # Mark as 16-bit immediately\r\n+ })\r\n+ log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n+ # Don't add to processed_files yet, let Step 4 handle filtering\r\n+ break # Stop checking bit depth patterns for this file\r\n+\r\n+ log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n+\r\n+ # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n+ log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n+ compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n+ candidates_to_keep = []\r\n+ candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n+\r\n+ # Mark 16-bit candidates\r\n+ for candidate in potential_map_candidates:\r\n+ base_type = candidate['base_map_type']\r\n+ # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n+ if base_type in compiled_bit_depth_regex:\r\n+ if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n+ candidate['is_16bit_source'] = True\r\n+ log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n+\r\n+\r\n+ # Identify base types that have a 16-bit version present\r\n+ prioritized_16bit_bases = {\r\n+ candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n+ }\r\n+ log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n+\r\n+ # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n+ for candidate in potential_map_candidates:\r\n+ if candidate['is_16bit_source']:\r\n+ candidates_to_keep.append(candidate)\r\n+ log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} 
({candidate['base_map_type']})\")\r\n+ elif candidate['base_map_type'] not in prioritized_16bit_bases:\r\n+ candidates_to_keep.append(candidate)\r\n+ log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+ else:\r\n+ # This is an 8-bit candidate whose 16-bit counterpart exists\r\n+ candidates_to_ignore.append(candidate)\r\n+ log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+\r\n+ # Add ignored 8-bit files to the main ignored list\r\n+ for ignored_candidate in candidates_to_ignore:\r\n+ self.classified_files[\"ignored\"].append({\r\n+ 'source_path': ignored_candidate['source_path'],\r\n+ 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n+ })\r\n+ processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n+\r\n+ log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n+\r\n+ # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n+ log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n+ # from collections import defaultdict # Moved import to top of file\r\n+ grouped_by_base_type = defaultdict(list)\r\n+ for candidate in candidates_to_keep:\r\n+ grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n+\r\n+ final_map_list = []\r\n+ for base_map_type, candidates in grouped_by_base_type.items():\r\n+ log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n+\r\n+ # --- NEW SORTING LOGIC ---\r\n+ # Sort candidates based on:\r\n+ # 1. The index of the rule object in the preset's map_type_mapping list.\r\n+ # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n+ # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n+ candidates.sort(key=lambda c: (\r\n+ c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n+ c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n+ str(c['source_path'])\r\n+ ))\r\n+ # --- END NEW SORTING LOGIC ---\r\n+\r\n+ # Removed diagnostic log\r\n+\r\n+ # Assign suffixes and add to the final map list\r\n+ for i, final_candidate in enumerate(candidates): # Use the directly sorted list\r\n+ # Determine final map type based on the new rule\r\n+ if base_map_type in self.config.respect_variant_map_types: # Check the new config list\r\n+ # Always assign suffix for types in the list (if more than one or only one)\r\n+ final_map_type = f\"{base_map_type}-{i + 1}\"\r\n+ else:\r\n+ # Never assign suffix for types NOT in the list\r\n+ final_map_type = base_map_type\r\n+\r\n+ final_map_list.append({\r\n+ \"map_type\": final_map_type,\r\n+ \"source_path\": final_candidate[\"source_path\"],\r\n+ \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n+ \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n+ \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n+ })\r\n+ processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n+ log.debug(f\" Final Map: Type='{final_map_type}', Source='{final_candidate['source_path']}', Keyword='{final_candidate['matched_keyword']}', 16bit={final_candidate['is_16bit_source']}\")\r\n+\r\n+ self.classified_files[\"maps\"] = final_map_list\r\n+\r\n+ # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n+ log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n+ remaining_count = 0\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path not in processed_files:\r\n+ log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n+ 
self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})\r\n+ remaining_count += 1\r\n+ # No need to add to processed_files here, it's the final step\r\n+ log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n+\r\n+ # --- Final Summary ---\r\n+ # Update metadata list of files actually moved to extra (including Unrecognised and Ignored)\r\n+ self.metadata[\"source_files_in_extra\"] = sorted([\r\n+ str(f['source_path']) for f in self.classified_files['extra'] + self.classified_files['ignored']\r\n+ ])\r\n+ log.info(f\"File classification complete.\")\r\n+ log.debug(\"--- Final Classification Summary (v2) ---\")\r\n+ map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n+ model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n+ extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n+ ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n+ log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n+ log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n+ log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n+ log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n+ log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n+\r\n+\r\n+ def _determine_base_metadata(self):\r\n+ \"\"\"Determines base_name, asset_category, and archetype.\"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ log.info(\"Determining base metadata...\")\r\n+\r\n+ # --- Determine Asset Category ---\r\n+ self.metadata[\"asset_category\"] = self.config.default_asset_category # Start with default\r\n+ if 
self.classified_files[\"models\"]:\r\n+ self.metadata[\"asset_category\"] = \"Asset\"\r\n+ log.debug(\"Category set to 'Asset' due to model file presence.\")\r\n+ else:\r\n+ decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n+ found_decal = False\r\n+ candidate_files = [f['source_path'] for f in self.classified_files['maps']] # Check map names first\r\n+ if not candidate_files: # Fallback to checking all files? Maybe too broad. Check Extra?\r\n+ candidate_files = [f['source_path'] for f in self.classified_files['extra']]\r\n+\r\n+ if decal_keywords:\r\n+ for file_path in candidate_files:\r\n+ for keyword in decal_keywords:\r\n+ if keyword.lower() in file_path.name.lower():\r\n+ self.metadata[\"asset_category\"] = \"Decal\"\r\n+ found_decal = True; break\r\n+ if found_decal: break\r\n+ if found_decal: log.debug(\"Category set to 'Decal' due to keyword match.\")\r\n+\r\n+ # --- Determine Base Name ---\r\n+ map_stems = [f['source_path'].stem for f in self.classified_files['maps']]\r\n+ model_stems = [f['source_path'].stem for f in self.classified_files['models']]\r\n+ candidate_stems = map_stems + model_stems\r\n+\r\n+ determined_base_name = \"UnknownAssetName\"\r\n+ if candidate_stems:\r\n+ separator = self.config.source_naming_separator\r\n+ base_index = self.config.source_naming_indices.get('base_name')\r\n+\r\n+ if isinstance(base_index, int):\r\n+ potential_base_names = set()\r\n+ for stem in candidate_stems:\r\n+ parts = stem.split(separator)\r\n+ if len(parts) > base_index:\r\n+ potential_base_names.add(parts[base_index])\r\n+ if len(potential_base_names) == 1:\r\n+ determined_base_name = potential_base_names.pop()\r\n+ log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n+ elif len(potential_base_names) > 1 :\r\n+ log.warning(f\"Multiple potential base names found from parts index {base_index}: {potential_base_names}. 
Falling back to common prefix.\")\r\n+ # Fallback logic if structured parts method fails or yields multiple names\r\n+ determined_base_name = os.path.commonprefix(candidate_stems)\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Clean trailing separators etc.\r\n+ # else: len is 0, means no valid parts found, use common prefix below\r\n+\r\n+ # If no index or structured parts failed, use common prefix of all relevant stems\r\n+ if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n+ determined_base_name = os.path.commonprefix(candidate_stems)\r\n+ log.debug(f\"Using common prefix: '{determined_base_name}'\")\r\n+ # Clean up common separators/underscores often left by commonprefix\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+\r\n+ # Final cleanup and fallback for base name\r\n+ determined_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n+ self.metadata[\"asset_name\"] = determined_base_name\r\n+ log.debug(f\"Final Determined Base Name: {self.metadata['asset_name']}\")\r\n+\r\n+ # --- Determine Archetype (Usage) ---\r\n+ archetype_rules = self.config.archetype_rules\r\n+ determined_archetype = \"Unknown\"\r\n+ check_stems = [f['source_path'].stem.lower() for f in self.classified_files['maps']]\r\n+ check_stems.extend([f['source_path'].stem.lower() for f in self.classified_files['models']])\r\n+ # Also check the determined base name itself?\r\n+ check_stems.append(self.metadata[\"asset_name\"].lower())\r\n+\r\n+ if check_stems:\r\n+ best_match_archetype = \"Unknown\"\r\n+ highest_match_count = 0 # Simple heuristic: prioritize rule with most keyword hits?\r\n+\r\n+ for rule in archetype_rules:\r\n+ if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n+ arch_name, rules_dict = rule\r\n+ match_any = rules_dict.get(\"match_any\", [])\r\n+ # match_all = rules_dict.get(\"match_all\", []) # Add 
logic if needed\r\n+\r\n+ current_match_count = 0\r\n+ matched_any_keyword = False\r\n+ if match_any:\r\n+ for keyword in match_any:\r\n+ kw_lower = keyword.lower()\r\n+ for stem in check_stems:\r\n+ # Using simple substring check again\r\n+ if kw_lower in stem:\r\n+ current_match_count += 1\r\n+ matched_any_keyword = True\r\n+ # Optionally break after first hit per keyword? Or count all occurrences? Count all for now.\r\n+\r\n+ # Decide if this rule matches. For now: requires at least one 'match_any' hit.\r\n+ if matched_any_keyword:\r\n+ # Simple approach: first rule that matches wins.\r\n+ # Could be enhanced by prioritizing rules or counting hits.\r\n+ if best_match_archetype == \"Unknown\": # Take the first match\r\n+ best_match_archetype = arch_name\r\n+ log.debug(f\"Tentative archetype match '{arch_name}' based on keywords: {match_any}\")\r\n+ # Break here for \"first match wins\" logic\r\n+ break\r\n+\r\n+ # --- Example: Prioritize by match count (more complex) ---\r\n+ # if current_match_count > highest_match_count:\r\n+ # highest_match_count = current_match_count\r\n+ # best_match_archetype = arch_name\r\n+ # log.debug(f\"New best archetype match '{arch_name}' with count {current_match_count}\")\r\n+ # ----------------------------------------------------------\r\n+\r\n+ determined_archetype = best_match_archetype\r\n+\r\n+ self.metadata[\"archetype\"] = determined_archetype\r\n+ log.debug(f\"Determined Archetype: {self.metadata['archetype']}\")\r\n+ log.info(\"Base metadata determination complete.\")\r\n+\r\n+\r\n+ def _process_maps(self):\r\n+ \"\"\"Loads, processes, resizes, and saves classified map files.\"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ log.info(f\"Processing identified map files for '{self.metadata['asset_name']}'...\")\r\n+ processed_map_types = set()\r\n+\r\n+ # --- Settings retrieval ---\r\n+ resolutions = self.config.image_resolutions\r\n+ stats_res_key = 
self.config.calculate_stats_resolution\r\n+ stats_target_dim = resolutions.get(stats_res_key)\r\n+ if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. Stats skipped.\")\r\n+ gloss_keywords = self.config.source_glossiness_keywords\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ base_name = self.metadata['asset_name']\r\n+\r\n+ # --- Pre-process Glossiness -> Roughness ---\r\n+ preprocessed_data = {}\r\n+ derived_from_gloss_flag = {}\r\n+ gloss_map_info_for_rough, native_rough_map_info = None, None\r\n+ for map_info in self.classified_files['maps']:\r\n+ if map_info['map_type'] == 'ROUGH':\r\n+ is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n+ if is_gloss: gloss_map_info_for_rough = map_info\r\n+ else: native_rough_map_info = map_info\r\n+\r\n+ rough_source_to_use = None\r\n+ if gloss_map_info_for_rough:\r\n+ rough_source_to_use = gloss_map_info_for_rough\r\n+ derived_from_gloss_flag['ROUGH'] = True\r\n+ if native_rough_map_info:\r\n+ log.warning(f\"Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n+ if native_rough_map_info in self.classified_files['maps']: self.classified_files['maps'].remove(native_rough_map_info)\r\n+ self.classified_files['ignored'].append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n+ elif native_rough_map_info:\r\n+ rough_source_to_use = native_rough_map_info\r\n+ derived_from_gloss_flag['ROUGH'] = False\r\n+\r\n+ if derived_from_gloss_flag.get('ROUGH'):\r\n+ source_path = self.temp_dir / rough_source_to_use['source_path']\r\n+ log.info(f\"Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n+ try:\r\n+ img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n+ if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n+ original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n+ if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n+ if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n+ elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n+ else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n+ # Store tuple: (inverted_float_data, original_dtype)\r\n+ preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n+ log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n+ except Exception as e: log.error(f\"Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n+\r\n+ # --- Main Processing Loop ---\r\n+ maps_to_process = list(self.classified_files['maps'])\r\n+ for map_info in maps_to_process:\r\n+ map_type = map_info['map_type']\r\n+ source_path_rel = map_info['source_path']\r\n+ original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n+ log.info(f\"-- 
Processing Map Type: {map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n+ img_processed, source_dtype = None, None\r\n+ map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n+\r\n+ try:\r\n+ # --- 1. Get/Load Source Data ---\r\n+ if map_type in preprocessed_data:\r\n+ log.debug(f\"Using pre-processed data for {map_type}.\")\r\n+ # Unpack tuple: (inverted_float_data, original_dtype)\r\n+ img_processed, source_dtype = preprocessed_data[map_type]\r\n+ # No longer need to read the original file just for dtype\r\n+ else:\r\n+ full_source_path = self.temp_dir / source_path_rel\r\n+ # Determine the read flag based on map type\r\n+ read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n+ log.debug(f\"Loading source {source_path_rel.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n+ if img_loaded is None:\r\n+ raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n+ img_processed, source_dtype = img_loaded.copy(), img_loaded.dtype\r\n+ log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape}\")\r\n+ map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n+\r\n+ # --- 2. 
Handle Alpha Mask ---\r\n+ if map_type == 'MASK' and img_processed is not None:\r\n+ log.debug(\"Processing as MASK type.\")\r\n+ shape = img_processed.shape\r\n+ if len(shape) == 3 and shape[2] == 4: img_processed = img_processed[:, :, 3]\r\n+ elif len(shape) == 3 and shape[2] == 3: img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n+ if img_processed.dtype != np.uint8:\r\n+ log.debug(f\"Converting mask from {img_processed.dtype} to uint8.\")\r\n+ if source_dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ else: img_processed = img_processed.astype(np.uint8)\r\n+\r\n+ if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n+ orig_h, orig_w = img_processed.shape[:2]\r\n+ self.processed_maps_details.setdefault(map_type, {})\r\n+ max_original_dimension = max(orig_w, orig_h) # Get max original dimension\r\n+\r\n+ # --- 3. 
Resize & Save Loop ---\r\n+ for res_key, target_dim in resolutions.items():\r\n+ # --- Skip Upscaling ---\r\n+ if target_dim > max_original_dimension:\r\n+ log.debug(f\"Skipping {res_key} ({target_dim}px): Target dimension is larger than original max dimension ({max_original_dimension}px).\")\r\n+ continue\r\n+ log.debug(f\"Processing {map_type} for resolution: {res_key}...\")\r\n+ if orig_w <= 0 or orig_h <= 0: log.warning(f\"Invalid original dims, skipping resize {res_key}.\"); continue\r\n+ target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n+ interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n+ try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n+ except Exception as resize_err: log.error(f\"Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n+\r\n+ # --- 3a. Calculate Stats ---\r\n+ if res_key == stats_res_key and stats_target_dim:\r\n+ log.debug(f\"Calculating stats for {map_type} using {res_key} image...\")\r\n+ stats = _calculate_image_stats(img_resized) # Use the already resized image\r\n+ if stats: self.metadata[\"image_stats_1k\"][map_type] = stats\r\n+ else: log.warning(f\"Stats calculation failed for {map_type} at {res_key}.\")\r\n+ # Calculate aspect change string on the lowest processed resolution, only once per asset\r\n+ lowest_res_key = min(resolutions, key=resolutions.get)\r\n+ if self.metadata[\"aspect_ratio_change_string\"] == \"N/A\" and res_key == lowest_res_key:\r\n+ try:\r\n+ aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n+ self.metadata[\"aspect_ratio_change_string\"] = aspect_string\r\n+ log.debug(f\"Stored aspect ratio change string using lowest res '{res_key}': '{aspect_string}'\")\r\n+ except Exception as aspect_err:\r\n+ log.error(f\"Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", 
exc_info=True)\r\n+ self.metadata[\"aspect_ratio_change_string\"] = \"Error\" # Indicate calculation failure\r\n+\r\n+ # --- 3b. Determine Output Bit Depth & Format ---\r\n+ bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n+ current_dtype = img_resized.dtype # Dtype after resize\r\n+ output_dtype_target, output_bit_depth = None, 8 # Defaults\r\n+ if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n+ elif bit_depth_rule == 'respect':\r\n+ if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16 # Respect float source as 16bit uint for non-EXR\r\n+ else: output_dtype_target, output_bit_depth = np.uint8, 8 # Default respect to 8bit\r\n+ else: output_dtype_target, output_bit_depth = np.uint8, 8 # Fallback\r\n+\r\n+ # --- 3c. Determine Output Format based on Input, Rules & Threshold ---\r\n+ output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n+ primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n+ fmt_8bit_config = self.config.get_8bit_output_format() # Usually 'png'\r\n+ threshold = self.config.resolution_threshold_for_jpg # Get threshold value\r\n+ force_lossless = map_type in self.config.force_lossless_map_types\r\n+\r\n+ if force_lossless:\r\n+ log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ # Add EXR type flags if needed, e.g., HALF for 16-bit float\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ # Add compression later if desired, e.g. 
cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_ZIP\r\n+ else: # Assume png or other lossless 16-bit format\r\n+ # Use fallback if primary isn't suitable or fails? For now, assume primary is lossless.\r\n+ # If primary_fmt_16 is not 'png', maybe default to fallback_fmt_16?\r\n+ if output_format != \"png\":\r\n+ log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless. Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16 # Ensure it's a known lossless like png/tif\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ # Add params for other lossless like TIF if supported\r\n+ else: # 8-bit lossless\r\n+ output_format = fmt_8bit_config # Usually 'png'\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else:\r\n+ log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for '{map_type}' will use PNG instead.\")\r\n+ output_format = \"png\"\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n+\r\n+ # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n+ elif output_bit_depth == 8 and target_dim >= threshold:\r\n+ output_format = 'jpg'\r\n+ output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n+ # --- Else: Apply Input/Rule-Based Logic ---\r\n+ else:\r\n+ # Apply force_8bit rule (if not overridden by threshold)\r\n+ if bit_depth_rule == 'force_8bit':\r\n+ output_format = 'png' # Force to PNG as per clarification\r\n+ output_ext = '.png'\r\n+ # output_bit_depth is already 8, output_dtype_target is already uint8\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n+ # Handle specific input extensions if not forced to 8bit PNG\r\n+ elif original_extension == '.jpg' and output_bit_depth == 8:\r\n+ output_format = 'jpg'\r\n+ output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n+ elif original_extension == '.tif':\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) # Add 
compression later\r\n+ log.debug(f\"Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n+ else: # Fallback for 16-bit from TIF\r\n+ output_format = fallback_fmt_16\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n+ else: # output_bit_depth == 8 from TIF input (and below threshold)\r\n+ output_format = 'png'\r\n+ output_ext = '.png'\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n+ # Handle other inputs (e.g., PNG) or fallbacks\r\n+ else:\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else: # Fallback for 16-bit\r\n+ output_format = fallback_fmt_16\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ 
save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n+ else: # 8-bit output (and below threshold)\r\n+ output_format = fmt_8bit_config # Use configured 8-bit format\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ elif output_format == \"jpg\":\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n+\r\n+ img_to_save = img_resized.copy() # Work on copy for dtype conversion\r\n+ # --- Apply Dtype Conversion ---\r\n+ if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n+ if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n+ if needs_float16 and img_to_save.dtype != np.float16:\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n+\r\n+ # --- 3d. 
Construct Filename & Save ---\r\n+ filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n+ output_path_temp = self.temp_dir / filename\r\n+ log.debug(f\"Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n+ saved_successfully, actual_format_saved = False, output_format\r\n+ try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n+ except Exception as save_err:\r\n+ log.error(f\"Save failed ({output_format}): {save_err}\")\r\n+ # --- Try Fallback ---\r\n+ if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n+ log.warning(f\"Attempting fallback: {fallback_fmt_16}\")\r\n+ actual_format_saved = fallback_fmt_16; output_ext = f\".{fallback_fmt_16}\"; # Adjust format/ext\r\n+ filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n+ output_path_temp = self.temp_dir / filename\r\n+ save_params_fallback = [] # Reset params for fallback\r\n+ img_fallback = None; target_fallback_dtype = np.uint16\r\n+ if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, 6])\r\n+ elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n+\r\n+ # Convert EXR source (float16) to uint16 for PNG/TIF fallback\r\n+ #if img_to_save.dtype == np.float16: img_fallback = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(target_fallback_dtype)\r\n+ if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n+ # <<< START MODIFICATION HERE >>>\r\n+ # Check for NaN/Inf before conversion\r\n+ if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n+ log.error(f\"Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. 
Skipping resolution.\")\r\n+ continue # Skip fallback if data is bad\r\n+\r\n+ # Clip *after* scaling for uint16 conversion robustness\r\n+ img_scaled = img_to_save * 65535.0\r\n+ img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n+ # <<< END MODIFICATION HERE >>>\r\n+ elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already correct type\r\n+ else: log.error(f\"Cannot convert {img_to_save.dtype} for fallback.\"); continue # Skip fallback\r\n+\r\n+ try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err: log.error(f\"Fallback save failed: {fallback_err}\", exc_info=True)\r\n+\r\n+ # --- 3e. Store Result ---\r\n+ if saved_successfully:\r\n+ self.processed_maps_details[map_type][res_key] = {\r\n+ \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n+ \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n+ \"format\": actual_format_saved\r\n+ }\r\n+ map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n+\r\n+ except Exception as map_proc_err:\r\n+ log.error(f\"Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n+ self.processed_maps_details.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n+\r\n+ self.metadata[\"map_details\"][map_type] = map_details # Store details for this map type\r\n+\r\n+ # --- Final Metadata Updates ---\r\n+ processed_map_types = set(k for k, v in self.processed_maps_details.items() if 'error' not in v) # Only count successful\r\n+ self.metadata[\"maps_present\"] = sorted(list(processed_map_types))\r\n+ features = set()\r\n+ for map_type, details in self.metadata[\"map_details\"].items():\r\n+ if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n+ if 
details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n+ res_details = self.processed_maps_details.get(map_type, {})\r\n+ if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n+ self.metadata[\"shader_features\"] = sorted(list(features))\r\n+ log.debug(f\"Determined shader features: {self.metadata['shader_features']}\")\r\n+ log.info(\"Finished processing all map files.\")\r\n+\r\n+\r\n+ #log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n+\r\n+ def _merge_maps(self):\r\n+ \"\"\"Merges channels from different maps based on rules in configuration.\"\"\"\r\n+ # ... (initial checks and getting merge_rules) ...\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Workspace not setup.\")\r\n+\r\n+\r\n+ # <<< FIX: Get merge rules from the configuration object >>>\r\n+\r\n+ merge_rules = self.config.map_merge_rules\r\n+\r\n+ # <<< END FIX >>>\r\n+ log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n+\r\n+ for rule_index, rule in enumerate(merge_rules):\r\n+ # <<< FIX: Assign variables *before* using them >>>\r\n+ output_map_type = rule.get(\"output_map_type\")\r\n+ inputs_mapping = rule.get(\"inputs\")\r\n+ defaults = rule.get(\"defaults\", {})\r\n+ rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n+\r\n+ # <<< FIX: Check if essential rule keys exist *after* assignment >>>\r\n+ if not output_map_type or not inputs_mapping:\r\n+ log.warning(f\"Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs' in rule definition. 
Rule data: {rule}\")\r\n+ continue # Skip to the next rule in merge_rules\r\n+\r\n+ # Now it's safe to use output_map_type in the log statement\r\n+ log.info(f\"-- Applying merge rule for '{output_map_type}' --\")\r\n+ # <<< END FIX >>>\r\n+\r\n+ self.merged_maps_details.setdefault(output_map_type, {})\r\n+\r\n+ # --- Determine required inputs and their common resolutions ---\r\n+ required_input_types = set(inputs_mapping.values()) # e.g., {'NRM', 'ROUGH'}\r\n+ if not required_input_types:\r\n+ log.warning(f\"Skipping merge rule '{output_map_type}': No input map types defined in 'inputs'.\")\r\n+ continue\r\n+\r\n+ possible_resolutions_per_input = []\r\n+ for input_type in required_input_types:\r\n+ if input_type in self.processed_maps_details:\r\n+ # Get resolution keys where processing didn't error\r\n+ res_keys = {res for res, details in self.processed_maps_details[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n+ if not res_keys:\r\n+ log.warning(f\"Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n+ possible_resolutions_per_input = [] # Cannot proceed if any input is missing all resolutions\r\n+ break\r\n+ possible_resolutions_per_input.append(res_keys)\r\n+ else:\r\n+ log.warning(f\"Required input map type '{input_type}' for merge rule '{output_map_type}' was not processed.\")\r\n+ possible_resolutions_per_input = [] # Cannot proceed if any input type is missing\r\n+ break\r\n+\r\n+ if not possible_resolutions_per_input:\r\n+ log.warning(f\"Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. 
Skipping rule.\")\r\n+ continue\r\n+\r\n+ # Find the intersection of resolution keys across all required inputs\r\n+ common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n+\r\n+ if not common_resolutions:\r\n+ log.warning(f\"No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n+ continue\r\n+ log.debug(f\"Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n+ # --- End Common Resolution Logic ---\r\n+\r\n+\r\n+ # <<< LOOP THROUGH COMMON RESOLUTIONS >>>\r\n+ # Use the actual common_resolutions found\r\n+ res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n+ if not res_order:\r\n+ log.warning(f\"Common resolutions {common_resolutions} do not match any target resolutions in config. Skipping merge for '{output_map_type}'.\")\r\n+ continue\r\n+\r\n+ # Sort resolutions to process (optional, but nice for logs)\r\n+ sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n+\r\n+ # Get target pattern from config for filename formatting\r\n+ target_pattern = self.config.target_filename_pattern\r\n+\r\n+ for current_res_key in sorted_res_keys:\r\n+ log.debug(f\"Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n+ try:\r\n+ loaded_inputs = {}\r\n+ input_bit_depths = set()\r\n+ input_original_extensions = {} # Store original extensions for this resolution's inputs\r\n+\r\n+ # --- Load required input maps *at this specific resolution* AND get original extensions ---\r\n+ possible_to_load = True\r\n+ base_name = self.metadata['asset_name']\r\n+ target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B']\r\n+\r\n+ for map_type in required_input_types:\r\n+ res_details = self.processed_maps_details.get(map_type, {}).get(current_res_key)\r\n+ if not res_details or 'path' not in res_details:\r\n+ log.warning(f\"Input map '{map_type}' missing 
details or path for resolution '{current_res_key}'. Cannot merge for this resolution.\")\r\n+ possible_to_load = False; break\r\n+\r\n+ # Find original extension from classified data\r\n+ original_ext = '.png' # Default\r\n+ found_original = False\r\n+ for classified_map in self.classified_files[\"maps\"]:\r\n+ # Match based on the base map type (e.g., NRM matches NRM-1)\r\n+ if classified_map['map_type'].startswith(map_type):\r\n+ # Check if the processed path matches (to handle multiple variants if needed, though less likely here)\r\n+ # This assumes processed_maps_details path is relative to temp_dir\r\n+ processed_path_str = str(res_details['path'])\r\n+ classified_path_str = str(classified_map['source_path']) # This is the original source path relative to temp_dir root\r\n+ # A more robust check might involve comparing filenames if paths differ due to processing steps\r\n+ # For now, rely on the base map type match and grab the first extension found\r\n+ original_ext = classified_map.get('original_extension', '.png')\r\n+ found_original = True\r\n+ break # Found the first match for this map_type\r\n+ if not found_original:\r\n+ log.warning(f\"Could not reliably find original extension for merge input '{map_type}'. 
Defaulting to '.png'.\")\r\n+\r\n+ input_original_extensions[map_type] = original_ext\r\n+\r\n+ # Load the image\r\n+ input_file_path = self.temp_dir / res_details['path']\r\n+ read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n+ log.debug(f\"Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ img = cv2.imread(str(input_file_path), read_flag)\r\n+ if img is None:\r\n+ raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n+ loaded_inputs[map_type] = img\r\n+ input_bit_depths.add(res_details.get('bit_depth', 8))\r\n+\r\n+ if not possible_to_load: continue # Skip this resolution if inputs missing\r\n+\r\n+ # --- Determine dimensions and target_dim for threshold check ---\r\n+ first_map_type = next(iter(required_input_types)) # Get one map type to read dimensions\r\n+ h, w = loaded_inputs[first_map_type].shape[:2]\r\n+ # Get target_dim from the details of the first loaded input for this resolution\r\n+ first_res_details = self.processed_maps_details.get(first_map_type, {}).get(current_res_key)\r\n+ target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n+ num_target_channels = len(target_channels)\r\n+\r\n+ # <<< Re-calculate output_bit_depth based on THIS resolution's inputs >>>\r\n+ max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n+ output_bit_depth = 8\r\n+ if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n+ output_bit_depth = 16\r\n+ log.debug(f\"Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n+\r\n+ # Prepare channels (float32) (same logic as before)\r\n+ merged_channels_float32 = []\r\n+ # Use the defined target_channels list\r\n+ for target_channel in 
target_channels: # Iterate R, G, B (or specified) order\r\n+ source_map_type = inputs_mapping.get(target_channel)\r\n+ channel_data_float32 = None\r\n+ if source_map_type and source_map_type in loaded_inputs:\r\n+ # ... [Extract channel data as float32 as before] ...\r\n+ img_input = loaded_inputs[source_map_type]\r\n+ if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n+ elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n+ else: img_float = img_input.astype(np.float32)\r\n+ num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n+ if num_source_channels >= 3: # BGR Source\r\n+ if target_channel == 'R': channel_data_float32 = img_float[:, :, 2]\r\n+ elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n+ elif target_channel == 'B': channel_data_float32 = img_float[:, :, 0]\r\n+ elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n+ elif num_source_channels == 1 or len(img_float.shape) == 2: # Grayscale source\r\n+ channel_data_float32 = img_float.reshape(h, w)\r\n+ if channel_data_float32 is None: # Use default if needed\r\n+ default_val = defaults.get(target_channel)\r\n+ if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n+ channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n+ merged_channels_float32.append(channel_data_float32)\r\n+\r\n+\r\n+ # Merge channels (same as before)\r\n+ if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n+ merged_image_float32 = cv2.merge(merged_channels_float32)\r\n+\r\n+ # Final Data Type Conversion (based on recalculated output_bit_depth)\r\n+ img_final_merged = None\r\n+ if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 
65535.0).astype(np.uint16)\r\n+ else: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+\r\n+ # --- Determine Output Format based on Threshold, Input Hierarchy & Rules ---\r\n+ output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n+ primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n+ fmt_8bit_config = self.config.get_8bit_output_format()\r\n+ threshold = self.config.resolution_threshold_for_jpg\r\n+ force_lossless = output_map_type in self.config.force_lossless_map_types\r\n+\r\n+ if force_lossless:\r\n+ log.debug(f\"Format forced to lossless for merged map type '{output_map_type}'.\")\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: # Assume png or other lossless 16-bit format\r\n+ if output_format != \"png\":\r\n+ log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else: # 8-bit lossless\r\n+ output_format = fmt_8bit_config # Usually 'png'\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else:\r\n+ log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n+ output_format = \"png\"\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n+\r\n+ # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n+ elif output_bit_depth == 8 and target_dim >= threshold:\r\n+ output_format = 'jpg'\r\n+ output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n+ # --- Else: Apply Hierarchy/Rule-Based Logic ---\r\n+ else:\r\n+ involved_extensions = set(input_original_extensions.values())\r\n+ log.debug(f\"Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n+ # Hierarchy: EXR > TIF > PNG > JPG\r\n+ highest_format_str = 'jpg' # Start lowest\r\n+ if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n+ elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n+ elif '.png' in involved_extensions: highest_format_str = 'png'\r\n+\r\n+ # Determine final output format based on hierarchy and target bit depth\r\n+ final_output_format = highest_format_str\r\n+\r\n+ if highest_format_str == 'tif':\r\n+ if output_bit_depth == 16:\r\n+ final_output_format = primary_fmt_16 # Use configured 16-bit pref (EXR/PNG)\r\n+ log.debug(f\"Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n+ else: # 8-bit target\r\n+ final_output_format = 'png' # Force TIF input to PNG for 8-bit merge output\r\n+ log.debug(\"Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n+ else:\r\n+ log.debug(f\"Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold 
not met)\")\r\n+\r\n+ # Set format/params based on the determined final_output_format\r\n+ output_format = final_output_format\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ elif output_format == \"jpg\": # Should only happen if highest input was JPG and below threshold\r\n+ output_ext = \".jpg\"\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ else:\r\n+ log.error(f\"Unsupported final output format '{output_format}' determined for merged map '{output_map_type}'. Skipping save.\")\r\n+ continue\r\n+\r\n+ # --- JPG 8-bit Check (applies regardless of threshold logic) ---\r\n+ if output_format == \"jpg\" and output_bit_depth == 16:\r\n+ log.warning(f\"Output format is JPG, but merge rule resulted in 16-bit target. 
Forcing merged output to 8-bit for JPG save.\")\r\n+ img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ output_bit_depth = 8 # Correct the recorded bit depth\r\n+\r\n+ # --- Save Merged Map ---\r\n+ image_to_save = img_final_merged # Already converted to target bit depth (or forced to 8bit for JPG)\r\n+\r\n+ # Apply float16 conversion if needed for EXR\r\n+ if needs_float16 and image_to_save.dtype != np.float16:\r\n+ if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n+ elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n+ else: log.warning(f\"Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n+\r\n+ merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n+ merged_output_path_temp = self.temp_dir / merged_filename\r\n+ log.debug(f\"Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n+\r\n+ # --- Add save logic with fallback here ---\r\n+ saved_successfully = False\r\n+ actual_format_saved = output_format\r\n+ try:\r\n+ cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n+ log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n+ saved_successfully = True\r\n+ except Exception as save_err:\r\n+ log.error(f\"Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n+ # Try Fallback for merged map (similar to _process_maps fallback)\r\n+ if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n+ log.warning(f\"Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n+ # ... 
[ Implement fallback save logic here, converting image_to_save if needed ] ...\r\n+ actual_format_saved = fallback_fmt_16\r\n+ output_ext = f\".{fallback_fmt_16}\"\r\n+ merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n+ merged_output_path_temp = self.temp_dir / merged_filename\r\n+ save_params_fallback = []\r\n+ img_fallback = None\r\n+ target_fallback_dtype = np.uint16\r\n+\r\n+ if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n+\r\n+ if image_to_save.dtype == np.float16:\r\n+ if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(\"NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n+ img_scaled = image_to_save * 65535.0\r\n+ img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n+ elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n+ else: log.error(f\"Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n+\r\n+ try: cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err: log.error(f\"Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n+ # --- End Fallback Logic ---\r\n+\r\n+ # Record details if save successful\r\n+ if saved_successfully:\r\n+ self.merged_maps_details[output_map_type][current_res_key] = {\r\n+ \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n+ \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n+ }\r\n+ if output_map_type not in self.metadata[\"merged_maps\"]: 
self.metadata[\"merged_maps\"].append(output_map_type)\r\n+\r\n+ except Exception as merge_res_err:\r\n+ log.error(f\"Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n+ self.merged_maps_details.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n+\r\n+ log.info(\"Finished applying map merging rules.\")\r\n+\r\n+\r\n+ def _generate_metadata_file(self):\r\n+ \"\"\"Gathers all collected metadata and writes it to a JSON file.\"\"\"\r\n+ # ... (Implementation from Response #49) ...\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\":\r\n+ log.warning(\"Asset name unknown, metadata may be incomplete.\")\r\n+\r\n+ log.info(f\"Generating metadata file for '{self.metadata['asset_name']}'...\")\r\n+ final_metadata = self.metadata.copy()\r\n+\r\n+ final_metadata[\"processed_map_resolutions\"] = {}\r\n+ for map_type, res_dict in self.processed_maps_details.items():\r\n+ keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n+ if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys) # Basic sort\r\n+\r\n+ final_metadata[\"merged_map_resolutions\"] = {}\r\n+ for map_type, res_dict in self.merged_maps_details.items():\r\n+ keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n+ if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n+\r\n+ # Add processing info\r\n+ final_metadata[\"_processing_info\"] = {\r\n+ \"preset_used\": self.config.preset_name,\r\n+ \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n+ # Optionally add core config details used, carefully\r\n+ }\r\n+\r\n+ # Sort lists\r\n+ for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n+ if key in final_metadata and 
isinstance(final_metadata[key], list): final_metadata[key].sort()\r\n+\r\n+ metadata_filename = self.config.metadata_filename\r\n+ output_path = self.temp_dir / metadata_filename\r\n+ log.debug(f\"Writing metadata to: {output_path}\")\r\n+ try:\r\n+ with open(output_path, 'w', encoding='utf-8') as f:\r\n+ json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n+ log.info(f\"Metadata file '{metadata_filename}' generated successfully.\")\r\n+ self.metadata_file_path_temp = output_path # Store path for moving\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to write metadata file {output_path}: {e}\") from e\r\n+\r\n+\r\n+ def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n+ \"\"\"\r\n+ Calculates the aspect ratio change string (e.g., \"EVEN\", \"X133\") based on original prototype logic.\r\n+ Returns the string representation.\r\n+ \"\"\"\r\n+ if original_width <= 0 or original_height <= 0:\r\n+ log.warning(\"Cannot calculate aspect ratio change with zero original dimensions.\")\r\n+ return \"InvalidInput\"\r\n+\r\n+ # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)\r\n+ if resized_width <= 0 or resized_height <= 0:\r\n+ log.warning(\"Cannot calculate aspect ratio change with zero resized dimensions.\")\r\n+ return \"InvalidResize\"\r\n+\r\n+ # Original logic from user feedback\r\n+ width_change_percentage = ((resized_width - original_width) / original_width) * 100\r\n+ height_change_percentage = ((resized_height - original_height) / original_height) * 100\r\n+\r\n+ normalized_width_change = width_change_percentage / 100\r\n+ normalized_height_change = height_change_percentage / 100\r\n+\r\n+ normalized_width_change = min(max(normalized_width_change + 1, 0), 2)\r\n+ normalized_height_change = min(max(normalized_height_change + 1, 0), 2)\r\n+\r\n+ # Handle potential zero division if one dimension change is exactly 
-100% (normalized to 0)\r\n+ # If both are 0, aspect ratio is maintained. If one is 0, the other dominates.\r\n+ if normalized_width_change == 0 and normalized_height_change == 0:\r\n+ closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1\r\n+ elif normalized_width_change == 0:\r\n+ closest_value_to_one = abs(normalized_height_change)\r\n+ elif normalized_height_change == 0:\r\n+ closest_value_to_one = abs(normalized_width_change)\r\n+ else:\r\n+ closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))\r\n+\r\n+ # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0\r\n+ epsilon = 1e-9\r\n+ scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one\r\n+\r\n+ scaled_normalized_width_change = scale_factor * normalized_width_change\r\n+ scaled_normalized_height_change = scale_factor * normalized_height_change\r\n+\r\n+ output_width = round(scaled_normalized_width_change, decimals)\r\n+ output_height = round(scaled_normalized_height_change, decimals)\r\n+\r\n+ # Convert to int if exactly 1.0 after rounding\r\n+ if abs(output_width - 1.0) < epsilon: output_width = 1\r\n+ if abs(output_height - 1.0) < epsilon: output_height = 1\r\n+\r\n+ # Determine output string\r\n+ if original_width == original_height or abs(output_width - output_height) < epsilon:\r\n+ output = \"EVEN\"\r\n+ elif output_width != 1 and output_height == 1:\r\n+ output = f\"X{str(output_width).replace('.', '')}\"\r\n+ elif output_height != 1 and output_width == 1:\r\n+ output = f\"Y{str(output_height).replace('.', '')}\"\r\n+ else:\r\n+ # Both changed relative to each other\r\n+ output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n+\r\n+ log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n+ return 
output\r\n+\r\n+ def _sanitize_filename(self, name: str) -> str:\r\n+ \"\"\"Removes or replaces characters invalid for filenames/directory names.\"\"\"\r\n+ # ... (Implementation from Response #51) ...\r\n+ if not isinstance(name, str): name = str(name)\r\n+ name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n+ name = re.sub(r'_+', '_', name)\r\n+ name = name.strip('_')\r\n+ if not name: name = \"invalid_name\"\r\n+ return name\r\n+\r\n+ def _organize_output_files(self):\r\n+ \"\"\"Moves processed files from temp dir to the final output structure.\"\"\"\r\n+ # ... (Implementation from Response #51) ...\r\n+ if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n+ if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing.\")\r\n+ if not self.metadata.get(\"supplier_name\"): raise AssetProcessingError(\"Supplier name missing.\")\r\n+\r\n+ supplier_sanitized = self._sanitize_filename(self.metadata[\"supplier_name\"])\r\n+ asset_name_sanitized = self._sanitize_filename(self.metadata[\"asset_name\"])\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ log.info(f\"Organizing output files into: {final_dir}\")\r\n+ try:\r\n+ # Check if overwriting is allowed before potentially deleting existing dir\r\n+ if final_dir.exists() and self.overwrite:\r\n+ log.warning(f\"Output directory exists and overwrite is True: {final_dir}. 
Removing existing directory.\")\r\n+ try:\r\n+ shutil.rmtree(final_dir)\r\n+ except Exception as rm_err:\r\n+ raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} during overwrite: {rm_err}\") from rm_err\r\n+ elif final_dir.exists() and not self.overwrite:\r\n+ # This case should ideally be caught by the skip logic earlier,\r\n+ # but adding a warning here as a safeguard.\r\n+ log.warning(f\"Output directory exists: {final_dir}. Overwriting (unexpected - should have been skipped).\")\r\n+\r\n+ final_dir.mkdir(parents=True, exist_ok=True) # Create after potential removal\r\n+ except Exception as e:\r\n+ # Catch potential errors during mkdir if rmtree failed partially?\r\n+ if not isinstance(e, AssetProcessingError): # Avoid wrapping already specific error\r\n+ raise AssetProcessingError(f\"Failed to create final dir {final_dir}: {e}\") from e\r\n+ else:\r\n+ raise # Re-raise the AssetProcessingError from rmtree\r\n+\r\n+ def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n+ if not src_rel_path: log.warning(f\"Missing src path for {file_desc}.\"); return\r\n+ source_abs = self.temp_dir / src_rel_path\r\n+ dest_abs = dest_dir / src_rel_path.name\r\n+ try:\r\n+ if source_abs.exists():\r\n+ log.debug(f\"Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n+ dest_dir.mkdir(parents=True, exist_ok=True) # Ensure sub-dir exists (for Extra/)\r\n+ shutil.move(str(source_abs), str(dest_abs))\r\n+ else: log.warning(f\"Source file missing for {file_desc}: {source_abs}\")\r\n+ except Exception as e: log.error(f\"Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n+\r\n+ # Move maps, merged maps, models, metadata\r\n+ for details_dict in [self.processed_maps_details, self.merged_maps_details]:\r\n+ for map_type, res_dict in details_dict.items():\r\n+ if 'error' in res_dict: continue\r\n+ for res_key, details in res_dict.items():\r\n+ if isinstance(details, dict) 
and 'path' in details: _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n+ for model_info in self.classified_files.get('models', []): _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n+ if self.metadata_file_path_temp: _safe_move(self.metadata_file_path_temp.relative_to(self.temp_dir), final_dir, \"metadata file\")\r\n+\r\n+ # Move extra/ignored files\r\n+ extra_subdir_name = self.config.extra_files_subdir\r\n+ extra_dir = final_dir / extra_subdir_name\r\n+ files_to_move_extra = self.classified_files.get('extra', []) + self.classified_files.get('ignored', [])\r\n+ if files_to_move_extra:\r\n+ log.debug(f\"Moving {len(files_to_move_extra)} files to '{extra_subdir_name}/'...\")\r\n+ try:\r\n+ extra_dir.mkdir(exist_ok=True)\r\n+ for file_info in files_to_move_extra: _safe_move(file_info.get('source_path'), extra_dir, f\"extra file ({file_info.get('reason', 'Unknown')})\")\r\n+ except Exception as e: log.error(f\"Failed creating/moving to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n+\r\n+ log.info(f\"Finished organizing output for '{asset_name_sanitized}'.\")\r\n+\r\n+\r\n+ def _cleanup_workspace(self):\r\n+ \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n+ # ... 
(Implementation from Response #45) ...\r\n+ if self.temp_dir and self.temp_dir.exists():\r\n+ try:\r\n+ log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n+ shutil.rmtree(self.temp_dir)\r\n+ self.temp_dir = None\r\n+ log.debug(\"Temporary workspace cleaned up successfully.\")\r\n+ except Exception as e:\r\n+ log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n+\r\n+ # --- Prediction Method ---\r\n+ def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n+ \"\"\"\r\n+ Predicts the final output structure (supplier, asset name) and attempts\r\n+ to predict output filenames for potential map files based on naming conventions.\r\n+ Does not perform full processing or image loading.\r\n+\r\n+ Returns:\r\n+ tuple[str | None, str | None, dict[str, str] | None]:\r\n+ (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n+ where file_predictions_dict maps input filename -> predicted output filename.\r\n+ Returns None if prediction fails critically.\r\n+ \"\"\"\r\n+ log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n+ try:\r\n+ # 1. Get Supplier Name\r\n+ supplier_name = self.config.supplier_name\r\n+ if not supplier_name:\r\n+ log.warning(\"Supplier name not found in configuration during prediction.\")\r\n+ return None\r\n+\r\n+ # 2. 
List Input Filenames/Stems\r\n+ candidate_stems = set() # Use set for unique stems\r\n+ filenames = []\r\n+ if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n+ try:\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ # Get only filenames, ignore directories\r\n+ filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n+ except zipfile.BadZipFile:\r\n+ log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n+ return None\r\n+ except Exception as zip_err:\r\n+ log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n+ return None # Cannot proceed if we can't list files\r\n+ elif self.input_path.is_dir():\r\n+ try:\r\n+ for item in self.input_path.iterdir():\r\n+ if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n+ filenames.append(item.name)\r\n+ # Note: Not walking subdirs for prediction to keep it fast\r\n+ except Exception as dir_err:\r\n+ log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n+ return None\r\n+\r\n+ if not filenames:\r\n+ log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n+ return None # Return None if no files found\r\n+\r\n+ # 3. 
Lightweight Classification for Stems and Potential Maps\r\n+ map_type_mapping = self.config.map_type_mapping\r\n+ model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n+ separator = self.config.source_naming_separator\r\n+ processed_filenames = set() # Track full filenames processed\r\n+ potential_map_files = {} # Store fname -> potential map_type\r\n+\r\n+ for fname in filenames:\r\n+ if fname in processed_filenames: continue\r\n+\r\n+ fstem = Path(fname).stem\r\n+ fstem_lower = fstem.lower()\r\n+ name_parts = fstem_lower.split(separator)\r\n+\r\n+ # Check map rules first\r\n+ map_matched = False\r\n+ for mapping_rule in map_type_mapping:\r\n+ source_keywords, standard_map_type = mapping_rule\r\n+ if standard_map_type not in self.config.standard_map_types: continue\r\n+ for keyword in source_keywords:\r\n+ kw_lower = keyword.lower().strip('*')\r\n+ if kw_lower in name_parts:\r\n+ is_exact_match = any(part == kw_lower for part in name_parts)\r\n+ if is_exact_match:\r\n+ candidate_stems.add(fstem) # Add unique stem\r\n+ potential_map_files[fname] = standard_map_type # Store potential type\r\n+ processed_filenames.add(fname)\r\n+ map_matched = True\r\n+ break # Found keyword match for this rule\r\n+ if map_matched: break # Found a rule match for this file\r\n+ if map_matched: continue # Move to next filename if identified as map\r\n+\r\n+ # Check model patterns if not a map\r\n+ for pattern in model_patterns:\r\n+ if fnmatch(fname.lower(), pattern.lower()):\r\n+ candidate_stems.add(fstem) # Still add stem for base name determination\r\n+ processed_filenames.add(fname)\r\n+ # Don't add models to potential_map_files\r\n+ break # Found model match\r\n+\r\n+ # Note: Files matching neither maps nor models are ignored for prediction details\r\n+\r\n+ candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n+ if not candidate_stems_list:\r\n+ log.warning(f\"Prediction: No relevant map/model stems found in 
{self.input_path.name}. Using input name as fallback.\")\r\n+ # Fallback: Use the input path's name itself if no stems found\r\n+ base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ determined_base_name = base_name_fallback\r\n+ else:\r\n+ # 4. Replicate _determine_base_metadata logic for base name\r\n+ determined_base_name = \"UnknownAssetName\"\r\n+ base_index = self.config.source_naming_indices.get('base_name')\r\n+\r\n+ if isinstance(base_index, int):\r\n+ potential_base_names = set()\r\n+ for stem in candidate_stems_list: # Iterate over the list\r\n+ parts = stem.split(separator)\r\n+ if len(parts) > base_index:\r\n+ potential_base_names.add(parts[base_index])\r\n+ if len(potential_base_names) == 1:\r\n+ determined_base_name = potential_base_names.pop()\r\n+ elif len(potential_base_names) > 1:\r\n+ log.debug(f\"Prediction: Multiple potential base names from index {base_index}, using common prefix.\")\r\n+ determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+ # else: Use common prefix below\r\n+\r\n+ if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n+ determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+\r\n+ # 5. Sanitize Names\r\n+ final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n+ final_supplier_name = self._sanitize_filename(supplier_name)\r\n+\r\n+ # 6. 
Predict Output Filenames\r\n+ file_predictions = {}\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ # Use highest resolution key as a placeholder for prediction\r\n+ highest_res_key = \"Res?\" # Fallback\r\n+ if self.config.image_resolutions:\r\n+ highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n+\r\n+ for input_fname, map_type in potential_map_files.items():\r\n+ # Assume PNG for prediction, extension might change based on bit depth rules later\r\n+ # but this gives a good idea of the renaming.\r\n+ # A more complex prediction could check bit depth rules.\r\n+ predicted_ext = \"png\" # Simple assumption for preview\r\n+ try:\r\n+ predicted_fname = target_pattern.format(\r\n+ base_name=final_base_name,\r\n+ map_type=map_type,\r\n+ resolution=highest_res_key, # Use placeholder resolution\r\n+ ext=predicted_ext\r\n+ )\r\n+ file_predictions[input_fname] = predicted_fname\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n+ file_predictions[input_fname] = \"[Filename Format Error]\"\r\n+\r\n+\r\n+ log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n+ return final_supplier_name, final_base_name, file_predictions\r\n+\r\n+ except Exception as e:\r\n+ log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n+ return None\r\n+\r\n+\r\n+ # --- New Detailed Prediction Method ---\r\n+ def get_detailed_file_predictions(self) -> list[dict] | None:\r\n+ \"\"\"\r\n+ Performs extraction and classification to provide a detailed list of all\r\n+ files found within the asset and their predicted status/output name.\r\n+ Does not perform image processing or file moving.\r\n+\r\n+ Returns:\r\n+ list[dict] | None: A list of dictionaries, each representing a file:\r\n+ {'original_path': str, 'predicted_name': str | None, 'status': str, 'details': str | None}\r\n+ Returns None if a critical error occurs during setup/classification.\r\n+ \"\"\"\r\n+ log.info(f\"Getting detailed file predictions for: {self.input_path.name}\")\r\n+ results = []\r\n+ asset_base_name = \"UnknownAssetName\" # Fallback\r\n+\r\n+ try:\r\n+ # --- Perform necessary setup and classification ---\r\n+ self._setup_workspace()\r\n+ self._extract_input()\r\n+ self._inventory_and_classify_files()\r\n+ self._determine_base_metadata() # Needed for base name prediction\r\n+ asset_base_name = self.metadata.get(\"asset_name\", asset_base_name)\r\n+\r\n+ # --- Prepare for filename prediction ---\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n+ if self.config.image_resolutions:\r\n+ highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n+\r\n+ # --- Process classified files ---\r\n+ # Maps\r\n+ for map_info in self.classified_files.get(\"maps\", []):\r\n+ 
original_path_str = str(map_info.get(\"source_path\", \"UnknownPath\"))\r\n+ map_type = map_info.get(\"map_type\", \"UnknownType\")\r\n+ # Predicted name for maps should just be the base asset name\r\n+ predicted_name_display = asset_base_name\r\n+ # Concise details\r\n+ details = f\"[{map_type}]\"\r\n+ if map_info.get(\"is_16bit_source\"):\r\n+ details += \" (16-bit)\"\r\n+\r\n+ # Still try to format the full name internally for error checking, but don't display it\r\n+ try:\r\n+ predicted_ext = \"png\" # Assumption for format check\r\n+ _ = target_pattern.format(\r\n+ base_name=asset_base_name,\r\n+ map_type=map_type,\r\n+ resolution=highest_res_key,\r\n+ ext=predicted_ext\r\n+ )\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction format error for {original_path_str}: {fmt_err}\")\r\n+ predicted_name_display = \"[Format Error]\" # Show error in name field\r\n+ details += f\" (Format Key Error: {fmt_err})\"\r\n+ except Exception as pred_err:\r\n+ log.warning(f\"Prediction error for {original_path_str}: {pred_err}\")\r\n+ predicted_name_display = \"[Prediction Error]\" # Show error in name field\r\n+ details += f\" (Error: {pred_err})\"\r\n+\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_name\": predicted_name_display, # Use the base name or error\r\n+ \"status\": \"Mapped\",\r\n+ \"details\": details # Use concise details\r\n+ })\r\n+\r\n+ # Models\r\n+ for model_info in self.classified_files.get(\"models\", []):\r\n+ original_path_str = str(model_info.get(\"source_path\", \"UnknownPath\"))\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_name\": Path(original_path_str).name, # Models usually keep original name\r\n+ \"status\": \"Model\",\r\n+ \"details\": \"[Model]\" # Concise detail\r\n+ })\r\n+\r\n+ # Extra\r\n+ for extra_info in self.classified_files.get(\"extra\", []):\r\n+ original_path_str = str(extra_info.get(\"source_path\", \"UnknownPath\"))\r\n+ reason = 
extra_info.get('reason', 'Unknown reason')\r\n+ # Determine status and details based on the reason\r\n+ if reason == 'Unrecognised': # Corrected string check\r\n+ status = \"Unrecognised\"\r\n+ details = \"[Unrecognised]\"\r\n+ else:\r\n+ status = \"Extra\"\r\n+ details = f\"Extra ({reason})\" # Show the pattern match reason\r\n+\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_name\": Path(original_path_str).name, # Extra/Unrecognised files keep original name\r\n+ \"status\": status,\r\n+ \"details\": details\r\n+ })\r\n+\r\n+ # Ignored\r\n+ for ignored_info in self.classified_files.get(\"ignored\", []):\r\n+ original_path_str = str(ignored_info.get(\"source_path\", \"UnknownPath\"))\r\n+ reason = ignored_info.get('reason', 'Unknown reason')\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_name\": None, # Ignored files have no output name\r\n+ \"status\": \"Ignored\",\r\n+ \"details\": f\"Ignored ({reason})\" # Keep reason for ignored files\r\n+ })\r\n+\r\n+ log.info(f\"Detailed prediction complete for {self.input_path.name}. 
Found {len(results)} files.\")\r\n+ return results\r\n+\r\n+ except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n+ # Log critical errors during the prediction process\r\n+ log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n+ # Optionally add a single error entry to results?\r\n+ # results.append({\"original_path\": self.input_path.name, \"predicted_name\": None, \"status\": \"Error\", \"details\": str(e)})\r\n+ # return results # Or return None to indicate failure\r\n+ return None # Indicate critical failure\r\n+ finally:\r\n+ # Ensure cleanup always happens\r\n+ self._cleanup_workspace()\r\n+\r\n+\r\n+# --- End of AssetProcessor Class ---\n\\ No newline at end of file\n" }, { "date": 1745226283363, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -782,9 +782,11 @@\n if stats: self.metadata[\"image_stats_1k\"][map_type] = stats\r\n else: log.warning(f\"Stats calculation failed for {map_type} at {res_key}.\")\r\n # Calculate aspect change string on the lowest processed resolution, only once per asset\r\n lowest_res_key = min(resolutions, key=resolutions.get)\r\n+ log.debug(f\"Aspect ratio check: res_key='{res_key}', lowest_res_key='{lowest_res_key}', current_aspect_string='{self.metadata['aspect_ratio_change_string']}'\")\r\n if self.metadata[\"aspect_ratio_change_string\"] == \"N/A\" and res_key == lowest_res_key:\r\n+ log.debug(\"Aspect ratio calculation condition met.\")\r\n try:\r\n aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n self.metadata[\"aspect_ratio_change_string\"] = aspect_string\r\n log.debug(f\"Stored aspect ratio change string using lowest res '{res_key}': '{aspect_string}'\")\r\n" }, { "date": 1745261144619, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -75,9 +75,15 @@\n return None\r\n try:\r\n # Use float64 for 
calculations to avoid potential overflow/precision issues\r\n data_float = image_data.astype(np.float64)\r\n+ log.debug(f\"Stats calculation: data_float dtype: {data_float.dtype}, shape: {data_float.shape}\")\r\n+ # Log a few sample values to check range, especially for 16-bit\r\n+ if data_float.size > 0:\r\n+ sample_values = data_float.flatten()[:10] # Get first 10 values\r\n+ log.debug(f\"Stats calculation: Sample values (first 10): {sample_values.tolist()}\")\r\n \r\n+\r\n if len(data_float.shape) == 2: # Grayscale (H, W)\r\n min_val = float(np.min(data_float))\r\n max_val = float(np.max(data_float))\r\n mean_val = float(np.mean(data_float))\r\n@@ -1811,3631 +1817,5 @@\n # Ensure cleanup always happens\r\n self._cleanup_workspace()\r\n \r\n \r\n-# --- End of AssetProcessor Class ---\n-# asset_processor.py\r\n-\r\n-import os\r\n-import shutil\r\n-import tempfile\r\n-import zipfile\r\n-import logging\r\n-import json\r\n-import re\r\n-import time\r\n-from pathlib import Path\r\n-from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n-\r\n-# Attempt to import image processing libraries\r\n-try:\r\n- import cv2\r\n- import numpy as np\r\n-except ImportError:\r\n- print(\"ERROR: Missing required libraries. Please install opencv-python and numpy:\")\r\n- print(\"pip install opencv-python numpy\")\r\n- exit(1) # Exit if essential libraries are missing\r\n-\r\n-# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n-try:\r\n- import OpenEXR\r\n- import Imath\r\n- _HAS_OPENEXR = True\r\n-except ImportError:\r\n- _HAS_OPENEXR = False\r\n- # Log this information - basic EXR might still work via OpenCV\r\n- logging.debug(\"Optional 'OpenEXR' python package not found. 
EXR saving relies on OpenCV's built-in support.\")\r\n-\r\n-\r\n-# Assuming Configuration class is in configuration.py\r\n-try:\r\n- from configuration import Configuration, ConfigurationError\r\n-except ImportError:\r\n- print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n- print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n- exit(1)\r\n-\r\n-# Use logger defined in main.py (or configure one here if run standalone)\r\n-log = logging.getLogger(__name__)\r\n-# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\n-if not log.hasHandlers():\r\n- logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n-\r\n-\r\n-# --- Custom Exception ---\r\n-class AssetProcessingError(Exception):\r\n- \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n- pass\r\n-\r\n-# --- Helper Functions ---\r\n-def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n- \"\"\"Calculates dimensions maintaining aspect ratio, fitting within target_max_dim.\"\"\"\r\n- if orig_w <= 0 or orig_h <= 0: return (target_max_dim, target_max_dim) # Avoid division by zero\r\n-\r\n- ratio = orig_w / orig_h\r\n- if ratio > 1: # Width is dominant\r\n- target_w = target_max_dim\r\n- target_h = max(1, round(target_w / ratio)) # Ensure height is at least 1\r\n- else: # Height is dominant or square\r\n- target_h = target_max_dim\r\n- target_w = max(1, round(target_h * ratio)) # Ensure width is at least 1\r\n- return int(target_w), int(target_h)\r\n-\r\n-def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n- \"\"\"\r\n- Calculates min, max, mean for a given numpy image array.\r\n- Handles grayscale and multi-channel images. 
Converts to float64 for calculation.\r\n- \"\"\"\r\n- if image_data is None:\r\n- log.warning(\"Attempted to calculate stats on None image data.\")\r\n- return None\r\n- try:\r\n- # Use float64 for calculations to avoid potential overflow/precision issues\r\n- data_float = image_data.astype(np.float64)\r\n-\r\n- if len(data_float.shape) == 2: # Grayscale (H, W)\r\n- min_val = float(np.min(data_float))\r\n- max_val = float(np.max(data_float))\r\n- mean_val = float(np.mean(data_float))\r\n- stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n- elif len(data_float.shape) == 3: # Color (H, W, C)\r\n- channels = data_float.shape[2]\r\n- min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n- max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n- mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n- # Assuming BGR(A?) order from OpenCV, report as list [B, G, R, (A)]\r\n- stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated {channels}-Channel Stats: Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n- else:\r\n- log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n- return None\r\n- return stats\r\n- except Exception as e:\r\n- log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n- return {\"error\": str(e)}\r\n-\r\n-\r\n-from collections import defaultdict # Added for grouping\r\n-\r\n-# --- Helper function ---\r\n-def _get_base_map_type(target_map_string: str) -> str:\r\n- \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n- match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n- if match:\r\n- return match.group(1).upper()\r\n- return target_map_string.upper() # Fallback if no number suffix\r\n-\r\n-# --- Asset Processor Class 
---\r\n-class AssetProcessor:\r\n- \"\"\"\r\n- Handles the processing pipeline for a single asset (ZIP or folder).\r\n- \"\"\"\r\n- # Define the list of known grayscale map types (adjust as needed)\r\n- GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n-\r\n- def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n- \"\"\"\r\n- Initializes the processor for a given input asset.\r\n-\r\n- Args:\r\n- input_path: Path to the input ZIP file or folder.\r\n- config: The loaded Configuration object.\r\n- output_base_path: The base directory where processed output will be saved.\r\n- overwrite: If True, forces reprocessing even if output exists.\r\n- \"\"\"\r\n- if not isinstance(input_path, Path): input_path = Path(input_path)\r\n- if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n- if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n-\r\n- if not input_path.exists():\r\n- raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n- if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n- raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n-\r\n- self.input_path: Path = input_path\r\n- self.config: Configuration = config\r\n- self.output_base_path: Path = output_base_path\r\n- self.overwrite: bool = overwrite # Store the overwrite flag\r\n-\r\n- self.temp_dir: Path | None = None # Path to the temporary working directory\r\n- self.classified_files: dict[str, list[dict]] = {\r\n- \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n- }\r\n- self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n- self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n- self.metadata_file_path_temp: Path | None = None\r\n-\r\n- # Initialize metadata collected during processing\r\n- 
self.metadata: dict = {\r\n- \"asset_name\": \"Unknown\",\r\n- \"supplier_name\": self.config.supplier_name,\r\n- \"asset_category\": self.config._core_settings.get('DEFAULT_ASSET_CATEGORY', \"Texture\"),\r\n- \"archetype\": \"Unknown\",\r\n- \"maps_present\": [],\r\n- \"merged_maps\": [],\r\n- \"shader_features\": [],\r\n- \"source_files_in_extra\": [],\r\n- \"image_stats_1k\": {},\r\n- \"map_details\": {},\r\n- \"aspect_ratio_change_string\": \"N/A\" # Replaces output_scaling_factors\r\n- # Processing info added in _generate_metadata_file\r\n- }\r\n-\r\n- log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n-\r\n- def process(self) -> str:\r\n- \"\"\"\r\n- Executes the full processing pipeline for the asset.\r\n- Returns:\r\n- str: Status (\"processed\", \"skipped\").\r\n- \"\"\"\r\n- log.info(f\"Processing asset: {self.input_path.name}\")\r\n- try:\r\n- self._setup_workspace()\r\n- self._extract_input()\r\n- self._inventory_and_classify_files()\r\n- self._determine_base_metadata()\r\n-\r\n- # --- Check if asset should be skipped ---\r\n- # Ensure asset_name and supplier_name were determined before checking\r\n- asset_name = self.metadata.get(\"asset_name\")\r\n- supplier_name = self.metadata.get(\"supplier_name\")\r\n-\r\n- # Only check for skipping if overwrite is False AND we have valid names\r\n- if not self.overwrite and asset_name and asset_name != \"UnknownAssetName\" and supplier_name:\r\n- supplier_sanitized = self._sanitize_filename(supplier_name)\r\n- asset_name_sanitized = self._sanitize_filename(asset_name)\r\n- final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- metadata_file_path = final_dir / self.config.metadata_filename\r\n-\r\n- if final_dir.exists() and metadata_file_path.is_file():\r\n- log.info(f\"Output directory and metadata found for '{asset_name_sanitized}' and overwrite is False. 
Skipping.\")\r\n- # No need to call cleanup here, the finally block will handle it.\r\n- return \"skipped\" # Return status\r\n- elif self.overwrite:\r\n- # Log only if asset name is known, otherwise it's redundant with the initial processing log\r\n- known_asset_name = self.metadata.get('asset_name', self.input_path.name)\r\n- # Avoid logging overwrite message if name is still unknown\r\n- if known_asset_name != \"UnknownAssetName\" and known_asset_name != self.input_path.name:\r\n- log.info(f\"Overwrite flag is set for '{known_asset_name}'. Processing will continue even if output exists.\")\r\n- # --- End Skip Check ---\r\n-\r\n- # Continue with processing if not skipped\r\n- self._process_maps()\r\n- self._merge_maps()\r\n- self._generate_metadata_file()\r\n- self._organize_output_files()\r\n- log.info(f\"Asset processing completed successfully for: {self.metadata.get('asset_name', self.input_path.name)}\")\r\n- return \"processed\" # Return status\r\n- except Exception as e:\r\n- # Log error with traceback if it hasn't been logged already\r\n- if not isinstance(e, (AssetProcessingError, ConfigurationError)): # Avoid double logging known types\r\n- log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name}: {e}\")\r\n- # Ensure error is propagated\r\n- if not isinstance(e, AssetProcessingError):\r\n- raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n- else:\r\n- raise # Re-raise AssetProcessingError or ConfigurationError\r\n- finally:\r\n- # Ensure cleanup always happens\r\n- self._cleanup_workspace()\r\n-\r\n- def _setup_workspace(self):\r\n- \"\"\"Creates a temporary directory for processing.\"\"\"\r\n- try:\r\n- self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n- log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n-\r\n- def 
_extract_input(self):\r\n- \"\"\"Extracts ZIP or copies folder contents to the temporary workspace.\"\"\"\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n-\r\n- log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n- try:\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- zip_ref.extractall(self.temp_dir)\r\n- log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n- elif self.input_path.is_dir():\r\n- log.debug(f\"Copying directory contents: {self.input_path}\")\r\n- for item in self.input_path.iterdir():\r\n- destination = self.temp_dir / item.name\r\n- if item.is_dir():\r\n- # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n- try:\r\n- shutil.copytree(item, destination, dirs_exist_ok=True)\r\n- except TypeError: # Fallback for older Python\r\n- if not destination.exists():\r\n- shutil.copytree(item, destination)\r\n- else:\r\n- log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n-\r\n- else:\r\n- shutil.copy2(item, destination)\r\n- log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n- except zipfile.BadZipFile:\r\n- raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n-\r\n- def _inventory_and_classify_files(self):\r\n- \"\"\"\r\n- Scans workspace, classifies files according to preset rules, handling\r\n- 16-bit prioritization and multiple variants of the same base map type.\r\n- \"\"\"\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n-\r\n- log.info(\"Scanning and classifying 
files...\")\r\n- log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n- all_files_rel = []\r\n- for root, _, files in os.walk(self.temp_dir):\r\n- root_path = Path(root)\r\n- for file in files:\r\n- full_path = root_path / file\r\n- relative_path = full_path.relative_to(self.temp_dir)\r\n- all_files_rel.append(relative_path)\r\n-\r\n- log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n-\r\n- # --- Initialization ---\r\n- processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n- potential_map_candidates = [] # List to store potential map file info\r\n- # Reset classified files (important if this method is ever called multiple times)\r\n- self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n-\r\n-\r\n- # --- Step 1: Identify Explicit 'Extra' Files ---\r\n- log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n- compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n- log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path in processed_files: continue\r\n- for compiled_regex in compiled_extra_regex:\r\n- if compiled_regex.search(file_rel_path.name):\r\n- log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n- self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n- processed_files.add(file_rel_path)\r\n- log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n- break # Stop checking extra patterns for this file\r\n-\r\n- # --- Step 2: Identify Model Files ---\r\n- log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n- compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n- log.debug(f\" 
Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path in processed_files: continue\r\n- for compiled_regex in compiled_model_regex:\r\n- if compiled_regex.search(file_rel_path.name):\r\n- log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n- self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n- processed_files.add(file_rel_path)\r\n- log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n- break # Stop checking model patterns for this file\r\n-\r\n- # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n- log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n- # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n- compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n-\r\n- for file_rel_path in all_files_rel:\r\n- # Skip files already classified as Extra or Model\r\n- if file_rel_path in processed_files:\r\n- continue\r\n-\r\n- file_stem = file_rel_path.stem\r\n- match_found = False\r\n-\r\n- # Iterate through base types and their associated regex tuples\r\n- for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n- if match_found: break # Stop checking types for this file once matched\r\n-\r\n- # Get the original keywords list for the current rule index\r\n- # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n- original_rule = None\r\n- # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n- if regex_tuples:\r\n- current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n- if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n- rule_candidate = 
self.config.map_type_mapping[current_rule_index]\r\n- # Verify it's the correct rule by checking target_type\r\n- if rule_candidate.get(\"target_type\") == base_map_type:\r\n- original_rule = rule_candidate\r\n- else:\r\n- log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. Searching...\")\r\n- # Fallback search if index doesn't match (shouldn't happen ideally)\r\n- for idx, rule in enumerate(self.config.map_type_mapping):\r\n- if rule.get(\"target_type\") == base_map_type:\r\n- original_rule = rule\r\n- log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n- break\r\n-\r\n- original_keywords_list = []\r\n- if original_rule and 'keywords' in original_rule:\r\n- original_keywords_list = original_rule['keywords']\r\n- else:\r\n- log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. Keyword indexing may fail.\")\r\n-\r\n- for kw_regex, original_keyword, rule_index in regex_tuples:\r\n- if kw_regex.search(file_stem):\r\n- log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n-\r\n- # Find the index of the matched keyword within its rule's list\r\n- keyword_index_in_rule = -1 # Default if not found\r\n- if original_keywords_list:\r\n- try:\r\n- # Use the original_keyword string directly\r\n- keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n- except ValueError:\r\n- log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? 
{original_keywords_list}\")\r\n- else:\r\n- log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n-\r\n- # Add candidate only if not already added\r\n- if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n- potential_map_candidates.append({\r\n- 'source_path': file_rel_path,\r\n- 'matched_keyword': original_keyword,\r\n- 'base_map_type': base_map_type,\r\n- 'preset_rule_index': rule_index,\r\n- 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n- 'is_16bit_source': False\r\n- })\r\n- else:\r\n- log.warning(f\" '{file_rel_path}' was already added as a candidate? Skipping duplicate add.\")\r\n-\r\n- match_found = True\r\n- break # Stop checking regex tuples for this base_type once matched\r\n-\r\n- log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n-\r\n- # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n- log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n- compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n- for file_rel_path in all_files_rel:\r\n- # Skip if already processed or already identified as a candidate\r\n- if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n- continue\r\n-\r\n- for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n- log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n- match = compiled_regex.search(file_rel_path.name) # Store result\r\n- if match:\r\n- log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n- potential_map_candidates.append({\r\n- 'source_path': file_rel_path,\r\n- 'matched_keyword': 'N/A (16bit variant)', # 
Placeholder keyword\r\n- 'base_map_type': base_type,\r\n- 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n- 'is_16bit_source': True # Mark as 16-bit immediately\r\n- })\r\n- log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n- # Don't add to processed_files yet, let Step 4 handle filtering\r\n- break # Stop checking bit depth patterns for this file\r\n-\r\n- log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n-\r\n- # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n- log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n- compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n- candidates_to_keep = []\r\n- candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n-\r\n- # Mark 16-bit candidates\r\n- for candidate in potential_map_candidates:\r\n- base_type = candidate['base_map_type']\r\n- # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n- if base_type in compiled_bit_depth_regex:\r\n- if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n- candidate['is_16bit_source'] = True\r\n- log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n-\r\n-\r\n- # Identify base types that have a 16-bit version present\r\n- prioritized_16bit_bases = {\r\n- candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n- }\r\n- log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n-\r\n- # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n- for candidate in potential_map_candidates:\r\n- if candidate['is_16bit_source']:\r\n- candidates_to_keep.append(candidate)\r\n- log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} 
({candidate['base_map_type']})\")\r\n- elif candidate['base_map_type'] not in prioritized_16bit_bases:\r\n- candidates_to_keep.append(candidate)\r\n- log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n- else:\r\n- # This is an 8-bit candidate whose 16-bit counterpart exists\r\n- candidates_to_ignore.append(candidate)\r\n- log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n-\r\n- # Add ignored 8-bit files to the main ignored list\r\n- for ignored_candidate in candidates_to_ignore:\r\n- self.classified_files[\"ignored\"].append({\r\n- 'source_path': ignored_candidate['source_path'],\r\n- 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n- })\r\n- processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n-\r\n- log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n-\r\n- # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n- log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n- # from collections import defaultdict # Moved import to top of file\r\n- grouped_by_base_type = defaultdict(list)\r\n- for candidate in candidates_to_keep:\r\n- grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n-\r\n- final_map_list = []\r\n- for base_map_type, candidates in grouped_by_base_type.items():\r\n- log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n-\r\n- # --- NEW SORTING LOGIC ---\r\n- # Sort candidates based on:\r\n- # 1. The index of the rule object in the preset's map_type_mapping list.\r\n- # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n- # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n- candidates.sort(key=lambda c: (\r\n- c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n- c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n- str(c['source_path'])\r\n- ))\r\n- # --- END NEW SORTING LOGIC ---\r\n-\r\n- # Removed diagnostic log\r\n-\r\n- # Assign suffixes and add to the final map list\r\n- for i, final_candidate in enumerate(candidates): # Use the directly sorted list\r\n- # Determine final map type based on the new rule\r\n- if base_map_type in self.config.respect_variant_map_types: # Check the new config list\r\n- # Always assign suffix for types in the list (if more than one or only one)\r\n- final_map_type = f\"{base_map_type}-{i + 1}\"\r\n- else:\r\n- # Never assign suffix for types NOT in the list\r\n- final_map_type = base_map_type\r\n-\r\n- final_map_list.append({\r\n- \"map_type\": final_map_type,\r\n- \"source_path\": final_candidate[\"source_path\"],\r\n- \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n- \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n- \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n- })\r\n- processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n- log.debug(f\" Final Map: Type='{final_map_type}', Source='{final_candidate['source_path']}', Keyword='{final_candidate['matched_keyword']}', 16bit={final_candidate['is_16bit_source']}\")\r\n-\r\n- self.classified_files[\"maps\"] = final_map_list\r\n-\r\n- # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n- log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n- remaining_count = 0\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path not in processed_files:\r\n- log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n- 
self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})\r\n- remaining_count += 1\r\n- # No need to add to processed_files here, it's the final step\r\n- log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n-\r\n- # --- Final Summary ---\r\n- # Update metadata list of files actually moved to extra (including Unrecognised and Ignored)\r\n- self.metadata[\"source_files_in_extra\"] = sorted([\r\n- str(f['source_path']) for f in self.classified_files['extra'] + self.classified_files['ignored']\r\n- ])\r\n- log.info(f\"File classification complete.\")\r\n- log.debug(\"--- Final Classification Summary (v2) ---\")\r\n- map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n- model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n- extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n- ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n- log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n- log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n- log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n- log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n- log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n-\r\n-\r\n- def _determine_base_metadata(self):\r\n- \"\"\"Determines base_name, asset_category, and archetype.\"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- log.info(\"Determining base metadata...\")\r\n-\r\n- # --- Determine Asset Category ---\r\n- self.metadata[\"asset_category\"] = self.config.default_asset_category # Start with default\r\n- if 
self.classified_files[\"models\"]:\r\n- self.metadata[\"asset_category\"] = \"Asset\"\r\n- log.debug(\"Category set to 'Asset' due to model file presence.\")\r\n- else:\r\n- decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n- found_decal = False\r\n- candidate_files = [f['source_path'] for f in self.classified_files['maps']] # Check map names first\r\n- if not candidate_files: # Fallback to checking all files? Maybe too broad. Check Extra?\r\n- candidate_files = [f['source_path'] for f in self.classified_files['extra']]\r\n-\r\n- if decal_keywords:\r\n- for file_path in candidate_files:\r\n- for keyword in decal_keywords:\r\n- if keyword.lower() in file_path.name.lower():\r\n- self.metadata[\"asset_category\"] = \"Decal\"\r\n- found_decal = True; break\r\n- if found_decal: break\r\n- if found_decal: log.debug(\"Category set to 'Decal' due to keyword match.\")\r\n-\r\n- # --- Determine Base Name ---\r\n- map_stems = [f['source_path'].stem for f in self.classified_files['maps']]\r\n- model_stems = [f['source_path'].stem for f in self.classified_files['models']]\r\n- candidate_stems = map_stems + model_stems\r\n-\r\n- determined_base_name = \"UnknownAssetName\"\r\n- if candidate_stems:\r\n- separator = self.config.source_naming_separator\r\n- base_index = self.config.source_naming_indices.get('base_name')\r\n-\r\n- if isinstance(base_index, int):\r\n- potential_base_names = set()\r\n- for stem in candidate_stems:\r\n- parts = stem.split(separator)\r\n- if len(parts) > base_index:\r\n- potential_base_names.add(parts[base_index])\r\n- if len(potential_base_names) == 1:\r\n- determined_base_name = potential_base_names.pop()\r\n- log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n- elif len(potential_base_names) > 1 :\r\n- log.warning(f\"Multiple potential base names found from parts index {base_index}: {potential_base_names}. 
Falling back to common prefix.\")\r\n- # Fallback logic if structured parts method fails or yields multiple names\r\n- determined_base_name = os.path.commonprefix(candidate_stems)\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Clean trailing separators etc.\r\n- # else: len is 0, means no valid parts found, use common prefix below\r\n-\r\n- # If no index or structured parts failed, use common prefix of all relevant stems\r\n- if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n- determined_base_name = os.path.commonprefix(candidate_stems)\r\n- log.debug(f\"Using common prefix: '{determined_base_name}'\")\r\n- # Clean up common separators/underscores often left by commonprefix\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n-\r\n- # Final cleanup and fallback for base name\r\n- determined_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n- self.metadata[\"asset_name\"] = determined_base_name\r\n- log.debug(f\"Final Determined Base Name: {self.metadata['asset_name']}\")\r\n-\r\n- # --- Determine Archetype (Usage) ---\r\n- archetype_rules = self.config.archetype_rules\r\n- determined_archetype = \"Unknown\"\r\n- check_stems = [f['source_path'].stem.lower() for f in self.classified_files['maps']]\r\n- check_stems.extend([f['source_path'].stem.lower() for f in self.classified_files['models']])\r\n- # Also check the determined base name itself?\r\n- check_stems.append(self.metadata[\"asset_name\"].lower())\r\n-\r\n- if check_stems:\r\n- best_match_archetype = \"Unknown\"\r\n- highest_match_count = 0 # Simple heuristic: prioritize rule with most keyword hits?\r\n-\r\n- for rule in archetype_rules:\r\n- if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n- arch_name, rules_dict = rule\r\n- match_any = rules_dict.get(\"match_any\", [])\r\n- # match_all = rules_dict.get(\"match_all\", []) # Add 
logic if needed\r\n-\r\n- current_match_count = 0\r\n- matched_any_keyword = False\r\n- if match_any:\r\n- for keyword in match_any:\r\n- kw_lower = keyword.lower()\r\n- for stem in check_stems:\r\n- # Using simple substring check again\r\n- if kw_lower in stem:\r\n- current_match_count += 1\r\n- matched_any_keyword = True\r\n- # Optionally break after first hit per keyword? Or count all occurrences? Count all for now.\r\n-\r\n- # Decide if this rule matches. For now: requires at least one 'match_any' hit.\r\n- if matched_any_keyword:\r\n- # Simple approach: first rule that matches wins.\r\n- # Could be enhanced by prioritizing rules or counting hits.\r\n- if best_match_archetype == \"Unknown\": # Take the first match\r\n- best_match_archetype = arch_name\r\n- log.debug(f\"Tentative archetype match '{arch_name}' based on keywords: {match_any}\")\r\n- # Break here for \"first match wins\" logic\r\n- break\r\n-\r\n- # --- Example: Prioritize by match count (more complex) ---\r\n- # if current_match_count > highest_match_count:\r\n- # highest_match_count = current_match_count\r\n- # best_match_archetype = arch_name\r\n- # log.debug(f\"New best archetype match '{arch_name}' with count {current_match_count}\")\r\n- # ----------------------------------------------------------\r\n-\r\n- determined_archetype = best_match_archetype\r\n-\r\n- self.metadata[\"archetype\"] = determined_archetype\r\n- log.debug(f\"Determined Archetype: {self.metadata['archetype']}\")\r\n- log.info(\"Base metadata determination complete.\")\r\n-\r\n-\r\n- def _process_maps(self):\r\n- \"\"\"Loads, processes, resizes, and saves classified map files.\"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- log.info(f\"Processing identified map files for '{self.metadata['asset_name']}'...\")\r\n- processed_map_types = set()\r\n-\r\n- # --- Settings retrieval ---\r\n- resolutions = self.config.image_resolutions\r\n- stats_res_key = 
self.config.calculate_stats_resolution\r\n- stats_target_dim = resolutions.get(stats_res_key)\r\n- if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. Stats skipped.\")\r\n- gloss_keywords = self.config.source_glossiness_keywords\r\n- target_pattern = self.config.target_filename_pattern\r\n- base_name = self.metadata['asset_name']\r\n-\r\n- # --- Pre-process Glossiness -> Roughness ---\r\n- preprocessed_data = {}\r\n- derived_from_gloss_flag = {}\r\n- gloss_map_info_for_rough, native_rough_map_info = None, None\r\n- for map_info in self.classified_files['maps']:\r\n- if map_info['map_type'] == 'ROUGH':\r\n- is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n- if is_gloss: gloss_map_info_for_rough = map_info\r\n- else: native_rough_map_info = map_info\r\n-\r\n- rough_source_to_use = None\r\n- if gloss_map_info_for_rough:\r\n- rough_source_to_use = gloss_map_info_for_rough\r\n- derived_from_gloss_flag['ROUGH'] = True\r\n- if native_rough_map_info:\r\n- log.warning(f\"Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n- if native_rough_map_info in self.classified_files['maps']: self.classified_files['maps'].remove(native_rough_map_info)\r\n- self.classified_files['ignored'].append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n- elif native_rough_map_info:\r\n- rough_source_to_use = native_rough_map_info\r\n- derived_from_gloss_flag['ROUGH'] = False\r\n-\r\n- if derived_from_gloss_flag.get('ROUGH'):\r\n- source_path = self.temp_dir / rough_source_to_use['source_path']\r\n- log.info(f\"Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n- try:\r\n- img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n- if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n- original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n- if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n- if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n- elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n- else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n- # Store tuple: (inverted_float_data, original_dtype)\r\n- preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n- log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n- except Exception as e: log.error(f\"Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n-\r\n- # --- Main Processing Loop ---\r\n- maps_to_process = list(self.classified_files['maps'])\r\n- for map_info in maps_to_process:\r\n- map_type = map_info['map_type']\r\n- source_path_rel = map_info['source_path']\r\n- original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n- log.info(f\"-- 
Processing Map Type: {map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n- img_processed, source_dtype = None, None\r\n- map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n-\r\n- try:\r\n- # --- 1. Get/Load Source Data ---\r\n- if map_type in preprocessed_data:\r\n- log.debug(f\"Using pre-processed data for {map_type}.\")\r\n- # Unpack tuple: (inverted_float_data, original_dtype)\r\n- img_processed, source_dtype = preprocessed_data[map_type]\r\n- # No longer need to read the original file just for dtype\r\n- else:\r\n- full_source_path = self.temp_dir / source_path_rel\r\n- # Determine the read flag based on map type\r\n- read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- log.debug(f\"Loading source {source_path_rel.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n- img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n- if img_loaded is None:\r\n- raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n- img_processed, source_dtype = img_loaded.copy(), img_loaded.dtype\r\n- log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape}\")\r\n- map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n-\r\n- # --- 2. 
Handle Alpha Mask ---\r\n- if map_type == 'MASK' and img_processed is not None:\r\n- log.debug(\"Processing as MASK type.\")\r\n- shape = img_processed.shape\r\n- if len(shape) == 3 and shape[2] == 4: img_processed = img_processed[:, :, 3]\r\n- elif len(shape) == 3 and shape[2] == 3: img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n- if img_processed.dtype != np.uint8:\r\n- log.debug(f\"Converting mask from {img_processed.dtype} to uint8.\")\r\n- if source_dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- else: img_processed = img_processed.astype(np.uint8)\r\n-\r\n- if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n- orig_h, orig_w = img_processed.shape[:2]\r\n- self.processed_maps_details.setdefault(map_type, {})\r\n- max_original_dimension = max(orig_w, orig_h) # Get max original dimension\r\n-\r\n- # --- 3. 
Resize & Save Loop ---\r\n- for res_key, target_dim in resolutions.items():\r\n- # --- Skip Upscaling ---\r\n- if target_dim > max_original_dimension:\r\n- log.debug(f\"Skipping {res_key} ({target_dim}px): Target dimension is larger than original max dimension ({max_original_dimension}px).\")\r\n- continue\r\n- log.debug(f\"Processing {map_type} for resolution: {res_key}...\")\r\n- if orig_w <= 0 or orig_h <= 0: log.warning(f\"Invalid original dims, skipping resize {res_key}.\"); continue\r\n- target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n- interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n- try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n- except Exception as resize_err: log.error(f\"Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n-\r\n- # --- 3a. Calculate Stats ---\r\n- if res_key == stats_res_key and stats_target_dim:\r\n- log.debug(f\"Calculating stats for {map_type} using {res_key} image...\")\r\n- stats = _calculate_image_stats(img_resized) # Use the already resized image\r\n- if stats: self.metadata[\"image_stats_1k\"][map_type] = stats\r\n- else: log.warning(f\"Stats calculation failed for {map_type} at {res_key}.\")\r\n- # Calculate aspect change string on the lowest processed resolution, only once per asset\r\n- lowest_res_key = min(resolutions, key=resolutions.get)\r\n- if self.metadata[\"aspect_ratio_change_string\"] == \"N/A\" and res_key == lowest_res_key:\r\n- try:\r\n- aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n- self.metadata[\"aspect_ratio_change_string\"] = aspect_string\r\n- log.debug(f\"Stored aspect ratio change string using lowest res '{res_key}': '{aspect_string}'\")\r\n- except Exception as aspect_err:\r\n- log.error(f\"Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", 
exc_info=True)\r\n- self.metadata[\"aspect_ratio_change_string\"] = \"Error\" # Indicate calculation failure\r\n-\r\n- # --- 3b. Determine Output Bit Depth & Format ---\r\n- bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n- current_dtype = img_resized.dtype # Dtype after resize\r\n- output_dtype_target, output_bit_depth = None, 8 # Defaults\r\n- if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n- elif bit_depth_rule == 'respect':\r\n- if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16 # Respect float source as 16bit uint for non-EXR\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8 # Default respect to 8bit\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8 # Fallback\r\n-\r\n- # --- 3c. Determine Output Format based on Input, Rules & Threshold ---\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format() # Usually 'png'\r\n- threshold = self.config.resolution_threshold_for_jpg # Get threshold value\r\n- force_lossless = map_type in self.config.force_lossless_map_types\r\n-\r\n- if force_lossless:\r\n- log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- # Add EXR type flags if needed, e.g., HALF for 16-bit float\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- # Add compression later if desired, e.g. 
cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_ZIP\r\n- else: # Assume png or other lossless 16-bit format\r\n- # Use fallback if primary isn't suitable or fails? For now, assume primary is lossless.\r\n- # If primary_fmt_16 is not 'png', maybe default to fallback_fmt_16?\r\n- if output_format != \"png\":\r\n- log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless. Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16 # Ensure it's a known lossless like png/tif\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- # Add params for other lossless like TIF if supported\r\n- else: # 8-bit lossless\r\n- output_format = fmt_8bit_config # Usually 'png'\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for '{map_type}' will use PNG instead.\")\r\n- output_format = \"png\"\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n-\r\n- # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n- elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- # --- Else: Apply Input/Rule-Based Logic ---\r\n- else:\r\n- # Apply force_8bit rule (if not overridden by threshold)\r\n- if bit_depth_rule == 'force_8bit':\r\n- output_format = 'png' # Force to PNG as per clarification\r\n- output_ext = '.png'\r\n- # output_bit_depth is already 8, output_dtype_target is already uint8\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n- # Handle specific input extensions if not forced to 8bit PNG\r\n- elif original_extension == '.jpg' and output_bit_depth == 8:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n- elif original_extension == '.tif':\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) # Add 
compression later\r\n- log.debug(f\"Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n- else: # Fallback for 16-bit from TIF\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- else: # output_bit_depth == 8 from TIF input (and below threshold)\r\n- output_format = 'png'\r\n- output_ext = '.png'\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n- # Handle other inputs (e.g., PNG) or fallbacks\r\n- else:\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # Fallback for 16-bit\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- 
save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n- else: # 8-bit output (and below threshold)\r\n- output_format = fmt_8bit_config # Use configured 8-bit format\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\":\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n-\r\n- img_to_save = img_resized.copy() # Work on copy for dtype conversion\r\n- # --- Apply Dtype Conversion ---\r\n- if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n- if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n- if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n- if needs_float16 and img_to_save.dtype != np.float16:\r\n- if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n-\r\n- # --- 3d. 
Construct Filename & Save ---\r\n- filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n- output_path_temp = self.temp_dir / filename\r\n- log.debug(f\"Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n- saved_successfully, actual_format_saved = False, output_format\r\n- try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n- except Exception as save_err:\r\n- log.error(f\"Save failed ({output_format}): {save_err}\")\r\n- # --- Try Fallback ---\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Attempting fallback: {fallback_fmt_16}\")\r\n- actual_format_saved = fallback_fmt_16; output_ext = f\".{fallback_fmt_16}\"; # Adjust format/ext\r\n- filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n- output_path_temp = self.temp_dir / filename\r\n- save_params_fallback = [] # Reset params for fallback\r\n- img_fallback = None; target_fallback_dtype = np.uint16\r\n- if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, 6])\r\n- elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n-\r\n- # Convert EXR source (float16) to uint16 for PNG/TIF fallback\r\n- #if img_to_save.dtype == np.float16: img_fallback = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(target_fallback_dtype)\r\n- if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n- # <<< START MODIFICATION HERE >>>\r\n- # Check for NaN/Inf before conversion\r\n- if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n- log.error(f\"Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. 
Skipping resolution.\")\r\n- continue # Skip fallback if data is bad\r\n-\r\n- # Clip *after* scaling for uint16 conversion robustness\r\n- img_scaled = img_to_save * 65535.0\r\n- img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- # <<< END MODIFICATION HERE >>>\r\n- elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already correct type\r\n- else: log.error(f\"Cannot convert {img_to_save.dtype} for fallback.\"); continue # Skip fallback\r\n-\r\n- try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Fallback save failed: {fallback_err}\", exc_info=True)\r\n-\r\n- # --- 3e. Store Result ---\r\n- if saved_successfully:\r\n- self.processed_maps_details[map_type][res_key] = {\r\n- \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n- \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n- \"format\": actual_format_saved\r\n- }\r\n- map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n-\r\n- except Exception as map_proc_err:\r\n- log.error(f\"Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n- self.processed_maps_details.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n-\r\n- self.metadata[\"map_details\"][map_type] = map_details # Store details for this map type\r\n-\r\n- # --- Final Metadata Updates ---\r\n- processed_map_types = set(k for k, v in self.processed_maps_details.items() if 'error' not in v) # Only count successful\r\n- self.metadata[\"maps_present\"] = sorted(list(processed_map_types))\r\n- features = set()\r\n- for map_type, details in self.metadata[\"map_details\"].items():\r\n- if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n- if 
details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n- res_details = self.processed_maps_details.get(map_type, {})\r\n- if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n- self.metadata[\"shader_features\"] = sorted(list(features))\r\n- log.debug(f\"Determined shader features: {self.metadata['shader_features']}\")\r\n- log.info(\"Finished processing all map files.\")\r\n-\r\n-\r\n- #log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n-\r\n- def _merge_maps(self):\r\n- \"\"\"Merges channels from different maps based on rules in configuration.\"\"\"\r\n- # ... (initial checks and getting merge_rules) ...\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Workspace not setup.\")\r\n-\r\n-\r\n- # <<< FIX: Get merge rules from the configuration object >>>\r\n-\r\n- merge_rules = self.config.map_merge_rules\r\n-\r\n- # <<< END FIX >>>\r\n- log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n-\r\n- for rule_index, rule in enumerate(merge_rules):\r\n- # <<< FIX: Assign variables *before* using them >>>\r\n- output_map_type = rule.get(\"output_map_type\")\r\n- inputs_mapping = rule.get(\"inputs\")\r\n- defaults = rule.get(\"defaults\", {})\r\n- rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n-\r\n- # <<< FIX: Check if essential rule keys exist *after* assignment >>>\r\n- if not output_map_type or not inputs_mapping:\r\n- log.warning(f\"Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs' in rule definition. 
Rule data: {rule}\")\r\n- continue # Skip to the next rule in merge_rules\r\n-\r\n- # Now it's safe to use output_map_type in the log statement\r\n- log.info(f\"-- Applying merge rule for '{output_map_type}' --\")\r\n- # <<< END FIX >>>\r\n-\r\n- self.merged_maps_details.setdefault(output_map_type, {})\r\n-\r\n- # --- Determine required inputs and their common resolutions ---\r\n- required_input_types = set(inputs_mapping.values()) # e.g., {'NRM', 'ROUGH'}\r\n- if not required_input_types:\r\n- log.warning(f\"Skipping merge rule '{output_map_type}': No input map types defined in 'inputs'.\")\r\n- continue\r\n-\r\n- possible_resolutions_per_input = []\r\n- for input_type in required_input_types:\r\n- if input_type in self.processed_maps_details:\r\n- # Get resolution keys where processing didn't error\r\n- res_keys = {res for res, details in self.processed_maps_details[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n- if not res_keys:\r\n- log.warning(f\"Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n- possible_resolutions_per_input = [] # Cannot proceed if any input is missing all resolutions\r\n- break\r\n- possible_resolutions_per_input.append(res_keys)\r\n- else:\r\n- log.warning(f\"Required input map type '{input_type}' for merge rule '{output_map_type}' was not processed.\")\r\n- possible_resolutions_per_input = [] # Cannot proceed if any input type is missing\r\n- break\r\n-\r\n- if not possible_resolutions_per_input:\r\n- log.warning(f\"Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. 
Skipping rule.\")\r\n- continue\r\n-\r\n- # Find the intersection of resolution keys across all required inputs\r\n- common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n-\r\n- if not common_resolutions:\r\n- log.warning(f\"No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n- continue\r\n- log.debug(f\"Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n- # --- End Common Resolution Logic ---\r\n-\r\n-\r\n- # <<< LOOP THROUGH COMMON RESOLUTIONS >>>\r\n- # Use the actual common_resolutions found\r\n- res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n- if not res_order:\r\n- log.warning(f\"Common resolutions {common_resolutions} do not match any target resolutions in config. Skipping merge for '{output_map_type}'.\")\r\n- continue\r\n-\r\n- # Sort resolutions to process (optional, but nice for logs)\r\n- sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n-\r\n- # Get target pattern from config for filename formatting\r\n- target_pattern = self.config.target_filename_pattern\r\n-\r\n- for current_res_key in sorted_res_keys:\r\n- log.debug(f\"Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n- try:\r\n- loaded_inputs = {}\r\n- input_bit_depths = set()\r\n- input_original_extensions = {} # Store original extensions for this resolution's inputs\r\n-\r\n- # --- Load required input maps *at this specific resolution* AND get original extensions ---\r\n- possible_to_load = True\r\n- base_name = self.metadata['asset_name']\r\n- target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B']\r\n-\r\n- for map_type in required_input_types:\r\n- res_details = self.processed_maps_details.get(map_type, {}).get(current_res_key)\r\n- if not res_details or 'path' not in res_details:\r\n- log.warning(f\"Input map '{map_type}' missing 
details or path for resolution '{current_res_key}'. Cannot merge for this resolution.\")\r\n- possible_to_load = False; break\r\n-\r\n- # Find original extension from classified data\r\n- original_ext = '.png' # Default\r\n- found_original = False\r\n- for classified_map in self.classified_files[\"maps\"]:\r\n- # Match based on the base map type (e.g., NRM matches NRM-1)\r\n- if classified_map['map_type'].startswith(map_type):\r\n- # Check if the processed path matches (to handle multiple variants if needed, though less likely here)\r\n- # This assumes processed_maps_details path is relative to temp_dir\r\n- processed_path_str = str(res_details['path'])\r\n- classified_path_str = str(classified_map['source_path']) # This is the original source path relative to temp_dir root\r\n- # A more robust check might involve comparing filenames if paths differ due to processing steps\r\n- # For now, rely on the base map type match and grab the first extension found\r\n- original_ext = classified_map.get('original_extension', '.png')\r\n- found_original = True\r\n- break # Found the first match for this map_type\r\n- if not found_original:\r\n- log.warning(f\"Could not reliably find original extension for merge input '{map_type}'. 
Defaulting to '.png'.\")\r\n-\r\n- input_original_extensions[map_type] = original_ext\r\n-\r\n- # Load the image\r\n- input_file_path = self.temp_dir / res_details['path']\r\n- read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- log.debug(f\"Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n- img = cv2.imread(str(input_file_path), read_flag)\r\n- if img is None:\r\n- raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n- loaded_inputs[map_type] = img\r\n- input_bit_depths.add(res_details.get('bit_depth', 8))\r\n-\r\n- if not possible_to_load: continue # Skip this resolution if inputs missing\r\n-\r\n- # --- Determine dimensions and target_dim for threshold check ---\r\n- first_map_type = next(iter(required_input_types)) # Get one map type to read dimensions\r\n- h, w = loaded_inputs[first_map_type].shape[:2]\r\n- # Get target_dim from the details of the first loaded input for this resolution\r\n- first_res_details = self.processed_maps_details.get(first_map_type, {}).get(current_res_key)\r\n- target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n- num_target_channels = len(target_channels)\r\n-\r\n- # <<< Re-calculate output_bit_depth based on THIS resolution's inputs >>>\r\n- max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n- output_bit_depth = 8\r\n- if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n- output_bit_depth = 16\r\n- log.debug(f\"Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n-\r\n- # Prepare channels (float32) (same logic as before)\r\n- merged_channels_float32 = []\r\n- # Use the defined target_channels list\r\n- for target_channel in 
target_channels: # Iterate R, G, B (or specified) order\r\n- source_map_type = inputs_mapping.get(target_channel)\r\n- channel_data_float32 = None\r\n- if source_map_type and source_map_type in loaded_inputs:\r\n- # ... [Extract channel data as float32 as before] ...\r\n- img_input = loaded_inputs[source_map_type]\r\n- if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n- elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n- else: img_float = img_input.astype(np.float32)\r\n- num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n- if num_source_channels >= 3: # BGR Source\r\n- if target_channel == 'R': channel_data_float32 = img_float[:, :, 2]\r\n- elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n- elif target_channel == 'B': channel_data_float32 = img_float[:, :, 0]\r\n- elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n- elif num_source_channels == 1 or len(img_float.shape) == 2: # Grayscale source\r\n- channel_data_float32 = img_float.reshape(h, w)\r\n- if channel_data_float32 is None: # Use default if needed\r\n- default_val = defaults.get(target_channel)\r\n- if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n- channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n- merged_channels_float32.append(channel_data_float32)\r\n-\r\n-\r\n- # Merge channels (same as before)\r\n- if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n- merged_image_float32 = cv2.merge(merged_channels_float32)\r\n-\r\n- # Final Data Type Conversion (based on recalculated output_bit_depth)\r\n- img_final_merged = None\r\n- if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 
65535.0).astype(np.uint16)\r\n- else: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n-\r\n- # --- Determine Output Format based on Threshold, Input Hierarchy & Rules ---\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format()\r\n- threshold = self.config.resolution_threshold_for_jpg\r\n- force_lossless = output_map_type in self.config.force_lossless_map_types\r\n-\r\n- if force_lossless:\r\n- log.debug(f\"Format forced to lossless for merged map type '{output_map_type}'.\")\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else: # Assume png or other lossless 16-bit format\r\n- if output_format != \"png\":\r\n- log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # 8-bit lossless\r\n- output_format = fmt_8bit_config # Usually 'png'\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n- output_format = \"png\"\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n-\r\n- # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n- elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- # --- Else: Apply Hierarchy/Rule-Based Logic ---\r\n- else:\r\n- involved_extensions = set(input_original_extensions.values())\r\n- log.debug(f\"Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n- # Hierarchy: EXR > TIF > PNG > JPG\r\n- highest_format_str = 'jpg' # Start lowest\r\n- if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n- elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n- elif '.png' in involved_extensions: highest_format_str = 'png'\r\n-\r\n- # Determine final output format based on hierarchy and target bit depth\r\n- final_output_format = highest_format_str\r\n-\r\n- if highest_format_str == 'tif':\r\n- if output_bit_depth == 16:\r\n- final_output_format = primary_fmt_16 # Use configured 16-bit pref (EXR/PNG)\r\n- log.debug(f\"Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n- else: # 8-bit target\r\n- final_output_format = 'png' # Force TIF input to PNG for 8-bit merge output\r\n- log.debug(\"Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n- else:\r\n- log.debug(f\"Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold 
not met)\")\r\n-\r\n- # Set format/params based on the determined final_output_format\r\n- output_format = final_output_format\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\": # Should only happen if highest input was JPG and below threshold\r\n- output_ext = \".jpg\"\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- else:\r\n- log.error(f\"Unsupported final output format '{output_format}' determined for merged map '{output_map_type}'. Skipping save.\")\r\n- continue\r\n-\r\n- # --- JPG 8-bit Check (applies regardless of threshold logic) ---\r\n- if output_format == \"jpg\" and output_bit_depth == 16:\r\n- log.warning(f\"Output format is JPG, but merge rule resulted in 16-bit target. 
Forcing merged output to 8-bit for JPG save.\")\r\n- img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- output_bit_depth = 8 # Correct the recorded bit depth\r\n-\r\n- # --- Save Merged Map ---\r\n- image_to_save = img_final_merged # Already converted to target bit depth (or forced to 8bit for JPG)\r\n-\r\n- # Apply float16 conversion if needed for EXR\r\n- if needs_float16 and image_to_save.dtype != np.float16:\r\n- if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- else: log.warning(f\"Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n-\r\n- merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n- merged_output_path_temp = self.temp_dir / merged_filename\r\n- log.debug(f\"Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n-\r\n- # --- Add save logic with fallback here ---\r\n- saved_successfully = False\r\n- actual_format_saved = output_format\r\n- try:\r\n- cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n- log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n- saved_successfully = True\r\n- except Exception as save_err:\r\n- log.error(f\"Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n- # Try Fallback for merged map (similar to _process_maps fallback)\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n- # ... 
[ Implement fallback save logic here, converting image_to_save if needed ] ...\r\n- actual_format_saved = fallback_fmt_16\r\n- output_ext = f\".{fallback_fmt_16}\"\r\n- merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n- merged_output_path_temp = self.temp_dir / merged_filename\r\n- save_params_fallback = []\r\n- img_fallback = None\r\n- target_fallback_dtype = np.uint16\r\n-\r\n- if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n-\r\n- if image_to_save.dtype == np.float16:\r\n- if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(\"NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n- img_scaled = image_to_save * 65535.0\r\n- img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n- else: log.error(f\"Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n-\r\n- try: cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n- # --- End Fallback Logic ---\r\n-\r\n- # Record details if save successful\r\n- if saved_successfully:\r\n- self.merged_maps_details[output_map_type][current_res_key] = {\r\n- \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n- \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n- }\r\n- if output_map_type not in self.metadata[\"merged_maps\"]: 
self.metadata[\"merged_maps\"].append(output_map_type)\r\n-\r\n- except Exception as merge_res_err:\r\n- log.error(f\"Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n- self.merged_maps_details.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n-\r\n- log.info(\"Finished applying map merging rules.\")\r\n-\r\n-\r\n- def _generate_metadata_file(self):\r\n- \"\"\"Gathers all collected metadata and writes it to a JSON file.\"\"\"\r\n- # ... (Implementation from Response #49) ...\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\":\r\n- log.warning(\"Asset name unknown, metadata may be incomplete.\")\r\n-\r\n- log.info(f\"Generating metadata file for '{self.metadata['asset_name']}'...\")\r\n- final_metadata = self.metadata.copy()\r\n-\r\n- final_metadata[\"processed_map_resolutions\"] = {}\r\n- for map_type, res_dict in self.processed_maps_details.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n- if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys) # Basic sort\r\n-\r\n- final_metadata[\"merged_map_resolutions\"] = {}\r\n- for map_type, res_dict in self.merged_maps_details.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n- if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n-\r\n- # Add processing info\r\n- final_metadata[\"_processing_info\"] = {\r\n- \"preset_used\": self.config.preset_name,\r\n- \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n- # Optionally add core config details used, carefully\r\n- }\r\n-\r\n- # Sort lists\r\n- for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n- if key in final_metadata and 
isinstance(final_metadata[key], list): final_metadata[key].sort()\r\n-\r\n- metadata_filename = self.config.metadata_filename\r\n- output_path = self.temp_dir / metadata_filename\r\n- log.debug(f\"Writing metadata to: {output_path}\")\r\n- try:\r\n- with open(output_path, 'w', encoding='utf-8') as f:\r\n- json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n- log.info(f\"Metadata file '{metadata_filename}' generated successfully.\")\r\n- self.metadata_file_path_temp = output_path # Store path for moving\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to write metadata file {output_path}: {e}\") from e\r\n-\r\n-\r\n- def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n- \"\"\"\r\n- Calculates the aspect ratio change string (e.g., \"EVEN\", \"X133\") based on original prototype logic.\r\n- Returns the string representation.\r\n- \"\"\"\r\n- if original_width <= 0 or original_height <= 0:\r\n- log.warning(\"Cannot calculate aspect ratio change with zero original dimensions.\")\r\n- return \"InvalidInput\"\r\n-\r\n- # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)\r\n- if resized_width <= 0 or resized_height <= 0:\r\n- log.warning(\"Cannot calculate aspect ratio change with zero resized dimensions.\")\r\n- return \"InvalidResize\"\r\n-\r\n- # Original logic from user feedback\r\n- width_change_percentage = ((resized_width - original_width) / original_width) * 100\r\n- height_change_percentage = ((resized_height - original_height) / original_height) * 100\r\n-\r\n- normalized_width_change = width_change_percentage / 100\r\n- normalized_height_change = height_change_percentage / 100\r\n-\r\n- normalized_width_change = min(max(normalized_width_change + 1, 0), 2)\r\n- normalized_height_change = min(max(normalized_height_change + 1, 0), 2)\r\n-\r\n- # Handle potential zero division if one dimension change is exactly 
-100% (normalized to 0)\r\n- # If both are 0, aspect ratio is maintained. If one is 0, the other dominates.\r\n- if normalized_width_change == 0 and normalized_height_change == 0:\r\n- closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1\r\n- elif normalized_width_change == 0:\r\n- closest_value_to_one = abs(normalized_height_change)\r\n- elif normalized_height_change == 0:\r\n- closest_value_to_one = abs(normalized_width_change)\r\n- else:\r\n- closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))\r\n-\r\n- # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0\r\n- epsilon = 1e-9\r\n- scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one\r\n-\r\n- scaled_normalized_width_change = scale_factor * normalized_width_change\r\n- scaled_normalized_height_change = scale_factor * normalized_height_change\r\n-\r\n- output_width = round(scaled_normalized_width_change, decimals)\r\n- output_height = round(scaled_normalized_height_change, decimals)\r\n-\r\n- # Convert to int if exactly 1.0 after rounding\r\n- if abs(output_width - 1.0) < epsilon: output_width = 1\r\n- if abs(output_height - 1.0) < epsilon: output_height = 1\r\n-\r\n- # Determine output string\r\n- if original_width == original_height or abs(output_width - output_height) < epsilon:\r\n- output = \"EVEN\"\r\n- elif output_width != 1 and output_height == 1:\r\n- output = f\"X{str(output_width).replace('.', '')}\"\r\n- elif output_height != 1 and output_width == 1:\r\n- output = f\"Y{str(output_height).replace('.', '')}\"\r\n- else:\r\n- # Both changed relative to each other\r\n- output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n-\r\n- log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n- return 
output\r\n-\r\n- def _sanitize_filename(self, name: str) -> str:\r\n- \"\"\"Removes or replaces characters invalid for filenames/directory names.\"\"\"\r\n- # ... (Implementation from Response #51) ...\r\n- if not isinstance(name, str): name = str(name)\r\n- name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n- name = re.sub(r'_+', '_', name)\r\n- name = name.strip('_')\r\n- if not name: name = \"invalid_name\"\r\n- return name\r\n-\r\n- def _organize_output_files(self):\r\n- \"\"\"Moves processed files from temp dir to the final output structure.\"\"\"\r\n- # ... (Implementation from Response #51) ...\r\n- if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n- if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing.\")\r\n- if not self.metadata.get(\"supplier_name\"): raise AssetProcessingError(\"Supplier name missing.\")\r\n-\r\n- supplier_sanitized = self._sanitize_filename(self.metadata[\"supplier_name\"])\r\n- asset_name_sanitized = self._sanitize_filename(self.metadata[\"asset_name\"])\r\n- final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- log.info(f\"Organizing output files into: {final_dir}\")\r\n- try:\r\n- # Check if overwriting is allowed before potentially deleting existing dir\r\n- if final_dir.exists() and self.overwrite:\r\n- log.warning(f\"Output directory exists and overwrite is True: {final_dir}. 
Removing existing directory.\")\r\n- try:\r\n- shutil.rmtree(final_dir)\r\n- except Exception as rm_err:\r\n- raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} during overwrite: {rm_err}\") from rm_err\r\n- elif final_dir.exists() and not self.overwrite:\r\n- # This case should ideally be caught by the skip logic earlier,\r\n- # but adding a warning here as a safeguard.\r\n- log.warning(f\"Output directory exists: {final_dir}. Overwriting (unexpected - should have been skipped).\")\r\n-\r\n- final_dir.mkdir(parents=True, exist_ok=True) # Create after potential removal\r\n- except Exception as e:\r\n- # Catch potential errors during mkdir if rmtree failed partially?\r\n- if not isinstance(e, AssetProcessingError): # Avoid wrapping already specific error\r\n- raise AssetProcessingError(f\"Failed to create final dir {final_dir}: {e}\") from e\r\n- else:\r\n- raise # Re-raise the AssetProcessingError from rmtree\r\n-\r\n- def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n- if not src_rel_path: log.warning(f\"Missing src path for {file_desc}.\"); return\r\n- source_abs = self.temp_dir / src_rel_path\r\n- dest_abs = dest_dir / src_rel_path.name\r\n- try:\r\n- if source_abs.exists():\r\n- log.debug(f\"Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n- dest_dir.mkdir(parents=True, exist_ok=True) # Ensure sub-dir exists (for Extra/)\r\n- shutil.move(str(source_abs), str(dest_abs))\r\n- else: log.warning(f\"Source file missing for {file_desc}: {source_abs}\")\r\n- except Exception as e: log.error(f\"Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n-\r\n- # Move maps, merged maps, models, metadata\r\n- for details_dict in [self.processed_maps_details, self.merged_maps_details]:\r\n- for map_type, res_dict in details_dict.items():\r\n- if 'error' in res_dict: continue\r\n- for res_key, details in res_dict.items():\r\n- if isinstance(details, dict) 
and 'path' in details: _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n- for model_info in self.classified_files.get('models', []): _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n- if self.metadata_file_path_temp: _safe_move(self.metadata_file_path_temp.relative_to(self.temp_dir), final_dir, \"metadata file\")\r\n-\r\n- # Move extra/ignored files\r\n- extra_subdir_name = self.config.extra_files_subdir\r\n- extra_dir = final_dir / extra_subdir_name\r\n- files_to_move_extra = self.classified_files.get('extra', []) + self.classified_files.get('ignored', [])\r\n- if files_to_move_extra:\r\n- log.debug(f\"Moving {len(files_to_move_extra)} files to '{extra_subdir_name}/'...\")\r\n- try:\r\n- extra_dir.mkdir(exist_ok=True)\r\n- for file_info in files_to_move_extra: _safe_move(file_info.get('source_path'), extra_dir, f\"extra file ({file_info.get('reason', 'Unknown')})\")\r\n- except Exception as e: log.error(f\"Failed creating/moving to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n-\r\n- log.info(f\"Finished organizing output for '{asset_name_sanitized}'.\")\r\n-\r\n-\r\n- def _cleanup_workspace(self):\r\n- \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n- # ... 
(Implementation from Response #45) ...\r\n- if self.temp_dir and self.temp_dir.exists():\r\n- try:\r\n- log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n- shutil.rmtree(self.temp_dir)\r\n- self.temp_dir = None\r\n- log.debug(\"Temporary workspace cleaned up successfully.\")\r\n- except Exception as e:\r\n- log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n-\r\n- # --- Prediction Method ---\r\n- def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n- \"\"\"\r\n- Predicts the final output structure (supplier, asset name) and attempts\r\n- to predict output filenames for potential map files based on naming conventions.\r\n- Does not perform full processing or image loading.\r\n-\r\n- Returns:\r\n- tuple[str | None, str | None, dict[str, str] | None]:\r\n- (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n- where file_predictions_dict maps input filename -> predicted output filename.\r\n- Returns None if prediction fails critically.\r\n- \"\"\"\r\n- log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n- try:\r\n- # 1. Get Supplier Name\r\n- supplier_name = self.config.supplier_name\r\n- if not supplier_name:\r\n- log.warning(\"Supplier name not found in configuration during prediction.\")\r\n- return None\r\n-\r\n- # 2. 
List Input Filenames/Stems\r\n- candidate_stems = set() # Use set for unique stems\r\n- filenames = []\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- try:\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- # Get only filenames, ignore directories\r\n- filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n- except zipfile.BadZipFile:\r\n- log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n- return None\r\n- except Exception as zip_err:\r\n- log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n- return None # Cannot proceed if we can't list files\r\n- elif self.input_path.is_dir():\r\n- try:\r\n- for item in self.input_path.iterdir():\r\n- if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n- filenames.append(item.name)\r\n- # Note: Not walking subdirs for prediction to keep it fast\r\n- except Exception as dir_err:\r\n- log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n- return None\r\n-\r\n- if not filenames:\r\n- log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n- return None # Return None if no files found\r\n-\r\n- # 3. 
Lightweight Classification for Stems and Potential Maps\r\n- map_type_mapping = self.config.map_type_mapping\r\n- model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n- separator = self.config.source_naming_separator\r\n- processed_filenames = set() # Track full filenames processed\r\n- potential_map_files = {} # Store fname -> potential map_type\r\n-\r\n- for fname in filenames:\r\n- if fname in processed_filenames: continue\r\n-\r\n- fstem = Path(fname).stem\r\n- fstem_lower = fstem.lower()\r\n- name_parts = fstem_lower.split(separator)\r\n-\r\n- # Check map rules first\r\n- map_matched = False\r\n- for mapping_rule in map_type_mapping:\r\n- source_keywords, standard_map_type = mapping_rule\r\n- if standard_map_type not in self.config.standard_map_types: continue\r\n- for keyword in source_keywords:\r\n- kw_lower = keyword.lower().strip('*')\r\n- if kw_lower in name_parts:\r\n- is_exact_match = any(part == kw_lower for part in name_parts)\r\n- if is_exact_match:\r\n- candidate_stems.add(fstem) # Add unique stem\r\n- potential_map_files[fname] = standard_map_type # Store potential type\r\n- processed_filenames.add(fname)\r\n- map_matched = True\r\n- break # Found keyword match for this rule\r\n- if map_matched: break # Found a rule match for this file\r\n- if map_matched: continue # Move to next filename if identified as map\r\n-\r\n- # Check model patterns if not a map\r\n- for pattern in model_patterns:\r\n- if fnmatch(fname.lower(), pattern.lower()):\r\n- candidate_stems.add(fstem) # Still add stem for base name determination\r\n- processed_filenames.add(fname)\r\n- # Don't add models to potential_map_files\r\n- break # Found model match\r\n-\r\n- # Note: Files matching neither maps nor models are ignored for prediction details\r\n-\r\n- candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n- if not candidate_stems_list:\r\n- log.warning(f\"Prediction: No relevant map/model stems found in 
{self.input_path.name}. Using input name as fallback.\")\r\n- # Fallback: Use the input path's name itself if no stems found\r\n- base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n- determined_base_name = base_name_fallback\r\n- else:\r\n- # 4. Replicate _determine_base_metadata logic for base name\r\n- determined_base_name = \"UnknownAssetName\"\r\n- base_index = self.config.source_naming_indices.get('base_name')\r\n-\r\n- if isinstance(base_index, int):\r\n- potential_base_names = set()\r\n- for stem in candidate_stems_list: # Iterate over the list\r\n- parts = stem.split(separator)\r\n- if len(parts) > base_index:\r\n- potential_base_names.add(parts[base_index])\r\n- if len(potential_base_names) == 1:\r\n- determined_base_name = potential_base_names.pop()\r\n- elif len(potential_base_names) > 1:\r\n- log.debug(f\"Prediction: Multiple potential base names from index {base_index}, using common prefix.\")\r\n- determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n- # else: Use common prefix below\r\n-\r\n- if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n- determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n-\r\n- # 5. Sanitize Names\r\n- final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n- final_supplier_name = self._sanitize_filename(supplier_name)\r\n-\r\n- # 6. 
Predict Output Filenames\r\n- file_predictions = {}\r\n- target_pattern = self.config.target_filename_pattern\r\n- # Use highest resolution key as a placeholder for prediction\r\n- highest_res_key = \"Res?\" # Fallback\r\n- if self.config.image_resolutions:\r\n- highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n-\r\n- for input_fname, map_type in potential_map_files.items():\r\n- # Assume PNG for prediction, extension might change based on bit depth rules later\r\n- # but this gives a good idea of the renaming.\r\n- # A more complex prediction could check bit depth rules.\r\n- predicted_ext = \"png\" # Simple assumption for preview\r\n- try:\r\n- predicted_fname = target_pattern.format(\r\n- base_name=final_base_name,\r\n- map_type=map_type,\r\n- resolution=highest_res_key, # Use placeholder resolution\r\n- ext=predicted_ext\r\n- )\r\n- file_predictions[input_fname] = predicted_fname\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n- file_predictions[input_fname] = \"[Filename Format Error]\"\r\n-\r\n-\r\n- log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n- return final_supplier_name, final_base_name, file_predictions\r\n-\r\n- except Exception as e:\r\n- log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- return None\r\n-\r\n-\r\n- # --- New Detailed Prediction Method ---\r\n- def get_detailed_file_predictions(self) -> list[dict] | None:\r\n- \"\"\"\r\n- Performs extraction and classification to provide a detailed list of all\r\n- files found within the asset and their predicted status/output name.\r\n- Does not perform image processing or file moving.\r\n-\r\n- Returns:\r\n- list[dict] | None: A list of dictionaries, each representing a file:\r\n- {'original_path': str, 'predicted_name': str | None, 'status': str, 'details': str | None}\r\n- Returns None if a critical error occurs during setup/classification.\r\n- \"\"\"\r\n- log.info(f\"Getting detailed file predictions for: {self.input_path.name}\")\r\n- results = []\r\n- asset_base_name = \"UnknownAssetName\" # Fallback\r\n-\r\n- try:\r\n- # --- Perform necessary setup and classification ---\r\n- self._setup_workspace()\r\n- self._extract_input()\r\n- self._inventory_and_classify_files()\r\n- self._determine_base_metadata() # Needed for base name prediction\r\n- asset_base_name = self.metadata.get(\"asset_name\", asset_base_name)\r\n-\r\n- # --- Prepare for filename prediction ---\r\n- target_pattern = self.config.target_filename_pattern\r\n- highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n- if self.config.image_resolutions:\r\n- highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n-\r\n- # --- Process classified files ---\r\n- # Maps\r\n- for map_info in self.classified_files.get(\"maps\", []):\r\n- 
original_path_str = str(map_info.get(\"source_path\", \"UnknownPath\"))\r\n- map_type = map_info.get(\"map_type\", \"UnknownType\")\r\n- # Predicted name for maps should just be the base asset name\r\n- predicted_name_display = asset_base_name\r\n- # Concise details\r\n- details = f\"[{map_type}]\"\r\n- if map_info.get(\"is_16bit_source\"):\r\n- details += \" (16-bit)\"\r\n-\r\n- # Still try to format the full name internally for error checking, but don't display it\r\n- try:\r\n- predicted_ext = \"png\" # Assumption for format check\r\n- _ = target_pattern.format(\r\n- base_name=asset_base_name,\r\n- map_type=map_type,\r\n- resolution=highest_res_key,\r\n- ext=predicted_ext\r\n- )\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction format error for {original_path_str}: {fmt_err}\")\r\n- predicted_name_display = \"[Format Error]\" # Show error in name field\r\n- details += f\" (Format Key Error: {fmt_err})\"\r\n- except Exception as pred_err:\r\n- log.warning(f\"Prediction error for {original_path_str}: {pred_err}\")\r\n- predicted_name_display = \"[Prediction Error]\" # Show error in name field\r\n- details += f\" (Error: {pred_err})\"\r\n-\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": predicted_name_display, # Use the base name or error\r\n- \"status\": \"Mapped\",\r\n- \"details\": details # Use concise details\r\n- })\r\n-\r\n- # Models\r\n- for model_info in self.classified_files.get(\"models\", []):\r\n- original_path_str = str(model_info.get(\"source_path\", \"UnknownPath\"))\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": Path(original_path_str).name, # Models usually keep original name\r\n- \"status\": \"Model\",\r\n- \"details\": \"[Model]\" # Concise detail\r\n- })\r\n-\r\n- # Extra\r\n- for extra_info in self.classified_files.get(\"extra\", []):\r\n- original_path_str = str(extra_info.get(\"source_path\", \"UnknownPath\"))\r\n- reason = 
extra_info.get('reason', 'Unknown reason')\r\n- # Determine status and details based on the reason\r\n- if reason == 'Unrecognised': # Corrected string check\r\n- status = \"Unrecognised\"\r\n- details = \"[Unrecognised]\"\r\n- else:\r\n- status = \"Extra\"\r\n- details = f\"Extra ({reason})\" # Show the pattern match reason\r\n-\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": Path(original_path_str).name, # Extra/Unrecognised files keep original name\r\n- \"status\": status,\r\n- \"details\": details\r\n- })\r\n-\r\n- # Ignored\r\n- for ignored_info in self.classified_files.get(\"ignored\", []):\r\n- original_path_str = str(ignored_info.get(\"source_path\", \"UnknownPath\"))\r\n- reason = ignored_info.get('reason', 'Unknown reason')\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": None, # Ignored files have no output name\r\n- \"status\": \"Ignored\",\r\n- \"details\": f\"Ignored ({reason})\" # Keep reason for ignored files\r\n- })\r\n-\r\n- log.info(f\"Detailed prediction complete for {self.input_path.name}. 
Found {len(results)} files.\")\r\n- return results\r\n-\r\n- except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n- # Log critical errors during the prediction process\r\n- log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- # Optionally add a single error entry to results?\r\n- # results.append({\"original_path\": self.input_path.name, \"predicted_name\": None, \"status\": \"Error\", \"details\": str(e)})\r\n- # return results # Or return None to indicate failure\r\n- return None # Indicate critical failure\r\n- finally:\r\n- # Ensure cleanup always happens\r\n- self._cleanup_workspace()\r\n-\r\n-\r\n-# --- End of AssetProcessor Class ---\n-# asset_processor.py\r\n-\r\n-import os\r\n-import shutil\r\n-import tempfile\r\n-import zipfile\r\n-import logging\r\n-import json\r\n-import re\r\n-import time\r\n-from pathlib import Path\r\n-from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n-\r\n-# Attempt to import image processing libraries\r\n-try:\r\n- import cv2\r\n- import numpy as np\r\n-except ImportError:\r\n- print(\"ERROR: Missing required libraries. Please install opencv-python and numpy:\")\r\n- print(\"pip install opencv-python numpy\")\r\n- exit(1) # Exit if essential libraries are missing\r\n-\r\n-# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n-try:\r\n- import OpenEXR\r\n- import Imath\r\n- _HAS_OPENEXR = True\r\n-except ImportError:\r\n- _HAS_OPENEXR = False\r\n- # Log this information - basic EXR might still work via OpenCV\r\n- logging.debug(\"Optional 'OpenEXR' python package not found. 
EXR saving relies on OpenCV's built-in support.\")\r\n-\r\n-\r\n-# Assuming Configuration class is in configuration.py\r\n-try:\r\n- from configuration import Configuration, ConfigurationError\r\n-except ImportError:\r\n- print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n- print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n- exit(1)\r\n-\r\n-# Use logger defined in main.py (or configure one here if run standalone)\r\n-log = logging.getLogger(__name__)\r\n-# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\n-if not log.hasHandlers():\r\n- logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n-\r\n-\r\n-# --- Custom Exception ---\r\n-class AssetProcessingError(Exception):\r\n- \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n- pass\r\n-\r\n-# --- Helper Functions ---\r\n-def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n- \"\"\"Calculates dimensions maintaining aspect ratio, fitting within target_max_dim.\"\"\"\r\n- if orig_w <= 0 or orig_h <= 0: return (target_max_dim, target_max_dim) # Avoid division by zero\r\n-\r\n- ratio = orig_w / orig_h\r\n- if ratio > 1: # Width is dominant\r\n- target_w = target_max_dim\r\n- target_h = max(1, round(target_w / ratio)) # Ensure height is at least 1\r\n- else: # Height is dominant or square\r\n- target_h = target_max_dim\r\n- target_w = max(1, round(target_h * ratio)) # Ensure width is at least 1\r\n- return int(target_w), int(target_h)\r\n-\r\n-def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n- \"\"\"\r\n- Calculates min, max, mean for a given numpy image array.\r\n- Handles grayscale and multi-channel images. 
Converts to float64 for calculation.\r\n- \"\"\"\r\n- if image_data is None:\r\n- log.warning(\"Attempted to calculate stats on None image data.\")\r\n- return None\r\n- try:\r\n- # Use float64 for calculations to avoid potential overflow/precision issues\r\n- data_float = image_data.astype(np.float64)\r\n-\r\n- if len(data_float.shape) == 2: # Grayscale (H, W)\r\n- min_val = float(np.min(data_float))\r\n- max_val = float(np.max(data_float))\r\n- mean_val = float(np.mean(data_float))\r\n- stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n- elif len(data_float.shape) == 3: # Color (H, W, C)\r\n- channels = data_float.shape[2]\r\n- min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n- max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n- mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n- # Assuming BGR(A?) order from OpenCV, report as list [B, G, R, (A)]\r\n- stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated {channels}-Channel Stats: Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n- else:\r\n- log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n- return None\r\n- return stats\r\n- except Exception as e:\r\n- log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n- return {\"error\": str(e)}\r\n-\r\n-\r\n-from collections import defaultdict # Added for grouping\r\n-\r\n-# --- Helper function ---\r\n-def _get_base_map_type(target_map_string: str) -> str:\r\n- \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n- match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n- if match:\r\n- return match.group(1).upper()\r\n- return target_map_string.upper() # Fallback if no number suffix\r\n-\r\n-# --- Asset Processor Class 
---\r\n-class AssetProcessor:\r\n- \"\"\"\r\n- Handles the processing pipeline for a single asset (ZIP or folder).\r\n- \"\"\"\r\n- # Define the list of known grayscale map types (adjust as needed)\r\n- GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n-\r\n- def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n- \"\"\"\r\n- Initializes the processor for a given input asset.\r\n-\r\n- Args:\r\n- input_path: Path to the input ZIP file or folder.\r\n- config: The loaded Configuration object.\r\n- output_base_path: The base directory where processed output will be saved.\r\n- overwrite: If True, forces reprocessing even if output exists.\r\n- \"\"\"\r\n- if not isinstance(input_path, Path): input_path = Path(input_path)\r\n- if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n- if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n-\r\n- if not input_path.exists():\r\n- raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n- if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n- raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n-\r\n- self.input_path: Path = input_path\r\n- self.config: Configuration = config\r\n- self.output_base_path: Path = output_base_path\r\n- self.overwrite: bool = overwrite # Store the overwrite flag\r\n-\r\n- self.temp_dir: Path | None = None # Path to the temporary working directory\r\n- self.classified_files: dict[str, list[dict]] = {\r\n- \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n- }\r\n- self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n- self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n- self.metadata_file_path_temp: Path | None = None\r\n-\r\n- # Initialize metadata collected during processing\r\n- 
self.metadata: dict = {\r\n- \"asset_name\": \"Unknown\",\r\n- \"supplier_name\": self.config.supplier_name,\r\n- \"asset_category\": self.config._core_settings.get('DEFAULT_ASSET_CATEGORY', \"Texture\"),\r\n- \"archetype\": \"Unknown\",\r\n- \"maps_present\": [],\r\n- \"merged_maps\": [],\r\n- \"shader_features\": [],\r\n- \"source_files_in_extra\": [],\r\n- \"image_stats_1k\": {},\r\n- \"map_details\": {},\r\n- \"aspect_ratio_change_string\": \"N/A\" # Replaces output_scaling_factors\r\n- # Processing info added in _generate_metadata_file\r\n- }\r\n-\r\n- log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n-\r\n- def process(self) -> str:\r\n- \"\"\"\r\n- Executes the full processing pipeline for the asset.\r\n- Returns:\r\n- str: Status (\"processed\", \"skipped\").\r\n- \"\"\"\r\n- log.info(f\"Processing asset: {self.input_path.name}\")\r\n- try:\r\n- self._setup_workspace()\r\n- self._extract_input()\r\n- self._inventory_and_classify_files()\r\n- self._determine_base_metadata()\r\n-\r\n- # --- Check if asset should be skipped ---\r\n- # Ensure asset_name and supplier_name were determined before checking\r\n- asset_name = self.metadata.get(\"asset_name\")\r\n- supplier_name = self.metadata.get(\"supplier_name\")\r\n-\r\n- # Only check for skipping if overwrite is False AND we have valid names\r\n- if not self.overwrite and asset_name and asset_name != \"UnknownAssetName\" and supplier_name:\r\n- supplier_sanitized = self._sanitize_filename(supplier_name)\r\n- asset_name_sanitized = self._sanitize_filename(asset_name)\r\n- final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- metadata_file_path = final_dir / self.config.metadata_filename\r\n-\r\n- if final_dir.exists() and metadata_file_path.is_file():\r\n- log.info(f\"Output directory and metadata found for '{asset_name_sanitized}' and overwrite is False. 
Skipping.\")\r\n- # No need to call cleanup here, the finally block will handle it.\r\n- return \"skipped\" # Return status\r\n- elif self.overwrite:\r\n- # Log only if asset name is known, otherwise it's redundant with the initial processing log\r\n- known_asset_name = self.metadata.get('asset_name', self.input_path.name)\r\n- # Avoid logging overwrite message if name is still unknown\r\n- if known_asset_name != \"UnknownAssetName\" and known_asset_name != self.input_path.name:\r\n- log.info(f\"Overwrite flag is set for '{known_asset_name}'. Processing will continue even if output exists.\")\r\n- # --- End Skip Check ---\r\n-\r\n- # Continue with processing if not skipped\r\n- self._process_maps()\r\n- self._merge_maps()\r\n- self._generate_metadata_file()\r\n- self._organize_output_files()\r\n- log.info(f\"Asset processing completed successfully for: {self.metadata.get('asset_name', self.input_path.name)}\")\r\n- return \"processed\" # Return status\r\n- except Exception as e:\r\n- # Log error with traceback if it hasn't been logged already\r\n- if not isinstance(e, (AssetProcessingError, ConfigurationError)): # Avoid double logging known types\r\n- log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name}: {e}\")\r\n- # Ensure error is propagated\r\n- if not isinstance(e, AssetProcessingError):\r\n- raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n- else:\r\n- raise # Re-raise AssetProcessingError or ConfigurationError\r\n- finally:\r\n- # Ensure cleanup always happens\r\n- self._cleanup_workspace()\r\n-\r\n- def _setup_workspace(self):\r\n- \"\"\"Creates a temporary directory for processing.\"\"\"\r\n- try:\r\n- self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n- log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n-\r\n- def 
_extract_input(self):\r\n- \"\"\"Extracts ZIP or copies folder contents to the temporary workspace.\"\"\"\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n-\r\n- log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n- try:\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- zip_ref.extractall(self.temp_dir)\r\n- log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n- elif self.input_path.is_dir():\r\n- log.debug(f\"Copying directory contents: {self.input_path}\")\r\n- for item in self.input_path.iterdir():\r\n- destination = self.temp_dir / item.name\r\n- if item.is_dir():\r\n- # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n- try:\r\n- shutil.copytree(item, destination, dirs_exist_ok=True)\r\n- except TypeError: # Fallback for older Python\r\n- if not destination.exists():\r\n- shutil.copytree(item, destination)\r\n- else:\r\n- log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n-\r\n- else:\r\n- shutil.copy2(item, destination)\r\n- log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n- except zipfile.BadZipFile:\r\n- raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n-\r\n- def _inventory_and_classify_files(self):\r\n- \"\"\"\r\n- Scans workspace, classifies files according to preset rules, handling\r\n- 16-bit prioritization and multiple variants of the same base map type.\r\n- \"\"\"\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n-\r\n- log.info(\"Scanning and classifying 
files...\")\r\n- log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n- all_files_rel = []\r\n- for root, _, files in os.walk(self.temp_dir):\r\n- root_path = Path(root)\r\n- for file in files:\r\n- full_path = root_path / file\r\n- relative_path = full_path.relative_to(self.temp_dir)\r\n- all_files_rel.append(relative_path)\r\n-\r\n- log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n-\r\n- # --- Initialization ---\r\n- processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n- potential_map_candidates = [] # List to store potential map file info\r\n- # Reset classified files (important if this method is ever called multiple times)\r\n- self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n-\r\n-\r\n- # --- Step 1: Identify Explicit 'Extra' Files ---\r\n- log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n- compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n- log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path in processed_files: continue\r\n- for compiled_regex in compiled_extra_regex:\r\n- if compiled_regex.search(file_rel_path.name):\r\n- log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n- self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n- processed_files.add(file_rel_path)\r\n- log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n- break # Stop checking extra patterns for this file\r\n-\r\n- # --- Step 2: Identify Model Files ---\r\n- log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n- compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n- log.debug(f\" 
Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path in processed_files: continue\r\n- for compiled_regex in compiled_model_regex:\r\n- if compiled_regex.search(file_rel_path.name):\r\n- log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n- self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n- processed_files.add(file_rel_path)\r\n- log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n- break # Stop checking model patterns for this file\r\n-\r\n- # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n- log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n- # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n- compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n-\r\n- for file_rel_path in all_files_rel:\r\n- # Skip files already classified as Extra or Model\r\n- if file_rel_path in processed_files:\r\n- continue\r\n-\r\n- file_stem = file_rel_path.stem\r\n- match_found = False\r\n-\r\n- # Iterate through base types and their associated regex tuples\r\n- for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n- if match_found: break # Stop checking types for this file once matched\r\n-\r\n- # Get the original keywords list for the current rule index\r\n- # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n- original_rule = None\r\n- # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n- if regex_tuples:\r\n- current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n- if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n- rule_candidate = 
self.config.map_type_mapping[current_rule_index]\r\n- # Verify it's the correct rule by checking target_type\r\n- if rule_candidate.get(\"target_type\") == base_map_type:\r\n- original_rule = rule_candidate\r\n- else:\r\n- log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. Searching...\")\r\n- # Fallback search if index doesn't match (shouldn't happen ideally)\r\n- for idx, rule in enumerate(self.config.map_type_mapping):\r\n- if rule.get(\"target_type\") == base_map_type:\r\n- original_rule = rule\r\n- log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n- break\r\n-\r\n- original_keywords_list = []\r\n- if original_rule and 'keywords' in original_rule:\r\n- original_keywords_list = original_rule['keywords']\r\n- else:\r\n- log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. Keyword indexing may fail.\")\r\n-\r\n- for kw_regex, original_keyword, rule_index in regex_tuples:\r\n- if kw_regex.search(file_stem):\r\n- log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n-\r\n- # Find the index of the matched keyword within its rule's list\r\n- keyword_index_in_rule = -1 # Default if not found\r\n- if original_keywords_list:\r\n- try:\r\n- # Use the original_keyword string directly\r\n- keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n- except ValueError:\r\n- log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? 
{original_keywords_list}\")\r\n- else:\r\n- log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n-\r\n- # Add candidate only if not already added\r\n- if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n- potential_map_candidates.append({\r\n- 'source_path': file_rel_path,\r\n- 'matched_keyword': original_keyword,\r\n- 'base_map_type': base_map_type,\r\n- 'preset_rule_index': rule_index,\r\n- 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n- 'is_16bit_source': False\r\n- })\r\n- else:\r\n- log.warning(f\" '{file_rel_path}' was already added as a candidate? Skipping duplicate add.\")\r\n-\r\n- match_found = True\r\n- break # Stop checking regex tuples for this base_type once matched\r\n-\r\n- log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n-\r\n- # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n- log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n- compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n- for file_rel_path in all_files_rel:\r\n- # Skip if already processed or already identified as a candidate\r\n- if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n- continue\r\n-\r\n- for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n- log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n- match = compiled_regex.search(file_rel_path.name) # Store result\r\n- if match:\r\n- log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n- potential_map_candidates.append({\r\n- 'source_path': file_rel_path,\r\n- 'matched_keyword': 'N/A (16bit variant)', # 
Placeholder keyword\r\n- 'base_map_type': base_type,\r\n- 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n- 'is_16bit_source': True # Mark as 16-bit immediately\r\n- })\r\n- log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n- # Don't add to processed_files yet, let Step 4 handle filtering\r\n- break # Stop checking bit depth patterns for this file\r\n-\r\n- log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n-\r\n- # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n- log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n- compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n- candidates_to_keep = []\r\n- candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n-\r\n- # Mark 16-bit candidates\r\n- for candidate in potential_map_candidates:\r\n- base_type = candidate['base_map_type']\r\n- # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n- if base_type in compiled_bit_depth_regex:\r\n- if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n- candidate['is_16bit_source'] = True\r\n- log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n-\r\n-\r\n- # Identify base types that have a 16-bit version present\r\n- prioritized_16bit_bases = {\r\n- candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n- }\r\n- log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n-\r\n- # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n- for candidate in potential_map_candidates:\r\n- if candidate['is_16bit_source']:\r\n- candidates_to_keep.append(candidate)\r\n- log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} 
({candidate['base_map_type']})\")\r\n- elif candidate['base_map_type'] not in prioritized_16bit_bases:\r\n- candidates_to_keep.append(candidate)\r\n- log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n- else:\r\n- # This is an 8-bit candidate whose 16-bit counterpart exists\r\n- candidates_to_ignore.append(candidate)\r\n- log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n-\r\n- # Add ignored 8-bit files to the main ignored list\r\n- for ignored_candidate in candidates_to_ignore:\r\n- self.classified_files[\"ignored\"].append({\r\n- 'source_path': ignored_candidate['source_path'],\r\n- 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n- })\r\n- processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n-\r\n- log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n-\r\n- # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n- log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n- # from collections import defaultdict # Moved import to top of file\r\n- grouped_by_base_type = defaultdict(list)\r\n- for candidate in candidates_to_keep:\r\n- grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n-\r\n- final_map_list = []\r\n- for base_map_type, candidates in grouped_by_base_type.items():\r\n- log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n-\r\n- # --- NEW SORTING LOGIC ---\r\n- # Sort candidates based on:\r\n- # 1. The index of the rule object in the preset's map_type_mapping list.\r\n- # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n- # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n- candidates.sort(key=lambda c: (\r\n- c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n- c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n- str(c['source_path'])\r\n- ))\r\n- # --- END NEW SORTING LOGIC ---\r\n-\r\n- # Removed diagnostic log\r\n-\r\n- # Assign suffixes and add to the final map list\r\n- for i, final_candidate in enumerate(candidates): # Use the directly sorted list\r\n- # Determine final map type based on the new rule\r\n- if base_map_type in self.config.respect_variant_map_types: # Check the new config list\r\n- # Always assign suffix for types in the list (if more than one or only one)\r\n- final_map_type = f\"{base_map_type}-{i + 1}\"\r\n- else:\r\n- # Never assign suffix for types NOT in the list\r\n- final_map_type = base_map_type\r\n-\r\n- final_map_list.append({\r\n- \"map_type\": final_map_type,\r\n- \"source_path\": final_candidate[\"source_path\"],\r\n- \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n- \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n- \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n- })\r\n- processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n- log.debug(f\" Final Map: Type='{final_map_type}', Source='{final_candidate['source_path']}', Keyword='{final_candidate['matched_keyword']}', 16bit={final_candidate['is_16bit_source']}\")\r\n-\r\n- self.classified_files[\"maps\"] = final_map_list\r\n-\r\n- # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n- log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n- remaining_count = 0\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path not in processed_files:\r\n- log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n- 
self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})\r\n- remaining_count += 1\r\n- # No need to add to processed_files here, it's the final step\r\n- log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n-\r\n- # --- Final Summary ---\r\n- # Update metadata list of files actually moved to extra (including Unrecognised and Ignored)\r\n- self.metadata[\"source_files_in_extra\"] = sorted([\r\n- str(f['source_path']) for f in self.classified_files['extra'] + self.classified_files['ignored']\r\n- ])\r\n- log.info(f\"File classification complete.\")\r\n- log.debug(\"--- Final Classification Summary (v2) ---\")\r\n- map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n- model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n- extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n- ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n- log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n- log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n- log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n- log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n- log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n-\r\n-\r\n- def _determine_base_metadata(self):\r\n- \"\"\"Determines base_name, asset_category, and archetype.\"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- log.info(\"Determining base metadata...\")\r\n-\r\n- # --- Determine Asset Category ---\r\n- self.metadata[\"asset_category\"] = self.config.default_asset_category # Start with default\r\n- if 
self.classified_files[\"models\"]:\r\n- self.metadata[\"asset_category\"] = \"Asset\"\r\n- log.debug(\"Category set to 'Asset' due to model file presence.\")\r\n- else:\r\n- decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n- found_decal = False\r\n- candidate_files = [f['source_path'] for f in self.classified_files['maps']] # Check map names first\r\n- if not candidate_files: # Fallback to checking all files? Maybe too broad. Check Extra?\r\n- candidate_files = [f['source_path'] for f in self.classified_files['extra']]\r\n-\r\n- if decal_keywords:\r\n- for file_path in candidate_files:\r\n- for keyword in decal_keywords:\r\n- if keyword.lower() in file_path.name.lower():\r\n- self.metadata[\"asset_category\"] = \"Decal\"\r\n- found_decal = True; break\r\n- if found_decal: break\r\n- if found_decal: log.debug(\"Category set to 'Decal' due to keyword match.\")\r\n-\r\n- # --- Determine Base Name ---\r\n- map_stems = [f['source_path'].stem for f in self.classified_files['maps']]\r\n- model_stems = [f['source_path'].stem for f in self.classified_files['models']]\r\n- candidate_stems = map_stems + model_stems\r\n-\r\n- determined_base_name = \"UnknownAssetName\"\r\n- if candidate_stems:\r\n- separator = self.config.source_naming_separator\r\n- base_index = self.config.source_naming_indices.get('base_name')\r\n-\r\n- if isinstance(base_index, int):\r\n- potential_base_names = set()\r\n- for stem in candidate_stems:\r\n- parts = stem.split(separator)\r\n- if len(parts) > base_index:\r\n- potential_base_names.add(parts[base_index])\r\n- if len(potential_base_names) == 1:\r\n- determined_base_name = potential_base_names.pop()\r\n- log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n- elif len(potential_base_names) > 1 :\r\n- log.warning(f\"Multiple potential base names found from parts index {base_index}: {potential_base_names}. 
Falling back to common prefix.\")\r\n- # Fallback logic if structured parts method fails or yields multiple names\r\n- determined_base_name = os.path.commonprefix(candidate_stems)\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Clean trailing separators etc.\r\n- # else: len is 0, means no valid parts found, use common prefix below\r\n-\r\n- # If no index or structured parts failed, use common prefix of all relevant stems\r\n- if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n- determined_base_name = os.path.commonprefix(candidate_stems)\r\n- log.debug(f\"Using common prefix: '{determined_base_name}'\")\r\n- # Clean up common separators/underscores often left by commonprefix\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n-\r\n- # Final cleanup and fallback for base name\r\n- determined_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n- self.metadata[\"asset_name\"] = determined_base_name\r\n- log.debug(f\"Final Determined Base Name: {self.metadata['asset_name']}\")\r\n-\r\n- # --- Determine Archetype (Usage) ---\r\n- archetype_rules = self.config.archetype_rules\r\n- determined_archetype = \"Unknown\"\r\n- check_stems = [f['source_path'].stem.lower() for f in self.classified_files['maps']]\r\n- check_stems.extend([f['source_path'].stem.lower() for f in self.classified_files['models']])\r\n- # Also check the determined base name itself?\r\n- check_stems.append(self.metadata[\"asset_name\"].lower())\r\n-\r\n- if check_stems:\r\n- best_match_archetype = \"Unknown\"\r\n- highest_match_count = 0 # Simple heuristic: prioritize rule with most keyword hits?\r\n-\r\n- for rule in archetype_rules:\r\n- if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n- arch_name, rules_dict = rule\r\n- match_any = rules_dict.get(\"match_any\", [])\r\n- # match_all = rules_dict.get(\"match_all\", []) # Add 
logic if needed\r\n-\r\n- current_match_count = 0\r\n- matched_any_keyword = False\r\n- if match_any:\r\n- for keyword in match_any:\r\n- kw_lower = keyword.lower()\r\n- for stem in check_stems:\r\n- # Using simple substring check again\r\n- if kw_lower in stem:\r\n- current_match_count += 1\r\n- matched_any_keyword = True\r\n- # Optionally break after first hit per keyword? Or count all occurrences? Count all for now.\r\n-\r\n- # Decide if this rule matches. For now: requires at least one 'match_any' hit.\r\n- if matched_any_keyword:\r\n- # Simple approach: first rule that matches wins.\r\n- # Could be enhanced by prioritizing rules or counting hits.\r\n- if best_match_archetype == \"Unknown\": # Take the first match\r\n- best_match_archetype = arch_name\r\n- log.debug(f\"Tentative archetype match '{arch_name}' based on keywords: {match_any}\")\r\n- # Break here for \"first match wins\" logic\r\n- break\r\n-\r\n- # --- Example: Prioritize by match count (more complex) ---\r\n- # if current_match_count > highest_match_count:\r\n- # highest_match_count = current_match_count\r\n- # best_match_archetype = arch_name\r\n- # log.debug(f\"New best archetype match '{arch_name}' with count {current_match_count}\")\r\n- # ----------------------------------------------------------\r\n-\r\n- determined_archetype = best_match_archetype\r\n-\r\n- self.metadata[\"archetype\"] = determined_archetype\r\n- log.debug(f\"Determined Archetype: {self.metadata['archetype']}\")\r\n- log.info(\"Base metadata determination complete.\")\r\n-\r\n-\r\n- def _process_maps(self):\r\n- \"\"\"Loads, processes, resizes, and saves classified map files.\"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- log.info(f\"Processing identified map files for '{self.metadata['asset_name']}'...\")\r\n- processed_map_types = set()\r\n-\r\n- # --- Settings retrieval ---\r\n- resolutions = self.config.image_resolutions\r\n- stats_res_key = 
self.config.calculate_stats_resolution\r\n- stats_target_dim = resolutions.get(stats_res_key)\r\n- if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. Stats skipped.\")\r\n- gloss_keywords = self.config.source_glossiness_keywords\r\n- target_pattern = self.config.target_filename_pattern\r\n- base_name = self.metadata['asset_name']\r\n-\r\n- # --- Pre-process Glossiness -> Roughness ---\r\n- preprocessed_data = {}\r\n- derived_from_gloss_flag = {}\r\n- gloss_map_info_for_rough, native_rough_map_info = None, None\r\n- for map_info in self.classified_files['maps']:\r\n- if map_info['map_type'] == 'ROUGH':\r\n- is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n- if is_gloss: gloss_map_info_for_rough = map_info\r\n- else: native_rough_map_info = map_info\r\n-\r\n- rough_source_to_use = None\r\n- if gloss_map_info_for_rough:\r\n- rough_source_to_use = gloss_map_info_for_rough\r\n- derived_from_gloss_flag['ROUGH'] = True\r\n- if native_rough_map_info:\r\n- log.warning(f\"Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n- if native_rough_map_info in self.classified_files['maps']: self.classified_files['maps'].remove(native_rough_map_info)\r\n- self.classified_files['ignored'].append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n- elif native_rough_map_info:\r\n- rough_source_to_use = native_rough_map_info\r\n- derived_from_gloss_flag['ROUGH'] = False\r\n-\r\n- if derived_from_gloss_flag.get('ROUGH'):\r\n- source_path = self.temp_dir / rough_source_to_use['source_path']\r\n- log.info(f\"Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n- try:\r\n- img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n- if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n- original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n- if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n- if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n- elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n- else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n- # Store tuple: (inverted_float_data, original_dtype)\r\n- preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n- log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n- except Exception as e: log.error(f\"Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n-\r\n- # --- Main Processing Loop ---\r\n- maps_to_process = list(self.classified_files['maps'])\r\n- for map_info in maps_to_process:\r\n- map_type = map_info['map_type']\r\n- source_path_rel = map_info['source_path']\r\n- original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n- log.info(f\"-- 
Processing Map Type: {map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n- img_processed, source_dtype = None, None\r\n- map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n-\r\n- try:\r\n- # --- 1. Get/Load Source Data ---\r\n- if map_type in preprocessed_data:\r\n- log.debug(f\"Using pre-processed data for {map_type}.\")\r\n- # Unpack tuple: (inverted_float_data, original_dtype)\r\n- img_processed, source_dtype = preprocessed_data[map_type]\r\n- # No longer need to read the original file just for dtype\r\n- else:\r\n- full_source_path = self.temp_dir / source_path_rel\r\n- # Determine the read flag based on map type\r\n- read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- log.debug(f\"Loading source {source_path_rel.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n- img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n- if img_loaded is None:\r\n- raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n- img_processed, source_dtype = img_loaded.copy(), img_loaded.dtype\r\n- log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape}\")\r\n- map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n-\r\n- # --- 2. 
Handle Alpha Mask ---\r\n- if map_type == 'MASK' and img_processed is not None:\r\n- log.debug(\"Processing as MASK type.\")\r\n- shape = img_processed.shape\r\n- if len(shape) == 3 and shape[2] == 4: img_processed = img_processed[:, :, 3]\r\n- elif len(shape) == 3 and shape[2] == 3: img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n- if img_processed.dtype != np.uint8:\r\n- log.debug(f\"Converting mask from {img_processed.dtype} to uint8.\")\r\n- if source_dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- else: img_processed = img_processed.astype(np.uint8)\r\n-\r\n- if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n- orig_h, orig_w = img_processed.shape[:2]\r\n- self.processed_maps_details.setdefault(map_type, {})\r\n- max_original_dimension = max(orig_w, orig_h) # Get max original dimension\r\n-\r\n- # --- 3. 
Resize & Save Loop ---\r\n- for res_key, target_dim in resolutions.items():\r\n- # --- Skip Upscaling ---\r\n- if target_dim > max_original_dimension:\r\n- log.debug(f\"Skipping {res_key} ({target_dim}px): Target dimension is larger than original max dimension ({max_original_dimension}px).\")\r\n- continue\r\n- log.debug(f\"Processing {map_type} for resolution: {res_key}...\")\r\n- if orig_w <= 0 or orig_h <= 0: log.warning(f\"Invalid original dims, skipping resize {res_key}.\"); continue\r\n- target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n- interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n- try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n- except Exception as resize_err: log.error(f\"Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n-\r\n- # --- 3a. Calculate Stats ---\r\n- if res_key == stats_res_key and stats_target_dim:\r\n- log.debug(f\"Calculating stats for {map_type} using {res_key} image...\")\r\n- stats = _calculate_image_stats(img_resized) # Use the already resized image\r\n- if stats: self.metadata[\"image_stats_1k\"][map_type] = stats\r\n- else: log.warning(f\"Stats calculation failed for {map_type} at {res_key}.\")\r\n- # Calculate aspect change string on the lowest processed resolution, only once per asset\r\n- lowest_res_key = min(resolutions, key=resolutions.get)\r\n- if self.metadata[\"aspect_ratio_change_string\"] == \"N/A\" and res_key == lowest_res_key:\r\n- try:\r\n- aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n- self.metadata[\"aspect_ratio_change_string\"] = aspect_string\r\n- log.debug(f\"Stored aspect ratio change string using lowest res '{res_key}': '{aspect_string}'\")\r\n- except Exception as aspect_err:\r\n- log.error(f\"Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", 
exc_info=True)\r\n- self.metadata[\"aspect_ratio_change_string\"] = \"Error\" # Indicate calculation failure\r\n-\r\n- # --- 3b. Determine Output Bit Depth & Format ---\r\n- bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n- current_dtype = img_resized.dtype # Dtype after resize\r\n- output_dtype_target, output_bit_depth = None, 8 # Defaults\r\n- if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n- elif bit_depth_rule == 'respect':\r\n- if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16 # Respect float source as 16bit uint for non-EXR\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8 # Default respect to 8bit\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8 # Fallback\r\n-\r\n- # --- 3c. Determine Output Format based on Input, Rules & Threshold ---\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format() # Usually 'png'\r\n- threshold = self.config.resolution_threshold_for_jpg # Get threshold value\r\n- force_lossless = map_type in self.config.force_lossless_map_types\r\n-\r\n- if force_lossless:\r\n- log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- # Add EXR type flags if needed, e.g., HALF for 16-bit float\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- # Add compression later if desired, e.g. 
cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_ZIP\r\n- else: # Assume png or other lossless 16-bit format\r\n- # Use fallback if primary isn't suitable or fails? For now, assume primary is lossless.\r\n- # If primary_fmt_16 is not 'png', maybe default to fallback_fmt_16?\r\n- if output_format != \"png\":\r\n- log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless. Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16 # Ensure it's a known lossless like png/tif\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- # Add params for other lossless like TIF if supported\r\n- else: # 8-bit lossless\r\n- output_format = fmt_8bit_config # Usually 'png'\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for '{map_type}' will use PNG instead.\")\r\n- output_format = \"png\"\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n-\r\n- # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n- elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- # --- Else: Apply Input/Rule-Based Logic ---\r\n- else:\r\n- # Apply force_8bit rule (if not overridden by threshold)\r\n- if bit_depth_rule == 'force_8bit':\r\n- output_format = 'png' # Force to PNG as per clarification\r\n- output_ext = '.png'\r\n- # output_bit_depth is already 8, output_dtype_target is already uint8\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n- # Handle specific input extensions if not forced to 8bit PNG\r\n- elif original_extension == '.jpg' and output_bit_depth == 8:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n- elif original_extension == '.tif':\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) # Add 
compression later\r\n- log.debug(f\"Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n- else: # Fallback for 16-bit from TIF\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- else: # output_bit_depth == 8 from TIF input (and below threshold)\r\n- output_format = 'png'\r\n- output_ext = '.png'\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n- # Handle other inputs (e.g., PNG) or fallbacks\r\n- else:\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # Fallback for 16-bit\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- 
save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n- else: # 8-bit output (and below threshold)\r\n- output_format = fmt_8bit_config # Use configured 8-bit format\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\":\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n-\r\n- img_to_save = img_resized.copy() # Work on copy for dtype conversion\r\n- # --- Apply Dtype Conversion ---\r\n- if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n- if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n- if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n- if needs_float16 and img_to_save.dtype != np.float16:\r\n- if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n-\r\n- # --- 3d. 
Construct Filename & Save ---\r\n- filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n- output_path_temp = self.temp_dir / filename\r\n- log.debug(f\"Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n- saved_successfully, actual_format_saved = False, output_format\r\n- try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n- except Exception as save_err:\r\n- log.error(f\"Save failed ({output_format}): {save_err}\")\r\n- # --- Try Fallback ---\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt != output_format:\r\n- log.warning(f\"Attempting fallback: {fallback_fmt}\")\r\n- actual_format_saved = fallback_fmt; output_ext = f\".{fallback_fmt}\"; # Adjust format/ext\r\n- filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n- output_path_temp = self.temp_dir / filename\r\n- save_params_fallback = [] # Reset params for fallback\r\n- img_fallback = None; target_fallback_dtype = np.uint16\r\n- if fallback_fmt == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, 6])\r\n- elif fallback_fmt == \"tif\": pass # Default TIF params\r\n-\r\n- # Convert EXR source (float16) to uint16 for PNG/TIF fallback\r\n- #if img_to_save.dtype == np.float16: img_fallback = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(target_fallback_dtype)\r\n- if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n- # <<< START MODIFICATION HERE >>>\r\n- # Check for NaN/Inf before conversion\r\n- if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n- log.error(f\"Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. 
Skipping resolution.\")\r\n- continue # Skip fallback if data is bad\r\n-\r\n- # Clip *after* scaling for uint16 conversion robustness\r\n- img_scaled = img_to_save * 65535.0\r\n- img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- # <<< END MODIFICATION HERE >>>\r\n- elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already correct type\r\n- else: log.error(f\"Cannot convert {img_to_save.dtype} for fallback.\"); continue # Skip fallback\r\n-\r\n- try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved {map_type} ({res_key}) using fallback {fallback_fmt}\")\r\n- except Exception as fallback_err: log.error(f\"Fallback save failed: {fallback_err}\", exc_info=True)\r\n-\r\n- # --- 3e. Store Result ---\r\n- if saved_successfully:\r\n- self.processed_maps_details[map_type][res_key] = {\r\n- \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n- \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n- \"format\": actual_format_saved\r\n- }\r\n- map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n-\r\n- except Exception as map_proc_err:\r\n- log.error(f\"Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n- self.processed_maps_details.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n-\r\n- self.metadata[\"map_details\"][map_type] = map_details # Store details for this map type\r\n-\r\n- # --- Final Metadata Updates ---\r\n- processed_map_types = set(k for k, v in self.processed_maps_details.items() if 'error' not in v) # Only count successful\r\n- self.metadata[\"maps_present\"] = sorted(list(processed_map_types))\r\n- features = set()\r\n- for map_type, details in self.metadata[\"map_details\"].items():\r\n- if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n- if 
details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n- res_details = self.processed_maps_details.get(map_type, {})\r\n- if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n- self.metadata[\"shader_features\"] = sorted(list(features))\r\n- log.debug(f\"Determined shader features: {self.metadata['shader_features']}\")\r\n- log.info(\"Finished processing all map files.\")\r\n-\r\n-\r\n- #log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n-\r\n- def _merge_maps(self):\r\n- \"\"\"Merges channels from different maps based on rules in configuration.\"\"\"\r\n- # ... (initial checks and getting merge_rules) ...\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Workspace not setup.\")\r\n-\r\n-\r\n- # <<< FIX: Get merge rules from the configuration object >>>\r\n-\r\n- merge_rules = self.config.map_merge_rules\r\n-\r\n- # <<< END FIX >>>\r\n- log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n-\r\n- for rule_index, rule in enumerate(merge_rules):\r\n- # <<< FIX: Assign variables *before* using them >>>\r\n- output_map_type = rule.get(\"output_map_type\")\r\n- inputs_mapping = rule.get(\"inputs\")\r\n- defaults = rule.get(\"defaults\", {})\r\n- rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n-\r\n- # <<< FIX: Check if essential rule keys exist *after* assignment >>>\r\n- if not output_map_type or not inputs_mapping:\r\n- log.warning(f\"Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs' in rule definition. 
Rule data: {rule}\")\r\n- continue # Skip to the next rule in merge_rules\r\n-\r\n- # Now it's safe to use output_map_type in the log statement\r\n- log.info(f\"-- Applying merge rule for '{output_map_type}' --\")\r\n- # <<< END FIX >>>\r\n-\r\n- self.merged_maps_details.setdefault(output_map_type, {})\r\n-\r\n- # --- Determine required inputs and their common resolutions ---\r\n- required_input_types = set(inputs_mapping.values()) # e.g., {'NRM', 'ROUGH'}\r\n- if not required_input_types:\r\n- log.warning(f\"Skipping merge rule '{output_map_type}': No input map types defined in 'inputs'.\")\r\n- continue\r\n-\r\n- possible_resolutions_per_input = []\r\n- for input_type in required_input_types:\r\n- if input_type in self.processed_maps_details:\r\n- # Get resolution keys where processing didn't error\r\n- res_keys = {res for res, details in self.processed_maps_details[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n- if not res_keys:\r\n- log.warning(f\"Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n- possible_resolutions_per_input = [] # Cannot proceed if any input is missing all resolutions\r\n- break\r\n- possible_resolutions_per_input.append(res_keys)\r\n- else:\r\n- log.warning(f\"Required input map type '{input_type}' for merge rule '{output_map_type}' was not processed.\")\r\n- possible_resolutions_per_input = [] # Cannot proceed if any input type is missing\r\n- break\r\n-\r\n- if not possible_resolutions_per_input:\r\n- log.warning(f\"Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. 
Skipping rule.\")\r\n- continue\r\n-\r\n- # Find the intersection of resolution keys across all required inputs\r\n- common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n-\r\n- if not common_resolutions:\r\n- log.warning(f\"No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n- continue\r\n- log.debug(f\"Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n- # --- End Common Resolution Logic ---\r\n-\r\n-\r\n- # <<< LOOP THROUGH COMMON RESOLUTIONS >>>\r\n- # Use the actual common_resolutions found\r\n- res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n- if not res_order:\r\n- log.warning(f\"Common resolutions {common_resolutions} do not match any target resolutions in config. Skipping merge for '{output_map_type}'.\")\r\n- continue\r\n-\r\n- # Sort resolutions to process (optional, but nice for logs)\r\n- sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n-\r\n- # Get target pattern from config for filename formatting\r\n- target_pattern = self.config.target_filename_pattern\r\n-\r\n- for current_res_key in sorted_res_keys:\r\n- log.debug(f\"Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n- try:\r\n- loaded_inputs = {}\r\n- input_bit_depths = set()\r\n- input_original_extensions = {} # Store original extensions for this resolution's inputs\r\n-\r\n- # --- Load required input maps *at this specific resolution* AND get original extensions ---\r\n- possible_to_load = True\r\n- base_name = self.metadata['asset_name']\r\n- target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B']\r\n-\r\n- for map_type in required_input_types:\r\n- res_details = self.processed_maps_details.get(map_type, {}).get(current_res_key)\r\n- if not res_details or 'path' not in res_details:\r\n- log.warning(f\"Input map '{map_type}' missing 
details or path for resolution '{current_res_key}'. Cannot merge for this resolution.\")\r\n- possible_to_load = False; break\r\n-\r\n- # Find original extension from classified data\r\n- original_ext = '.png' # Default\r\n- found_original = False\r\n- for classified_map in self.classified_files[\"maps\"]:\r\n- # Match based on the base map type (e.g., NRM matches NRM-1)\r\n- if classified_map['map_type'].startswith(map_type):\r\n- # Check if the processed path matches (to handle multiple variants if needed, though less likely here)\r\n- # This assumes processed_maps_details path is relative to temp_dir\r\n- processed_path_str = str(res_details['path'])\r\n- classified_path_str = str(classified_map['source_path']) # This is the original source path relative to temp_dir root\r\n- # A more robust check might involve comparing filenames if paths differ due to processing steps\r\n- # For now, rely on the base map type match and grab the first extension found\r\n- original_ext = classified_map.get('original_extension', '.png')\r\n- found_original = True\r\n- break # Found the first match for this map_type\r\n- if not found_original:\r\n- log.warning(f\"Could not reliably find original extension for merge input '{map_type}'. 
Defaulting to '.png'.\")\r\n-\r\n- input_original_extensions[map_type] = original_ext\r\n-\r\n- # Load the image\r\n- input_file_path = self.temp_dir / res_details['path']\r\n- read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- log.debug(f\"Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n- img = cv2.imread(str(input_file_path), read_flag)\r\n- if img is None:\r\n- raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n- loaded_inputs[map_type] = img\r\n- input_bit_depths.add(res_details.get('bit_depth', 8))\r\n-\r\n- if not possible_to_load: continue # Skip this resolution if inputs missing\r\n-\r\n- # --- Determine dimensions and target_dim for threshold check ---\r\n- first_map_type = next(iter(required_input_types)) # Get one map type to read dimensions\r\n- h, w = loaded_inputs[first_map_type].shape[:2]\r\n- # Get target_dim from the details of the first loaded input for this resolution\r\n- first_res_details = self.processed_maps_details.get(first_map_type, {}).get(current_res_key)\r\n- target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n- num_target_channels = len(target_channels)\r\n-\r\n- # <<< Re-calculate output_bit_depth based on THIS resolution's inputs >>>\r\n- max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n- output_bit_depth = 8\r\n- if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n- output_bit_depth = 16\r\n- log.debug(f\"Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n-\r\n- # Prepare channels (float32) (same logic as before)\r\n- merged_channels_float32 = []\r\n- # Use the defined target_channels list\r\n- for target_channel in 
target_channels: # Iterate R, G, B (or specified) order\r\n- source_map_type = inputs_mapping.get(target_channel)\r\n- channel_data_float32 = None\r\n- if source_map_type and source_map_type in loaded_inputs:\r\n- # ... [Extract channel data as float32 as before] ...\r\n- img_input = loaded_inputs[source_map_type]\r\n- if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n- elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n- else: img_float = img_input.astype(np.float32)\r\n- num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n- if num_source_channels >= 3: # BGR Source\r\n- if target_channel == 'R': channel_data_float32 = img_float[:, :, 2]\r\n- elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n- elif target_channel == 'B': channel_data_float32 = img_float[:, :, 0]\r\n- elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n- elif num_source_channels == 1 or len(img_float.shape) == 2: # Grayscale source\r\n- channel_data_float32 = img_float.reshape(h, w)\r\n- if channel_data_float32 is None: # Use default if needed\r\n- default_val = defaults.get(target_channel)\r\n- if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n- channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n- merged_channels_float32.append(channel_data_float32)\r\n-\r\n-\r\n- # Merge channels (same as before)\r\n- if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n- merged_image_float32 = cv2.merge(merged_channels_float32)\r\n-\r\n- # Final Data Type Conversion (based on recalculated output_bit_depth)\r\n- img_final_merged = None\r\n- if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 
65535.0).astype(np.uint16)\r\n- else: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n-\r\n- # --- Determine Output Format based on Threshold, Input Hierarchy & Rules ---\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format()\r\n- threshold = self.config.resolution_threshold_for_jpg\r\n- force_lossless = output_map_type in self.config.force_lossless_map_types\r\n-\r\n- if force_lossless:\r\n- log.debug(f\"Format forced to lossless for merged map type '{output_map_type}'.\")\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else: # Assume png or other lossless 16-bit format\r\n- if output_format != \"png\":\r\n- log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # 8-bit lossless\r\n- output_format = fmt_8bit_config # Usually 'png'\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n- output_format = \"png\"\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n-\r\n- # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n- elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- # --- Else: Apply Hierarchy/Rule-Based Logic ---\r\n- else:\r\n- involved_extensions = set(input_original_extensions.values())\r\n- log.debug(f\"Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n- # Hierarchy: EXR > TIF > PNG > JPG\r\n- highest_format_str = 'jpg' # Start lowest\r\n- if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n- elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n- elif '.png' in involved_extensions: highest_format_str = 'png'\r\n-\r\n- # Determine final output format based on hierarchy and target bit depth\r\n- final_output_format = highest_format_str\r\n-\r\n- if highest_format_str == 'tif':\r\n- if output_bit_depth == 16:\r\n- final_output_format = primary_fmt_16 # Use configured 16-bit pref (EXR/PNG)\r\n- log.debug(f\"Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n- else: # 8-bit target\r\n- final_output_format = 'png' # Force TIF input to PNG for 8-bit merge output\r\n- log.debug(\"Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n- else:\r\n- log.debug(f\"Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold 
not met)\")\r\n-\r\n- # Set format/params based on the determined final_output_format\r\n- output_format = final_output_format\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\": # Should only happen if highest input was JPG and below threshold\r\n- output_ext = \".jpg\"\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- else:\r\n- log.error(f\"Unsupported final output format '{output_format}' determined for merged map '{output_map_type}'. Skipping save.\")\r\n- continue\r\n-\r\n- # --- JPG 8-bit Check (applies regardless of threshold logic) ---\r\n- if output_format == \"jpg\" and output_bit_depth == 16:\r\n- log.warning(f\"Output format is JPG, but merge rule resulted in 16-bit target. 
Forcing merged output to 8-bit for JPG save.\")\r\n- img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- output_bit_depth = 8 # Correct the recorded bit depth\r\n-\r\n- # --- Save Merged Map ---\r\n- image_to_save = img_final_merged # Already converted to target bit depth (or forced to 8bit for JPG)\r\n-\r\n- # Apply float16 conversion if needed for EXR\r\n- if needs_float16 and image_to_save.dtype != np.float16:\r\n- if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- else: log.warning(f\"Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n-\r\n- merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n- merged_output_path_temp = self.temp_dir / merged_filename\r\n- log.debug(f\"Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n-\r\n- # --- Add save logic with fallback here ---\r\n- saved_successfully = False\r\n- actual_format_saved = output_format\r\n- try:\r\n- cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n- log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n- saved_successfully = True\r\n- except Exception as save_err:\r\n- log.error(f\"Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n- # Try Fallback for merged map (similar to _process_maps fallback)\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n- # ... 
[ Implement fallback save logic here, converting image_to_save if needed ] ...\r\n- actual_format_saved = fallback_fmt_16\r\n- output_ext = f\".{fallback_fmt_16}\"\r\n- merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n- merged_output_path_temp = self.temp_dir / merged_filename\r\n- save_params_fallback = []\r\n- img_fallback = None\r\n- target_fallback_dtype = np.uint16\r\n-\r\n- if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n-\r\n- if image_to_save.dtype == np.float16:\r\n- if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(\"NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n- img_scaled = image_to_save * 65535.0\r\n- img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n- else: log.error(f\"Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n-\r\n- try: cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n- # --- End Fallback Logic ---\r\n-\r\n- # Record details if save successful\r\n- if saved_successfully:\r\n- self.merged_maps_details[output_map_type][current_res_key] = {\r\n- \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n- \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n- }\r\n- if output_map_type not in self.metadata[\"merged_maps\"]: 
self.metadata[\"merged_maps\"].append(output_map_type)\r\n-\r\n- except Exception as merge_res_err:\r\n- log.error(f\"Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n- self.merged_maps_details.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n-\r\n- log.info(\"Finished applying map merging rules.\")\r\n-\r\n-\r\n- def _generate_metadata_file(self):\r\n- \"\"\"Gathers all collected metadata and writes it to a JSON file.\"\"\"\r\n- # ... (Implementation from Response #49) ...\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\":\r\n- log.warning(\"Asset name unknown, metadata may be incomplete.\")\r\n-\r\n- log.info(f\"Generating metadata file for '{self.metadata['asset_name']}'...\")\r\n- final_metadata = self.metadata.copy()\r\n-\r\n- final_metadata[\"processed_map_resolutions\"] = {}\r\n- for map_type, res_dict in self.processed_maps_details.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n- if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys) # Basic sort\r\n-\r\n- final_metadata[\"merged_map_resolutions\"] = {}\r\n- for map_type, res_dict in self.merged_maps_details.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n- if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n-\r\n- # Add processing info\r\n- final_metadata[\"_processing_info\"] = {\r\n- \"preset_used\": self.config.preset_name,\r\n- \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n- # Optionally add core config details used, carefully\r\n- }\r\n-\r\n- # Sort lists\r\n- for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n- if key in final_metadata and 
isinstance(final_metadata[key], list): final_metadata[key].sort()\r\n-\r\n- metadata_filename = self.config.metadata_filename\r\n- output_path = self.temp_dir / metadata_filename\r\n- log.debug(f\"Writing metadata to: {output_path}\")\r\n- try:\r\n- with open(output_path, 'w', encoding='utf-8') as f:\r\n- json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n- log.info(f\"Metadata file '{metadata_filename}' generated successfully.\")\r\n- self.metadata_file_path_temp = output_path # Store path for moving\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to write metadata file {output_path}: {e}\") from e\r\n-\r\n-\r\n- def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n- \"\"\"\r\n- Calculates the aspect ratio change string (e.g., \"EVEN\", \"X133\") based on original prototype logic.\r\n- Returns the string representation.\r\n- \"\"\"\r\n- if original_width <= 0 or original_height <= 0:\r\n- log.warning(\"Cannot calculate aspect ratio change with zero original dimensions.\")\r\n- return \"InvalidInput\"\r\n-\r\n- # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)\r\n- if resized_width <= 0 or resized_height <= 0:\r\n- log.warning(\"Cannot calculate aspect ratio change with zero resized dimensions.\")\r\n- return \"InvalidResize\"\r\n-\r\n- # Original logic from user feedback\r\n- width_change_percentage = ((resized_width - original_width) / original_width) * 100\r\n- height_change_percentage = ((resized_height - original_height) / original_height) * 100\r\n-\r\n- normalized_width_change = width_change_percentage / 100\r\n- normalized_height_change = height_change_percentage / 100\r\n-\r\n- normalized_width_change = min(max(normalized_width_change + 1, 0), 2)\r\n- normalized_height_change = min(max(normalized_height_change + 1, 0), 2)\r\n-\r\n- # Handle potential zero division if one dimension change is exactly 
-100% (normalized to 0)\r\n- # If both are 0, aspect ratio is maintained. If one is 0, the other dominates.\r\n- if normalized_width_change == 0 and normalized_height_change == 0:\r\n- closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1\r\n- elif normalized_width_change == 0:\r\n- closest_value_to_one = abs(normalized_height_change)\r\n- elif normalized_height_change == 0:\r\n- closest_value_to_one = abs(normalized_width_change)\r\n- else:\r\n- closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))\r\n-\r\n- # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0\r\n- epsilon = 1e-9\r\n- scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one\r\n-\r\n- scaled_normalized_width_change = scale_factor * normalized_width_change\r\n- scaled_normalized_height_change = scale_factor * normalized_height_change\r\n-\r\n- output_width = round(scaled_normalized_width_change, decimals)\r\n- output_height = round(scaled_normalized_height_change, decimals)\r\n-\r\n- # Convert to int if exactly 1.0 after rounding\r\n- if abs(output_width - 1.0) < epsilon: output_width = 1\r\n- if abs(output_height - 1.0) < epsilon: output_height = 1\r\n-\r\n- # Determine output string\r\n- if original_width == original_height or abs(output_width - output_height) < epsilon:\r\n- output = \"EVEN\"\r\n- elif output_width != 1 and output_height == 1:\r\n- output = f\"X{str(output_width).replace('.', '')}\"\r\n- elif output_height != 1 and output_width == 1:\r\n- output = f\"Y{str(output_height).replace('.', '')}\"\r\n- else:\r\n- # Both changed relative to each other\r\n- output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n-\r\n- log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n- return 
output\r\n-\r\n- def _sanitize_filename(self, name: str) -> str:\r\n- \"\"\"Removes or replaces characters invalid for filenames/directory names.\"\"\"\r\n- # ... (Implementation from Response #51) ...\r\n- if not isinstance(name, str): name = str(name)\r\n- name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n- name = re.sub(r'_+', '_', name)\r\n- name = name.strip('_')\r\n- if not name: name = \"invalid_name\"\r\n- return name\r\n-\r\n- def _organize_output_files(self):\r\n- \"\"\"Moves processed files from temp dir to the final output structure.\"\"\"\r\n- # ... (Implementation from Response #51) ...\r\n- if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n- if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing.\")\r\n- if not self.metadata.get(\"supplier_name\"): raise AssetProcessingError(\"Supplier name missing.\")\r\n-\r\n- supplier_sanitized = self._sanitize_filename(self.metadata[\"supplier_name\"])\r\n- asset_name_sanitized = self._sanitize_filename(self.metadata[\"asset_name\"])\r\n- final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- log.info(f\"Organizing output files into: {final_dir}\")\r\n- try:\r\n- # Check if overwriting is allowed before potentially deleting existing dir\r\n- if final_dir.exists() and self.overwrite:\r\n- log.warning(f\"Output directory exists and overwrite is True: {final_dir}. 
Removing existing directory.\")\r\n- try:\r\n- shutil.rmtree(final_dir)\r\n- except Exception as rm_err:\r\n- raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} during overwrite: {rm_err}\") from rm_err\r\n- elif final_dir.exists() and not self.overwrite:\r\n- # This case should ideally be caught by the skip logic earlier,\r\n- # but adding a warning here as a safeguard.\r\n- log.warning(f\"Output directory exists: {final_dir}. Overwriting (unexpected - should have been skipped).\")\r\n-\r\n- final_dir.mkdir(parents=True, exist_ok=True) # Create after potential removal\r\n- except Exception as e:\r\n- # Catch potential errors during mkdir if rmtree failed partially?\r\n- if not isinstance(e, AssetProcessingError): # Avoid wrapping already specific error\r\n- raise AssetProcessingError(f\"Failed to create final dir {final_dir}: {e}\") from e\r\n- else:\r\n- raise # Re-raise the AssetProcessingError from rmtree\r\n-\r\n- def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n- if not src_rel_path: log.warning(f\"Missing src path for {file_desc}.\"); return\r\n- source_abs = self.temp_dir / src_rel_path\r\n- dest_abs = dest_dir / src_rel_path.name\r\n- try:\r\n- if source_abs.exists():\r\n- log.debug(f\"Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n- dest_dir.mkdir(parents=True, exist_ok=True) # Ensure sub-dir exists (for Extra/)\r\n- shutil.move(str(source_abs), str(dest_abs))\r\n- else: log.warning(f\"Source file missing for {file_desc}: {source_abs}\")\r\n- except Exception as e: log.error(f\"Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n-\r\n- # Move maps, merged maps, models, metadata\r\n- for details_dict in [self.processed_maps_details, self.merged_maps_details]:\r\n- for map_type, res_dict in details_dict.items():\r\n- if 'error' in res_dict: continue\r\n- for res_key, details in res_dict.items():\r\n- if isinstance(details, dict) 
and 'path' in details: _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n- for model_info in self.classified_files.get('models', []): _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n- if self.metadata_file_path_temp: _safe_move(self.metadata_file_path_temp.relative_to(self.temp_dir), final_dir, \"metadata file\")\r\n-\r\n- # Move extra/ignored files\r\n- extra_subdir_name = self.config.extra_files_subdir\r\n- extra_dir = final_dir / extra_subdir_name\r\n- files_to_move_extra = self.classified_files.get('extra', []) + self.classified_files.get('ignored', [])\r\n- if files_to_move_extra:\r\n- log.debug(f\"Moving {len(files_to_move_extra)} files to '{extra_subdir_name}/'...\")\r\n- try:\r\n- extra_dir.mkdir(exist_ok=True)\r\n- for file_info in files_to_move_extra: _safe_move(file_info.get('source_path'), extra_dir, f\"extra file ({file_info.get('reason', 'Unknown')})\")\r\n- except Exception as e: log.error(f\"Failed creating/moving to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n-\r\n- log.info(f\"Finished organizing output for '{asset_name_sanitized}'.\")\r\n-\r\n-\r\n- def _cleanup_workspace(self):\r\n- \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n- # ... 
(Implementation from Response #45) ...\r\n- if self.temp_dir and self.temp_dir.exists():\r\n- try:\r\n- log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n- shutil.rmtree(self.temp_dir)\r\n- self.temp_dir = None\r\n- log.debug(\"Temporary workspace cleaned up successfully.\")\r\n- except Exception as e:\r\n- log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n-\r\n- # --- Prediction Method ---\r\n- def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n- \"\"\"\r\n- Predicts the final output structure (supplier, asset name) and attempts\r\n- to predict output filenames for potential map files based on naming conventions.\r\n- Does not perform full processing or image loading.\r\n-\r\n- Returns:\r\n- tuple[str | None, str | None, dict[str, str] | None]:\r\n- (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n- where file_predictions_dict maps input filename -> predicted output filename.\r\n- Returns None if prediction fails critically.\r\n- \"\"\"\r\n- log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n- try:\r\n- # 1. Get Supplier Name\r\n- supplier_name = self.config.supplier_name\r\n- if not supplier_name:\r\n- log.warning(\"Supplier name not found in configuration during prediction.\")\r\n- return None\r\n-\r\n- # 2. 
List Input Filenames/Stems\r\n- candidate_stems = set() # Use set for unique stems\r\n- filenames = []\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- try:\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- # Get only filenames, ignore directories\r\n- filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n- except zipfile.BadZipFile:\r\n- log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n- return None\r\n- except Exception as zip_err:\r\n- log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n- return None # Cannot proceed if we can't list files\r\n- elif self.input_path.is_dir():\r\n- try:\r\n- for item in self.input_path.iterdir():\r\n- if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n- filenames.append(item.name)\r\n- # Note: Not walking subdirs for prediction to keep it fast\r\n- except Exception as dir_err:\r\n- log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n- return None\r\n-\r\n- if not filenames:\r\n- log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n- return None # Return None if no files found\r\n-\r\n- # 3. 
Lightweight Classification for Stems and Potential Maps\r\n- map_type_mapping = self.config.map_type_mapping\r\n- model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n- separator = self.config.source_naming_separator\r\n- processed_filenames = set() # Track full filenames processed\r\n- potential_map_files = {} # Store fname -> potential map_type\r\n-\r\n- for fname in filenames:\r\n- if fname in processed_filenames: continue\r\n-\r\n- fstem = Path(fname).stem\r\n- fstem_lower = fstem.lower()\r\n- name_parts = fstem_lower.split(separator)\r\n-\r\n- # Check map rules first\r\n- map_matched = False\r\n- for mapping_rule in map_type_mapping:\r\n- source_keywords, standard_map_type = mapping_rule\r\n- if standard_map_type not in self.config.standard_map_types: continue\r\n- for keyword in source_keywords:\r\n- kw_lower = keyword.lower().strip('*')\r\n- if kw_lower in name_parts:\r\n- is_exact_match = any(part == kw_lower for part in name_parts)\r\n- if is_exact_match:\r\n- candidate_stems.add(fstem) # Add unique stem\r\n- potential_map_files[fname] = standard_map_type # Store potential type\r\n- processed_filenames.add(fname)\r\n- map_matched = True\r\n- break # Found keyword match for this rule\r\n- if map_matched: break # Found a rule match for this file\r\n- if map_matched: continue # Move to next filename if identified as map\r\n-\r\n- # Check model patterns if not a map\r\n- for pattern in model_patterns:\r\n- if fnmatch(fname.lower(), pattern.lower()):\r\n- candidate_stems.add(fstem) # Still add stem for base name determination\r\n- processed_filenames.add(fname)\r\n- # Don't add models to potential_map_files\r\n- break # Found model match\r\n-\r\n- # Note: Files matching neither maps nor models are ignored for prediction details\r\n-\r\n- candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n- if not candidate_stems_list:\r\n- log.warning(f\"Prediction: No relevant map/model stems found in 
{self.input_path.name}. Using input name as fallback.\")\r\n- # Fallback: Use the input path's name itself if no stems found\r\n- base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n- determined_base_name = base_name_fallback\r\n- else:\r\n- # 4. Replicate _determine_base_metadata logic for base name\r\n- determined_base_name = \"UnknownAssetName\"\r\n- base_index = self.config.source_naming_indices.get('base_name')\r\n-\r\n- if isinstance(base_index, int):\r\n- potential_base_names = set()\r\n- for stem in candidate_stems_list: # Iterate over the list\r\n- parts = stem.split(separator)\r\n- if len(parts) > base_index:\r\n- potential_base_names.add(parts[base_index])\r\n- if len(potential_base_names) == 1:\r\n- determined_base_name = potential_base_names.pop()\r\n- elif len(potential_base_names) > 1:\r\n- log.debug(f\"Prediction: Multiple potential base names from index {base_index}, using common prefix.\")\r\n- determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n- # else: Use common prefix below\r\n-\r\n- if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n- determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n-\r\n- # 5. Sanitize Names\r\n- final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n- final_supplier_name = self._sanitize_filename(supplier_name)\r\n-\r\n- # 6. 
Predict Output Filenames\r\n- file_predictions = {}\r\n- target_pattern = self.config.target_filename_pattern\r\n- # Use highest resolution key as a placeholder for prediction\r\n- highest_res_key = \"Res?\" # Fallback\r\n- if self.config.image_resolutions:\r\n- highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n-\r\n- for input_fname, map_type in potential_map_files.items():\r\n- # Assume PNG for prediction, extension might change based on bit depth rules later\r\n- # but this gives a good idea of the renaming.\r\n- # A more complex prediction could check bit depth rules.\r\n- predicted_ext = \"png\" # Simple assumption for preview\r\n- try:\r\n- predicted_fname = target_pattern.format(\r\n- base_name=final_base_name,\r\n- map_type=map_type,\r\n- resolution=highest_res_key, # Use placeholder resolution\r\n- ext=predicted_ext\r\n- )\r\n- file_predictions[input_fname] = predicted_fname\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n- file_predictions[input_fname] = \"[Filename Format Error]\"\r\n-\r\n-\r\n- log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n- return final_supplier_name, final_base_name, file_predictions\r\n-\r\n- except Exception as e:\r\n- log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- return None\r\n-\r\n-\r\n- # --- New Detailed Prediction Method ---\r\n- def get_detailed_file_predictions(self) -> list[dict] | None:\r\n- \"\"\"\r\n- Performs extraction and classification to provide a detailed list of all\r\n- files found within the asset and their predicted status/output name.\r\n- Does not perform image processing or file moving.\r\n-\r\n- Returns:\r\n- list[dict] | None: A list of dictionaries, each representing a file:\r\n- {'original_path': str, 'predicted_name': str | None, 'status': str, 'details': str | None}\r\n- Returns None if a critical error occurs during setup/classification.\r\n- \"\"\"\r\n- log.info(f\"Getting detailed file predictions for: {self.input_path.name}\")\r\n- results = []\r\n- asset_base_name = \"UnknownAssetName\" # Fallback\r\n-\r\n- try:\r\n- # --- Perform necessary setup and classification ---\r\n- self._setup_workspace()\r\n- self._extract_input()\r\n- self._inventory_and_classify_files()\r\n- self._determine_base_metadata() # Needed for base name prediction\r\n- asset_base_name = self.metadata.get(\"asset_name\", asset_base_name)\r\n-\r\n- # --- Prepare for filename prediction ---\r\n- target_pattern = self.config.target_filename_pattern\r\n- highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n- if self.config.image_resolutions:\r\n- highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n-\r\n- # --- Process classified files ---\r\n- # Maps\r\n- for map_info in self.classified_files.get(\"maps\", []):\r\n- 
original_path_str = str(map_info.get(\"source_path\", \"UnknownPath\"))\r\n- map_type = map_info.get(\"map_type\", \"UnknownType\")\r\n- # Predicted name for maps should just be the base asset name\r\n- predicted_name_display = asset_base_name\r\n- # Concise details\r\n- details = f\"[{map_type}]\"\r\n- if map_info.get(\"is_16bit_source\"):\r\n- details += \" (16-bit)\"\r\n-\r\n- # Still try to format the full name internally for error checking, but don't display it\r\n- try:\r\n- predicted_ext = \"png\" # Assumption for format check\r\n- _ = target_pattern.format(\r\n- base_name=asset_base_name,\r\n- map_type=map_type,\r\n- resolution=highest_res_key,\r\n- ext=predicted_ext\r\n- )\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction format error for {original_path_str}: {fmt_err}\")\r\n- predicted_name_display = \"[Format Error]\" # Show error in name field\r\n- details += f\" (Format Key Error: {fmt_err})\"\r\n- except Exception as pred_err:\r\n- log.warning(f\"Prediction error for {original_path_str}: {pred_err}\")\r\n- predicted_name_display = \"[Prediction Error]\" # Show error in name field\r\n- details += f\" (Error: {pred_err})\"\r\n-\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": predicted_name_display, # Use the base name or error\r\n- \"status\": \"Mapped\",\r\n- \"details\": details # Use concise details\r\n- })\r\n-\r\n- # Models\r\n- for model_info in self.classified_files.get(\"models\", []):\r\n- original_path_str = str(model_info.get(\"source_path\", \"UnknownPath\"))\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": Path(original_path_str).name, # Models usually keep original name\r\n- \"status\": \"Model\",\r\n- \"details\": \"[Model]\" # Concise detail\r\n- })\r\n-\r\n- # Extra\r\n- for extra_info in self.classified_files.get(\"extra\", []):\r\n- original_path_str = str(extra_info.get(\"source_path\", \"UnknownPath\"))\r\n- reason = 
extra_info.get('reason', 'Unknown reason')\r\n- # Determine status and details based on the reason\r\n- if reason == 'Unrecognised': # Corrected string check\r\n- status = \"Unrecognised\"\r\n- details = \"[Unrecognised]\"\r\n- else:\r\n- status = \"Extra\"\r\n- details = f\"Extra ({reason})\" # Show the pattern match reason\r\n-\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": Path(original_path_str).name, # Extra/Unrecognised files keep original name\r\n- \"status\": status,\r\n- \"details\": details\r\n- })\r\n-\r\n- # Ignored\r\n- for ignored_info in self.classified_files.get(\"ignored\", []):\r\n- original_path_str = str(ignored_info.get(\"source_path\", \"UnknownPath\"))\r\n- reason = ignored_info.get('reason', 'Unknown reason')\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": None, # Ignored files have no output name\r\n- \"status\": \"Ignored\",\r\n- \"details\": f\"Ignored ({reason})\" # Keep reason for ignored files\r\n- })\r\n-\r\n- log.info(f\"Detailed prediction complete for {self.input_path.name}. 
Found {len(results)} files.\")\r\n- return results\r\n-\r\n- except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n- # Log critical errors during the prediction process\r\n- log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- # Optionally add a single error entry to results?\r\n- # results.append({\"original_path\": self.input_path.name, \"predicted_name\": None, \"status\": \"Error\", \"details\": str(e)})\r\n- # return results # Or return None to indicate failure\r\n- return None # Indicate critical failure\r\n- finally:\r\n- # Ensure cleanup always happens\r\n- self._cleanup_workspace()\r\n-\r\n-\r\n # --- End of AssetProcessor Class ---\n\\ No newline at end of file\n" }, { "date": 1745261228393, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -75,13 +75,23 @@\n return None\r\n try:\r\n # Use float64 for calculations to avoid potential overflow/precision issues\r\n data_float = image_data.astype(np.float64)\r\n+\r\n+ # Normalize data_float based on original dtype before calculating stats\r\n+ if image_data.dtype == np.uint16:\r\n+ log.debug(\"Stats calculation: Normalizing uint16 data to 0-1 range.\")\r\n+ data_float /= 65535.0\r\n+ elif image_data.dtype == np.uint8:\r\n+ log.debug(\"Stats calculation: Normalizing uint8 data to 0-1 range.\")\r\n+ data_float /= 255.0\r\n+ # Assuming float inputs are already in 0-1 range or similar\r\n+\r\n log.debug(f\"Stats calculation: data_float dtype: {data_float.dtype}, shape: {data_float.shape}\")\r\n- # Log a few sample values to check range, especially for 16-bit\r\n+ # Log a few sample values to check range after normalization\r\n if data_float.size > 0:\r\n sample_values = data_float.flatten()[:10] # Get first 10 values\r\n- log.debug(f\"Stats calculation: Sample values (first 10): {sample_values.tolist()}\")\r\n+ log.debug(f\"Stats calculation: Sample values (first 10) after normalization: 
{sample_values.tolist()}\")\r\n \r\n \r\n if len(data_float.shape) == 2: # Grayscale (H, W)\r\n min_val = float(np.min(data_float))\r\n" }, { "date": 1745307818892, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -749,8 +749,10 @@\n # Unpack tuple: (inverted_float_data, original_dtype)\r\n img_processed, source_dtype = preprocessed_data[map_type]\r\n # No longer need to read the original file just for dtype\r\n else:\r\n+:start_line:754\r\n+-------\r\n full_source_path = self.temp_dir / source_path_rel\r\n # Determine the read flag based on map type\r\n read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n log.debug(f\"Loading source {source_path_rel.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n@@ -763,17 +765,31 @@\n \r\n # --- 2. Handle Alpha Mask ---\r\n if map_type == 'MASK' and img_processed is not None:\r\n log.debug(\"Processing as MASK type.\")\r\n+ log.debug(f\"MASK processing: Initial shape: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n shape = img_processed.shape\r\n- if len(shape) == 3 and shape[2] == 4: img_processed = img_processed[:, :, 3]\r\n- elif len(shape) == 3 and shape[2] == 3: img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n+ if len(shape) == 3 and shape[2] == 4:\r\n+ log.debug(\"MASK processing: Extracting alpha channel (4-channel source).\") # ADDED LOG\r\n+ img_processed = img_processed[:, :, 3]\r\n+ elif len(shape) == 3 and shape[2] == 3:\r\n+ log.debug(\"MASK processing: Converting BGR to Grayscale (3-channel source).\") # ADDED LOG\r\n+ img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n+ elif len(shape) == 2:\r\n+ log.debug(\"MASK processing: Source is already grayscale (2-channel shape).\") # ADDED LOG\r\n+ else:\r\n+ log.warning(f\"MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\") # ADDED LOG\r\n+\r\n+ log.debug(f\"MASK processing: Shape after channel extraction/conversion: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n+\r\n if img_processed.dtype != np.uint8:\r\n log.debug(f\"Converting mask from {img_processed.dtype} to uint8.\")\r\n if source_dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n else: img_processed = img_processed.astype(np.uint8)\r\n+ log.debug(f\"MASK processing: Shape after dtype conversion: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n \r\n+\r\n if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n orig_h, orig_w = img_processed.shape[:2]\r\n self.processed_maps_details.setdefault(map_type, {})\r\n max_original_dimension = max(orig_w, orig_h) # Get max original dimension\r\n" }, { "date": 1745307830419, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -967,12 +967,15 @@\n if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n \r\n+:start_line:955\r\n+-------\r\n # --- 3d. 
Construct Filename & Save ---\r\n filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n output_path_temp = self.temp_dir / filename\r\n log.debug(f\"Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n+ log.debug(f\"Saving {map_type} ({res_key}): Final image shape: {img_to_save.shape}, dtype: {img_to_save.dtype}\") # ADDED LOG\r\n saved_successfully, actual_format_saved = False, output_format\r\n try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n except Exception as save_err:\r\n log.error(f\"Save failed ({output_format}): {save_err}\")\r\n" }, { "date": 1745307955260, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -749,10 +749,8 @@\n # Unpack tuple: (inverted_float_data, original_dtype)\r\n img_processed, source_dtype = preprocessed_data[map_type]\r\n # No longer need to read the original file just for dtype\r\n else:\r\n-:start_line:754\r\n--------\r\n full_source_path = self.temp_dir / source_path_rel\r\n # Determine the read flag based on map type\r\n read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n log.debug(f\"Loading source {source_path_rel.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n@@ -967,10 +965,8 @@\n if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n \r\n-:start_line:955\r\n--------\r\n # --- 3d. 
Construct Filename & Save ---\r\n filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n output_path_temp = self.temp_dir / filename\r\n log.debug(f\"Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n@@ -992,18 +988,16 @@\n \r\n # Convert EXR source (float16) to uint16 for PNG/TIF fallback\r\n #if img_to_save.dtype == np.float16: img_fallback = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(target_fallback_dtype)\r\n if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n- # <<< START MODIFICATION HERE >>>\r\n # Check for NaN/Inf before conversion\r\n if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n log.error(f\"Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. Skipping resolution.\")\r\n continue # Skip fallback if data is bad\r\n \r\n # Clip *after* scaling for uint16 conversion robustness\r\n img_scaled = img_to_save * 65535.0\r\n img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- # <<< END MODIFICATION HERE >>>\r\n elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already correct type\r\n else: log.error(f\"Cannot convert {img_to_save.dtype} for fallback.\"); continue # Skip fallback\r\n \r\n try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n" }, { "date": 1745308306149, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -749,12 +749,15 @@\n # Unpack tuple: (inverted_float_data, original_dtype)\r\n img_processed, source_dtype = preprocessed_data[map_type]\r\n # No longer need to read the original file just for dtype\r\n else:\r\n+:start_line:755\r\n+-------\r\n full_source_path = self.temp_dir / source_path_rel\r\n # Determine 
the read flag based on map type\r\n- read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- log.debug(f\"Loading source {source_path_rel.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ # Load MASK maps with IMREAD_UNCHANGED to preserve alpha channel\r\n+ read_flag = cv2.IMREAD_UNCHANGED if map_type.upper() == 'MASK' else (cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED)\r\n+ log.debug(f\"Loading source {source_path_rel.name} with flag: {'UNCHANGED' if read_flag == cv2.IMREAD_UNCHANGED else ('GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'DEFAULT')}\") # Updated log\r\n img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n if img_loaded is None:\r\n raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n img_processed, source_dtype = img_loaded.copy(), img_loaded.dtype\r\n@@ -765,24 +768,28 @@\n if map_type == 'MASK' and img_processed is not None:\r\n log.debug(\"Processing as MASK type.\")\r\n log.debug(f\"MASK processing: Initial shape: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n shape = img_processed.shape\r\n+ # Explicitly extract alpha channel if present (4 channels)\r\n if len(shape) == 3 and shape[2] == 4:\r\n log.debug(\"MASK processing: Extracting alpha channel (4-channel source).\") # ADDED LOG\r\n img_processed = img_processed[:, :, 3]\r\n+ # Convert to grayscale if it's a 3-channel image (no alpha)\r\n elif len(shape) == 3 and shape[2] == 3:\r\n log.debug(\"MASK processing: Converting BGR to Grayscale (3-channel source).\") # ADDED LOG\r\n img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n+ # If it's already grayscale (2-channel shape is not standard for images, assuming 2D array)\r\n elif len(shape) == 2:\r\n log.debug(\"MASK processing: Source is already grayscale (2-channel shape).\") # ADDED 
LOG\r\n else:\r\n log.warning(f\"MASK processing: Unexpected source shape {shape}. Cannot reliably extract mask.\") # ADDED LOG\r\n \r\n log.debug(f\"MASK processing: Shape after channel extraction/conversion: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n \r\n+ # Ensure the mask is uint8 grayscale before proceeding\r\n if img_processed.dtype != np.uint8:\r\n log.debug(f\"Converting mask from {img_processed.dtype} to uint8.\")\r\n- if source_dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ if img_processed.dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n else: img_processed = img_processed.astype(np.uint8)\r\n log.debug(f\"MASK processing: Shape after dtype conversion: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n \r\n" }, { "date": 1745308427903, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -709,9 +709,9 @@\n rough_source_to_use = gloss_map_info_for_rough\r\n derived_from_gloss_flag['ROUGH'] = True\r\n if native_rough_map_info:\r\n log.warning(f\"Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n- if native_rough_map_info in self.classified_files['maps']: self.classified_files['maps'].remove(native_rough_map_info)\r\n+ if native_rough_map_info in self.classified_files['maps']: self.classified_files['maps'].remove(native_rough_info) # Corrected variable name\r\n self.classified_files['ignored'].append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n elif native_rough_map_info:\r\n rough_source_to_use = native_rough_map_info\r\n derived_from_gloss_flag['ROUGH'] = False\r\n@@ -749,10 +749,8 @@\n # Unpack tuple: (inverted_float_data, original_dtype)\r\n img_processed, source_dtype = preprocessed_data[map_type]\r\n # No longer need to read the original file just for dtype\r\n else:\r\n-:start_line:755\r\n--------\r\n full_source_path = self.temp_dir / source_path_rel\r\n # Determine the read flag based on map type\r\n # Load MASK maps with IMREAD_UNCHANGED to preserve alpha channel\r\n read_flag = cv2.IMREAD_UNCHANGED if map_type.upper() == 'MASK' else (cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED)\r\n" }, { "date": 1745309855377, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -104,8 +104,9 @@\n min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n # Assuming BGR(A?) order from OpenCV, report as list [B, G, R, (A)]\r\n+ log.debug(\"DEBUG: _calculate_image_stats is assuming BGR(A?) 
channel order from OpenCV.\")\r\n stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n log.debug(f\"Calculated {channels}-Channel Stats: Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n else:\r\n log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n@@ -759,8 +760,16 @@\n if img_loaded is None:\r\n raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n img_processed, source_dtype = img_loaded.copy(), img_loaded.dtype\r\n log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape}\")\r\n+ # --- DEBUG LOG: Check channel order assumption ---\r\n+ if len(img_processed.shape) == 3:\r\n+ log.debug(f\"DEBUG: Loaded image shape is {img_processed.shape}. Assuming OpenCV default BGR order.\")\r\n+ # Optional: Log a few pixel values to inspect channel order\r\n+ # if img_processed.size > 0:\r\n+ # sample_pixel = img_processed[0, 0] # Get top-left pixel\r\n+ # log.debug(f\"DEBUG: Sample pixel (top-left): {sample_pixel.tolist()} (Assuming BGR order)\")\r\n+ # --- END DEBUG LOG ---\r\n map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n \r\n # --- 2. Handle Alpha Mask ---\r\n if map_type == 'MASK' and img_processed is not None:\r\n@@ -1166,8 +1175,17 @@\n raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n loaded_inputs[map_type] = img\r\n input_bit_depths.add(res_details.get('bit_depth', 8))\r\n \r\n+ # --- DEBUG LOG: Check channel order of merge inputs ---\r\n+ if len(img.shape) == 3:\r\n+ log.debug(f\"DEBUG: Merge input '{input_file_path.name}' ({map_type}) loaded with shape {img.shape}, dtype {img.dtype}. 
Assuming OpenCV default BGR order.\")\r\n+ # Optional: Log a few pixel values to inspect channel order\r\n+ # if img.size > 0:\r\n+ # sample_pixel = img[0, 0] # Get top-left pixel\r\n+ # log.debug(f\"DEBUG: Sample pixel (top-left) for merge input: {sample_pixel.tolist()} (Assuming BGR order)\")\r\n+ # --- END DEBUG LOG ---\r\n+\r\n if not possible_to_load: continue # Skip this resolution if inputs missing\r\n \r\n # --- Determine dimensions and target_dim for threshold check ---\r\n first_map_type = next(iter(required_input_types)) # Get one map type to read dimensions\r\n" }, { "date": 1745313656825, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -103,12 +103,11 @@\n channels = data_float.shape[2]\r\n min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n- # Assuming BGR(A?) order from OpenCV, report as list [B, G, R, (A)]\r\n- log.debug(\"DEBUG: _calculate_image_stats is assuming BGR(A?) 
channel order from OpenCV.\")\r\n+ # The input data_float is now expected to be in RGB order after conversion in _process_maps\r\n stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated {channels}-Channel Stats: Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n+ log.debug(f\"Calculated {channels}-Channel Stats (RGB order): Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n else:\r\n log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n return None\r\n return stats\r\n@@ -758,17 +757,27 @@\n log.debug(f\"Loading source {source_path_rel.name} with flag: {'UNCHANGED' if read_flag == cv2.IMREAD_UNCHANGED else ('GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'DEFAULT')}\") # Updated log\r\n img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n if img_loaded is None:\r\n raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n- img_processed, source_dtype = img_loaded.copy(), img_loaded.dtype\r\n- log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape}\")\r\n+ \r\n+ # Convert BGR to RGB if it's a 3-channel image\r\n+ if len(img_loaded.shape) == 3:\r\n+ log.debug(f\"Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n+ img_processed = cv2.cvtColor(img_loaded, cv2.COLOR_BGR2RGB)\r\n+ else:\r\n+ img_processed = img_loaded.copy() # Copy grayscale or 4-channel directly\r\n+ \r\n+ source_dtype = img_loaded.dtype # Keep original dtype for bit depth info\r\n+ \r\n+ log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape} (After potential BGR->RGB conversion)\")\r\n # --- DEBUG LOG: Check channel order assumption ---\r\n+ # This log is now less relevant as we convert to RGB, but keep for shape info\r\n if len(img_processed.shape) == 3:\r\n- log.debug(f\"DEBUG: Loaded image shape is {img_processed.shape}. 
Assuming OpenCV default BGR order.\")\r\n+ log.debug(f\"DEBUG: Processed image shape is {img_processed.shape}. Now in RGB order.\")\r\n # Optional: Log a few pixel values to inspect channel order\r\n # if img_processed.size > 0:\r\n # sample_pixel = img_processed[0, 0] # Get top-left pixel\r\n- # log.debug(f\"DEBUG: Sample pixel (top-left): {sample_pixel.tolist()} (Assuming BGR order)\")\r\n+ # log.debug(f\"DEBUG: Sample pixel (top-left): {sample_pixel.tolist()} (Now in RGB order)\")\r\n # --- END DEBUG LOG ---\r\n map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n \r\n # --- 2. Handle Alpha Mask ---\r\n@@ -1214,13 +1223,13 @@\n if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n else: img_float = img_input.astype(np.float32)\r\n num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n- if num_source_channels >= 3: # BGR Source\r\n- if target_channel == 'R': channel_data_float32 = img_float[:, :, 2]\r\n- elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n- elif target_channel == 'B': channel_data_float32 = img_float[:, :, 0]\r\n- elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n+ if num_source_channels >= 3: # Now RGB Source (after conversion in _process_maps)\r\n+ if target_channel == 'R': channel_data_float32 = img_float[:, :, 0] # Red channel (index 0 in RGB)\r\n+ elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1] # Green channel (index 1 in RGB)\r\n+ elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2] # Blue channel (index 2 in RGB)\r\n+ elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3] # Alpha channel (index 3 in RGBA)\r\n elif num_source_channels == 1 or len(img_float.shape) == 2: # Grayscale source\r\n channel_data_float32 = 
img_float.reshape(h, w)\r\n if channel_data_float32 is None: # Use default if needed\r\n default_val = defaults.get(target_channel)\r\n" }, { "date": 1745313824945, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -591,28 +591,37 @@\n map_stems = [f['source_path'].stem for f in self.classified_files['maps']]\r\n model_stems = [f['source_path'].stem for f in self.classified_files['models']]\r\n candidate_stems = map_stems + model_stems\r\n \r\n+:start_line:595\r\n+-------\r\n determined_base_name = \"UnknownAssetName\"\r\n if candidate_stems:\r\n separator = self.config.source_naming_separator\r\n- base_index = self.config.source_naming_indices.get('base_name')\r\n+ source_naming_indices = self.config.source_naming_indices\r\n+ base_index = source_naming_indices.get('base_name')\r\n+ log.debug(f\"Source naming indices: {source_naming_indices}, Base name index: {base_index}\") # ADDED LOG\r\n \r\n if isinstance(base_index, int):\r\n potential_base_names = set()\r\n for stem in candidate_stems:\r\n parts = stem.split(separator)\r\n+ log.debug(f\" Stem '{stem}' split by '{separator}': {parts}\") # ADDED LOG\r\n if len(parts) > base_index:\r\n potential_base_names.add(parts[base_index])\r\n+ log.debug(f\" Added part at index {base_index}: '{parts[base_index]}'\") # ADDED LOG\r\n+ else:\r\n+ log.debug(f\" Stem '{stem}' does not have enough parts for index {base_index}.\") # ADDED LOG\r\n if len(potential_base_names) == 1:\r\n determined_base_name = potential_base_names.pop()\r\n log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n elif len(potential_base_names) > 1 :\r\n log.warning(f\"Multiple potential base names found from parts index {base_index}: {potential_base_names}. 
Falling back to common prefix.\")\r\n # Fallback logic if structured parts method fails or yields multiple names\r\n determined_base_name = os.path.commonprefix(candidate_stems)\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Clean trailing separators etc.\r\n- # else: len is 0, means no valid parts found, use common prefix below\r\n+ else: # len is 0, means no valid parts found for the given index\r\n+ log.debug(f\"No valid parts found for base name index {base_index}. Falling back to common prefix.\") # ADDED LOG\r\n \r\n # If no index or structured parts failed, use common prefix of all relevant stems\r\n if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n determined_base_name = os.path.commonprefix(candidate_stems)\r\n" }, { "date": 1745313977420, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -591,37 +591,28 @@\n map_stems = [f['source_path'].stem for f in self.classified_files['maps']]\r\n model_stems = [f['source_path'].stem for f in self.classified_files['models']]\r\n candidate_stems = map_stems + model_stems\r\n \r\n-:start_line:595\r\n--------\r\n determined_base_name = \"UnknownAssetName\"\r\n if candidate_stems:\r\n separator = self.config.source_naming_separator\r\n- source_naming_indices = self.config.source_naming_indices\r\n- base_index = source_naming_indices.get('base_name')\r\n- log.debug(f\"Source naming indices: {source_naming_indices}, Base name index: {base_index}\") # ADDED LOG\r\n+ base_index = self.config.source_naming_indices.get('base_name')\r\n \r\n if isinstance(base_index, int):\r\n potential_base_names = set()\r\n for stem in candidate_stems:\r\n parts = stem.split(separator)\r\n- log.debug(f\" Stem '{stem}' split by '{separator}': {parts}\") # ADDED LOG\r\n if len(parts) > base_index:\r\n potential_base_names.add(parts[base_index])\r\n- log.debug(f\" Added part at index {base_index}: 
'{parts[base_index]}'\") # ADDED LOG\r\n- else:\r\n- log.debug(f\" Stem '{stem}' does not have enough parts for index {base_index}.\") # ADDED LOG\r\n if len(potential_base_names) == 1:\r\n determined_base_name = potential_base_names.pop()\r\n log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n elif len(potential_base_names) > 1 :\r\n log.warning(f\"Multiple potential base names found from parts index {base_index}: {potential_base_names}. Falling back to common prefix.\")\r\n # Fallback logic if structured parts method fails or yields multiple names\r\n determined_base_name = os.path.commonprefix(candidate_stems)\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Clean trailing separators etc.\r\n- else: # len is 0, means no valid parts found for the given index\r\n- log.debug(f\"No valid parts found for base name index {base_index}. Falling back to common prefix.\") # ADDED LOG\r\n+ # else: len is 0, means no valid parts found, use common prefix below\r\n \r\n # If no index or structured parts failed, use common prefix of all relevant stems\r\n if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n determined_base_name = os.path.commonprefix(candidate_stems)\r\n" }, { "date": 1745314050688, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -594,16 +594,23 @@\n \r\n determined_base_name = \"UnknownAssetName\"\r\n if candidate_stems:\r\n separator = self.config.source_naming_separator\r\n- base_index = self.config.source_naming_indices.get('base_name')\r\n-\r\n+ indices_dict = self.config.source_naming_indices # Get the whole dict first\r\n+ base_index = indices_dict.get('base_name') # Now get the specific index\r\n+ log.debug(f\"Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Base Index='{base_index}'\") # DEBUG: Log separator and indices\r\n+ \r\n if 
isinstance(base_index, int):\r\n potential_base_names = set()\r\n for stem in candidate_stems:\r\n parts = stem.split(separator)\r\n+ log.debug(f\" Processing stem: '{stem}', Parts: {parts}\") # DEBUG: Log stem and parts\r\n if len(parts) > base_index:\r\n- potential_base_names.add(parts[base_index])\r\n+ extracted_name = parts[base_index]\r\n+ potential_base_names.add(extracted_name)\r\n+ log.debug(f\" Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG: Log extracted name\r\n+ else:\r\n+ log.debug(f\" Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG: Log if index is out of bounds\r\n if len(potential_base_names) == 1:\r\n determined_base_name = potential_base_names.pop()\r\n log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n elif len(potential_base_names) > 1 :\r\n@@ -614,8 +621,9 @@\n # else: len is 0, means no valid parts found, use common prefix below\r\n \r\n # If no index or structured parts failed, use common prefix of all relevant stems\r\n if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n+ log.debug(\"Falling back to common prefix for base name determination.\") # DEBUG: Log fallback\r\n determined_base_name = os.path.commonprefix(candidate_stems)\r\n log.debug(f\"Using common prefix: '{determined_base_name}'\")\r\n # Clean up common separators/underscores often left by commonprefix\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n" }, { "date": 1745314848112, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -595,12 +595,19 @@\n determined_base_name = \"UnknownAssetName\"\r\n if candidate_stems:\r\n separator = self.config.source_naming_separator\r\n indices_dict = self.config.source_naming_indices # Get the whole dict first\r\n- base_index = indices_dict.get('base_name') # Now get the specific 
index\r\n- log.debug(f\"Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Base Index='{base_index}'\") # DEBUG: Log separator and indices\r\n+ base_index_raw = indices_dict.get('base_name') # Get the raw value\r\n+ log.debug(f\"Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}' (Type: {type(base_index_raw)})\") # DEBUG: Log raw value and type\r\n \r\n- if isinstance(base_index, int):\r\n+ base_index = None\r\n+ if base_index_raw is not None:\r\n+ try:\r\n+ base_index = int(base_index_raw) # <<< EXPLICIT CONVERSION\r\n+ except (ValueError, TypeError):\r\n+ log.warning(f\"Could not convert base_name index '{base_index_raw}' to integer. Falling back.\")\r\n+ \r\n+ if isinstance(base_index, int): # Check should now pass if conversion succeeded\r\n potential_base_names = set()\r\n for stem in candidate_stems:\r\n parts = stem.split(separator)\r\n log.debug(f\" Processing stem: '{stem}', Parts: {parts}\") # DEBUG: Log stem and parts\r\n" }, { "date": 1745315265739, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -596,9 +596,9 @@\n if candidate_stems:\r\n separator = self.config.source_naming_separator\r\n indices_dict = self.config.source_naming_indices # Get the whole dict first\r\n base_index_raw = indices_dict.get('base_name') # Get the raw value\r\n- log.debug(f\"Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}' (Type: {type(base_index_raw)})\") # DEBUG: Log raw value and type\r\n+ # log.debug(f\"Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}' (Type: {type(base_index_raw)})\") # DEBUG: Removed log\r\n \r\n base_index = None\r\n if base_index_raw is not None:\r\n try:\r\n@@ -609,15 +609,15 @@\n if isinstance(base_index, int): # Check should now pass if conversion succeeded\r\n potential_base_names 
= set()\r\n for stem in candidate_stems:\r\n parts = stem.split(separator)\r\n- log.debug(f\" Processing stem: '{stem}', Parts: {parts}\") # DEBUG: Log stem and parts\r\n+ # log.debug(f\" Processing stem: '{stem}', Parts: {parts}\") # DEBUG: Removed log\r\n if len(parts) > base_index:\r\n extracted_name = parts[base_index]\r\n potential_base_names.add(extracted_name)\r\n- log.debug(f\" Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG: Log extracted name\r\n- else:\r\n- log.debug(f\" Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG: Log if index is out of bounds\r\n+ # log.debug(f\" Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG: Removed log\r\n+ # else: # DEBUG: Removed log\r\n+ # log.debug(f\" Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG: Removed log\r\n if len(potential_base_names) == 1:\r\n determined_base_name = potential_base_names.pop()\r\n log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n elif len(potential_base_names) > 1 :\r\n@@ -628,12 +628,12 @@\n # else: len is 0, means no valid parts found, use common prefix below\r\n \r\n # If no index or structured parts failed, use common prefix of all relevant stems\r\n if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n- log.debug(\"Falling back to common prefix for base name determination.\") # DEBUG: Log fallback\r\n- determined_base_name = os.path.commonprefix(candidate_stems)\r\n- log.debug(f\"Using common prefix: '{determined_base_name}'\")\r\n- # Clean up common separators/underscores often left by commonprefix\r\n+ # log.debug(\"Falling back to common prefix for base name determination.\") # DEBUG: Removed log\r\n+ determined_base_name = os.path.commonprefix(candidate_stems)\r\n+ log.debug(f\"Using common prefix: '{determined_base_name}'\") # Keep this log for now\r\n+ # Clean 
up common separators/underscores often left by commonprefix\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n \r\n # Final cleanup and fallback for base name\r\n determined_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n" }, { "date": 1745315291522, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1698,39 +1698,59 @@\n break # Found model match\r\n \r\n # Note: Files matching neither maps nor models are ignored for prediction details\r\n \r\n+ log.debug(f\"[PREDICTION] Potential map files identified: {potential_map_files}\") # DEBUG PREDICTION\r\n candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n+ log.debug(f\"[PREDICTION] Candidate stems identified: {candidate_stems_list}\") # DEBUG PREDICTION\r\n if not candidate_stems_list:\r\n log.warning(f\"Prediction: No relevant map/model stems found in {self.input_path.name}. Using input name as fallback.\")\r\n # Fallback: Use the input path's name itself if no stems found\r\n base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n determined_base_name = base_name_fallback\r\n else:\r\n # 4. 
Replicate _determine_base_metadata logic for base name\r\n determined_base_name = \"UnknownAssetName\"\r\n- base_index = self.config.source_naming_indices.get('base_name')\r\n+ separator = self.config.source_naming_separator\r\n+ indices_dict = self.config.source_naming_indices\r\n+ base_index_raw = indices_dict.get('base_name')\r\n+ log.debug(f\"[PREDICTION] Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}'\") # DEBUG PREDICTION\r\n \r\n+ base_index = None\r\n+ if base_index_raw is not None:\r\n+ try:\r\n+ base_index = int(base_index_raw) # Use explicit conversion like in main logic\r\n+ except (ValueError, TypeError):\r\n+ log.warning(f\"[PREDICTION] Could not convert base_name index '{base_index_raw}' to integer.\")\r\n+\r\n if isinstance(base_index, int):\r\n potential_base_names = set()\r\n for stem in candidate_stems_list: # Iterate over the list\r\n parts = stem.split(separator)\r\n+ log.debug(f\"[PREDICTION] Processing stem: '{stem}', Parts: {parts}\") # DEBUG PREDICTION\r\n if len(parts) > base_index:\r\n- potential_base_names.add(parts[base_index])\r\n+ extracted_name = parts[base_index]\r\n+ potential_base_names.add(extracted_name)\r\n+ log.debug(f\"[PREDICTION] Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG PREDICTION\r\n+ else:\r\n+ log.debug(f\"[PREDICTION] Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG PREDICTION\r\n if len(potential_base_names) == 1:\r\n determined_base_name = potential_base_names.pop()\r\n+ log.debug(f\"[PREDICTION] Determined base name '{determined_base_name}' from structured parts (index {base_index}).\") # DEBUG PREDICTION\r\n elif len(potential_base_names) > 1:\r\n- log.debug(f\"Prediction: Multiple potential base names from index {base_index}, using common prefix.\")\r\n+ log.debug(f\"[PREDICTION] Multiple potential base names found from index {base_index}: {potential_base_names}. 
Falling back to common prefix.\") # DEBUG PREDICTION\r\n determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n # else: Use common prefix below\r\n \r\n if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n+ log.debug(\"[PREDICTION] Falling back to common prefix for base name determination (structured parts failed or no index).\") # DEBUG PREDICTION\r\n determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n \r\n # 5. Sanitize Names\r\n final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n+ log.debug(f\"[PREDICTION] Final determined base name for prediction: '{final_base_name}'\") # DEBUG PREDICTION\r\n final_supplier_name = self._sanitize_filename(supplier_name)\r\n \r\n # 6. 
Predict Output Filenames\r\n file_predictions = {}\r\n" }, { "date": 1745315317419, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1808,8 +1808,9 @@\n self._extract_input()\r\n self._inventory_and_classify_files()\r\n self._determine_base_metadata() # Needed for base name prediction\r\n asset_base_name = self.metadata.get(\"asset_name\", asset_base_name)\r\n+ log.debug(f\"[DETAILED PREDICTION] Base name determined by _determine_base_metadata: '{asset_base_name}'\") # DEBUG PREDICTION\r\n \r\n # --- Prepare for filename prediction ---\r\n target_pattern = self.config.target_filename_pattern\r\n highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n@@ -1822,8 +1823,9 @@\n original_path_str = str(map_info.get(\"source_path\", \"UnknownPath\"))\r\n map_type = map_info.get(\"map_type\", \"UnknownType\")\r\n # Predicted name for maps should just be the base asset name\r\n predicted_name_display = asset_base_name\r\n+ log.debug(f\"[DETAILED PREDICTION] Using base name '{asset_base_name}' for map '{original_path_str}' prediction.\") # DEBUG PREDICTION\r\n # Concise details\r\n details = f\"[{map_type}]\"\r\n if map_info.get(\"is_16bit_source\"):\r\n details += \" (16-bit)\"\r\n" }, { "date": 1745315336658, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -632,9 +632,9 @@\n # log.debug(\"Falling back to common prefix for base name determination.\") # DEBUG: Removed log\r\n determined_base_name = os.path.commonprefix(candidate_stems)\r\n log.debug(f\"Using common prefix: '{determined_base_name}'\") # Keep this log for now\r\n # Clean up common separators/underscores often left by commonprefix\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Corrected indentation\r\n \r\n # Final cleanup and fallback 
for base name\r\n determined_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n self.metadata[\"asset_name\"] = determined_base_name\r\n" }, { "date": 1745315893449, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1808,9 +1808,8 @@\n self._extract_input()\r\n self._inventory_and_classify_files()\r\n self._determine_base_metadata() # Needed for base name prediction\r\n asset_base_name = self.metadata.get(\"asset_name\", asset_base_name)\r\n- log.debug(f\"[DETAILED PREDICTION] Base name determined by _determine_base_metadata: '{asset_base_name}'\") # DEBUG PREDICTION\r\n \r\n # --- Prepare for filename prediction ---\r\n target_pattern = self.config.target_filename_pattern\r\n highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n@@ -1823,9 +1822,8 @@\n original_path_str = str(map_info.get(\"source_path\", \"UnknownPath\"))\r\n map_type = map_info.get(\"map_type\", \"UnknownType\")\r\n # Predicted name for maps should just be the base asset name\r\n predicted_name_display = asset_base_name\r\n- log.debug(f\"[DETAILED PREDICTION] Using base name '{asset_base_name}' for map '{original_path_str}' prediction.\") # DEBUG PREDICTION\r\n # Concise details\r\n details = f\"[{map_type}]\"\r\n if map_info.get(\"is_16bit_source\"):\r\n details += \" (16-bit)\"\r\n" }, { "date": 1745316439412, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -560,141 +560,113 @@\n log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n \r\n \r\n- def _determine_base_metadata(self):\r\n- \"\"\"Determines base_name, asset_category, and archetype.\"\"\"\r\n+ def _determine_base_metadata(self) -> Tuple[List[str], Dict[Path, Optional[str]]]:\r\n+ \"\"\"\r\n+ Determines distinct asset base names within the input based on preset 
rules\r\n+ and maps each relevant source file to its determined base name.\r\n+\r\n+ Returns:\r\n+ Tuple[List[str], Dict[Path, Optional[str]]]:\r\n+ - A list of unique, sanitized base names found.\r\n+ - A dictionary mapping source file relative paths to their determined\r\n+ base name string (or None if no base name could be determined for that file).\r\n+ \"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- log.info(\"Determining base metadata...\")\r\n+ log.info(\"Determining distinct base names and file mapping...\")\r\n \r\n- # --- Determine Asset Category ---\r\n- self.metadata[\"asset_category\"] = self.config.default_asset_category # Start with default\r\n- if self.classified_files[\"models\"]:\r\n- self.metadata[\"asset_category\"] = \"Asset\"\r\n- log.debug(\"Category set to 'Asset' due to model file presence.\")\r\n- else:\r\n- decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n- found_decal = False\r\n- candidate_files = [f['source_path'] for f in self.classified_files['maps']] # Check map names first\r\n- if not candidate_files: # Fallback to checking all files? Maybe too broad. 
Check Extra?\r\n- candidate_files = [f['source_path'] for f in self.classified_files['extra']]\r\n+ # Combine map and model files for base name determination\r\n+ relevant_files = self.classified_files.get('maps', []) + self.classified_files.get('models', [])\r\n+ if not relevant_files:\r\n+ log.warning(\"No map or model files found to determine base name(s).\")\r\n+ # Fallback: Use input path name as a single asset\r\n+ input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ sanitized_input_name = self._sanitize_filename(input_name or \"UnknownInput\")\r\n+ # Map all files (maps, models, extra, ignored) to this fallback name\r\n+ all_files_paths = [f['source_path'] for cat in self.classified_files.values() for f in cat if 'source_path' in f]\r\n+ file_to_base_name_map = {f_path: sanitized_input_name for f_path in all_files_paths}\r\n+ log.info(f\"Using input path name '{sanitized_input_name}' as the single asset name.\")\r\n+ return [sanitized_input_name], file_to_base_name_map\r\n \r\n- if decal_keywords:\r\n- for file_path in candidate_files:\r\n- for keyword in decal_keywords:\r\n- if keyword.lower() in file_path.name.lower():\r\n- self.metadata[\"asset_category\"] = \"Decal\"\r\n- found_decal = True; break\r\n- if found_decal: break\r\n- if found_decal: log.debug(\"Category set to 'Decal' due to keyword match.\")\r\n+ # --- Determine Base Names from Files ---\r\n+ separator = self.config.source_naming_separator\r\n+ indices_dict = self.config.source_naming_indices\r\n+ base_index_raw = indices_dict.get('base_name')\r\n+ base_index = None\r\n+ if base_index_raw is not None:\r\n+ try:\r\n+ base_index = int(base_index_raw)\r\n+ except (ValueError, TypeError):\r\n+ log.warning(f\"Could not convert base_name index '{base_index_raw}' to integer. 
Base name determination might be inaccurate.\")\r\n \r\n- # --- Determine Base Name ---\r\n- map_stems = [f['source_path'].stem for f in self.classified_files['maps']]\r\n- model_stems = [f['source_path'].stem for f in self.classified_files['models']]\r\n- candidate_stems = map_stems + model_stems\r\n+ file_to_base_name_map: Dict[Path, Optional[str]] = {}\r\n+ potential_base_names_per_file: Dict[Path, str] = {} # Store potential name for each file path\r\n \r\n- determined_base_name = \"UnknownAssetName\"\r\n- if candidate_stems:\r\n- separator = self.config.source_naming_separator\r\n- indices_dict = self.config.source_naming_indices # Get the whole dict first\r\n- base_index_raw = indices_dict.get('base_name') # Get the raw value\r\n- # log.debug(f\"Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}' (Type: {type(base_index_raw)})\") # DEBUG: Removed log\r\n- \r\n- base_index = None\r\n- if base_index_raw is not None:\r\n- try:\r\n- base_index = int(base_index_raw) # <<< EXPLICIT CONVERSION\r\n- except (ValueError, TypeError):\r\n- log.warning(f\"Could not convert base_name index '{base_index_raw}' to integer. 
Falling back.\")\r\n- \r\n- if isinstance(base_index, int): # Check should now pass if conversion succeeded\r\n- potential_base_names = set()\r\n- for stem in candidate_stems:\r\n- parts = stem.split(separator)\r\n- # log.debug(f\" Processing stem: '{stem}', Parts: {parts}\") # DEBUG: Removed log\r\n- if len(parts) > base_index:\r\n- extracted_name = parts[base_index]\r\n- potential_base_names.add(extracted_name)\r\n- # log.debug(f\" Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG: Removed log\r\n- # else: # DEBUG: Removed log\r\n- # log.debug(f\" Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG: Removed log\r\n- if len(potential_base_names) == 1:\r\n- determined_base_name = potential_base_names.pop()\r\n- log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n- elif len(potential_base_names) > 1 :\r\n- log.warning(f\"Multiple potential base names found from parts index {base_index}: {potential_base_names}. 
Falling back to common prefix.\")\r\n- # Fallback logic if structured parts method fails or yields multiple names\r\n- determined_base_name = os.path.commonprefix(candidate_stems)\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Clean trailing separators etc.\r\n- # else: len is 0, means no valid parts found, use common prefix below\r\n+ if isinstance(base_index, int):\r\n+ log.debug(f\"Attempting base name extraction using separator '{separator}' and index {base_index}.\")\r\n+ for file_info in relevant_files:\r\n+ file_path = file_info['source_path']\r\n+ stem = file_path.stem\r\n+ parts = stem.split(separator)\r\n+ if len(parts) > base_index:\r\n+ extracted_name = parts[base_index]\r\n+ sanitized_name = self._sanitize_filename(extracted_name)\r\n+ if sanitized_name: # Ensure we don't add empty names\r\n+ potential_base_names_per_file[file_path] = sanitized_name\r\n+ log.debug(f\" File '{file_path.name}' -> Potential Base Name: '{sanitized_name}'\")\r\n+ else:\r\n+ log.debug(f\" File '{file_path.name}' -> Extracted empty name at index {base_index}. Marking as None.\")\r\n+ file_to_base_name_map[file_path] = None # Explicitly mark as None if extraction yields empty\r\n+ else:\r\n+ log.debug(f\" File '{file_path.name}' -> Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}. Marking as None.\")\r\n+ file_to_base_name_map[file_path] = None # Mark as None if index is invalid for this file\r\n+ else:\r\n+ log.warning(\"Base name index not configured or invalid. Cannot determine distinct assets based on index. 
Treating as single asset.\")\r\n+ # Fallback to common prefix if no valid index\r\n+ stems = [f['source_path'].stem for f in relevant_files]\r\n+ common_prefix_name = os.path.commonprefix(stems) if stems else \"\"\r\n+ sanitized_common_name = self._sanitize_filename(common_prefix_name or self.input_path.stem or \"UnknownAsset\")\r\n+ log.info(f\"Using common prefix '{sanitized_common_name}' as the single asset name.\")\r\n+ # Map all relevant files to this single name\r\n+ for file_info in relevant_files:\r\n+ potential_base_names_per_file[file_info['source_path']] = sanitized_common_name\r\n \r\n- # If no index or structured parts failed, use common prefix of all relevant stems\r\n- if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n- # log.debug(\"Falling back to common prefix for base name determination.\") # DEBUG: Removed log\r\n- determined_base_name = os.path.commonprefix(candidate_stems)\r\n- log.debug(f\"Using common prefix: '{determined_base_name}'\") # Keep this log for now\r\n- # Clean up common separators/underscores often left by commonprefix\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Corrected indentation\r\n+ # --- Consolidate Distinct Names and Final Mapping ---\r\n+ distinct_base_names_set = set(potential_base_names_per_file.values())\r\n+ distinct_base_names = sorted(list(distinct_base_names_set)) # Sort for consistent processing order\r\n \r\n- # Final cleanup and fallback for base name\r\n- determined_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n- self.metadata[\"asset_name\"] = determined_base_name\r\n- log.debug(f\"Final Determined Base Name: {self.metadata['asset_name']}\")\r\n+ # Populate the final map, including files that didn't match the index rule (marked as None earlier)\r\n+ for file_info in relevant_files:\r\n+ file_path = file_info['source_path']\r\n+ if file_path not in file_to_base_name_map: # If not 
already marked as None\r\n+ file_to_base_name_map[file_path] = potential_base_names_per_file.get(file_path) # Assign determined name or None if somehow missed\r\n \r\n- # --- Determine Archetype (Usage) ---\r\n- archetype_rules = self.config.archetype_rules\r\n- determined_archetype = \"Unknown\"\r\n- check_stems = [f['source_path'].stem.lower() for f in self.classified_files['maps']]\r\n- check_stems.extend([f['source_path'].stem.lower() for f in self.classified_files['models']])\r\n- # Also check the determined base name itself?\r\n- check_stems.append(self.metadata[\"asset_name\"].lower())\r\n+ # Add files from 'extra' and 'ignored' to the map, marking them as None for base name\r\n+ for category in ['extra', 'ignored']:\r\n+ for file_info in self.classified_files.get(category, []):\r\n+ file_path = file_info['source_path']\r\n+ if file_path not in file_to_base_name_map: # Avoid overwriting if somehow already mapped\r\n+ file_to_base_name_map[file_path] = None\r\n+ log.debug(f\" File '{file_path.name}' (Category: {category}) -> Marked as None (No Base Name).\")\r\n \r\n- if check_stems:\r\n- best_match_archetype = \"Unknown\"\r\n- highest_match_count = 0 # Simple heuristic: prioritize rule with most keyword hits?\r\n \r\n- for rule in archetype_rules:\r\n- if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n- arch_name, rules_dict = rule\r\n- match_any = rules_dict.get(\"match_any\", [])\r\n- # match_all = rules_dict.get(\"match_all\", []) # Add logic if needed\r\n+ if not distinct_base_names:\r\n+ # This case should be rare due to fallbacks, but handle it.\r\n+ log.warning(\"No distinct base names could be determined. 
Using input name as fallback.\")\r\n+ input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ fallback_name = self._sanitize_filename(input_name or \"FallbackAsset\")\r\n+ distinct_base_names = [fallback_name]\r\n+ # Remap all files to this single fallback name\r\n+ file_to_base_name_map = {f_path: fallback_name for f_path in file_to_base_name_map.keys()}\r\n \r\n- current_match_count = 0\r\n- matched_any_keyword = False\r\n- if match_any:\r\n- for keyword in match_any:\r\n- kw_lower = keyword.lower()\r\n- for stem in check_stems:\r\n- # Using simple substring check again\r\n- if kw_lower in stem:\r\n- current_match_count += 1\r\n- matched_any_keyword = True\r\n- # Optionally break after first hit per keyword? Or count all occurrences? Count all for now.\r\n \r\n- # Decide if this rule matches. For now: requires at least one 'match_any' hit.\r\n- if matched_any_keyword:\r\n- # Simple approach: first rule that matches wins.\r\n- # Could be enhanced by prioritizing rules or counting hits.\r\n- if best_match_archetype == \"Unknown\": # Take the first match\r\n- best_match_archetype = arch_name\r\n- log.debug(f\"Tentative archetype match '{arch_name}' based on keywords: {match_any}\")\r\n- # Break here for \"first match wins\" logic\r\n- break\r\n+ log.info(f\"Determined {len(distinct_base_names)} distinct asset base name(s): {distinct_base_names}\")\r\n+ log.debug(f\"File-to-BaseName Map ({len(file_to_base_name_map)} entries): { {str(k): v for k, v in file_to_base_name_map.items()} }\") # Log string paths for readability\r\n \r\n- # --- Example: Prioritize by match count (more complex) ---\r\n- # if current_match_count > highest_match_count:\r\n- # highest_match_count = current_match_count\r\n- # best_match_archetype = arch_name\r\n- # log.debug(f\"New best archetype match '{arch_name}' with count {current_match_count}\")\r\n- # ----------------------------------------------------------\r\n+ return distinct_base_names, 
file_to_base_name_map\r\n \r\n- determined_archetype = best_match_archetype\r\n \r\n- self.metadata[\"archetype\"] = determined_archetype\r\n- log.debug(f\"Determined Archetype: {self.metadata['archetype']}\")\r\n- log.info(\"Base metadata determination complete.\")\r\n-\r\n-\r\n def _process_maps(self):\r\n \"\"\"Loads, processes, resizes, and saves classified map files.\"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n log.info(f\"Processing identified map files for '{self.metadata['asset_name']}'...\")\r\n" }, { "date": 1745316458654, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -664,9 +664,89 @@\n log.debug(f\"File-to-BaseName Map ({len(file_to_base_name_map)} entries): { {str(k): v for k, v in file_to_base_name_map.items()} }\") # Log string paths for readability\r\n \r\n return distinct_base_names, file_to_base_name_map\r\n \r\n+ def _determine_single_asset_metadata(self, asset_base_name: str, filtered_classified_files: Dict[str, List[Dict]]) -> Dict[str, str]:\r\n+ \"\"\"\r\n+ Determines the asset_category and archetype for a single, specific asset\r\n+ based on its filtered list of classified files.\r\n \r\n+ Args:\r\n+ asset_base_name: The determined base name for this specific asset.\r\n+ filtered_classified_files: A dictionary containing only the classified\r\n+ files (maps, models, etc.) 
belonging to this asset.\r\n+\r\n+ Returns:\r\n+ A dictionary containing {\"asset_category\": str, \"archetype\": str}.\r\n+ \"\"\"\r\n+ log.debug(f\"Determining category and archetype for asset: '{asset_base_name}'\")\r\n+ determined_category = self.config.default_asset_category # Start with default\r\n+ determined_archetype = \"Unknown\"\r\n+\r\n+ # --- Determine Asset Category ---\r\n+ if filtered_classified_files.get(\"models\"):\r\n+ determined_category = \"Asset\"\r\n+ log.debug(f\" Category set to 'Asset' for '{asset_base_name}' due to model file presence.\")\r\n+ else:\r\n+ # Check for Decal keywords only if not an Asset\r\n+ decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n+ found_decal = False\r\n+ # Check map names first for decal keywords\r\n+ candidate_files = [f['source_path'] for f in filtered_classified_files.get('maps', [])]\r\n+ # Fallback to checking extra files if no maps found for this asset\r\n+ if not candidate_files:\r\n+ candidate_files = [f['source_path'] for f in filtered_classified_files.get('extra', [])]\r\n+\r\n+ if decal_keywords:\r\n+ for file_path in candidate_files:\r\n+ # Check against the specific file's name within this asset's context\r\n+ for keyword in decal_keywords:\r\n+ if keyword.lower() in file_path.name.lower():\r\n+ determined_category = \"Decal\"\r\n+ found_decal = True; break\r\n+ if found_decal: break\r\n+ if found_decal: log.debug(f\" Category set to 'Decal' for '{asset_base_name}' due to keyword match.\")\r\n+ # If not Asset or Decal, it remains the default (e.g., \"Texture\")\r\n+\r\n+ log.debug(f\" Determined Category for '{asset_base_name}': {determined_category}\")\r\n+\r\n+ # --- Determine Archetype (Usage) ---\r\n+ archetype_rules = self.config.archetype_rules\r\n+ # Use stems from maps and models belonging *only* to this asset\r\n+ check_stems = [f['source_path'].stem.lower() for f in filtered_classified_files.get('maps', [])]\r\n+ 
check_stems.extend([f['source_path'].stem.lower() for f in filtered_classified_files.get('models', [])])\r\n+ # Also check the determined base name itself\r\n+ check_stems.append(asset_base_name.lower())\r\n+\r\n+ if check_stems:\r\n+ best_match_archetype = \"Unknown\"\r\n+ # Using simple \"first match wins\" logic as before\r\n+ for rule in archetype_rules:\r\n+ if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n+ arch_name, rules_dict = rule\r\n+ match_any = rules_dict.get(\"match_any\", [])\r\n+ matched_any_keyword = False\r\n+ if match_any:\r\n+ for keyword in match_any:\r\n+ kw_lower = keyword.lower()\r\n+ for stem in check_stems:\r\n+ if kw_lower in stem: # Simple substring check\r\n+ matched_any_keyword = True\r\n+ break # Found a match for this keyword\r\n+ if matched_any_keyword: break # Found a match for this rule's keywords\r\n+\r\n+ if matched_any_keyword:\r\n+ best_match_archetype = arch_name\r\n+ log.debug(f\" Archetype match '{arch_name}' for '{asset_base_name}' based on keywords: {match_any}\")\r\n+ break # First rule match wins\r\n+\r\n+ determined_archetype = best_match_archetype\r\n+\r\n+ log.debug(f\" Determined Archetype for '{asset_base_name}': {determined_archetype}\")\r\n+\r\n+ return {\"asset_category\": determined_category, \"archetype\": determined_archetype}\r\n+\r\n+\r\n def _process_maps(self):\r\n \"\"\"Loads, processes, resizes, and saves classified map files.\"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n log.info(f\"Processing identified map files for '{self.metadata['asset_name']}'...\")\r\n" }, { "date": 1745316637936, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -745,28 +745,49 @@\n \r\n return {\"asset_category\": determined_category, \"archetype\": determined_archetype}\r\n \r\n \r\n- def _process_maps(self):\r\n- \"\"\"Loads, processes, resizes, and saves classified map files.\"\"\"\r\n+ def _process_maps(self, 
filtered_maps_list: List[Dict], current_asset_metadata: Dict) -> Tuple[Dict[str, Dict[str, Dict]], Dict[str, Dict], str, List[Dict]]:\r\n+ \"\"\"\r\n+ Loads, processes, resizes, and saves classified map files for a specific asset.\r\n+\r\n+ Args:\r\n+ filtered_maps_list: List of map dictionaries belonging to the current asset.\r\n+ current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n+\r\n+ Returns:\r\n+ Tuple containing:\r\n+ - processed_maps_details_asset: Dict mapping map_type to resolution details.\r\n+ - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n+ - aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n+ - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n+ \"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- log.info(f\"Processing identified map files for '{self.metadata['asset_name']}'...\")\r\n- processed_map_types = set()\r\n+ asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n+ log.info(f\"Processing identified map files for asset '{asset_name}'...\")\r\n \r\n+ # Initialize results specific to this asset\r\n+ processed_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n+ image_stats_asset: Dict[str, Dict] = {}\r\n+ map_details_asset: Dict[str, Dict] = {} # Store details like source bit depth, gloss inversion\r\n+ aspect_ratio_change_string_asset: str = \"N/A\"\r\n+ ignored_rough_maps: List[Dict] = [] # Store ignored native rough maps\r\n+\r\n # --- Settings retrieval ---\r\n resolutions = self.config.image_resolutions\r\n stats_res_key = self.config.calculate_stats_resolution\r\n stats_target_dim = resolutions.get(stats_res_key)\r\n- if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. 
Stats skipped.\")\r\n+ if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. Stats skipped for '{asset_name}'.\")\r\n gloss_keywords = self.config.source_glossiness_keywords\r\n target_pattern = self.config.target_filename_pattern\r\n- base_name = self.metadata['asset_name']\r\n+ base_name = asset_name # Use the asset name passed in\r\n \r\n # --- Pre-process Glossiness -> Roughness ---\r\n preprocessed_data = {}\r\n derived_from_gloss_flag = {}\r\n gloss_map_info_for_rough, native_rough_map_info = None, None\r\n- for map_info in self.classified_files['maps']:\r\n+ # Use the filtered list for this asset\r\n+ for map_info in filtered_maps_list:\r\n if map_info['map_type'] == 'ROUGH':\r\n is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n if is_gloss: gloss_map_info_for_rough = map_info\r\n else: native_rough_map_info = map_info\r\n@@ -775,247 +796,234 @@\n if gloss_map_info_for_rough:\r\n rough_source_to_use = gloss_map_info_for_rough\r\n derived_from_gloss_flag['ROUGH'] = True\r\n if native_rough_map_info:\r\n- log.warning(f\"Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. Prioritizing Gloss.\")\r\n- if native_rough_map_info in self.classified_files['maps']: self.classified_files['maps'].remove(native_rough_info) # Corrected variable name\r\n- self.classified_files['ignored'].append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n+ log.warning(f\"Asset '{asset_name}': Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n+ # Instead of modifying lists, just add the ignored info to be returned\r\n+ ignored_rough_maps.append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n+ # We still need to ensure the native rough map isn't processed later in the main loop\r\n elif native_rough_map_info:\r\n rough_source_to_use = native_rough_map_info\r\n derived_from_gloss_flag['ROUGH'] = False\r\n \r\n if derived_from_gloss_flag.get('ROUGH'):\r\n- source_path = self.temp_dir / rough_source_to_use['source_path']\r\n- log.info(f\"Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n- try:\r\n- img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n- if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n- original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n- if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n- if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n- elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n- else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n- # Store tuple: (inverted_float_data, original_dtype)\r\n- preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n- log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n- except Exception as e: log.error(f\"Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n+ # Ensure rough_source_to_use is not None before proceeding\r\n+ if rough_source_to_use:\r\n+ source_path = self.temp_dir / rough_source_to_use['source_path']\r\n+ log.info(f\"Asset '{asset_name}': Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n+ try:\r\n+ img_gloss = 
cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n+ if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n+ original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n+ if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n+ if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n+ elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n+ else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n+ # Store tuple: (inverted_float_data, original_dtype)\r\n+ preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n+ log.debug(f\"Asset '{asset_name}': Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n+ except Exception as e: log.error(f\"Asset '{asset_name}': Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n+ else:\r\n+ log.error(f\"Asset '{asset_name}': Gloss map identified for ROUGH, but source info is missing.\")\r\n \r\n+\r\n # --- Main Processing Loop ---\r\n- maps_to_process = list(self.classified_files['maps'])\r\n+ # Use the filtered list passed to the function\r\n+ maps_to_process = list(filtered_maps_list)\r\n for map_info in maps_to_process:\r\n map_type = map_info['map_type']\r\n source_path_rel = map_info['source_path']\r\n+\r\n+ # Skip processing native rough map if gloss was prioritized and ignored\r\n+ if map_type == 'ROUGH' and any(ignored['source_path'] == source_path_rel for ignored in ignored_rough_maps):\r\n+ log.debug(f\"Asset '{asset_name}': Skipping processing of native rough map '{source_path_rel}' as gloss version was prioritized.\")\r\n+ continue\r\n+\r\n original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n- log.info(f\"-- Processing Map Type: {map_type} (Source: 
{source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n+ log.info(f\"-- Asset '{asset_name}': Processing Map Type: {map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n img_processed, source_dtype = None, None\r\n- map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n+ # Store details locally for this asset\r\n+ current_map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n \r\n try:\r\n # --- 1. Get/Load Source Data ---\r\n if map_type in preprocessed_data:\r\n- log.debug(f\"Using pre-processed data for {map_type}.\")\r\n+ log.debug(f\"Asset '{asset_name}': Using pre-processed data for {map_type}.\")\r\n # Unpack tuple: (inverted_float_data, original_dtype)\r\n img_processed, source_dtype = preprocessed_data[map_type]\r\n- # No longer need to read the original file just for dtype\r\n else:\r\n full_source_path = self.temp_dir / source_path_rel\r\n- # Determine the read flag based on map type\r\n- # Load MASK maps with IMREAD_UNCHANGED to preserve alpha channel\r\n read_flag = cv2.IMREAD_UNCHANGED if map_type.upper() == 'MASK' else (cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED)\r\n- log.debug(f\"Loading source {source_path_rel.name} with flag: {'UNCHANGED' if read_flag == cv2.IMREAD_UNCHANGED else ('GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'DEFAULT')}\") # Updated log\r\n+ log.debug(f\"Asset '{asset_name}': Loading source {source_path_rel.name} with flag: {'UNCHANGED' if read_flag == cv2.IMREAD_UNCHANGED else ('GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'DEFAULT')}\")\r\n img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n if img_loaded is None:\r\n raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n- \r\n- # Convert BGR to RGB if it's a 3-channel image\r\n+\r\n if len(img_loaded.shape) == 3:\r\n- 
log.debug(f\"Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n+ log.debug(f\"Asset '{asset_name}': Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n img_processed = cv2.cvtColor(img_loaded, cv2.COLOR_BGR2RGB)\r\n else:\r\n- img_processed = img_loaded.copy() # Copy grayscale or 4-channel directly\r\n- \r\n- source_dtype = img_loaded.dtype # Keep original dtype for bit depth info\r\n- \r\n- log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape} (After potential BGR->RGB conversion)\")\r\n- # --- DEBUG LOG: Check channel order assumption ---\r\n- # This log is now less relevant as we convert to RGB, but keep for shape info\r\n- if len(img_processed.shape) == 3:\r\n- log.debug(f\"DEBUG: Processed image shape is {img_processed.shape}. Now in RGB order.\")\r\n- # Optional: Log a few pixel values to inspect channel order\r\n- # if img_processed.size > 0:\r\n- # sample_pixel = img_processed[0, 0] # Get top-left pixel\r\n- # log.debug(f\"DEBUG: Sample pixel (top-left): {sample_pixel.tolist()} (Now in RGB order)\")\r\n- # --- END DEBUG LOG ---\r\n- map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n+ img_processed = img_loaded.copy()\r\n \r\n+ source_dtype = img_loaded.dtype\r\n+ log.debug(f\"Asset '{asset_name}': Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape} (After potential BGR->RGB conversion)\")\r\n+\r\n+ current_map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n+\r\n # --- 2. 
Handle Alpha Mask ---\r\n if map_type == 'MASK' and img_processed is not None:\r\n- log.debug(\"Processing as MASK type.\")\r\n- log.debug(f\"MASK processing: Initial shape: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n+ # [ Existing MASK handling logic remains largely the same, just add asset_name to logs ]\r\n+ log.debug(f\"Asset '{asset_name}': Processing as MASK type.\")\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Initial shape: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n shape = img_processed.shape\r\n- # Explicitly extract alpha channel if present (4 channels)\r\n if len(shape) == 3 and shape[2] == 4:\r\n- log.debug(\"MASK processing: Extracting alpha channel (4-channel source).\") # ADDED LOG\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Extracting alpha channel (4-channel source).\")\r\n img_processed = img_processed[:, :, 3]\r\n- # Convert to grayscale if it's a 3-channel image (no alpha)\r\n elif len(shape) == 3 and shape[2] == 3:\r\n- log.debug(\"MASK processing: Converting BGR to Grayscale (3-channel source).\") # ADDED LOG\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Converting BGR to Grayscale (3-channel source).\")\r\n img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n- # If it's already grayscale (2-channel shape is not standard for images, assuming 2D array)\r\n elif len(shape) == 2:\r\n- log.debug(\"MASK processing: Source is already grayscale (2-channel shape).\") # ADDED LOG\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Source is already grayscale (2-channel shape).\")\r\n else:\r\n- log.warning(f\"MASK processing: Unexpected source shape {shape}. Cannot reliably extract mask.\") # ADDED LOG\r\n+ log.warning(f\"Asset '{asset_name}': MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\")\r\n \r\n- log.debug(f\"MASK processing: Shape after channel extraction/conversion: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Shape after channel extraction/conversion: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n \r\n- # Ensure the mask is uint8 grayscale before proceeding\r\n if img_processed.dtype != np.uint8:\r\n- log.debug(f\"Converting mask from {img_processed.dtype} to uint8.\")\r\n+ log.debug(f\"Asset '{asset_name}': Converting mask from {img_processed.dtype} to uint8.\")\r\n if img_processed.dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n else: img_processed = img_processed.astype(np.uint8)\r\n- log.debug(f\"MASK processing: Shape after dtype conversion: {img_processed.shape}, dtype: {img_processed.dtype}\") # ADDED LOG\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Shape after dtype conversion: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n \r\n \r\n if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n orig_h, orig_w = img_processed.shape[:2]\r\n- self.processed_maps_details.setdefault(map_type, {})\r\n- max_original_dimension = max(orig_w, orig_h) # Get max original dimension\r\n+ # Use local dictionary for this asset's results\r\n+ processed_maps_details_asset.setdefault(map_type, {})\r\n+ max_original_dimension = max(orig_w, orig_h)\r\n \r\n # --- 3. 
Resize & Save Loop ---\r\n for res_key, target_dim in resolutions.items():\r\n- # --- Skip Upscaling ---\r\n if target_dim > max_original_dimension:\r\n- log.debug(f\"Skipping {res_key} ({target_dim}px): Target dimension is larger than original max dimension ({max_original_dimension}px).\")\r\n+ log.debug(f\"Asset '{asset_name}': Skipping {res_key} ({target_dim}px) for {map_type}: Target larger than original ({max_original_dimension}px).\")\r\n continue\r\n- log.debug(f\"Processing {map_type} for resolution: {res_key}...\")\r\n- if orig_w <= 0 or orig_h <= 0: log.warning(f\"Invalid original dims, skipping resize {res_key}.\"); continue\r\n+ log.debug(f\"Asset '{asset_name}': Processing {map_type} for resolution: {res_key}...\")\r\n+ if orig_w <= 0 or orig_h <= 0: log.warning(f\"Asset '{asset_name}': Invalid original dims for {map_type}, skipping resize {res_key}.\"); continue\r\n target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n- except Exception as resize_err: log.error(f\"Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n+ except Exception as resize_err: log.error(f\"Asset '{asset_name}': Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n \r\n # --- 3a. 
Calculate Stats ---\r\n if res_key == stats_res_key and stats_target_dim:\r\n- log.debug(f\"Calculating stats for {map_type} using {res_key} image...\")\r\n- stats = _calculate_image_stats(img_resized) # Use the already resized image\r\n- if stats: self.metadata[\"image_stats_1k\"][map_type] = stats\r\n- else: log.warning(f\"Stats calculation failed for {map_type} at {res_key}.\")\r\n- # Calculate aspect change string on the lowest processed resolution, only once per asset\r\n+ log.debug(f\"Asset '{asset_name}': Calculating stats for {map_type} using {res_key} image...\")\r\n+ stats = _calculate_image_stats(img_resized)\r\n+ # Store stats locally for this asset\r\n+ if stats: image_stats_asset[map_type] = stats\r\n+ else: log.warning(f\"Asset '{asset_name}': Stats calculation failed for {map_type} at {res_key}.\")\r\n+\r\n+ # Calculate aspect change string (only once per asset)\r\n lowest_res_key = min(resolutions, key=resolutions.get)\r\n- log.debug(f\"Aspect ratio check: res_key='{res_key}', lowest_res_key='{lowest_res_key}', current_aspect_string='{self.metadata['aspect_ratio_change_string']}'\")\r\n- if self.metadata[\"aspect_ratio_change_string\"] == \"N/A\" and res_key == lowest_res_key:\r\n- log.debug(\"Aspect ratio calculation condition met.\")\r\n+ # Use local variable for check and assignment\r\n+ if aspect_ratio_change_string_asset == \"N/A\" and res_key == lowest_res_key:\r\n+ log.debug(f\"Asset '{asset_name}': Aspect ratio calculation condition met.\")\r\n try:\r\n aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n- self.metadata[\"aspect_ratio_change_string\"] = aspect_string\r\n- log.debug(f\"Stored aspect ratio change string using lowest res '{res_key}': '{aspect_string}'\")\r\n+ aspect_ratio_change_string_asset = aspect_string # Store locally\r\n+ log.debug(f\"Asset '{asset_name}': Stored aspect ratio change string using lowest res '{res_key}': '{aspect_string}'\")\r\n except Exception as aspect_err:\r\n- 
log.error(f\"Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", exc_info=True)\r\n- self.metadata[\"aspect_ratio_change_string\"] = \"Error\" # Indicate calculation failure\r\n+ log.error(f\"Asset '{asset_name}': Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", exc_info=True)\r\n+ aspect_ratio_change_string_asset = \"Error\" # Indicate calculation failure locally\r\n \r\n # --- 3b. Determine Output Bit Depth & Format ---\r\n+ # [ Existing logic for determining bit depth and format remains the same ]\r\n bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n- current_dtype = img_resized.dtype # Dtype after resize\r\n- output_dtype_target, output_bit_depth = None, 8 # Defaults\r\n+ current_dtype = img_resized.dtype\r\n+ output_dtype_target, output_bit_depth = None, 8\r\n if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n elif bit_depth_rule == 'respect':\r\n if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16 # Respect float source as 16bit uint for non-EXR\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8 # Default respect to 8bit\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8 # Fallback\r\n+ elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ else: output_dtype_target, output_bit_depth = np.uint8, 8\r\n+ else: output_dtype_target, output_bit_depth = np.uint8, 8\r\n \r\n- # --- 3c. Determine Output Format based on Input, Rules & Threshold ---\r\n+ # --- 3c. 
Determine Output Format ---\r\n+ # [ Existing logic for determining output format remains the same, add asset_name to logs ]\r\n output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format() # Usually 'png'\r\n- threshold = self.config.resolution_threshold_for_jpg # Get threshold value\r\n+ fmt_8bit_config = self.config.get_8bit_output_format()\r\n+ threshold = self.config.resolution_threshold_for_jpg\r\n force_lossless = map_type in self.config.force_lossless_map_types\r\n \r\n if force_lossless:\r\n- log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n+ log.debug(f\"Asset '{asset_name}': Format forced to lossless for map type '{map_type}'.\")\r\n+ # ... (rest of force_lossless logic) ...\r\n if output_bit_depth == 16:\r\n- output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n+ output_format = primary_fmt_16\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n- # Add EXR type flags if needed, e.g., HALF for 16-bit float\r\n save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- # Add compression later if desired, e.g. cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_ZIP\r\n- else: # Assume png or other lossless 16-bit format\r\n- # Use fallback if primary isn't suitable or fails? For now, assume primary is lossless.\r\n- # If primary_fmt_16 is not 'png', maybe default to fallback_fmt_16?\r\n+ else:\r\n if output_format != \"png\":\r\n- log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless. Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16 # Ensure it's a known lossless like png/tif\r\n+ log.warning(f\"Asset '{asset_name}': Primary 16-bit format '{output_format}' not PNG for forced lossless. 
Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- # Add params for other lossless like TIF if supported\r\n else: # 8-bit lossless\r\n- output_format = fmt_8bit_config # Usually 'png'\r\n+ output_format = fmt_8bit_config\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n else:\r\n- log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. Forced lossless for '{map_type}' will use PNG instead.\")\r\n- output_format = \"png\"\r\n- output_ext = \".png\"\r\n+ log.warning(f\"Asset '{asset_name}': Configured 8-bit format '{output_format}' is not PNG. Forced lossless for '{map_type}' will use PNG instead.\")\r\n+ output_format = \"png\"; output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n \r\n- # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n+ output_format = 'jpg'; output_ext = '.jpg'\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- # --- Else: Apply Input/Rule-Based Logic ---\r\n+ log.debug(f\"Asset '{asset_name}': Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= 
{threshold}).\")\r\n else:\r\n- # Apply force_8bit rule (if not overridden by threshold)\r\n+ # ... (rest of format determination logic, adding asset_name to logs) ...\r\n if bit_depth_rule == 'force_8bit':\r\n- output_format = 'png' # Force to PNG as per clarification\r\n- output_ext = '.png'\r\n- # output_bit_depth is already 8, output_dtype_target is already uint8\r\n+ output_format = 'png'; output_ext = '.png'\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n- # Handle specific input extensions if not forced to 8bit PNG\r\n+ log.debug(f\"Asset '{asset_name}': Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n elif original_extension == '.jpg' and output_bit_depth == 8:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n+ output_format = 'jpg'; output_ext = '.jpg'\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n+ log.debug(f\"Asset '{asset_name}': Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n elif original_extension == '.tif':\r\n if output_bit_depth == 16:\r\n output_format = primary_fmt_16\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) # Add compression later\r\n- log.debug(f\"Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ log.debug(f\"Asset '{asset_name}': Using EXR format for {map_type} at {res_key} based on TIF input 
(16-bit target).\")\r\n elif output_format == \"png\":\r\n output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n- else: # Fallback for 16-bit from TIF\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n+ log.debug(f\"Asset '{asset_name}': Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n+ else:\r\n+ output_format = fallback_fmt_16; output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- else: # output_bit_depth == 8 from TIF input (and below threshold)\r\n- output_format = 'png'\r\n- output_ext = '.png'\r\n+ log.debug(f\"Asset '{asset_name}': Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n+ else:\r\n+ output_format = 'png'; output_ext = '.png'\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n- # Handle other inputs (e.g., PNG) or fallbacks\r\n+ log.debug(f\"Asset '{asset_name}': Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n else:\r\n if output_bit_depth == 16:\r\n output_format = primary_fmt_16\r\n if output_format.startswith(\"exr\"):\r\n@@ -1024,28 +1032,28 @@\n elif output_format == \"png\":\r\n output_ext = \".png\"\r\n png_level = 
self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # Fallback for 16-bit\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n+ else:\r\n+ output_format = fallback_fmt_16; output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n- else: # 8-bit output (and below threshold)\r\n- output_format = fmt_8bit_config # Use configured 8-bit format\r\n- output_ext = f\".{output_format}\"\r\n+ log.debug(f\"Asset '{asset_name}': Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n+ else:\r\n+ output_format = fmt_8bit_config; output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n elif output_format == \"jpg\":\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n+ log.debug(f\"Asset '{asset_name}': Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n \r\n- img_to_save = img_resized.copy() # Work on copy for dtype conversion\r\n+\r\n+ img_to_save = img_resized.copy()\r\n # --- Apply Dtype Conversion ---\r\n+ # [ Existing dtype conversion logic remains the same ]\r\n if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n if img_to_save.dtype == np.uint16: img_to_save = 
(img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n@@ -1055,75 +1063,69 @@\n if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n \r\n+\r\n # --- 3d. Construct Filename & Save ---\r\n+ # Use base_name (which is the current asset's name)\r\n filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n output_path_temp = self.temp_dir / filename\r\n- log.debug(f\"Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n- log.debug(f\"Saving {map_type} ({res_key}): Final image shape: {img_to_save.shape}, dtype: {img_to_save.dtype}\") # ADDED LOG\r\n+ log.debug(f\"Asset '{asset_name}': Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n+ log.debug(f\"Asset '{asset_name}': Saving {map_type} ({res_key}): Final image shape: {img_to_save.shape}, dtype: {img_to_save.dtype}\")\r\n saved_successfully, actual_format_saved = False, output_format\r\n try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n except Exception as save_err:\r\n- log.error(f\"Save failed ({output_format}): {save_err}\")\r\n+ log.error(f\"Asset '{asset_name}': Save failed ({output_format}) for {map_type} {res_key}: {save_err}\")\r\n # --- Try Fallback ---\r\n if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Attempting fallback: {fallback_fmt_16}\")\r\n- actual_format_saved = 
fallback_fmt_16; output_ext = f\".{fallback_fmt_16}\"; # Adjust format/ext\r\n+ log.warning(f\"Asset '{asset_name}': Attempting fallback {fallback_fmt_16} for {map_type} {res_key}\")\r\n+ # [ Existing fallback logic remains the same, add asset_name to logs ]\r\n+ actual_format_saved = fallback_fmt_16; output_ext = f\".{fallback_fmt_16}\";\r\n filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n output_path_temp = self.temp_dir / filename\r\n- save_params_fallback = [] # Reset params for fallback\r\n+ save_params_fallback = []\r\n img_fallback = None; target_fallback_dtype = np.uint16\r\n- if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, 6])\r\n- elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n+ if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif fallback_fmt_16 == \"tif\": pass\r\n \r\n- # Convert EXR source (float16) to uint16 for PNG/TIF fallback\r\n- #if img_to_save.dtype == np.float16: img_fallback = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(target_fallback_dtype)\r\n if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n- # Check for NaN/Inf before conversion\r\n if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n- log.error(f\"Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. Skipping resolution.\")\r\n- continue # Skip fallback if data is bad\r\n-\r\n- # Clip *after* scaling for uint16 conversion robustness\r\n+ log.error(f\"Asset '{asset_name}': Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. 
Skipping resolution.\")\r\n+ continue\r\n img_scaled = img_to_save * 65535.0\r\n img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already correct type\r\n- else: log.error(f\"Cannot convert {img_to_save.dtype} for fallback.\"); continue # Skip fallback\r\n+ elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save\r\n+ else: log.error(f\"Asset '{asset_name}': Cannot convert {img_to_save.dtype} for fallback.\"); continue\r\n \r\n- try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Fallback save failed: {fallback_err}\", exc_info=True)\r\n+ try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Asset '{asset_name}': Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err: log.error(f\"Asset '{asset_name}': Fallback save failed for {map_type} {res_key}: {fallback_err}\", exc_info=True)\r\n \r\n+\r\n # --- 3e. 
Store Result ---\r\n if saved_successfully:\r\n- self.processed_maps_details[map_type][res_key] = {\r\n+ # Store in the local dictionary for this asset\r\n+ processed_maps_details_asset[map_type][res_key] = {\r\n \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n \"format\": actual_format_saved\r\n }\r\n- map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n+ current_map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n \r\n except Exception as map_proc_err:\r\n- log.error(f\"Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n- self.processed_maps_details.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n+ log.error(f\"Asset '{asset_name}': Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n+ # Store error in the local dictionary for this asset\r\n+ processed_maps_details_asset.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n \r\n- self.metadata[\"map_details\"][map_type] = map_details # Store details for this map type\r\n+ # Store details locally for this asset\r\n+ map_details_asset[map_type] = current_map_details\r\n \r\n- # --- Final Metadata Updates ---\r\n- processed_map_types = set(k for k, v in self.processed_maps_details.items() if 'error' not in v) # Only count successful\r\n- self.metadata[\"maps_present\"] = sorted(list(processed_map_types))\r\n- features = set()\r\n- for map_type, details in self.metadata[\"map_details\"].items():\r\n- if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n- if details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n- res_details = self.processed_maps_details.get(map_type, {})\r\n- if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): 
features.add(f\"16bit_{map_type}\")\r\n- self.metadata[\"shader_features\"] = sorted(list(features))\r\n- log.debug(f\"Determined shader features: {self.metadata['shader_features']}\")\r\n- log.info(\"Finished processing all map files.\")\r\n+ # --- Return results for this specific asset ---\r\n+ log.info(f\"Finished processing map files for asset '{asset_name}'.\")\r\n+ # Note: Final metadata updates (maps_present, shader_features) are handled in the main process loop\r\n+ return processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps\r\n \r\n \r\n- #log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n+ def _merge_maps(self):\r\n \r\n def _merge_maps(self):\r\n \"\"\"Merges channels from different maps based on rules in configuration.\"\"\"\r\n # ... (initial checks and getting merge_rules) ...\r\n" }, { "date": 1745316768565, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1125,312 +1125,287 @@\n \r\n \r\n def _merge_maps(self):\r\n \r\n- def _merge_maps(self):\r\n- \"\"\"Merges channels from different maps based on rules in configuration.\"\"\"\r\n- # ... 
(initial checks and getting merge_rules) ...\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Workspace not setup.\")\r\n+ def _merge_maps(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict) -> Dict[str, Dict[str, Dict]]:\r\n+ \"\"\"\r\n+ Merges channels from different maps for a specific asset based on rules in configuration.\r\n \r\n+ Args:\r\n+ processed_maps_details_asset: Details of successfully processed maps for this asset.\r\n+ filtered_classified_files: Classified files dictionary filtered for this asset.\r\n+ current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n \r\n- # <<< FIX: Get merge rules from the configuration object >>>\r\n+ Returns:\r\n+ Dict[str, Dict[str, Dict]]: Details of successfully merged maps for this asset.\r\n+ \"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n \r\n merge_rules = self.config.map_merge_rules\r\n+ log.info(f\"Asset '{asset_name}': Applying {len(merge_rules)} map merging rule(s)...\")\r\n \r\n- # <<< END FIX >>>\r\n- log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n+ # Initialize results for this asset\r\n+ merged_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n \r\n for rule_index, rule in enumerate(merge_rules):\r\n- # <<< FIX: Assign variables *before* using them >>>\r\n output_map_type = rule.get(\"output_map_type\")\r\n inputs_mapping = rule.get(\"inputs\")\r\n defaults = rule.get(\"defaults\", {})\r\n rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n \r\n- # <<< FIX: Check if essential rule keys exist *after* assignment >>>\r\n if not output_map_type or not inputs_mapping:\r\n- log.warning(f\"Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs' in rule definition. 
Rule data: {rule}\")\r\n- continue # Skip to the next rule in merge_rules\r\n+ log.warning(f\"Asset '{asset_name}': Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs'. Rule: {rule}\")\r\n+ continue\r\n \r\n- # Now it's safe to use output_map_type in the log statement\r\n- log.info(f\"-- Applying merge rule for '{output_map_type}' --\")\r\n- # <<< END FIX >>>\r\n+ log.info(f\"-- Asset '{asset_name}': Applying merge rule for '{output_map_type}' --\")\r\n \r\n- self.merged_maps_details.setdefault(output_map_type, {})\r\n-\r\n- # --- Determine required inputs and their common resolutions ---\r\n- required_input_types = set(inputs_mapping.values()) # e.g., {'NRM', 'ROUGH'}\r\n+ # --- Determine required inputs and their common resolutions for *this asset* ---\r\n+ required_input_types = set(inputs_mapping.values())\r\n if not required_input_types:\r\n- log.warning(f\"Skipping merge rule '{output_map_type}': No input map types defined in 'inputs'.\")\r\n+ log.warning(f\"Asset '{asset_name}': Skipping merge rule '{output_map_type}': No input map types defined.\")\r\n continue\r\n \r\n possible_resolutions_per_input = []\r\n for input_type in required_input_types:\r\n- if input_type in self.processed_maps_details:\r\n- # Get resolution keys where processing didn't error\r\n- res_keys = {res for res, details in self.processed_maps_details[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n+ # Use the processed map details passed for this asset\r\n+ if input_type in processed_maps_details_asset:\r\n+ res_keys = {res for res, details in processed_maps_details_asset[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n if not res_keys:\r\n- log.warning(f\"Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n- possible_resolutions_per_input = [] # Cannot proceed if any input is missing all resolutions\r\n+ log.warning(f\"Asset 
'{asset_name}': Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n+ possible_resolutions_per_input = []\r\n break\r\n possible_resolutions_per_input.append(res_keys)\r\n else:\r\n- log.warning(f\"Required input map type '{input_type}' for merge rule '{output_map_type}' was not processed.\")\r\n- possible_resolutions_per_input = [] # Cannot proceed if any input type is missing\r\n+ log.warning(f\"Asset '{asset_name}': Required input map type '{input_type}' for merge rule '{output_map_type}' was not processed.\")\r\n+ possible_resolutions_per_input = []\r\n break\r\n \r\n if not possible_resolutions_per_input:\r\n- log.warning(f\"Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. Skipping rule.\")\r\n+ log.warning(f\"Asset '{asset_name}': Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. Skipping rule.\")\r\n continue\r\n \r\n- # Find the intersection of resolution keys across all required inputs\r\n common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n \r\n if not common_resolutions:\r\n- log.warning(f\"No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n+ log.warning(f\"Asset '{asset_name}': No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. 
Skipping rule.\")\r\n continue\r\n- log.debug(f\"Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n- # --- End Common Resolution Logic ---\r\n+ log.debug(f\"Asset '{asset_name}': Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n \r\n-\r\n- # <<< LOOP THROUGH COMMON RESOLUTIONS >>>\r\n- # Use the actual common_resolutions found\r\n+ # --- Loop through common resolutions ---\r\n res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n if not res_order:\r\n- log.warning(f\"Common resolutions {common_resolutions} do not match any target resolutions in config. Skipping merge for '{output_map_type}'.\")\r\n+ log.warning(f\"Asset '{asset_name}': Common resolutions {common_resolutions} do not match config. Skipping merge for '{output_map_type}'.\")\r\n continue\r\n \r\n- # Sort resolutions to process (optional, but nice for logs)\r\n sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n-\r\n- # Get target pattern from config for filename formatting\r\n target_pattern = self.config.target_filename_pattern\r\n+ base_name = asset_name # Use current asset's name\r\n \r\n for current_res_key in sorted_res_keys:\r\n- log.debug(f\"Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n+ log.debug(f\"Asset '{asset_name}': Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n try:\r\n loaded_inputs = {}\r\n input_bit_depths = set()\r\n- input_original_extensions = {} # Store original extensions for this resolution's inputs\r\n+ input_original_extensions = {}\r\n \r\n- # --- Load required input maps *at this specific resolution* AND get original extensions ---\r\n+ # --- Load required input maps for this asset and resolution ---\r\n possible_to_load = True\r\n- base_name = self.metadata['asset_name']\r\n- target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B']\r\n+ target_channels = 
list(inputs_mapping.keys())\r\n \r\n for map_type in required_input_types:\r\n- res_details = self.processed_maps_details.get(map_type, {}).get(current_res_key)\r\n+ # Use processed_maps_details_asset passed in\r\n+ res_details = processed_maps_details_asset.get(map_type, {}).get(current_res_key)\r\n if not res_details or 'path' not in res_details:\r\n- log.warning(f\"Input map '{map_type}' missing details or path for resolution '{current_res_key}'. Cannot merge for this resolution.\")\r\n+ log.warning(f\"Asset '{asset_name}': Input map '{map_type}' missing details or path for resolution '{current_res_key}'. Cannot merge.\")\r\n possible_to_load = False; break\r\n \r\n- # Find original extension from classified data\r\n+ # Find original extension from the filtered classified data for this asset\r\n original_ext = '.png' # Default\r\n found_original = False\r\n- for classified_map in self.classified_files[\"maps\"]:\r\n- # Match based on the base map type (e.g., NRM matches NRM-1)\r\n+ # Use filtered_classified_files passed in\r\n+ for classified_map in filtered_classified_files.get(\"maps\", []):\r\n if classified_map['map_type'].startswith(map_type):\r\n- # Check if the processed path matches (to handle multiple variants if needed, though less likely here)\r\n- # This assumes processed_maps_details path is relative to temp_dir\r\n- processed_path_str = str(res_details['path'])\r\n- classified_path_str = str(classified_map['source_path']) # This is the original source path relative to temp_dir root\r\n- # A more robust check might involve comparing filenames if paths differ due to processing steps\r\n- # For now, rely on the base map type match and grab the first extension found\r\n original_ext = classified_map.get('original_extension', '.png')\r\n found_original = True\r\n- break # Found the first match for this map_type\r\n+ break\r\n if not found_original:\r\n- log.warning(f\"Could not reliably find original extension for merge input '{map_type}'. 
Defaulting to '.png'.\")\r\n+ log.warning(f\"Asset '{asset_name}': Could not find original extension for merge input '{map_type}'. Defaulting to '.png'.\")\r\n \r\n input_original_extensions[map_type] = original_ext\r\n \r\n # Load the image\r\n input_file_path = self.temp_dir / res_details['path']\r\n read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- log.debug(f\"Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ log.debug(f\"Asset '{asset_name}': Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n img = cv2.imread(str(input_file_path), read_flag)\r\n if img is None:\r\n raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n loaded_inputs[map_type] = img\r\n input_bit_depths.add(res_details.get('bit_depth', 8))\r\n \r\n- # --- DEBUG LOG: Check channel order of merge inputs ---\r\n if len(img.shape) == 3:\r\n- log.debug(f\"DEBUG: Merge input '{input_file_path.name}' ({map_type}) loaded with shape {img.shape}, dtype {img.dtype}. 
Assuming OpenCV default BGR order.\")\r\n- # Optional: Log a few pixel values to inspect channel order\r\n- # if img.size > 0:\r\n- # sample_pixel = img[0, 0] # Get top-left pixel\r\n- # log.debug(f\"DEBUG: Sample pixel (top-left) for merge input: {sample_pixel.tolist()} (Assuming BGR order)\")\r\n- # --- END DEBUG LOG ---\r\n+ log.debug(f\"Asset '{asset_name}': DEBUG: Merge input '{input_file_path.name}' ({map_type}) loaded shape {img.shape}, dtype {img.dtype}.\")\r\n \r\n- if not possible_to_load: continue # Skip this resolution if inputs missing\r\n+ if not possible_to_load: continue\r\n \r\n- # --- Determine dimensions and target_dim for threshold check ---\r\n- first_map_type = next(iter(required_input_types)) # Get one map type to read dimensions\r\n+ # --- Determine dimensions and target_dim ---\r\n+ first_map_type = next(iter(required_input_types))\r\n h, w = loaded_inputs[first_map_type].shape[:2]\r\n- # Get target_dim from the details of the first loaded input for this resolution\r\n- first_res_details = self.processed_maps_details.get(first_map_type, {}).get(current_res_key)\r\n+ first_res_details = processed_maps_details_asset.get(first_map_type, {}).get(current_res_key)\r\n target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n num_target_channels = len(target_channels)\r\n \r\n- # <<< Re-calculate output_bit_depth based on THIS resolution's inputs >>>\r\n+ # --- Determine Output Bit Depth ---\r\n max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n output_bit_depth = 8\r\n if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n output_bit_depth = 16\r\n- log.debug(f\"Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n+ log.debug(f\"Asset '{asset_name}': Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n \r\n- # Prepare channels (float32) (same 
logic as before)\r\n+ # --- Prepare and Merge Channels ---\r\n+ # [ Existing channel preparation and merging logic remains the same ]\r\n merged_channels_float32 = []\r\n- # Use the defined target_channels list\r\n- for target_channel in target_channels: # Iterate R, G, B (or specified) order\r\n+ for target_channel in target_channels:\r\n source_map_type = inputs_mapping.get(target_channel)\r\n channel_data_float32 = None\r\n if source_map_type and source_map_type in loaded_inputs:\r\n- # ... [Extract channel data as float32 as before] ...\r\n img_input = loaded_inputs[source_map_type]\r\n if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n else: img_float = img_input.astype(np.float32)\r\n num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n- if num_source_channels >= 3: # Now RGB Source (after conversion in _process_maps)\r\n- if target_channel == 'R': channel_data_float32 = img_float[:, :, 0] # Red channel (index 0 in RGB)\r\n- elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1] # Green channel (index 1 in RGB)\r\n- elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2] # Blue channel (index 2 in RGB)\r\n- elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3] # Alpha channel (index 3 in RGBA)\r\n- elif num_source_channels == 1 or len(img_float.shape) == 2: # Grayscale source\r\n+ if num_source_channels >= 3:\r\n+ if target_channel == 'R': channel_data_float32 = img_float[:, :, 0]\r\n+ elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n+ elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2]\r\n+ elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n+ elif num_source_channels == 1 or len(img_float.shape) == 2:\r\n channel_data_float32 = 
img_float.reshape(h, w)\r\n- if channel_data_float32 is None: # Use default if needed\r\n+ if channel_data_float32 is None:\r\n default_val = defaults.get(target_channel)\r\n if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n merged_channels_float32.append(channel_data_float32)\r\n \r\n-\r\n- # Merge channels (same as before)\r\n if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n merged_image_float32 = cv2.merge(merged_channels_float32)\r\n \r\n- # Final Data Type Conversion (based on recalculated output_bit_depth)\r\n+ # --- Final Data Type Conversion ---\r\n img_final_merged = None\r\n if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n else: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n \r\n- # --- Determine Output Format based on Threshold, Input Hierarchy & Rules ---\r\n+ # --- Determine Output Format ---\r\n+ # [ Existing format determination logic remains the same, add asset_name to logs ]\r\n output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n fmt_8bit_config = self.config.get_8bit_output_format()\r\n threshold = self.config.resolution_threshold_for_jpg\r\n force_lossless = output_map_type in self.config.force_lossless_map_types\r\n \r\n if force_lossless:\r\n- log.debug(f\"Format forced to lossless for merged map type '{output_map_type}'.\")\r\n+ log.debug(f\"Asset '{asset_name}': Format forced to lossless for merged map type '{output_map_type}'.\")\r\n+ # ... 
(rest of force_lossless logic) ...\r\n if output_bit_depth == 16:\r\n- output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n+ output_format = primary_fmt_16\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else: # Assume png or other lossless 16-bit format\r\n+ else:\r\n if output_format != \"png\":\r\n- log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n+ log.warning(f\"Asset '{asset_name}': Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n output_format = fallback_fmt_16\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n else: # 8-bit lossless\r\n- output_format = fmt_8bit_config # Usually 'png'\r\n+ output_format = fmt_8bit_config\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n else:\r\n- log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n- output_format = \"png\"\r\n- output_ext = \".png\"\r\n+ log.warning(f\"Asset '{asset_name}': Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n+ output_format = \"png\"; output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n \r\n- # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'\r\n- output_ext = '.jpg'\r\n+ output_format = 'jpg'; output_ext = '.jpg'\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- # --- Else: Apply Hierarchy/Rule-Based Logic ---\r\n+ log.debug(f\"Asset '{asset_name}': Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n else:\r\n+ # ... 
(rest of hierarchy logic, add asset_name to logs) ...\r\n involved_extensions = set(input_original_extensions.values())\r\n- log.debug(f\"Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n- # Hierarchy: EXR > TIF > PNG > JPG\r\n- highest_format_str = 'jpg' # Start lowest\r\n+ log.debug(f\"Asset '{asset_name}': Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n+ highest_format_str = 'jpg'\r\n if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n elif '.png' in involved_extensions: highest_format_str = 'png'\r\n \r\n- # Determine final output format based on hierarchy and target bit depth\r\n final_output_format = highest_format_str\r\n-\r\n if highest_format_str == 'tif':\r\n if output_bit_depth == 16:\r\n- final_output_format = primary_fmt_16 # Use configured 16-bit pref (EXR/PNG)\r\n- log.debug(f\"Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n- else: # 8-bit target\r\n- final_output_format = 'png' # Force TIF input to PNG for 8-bit merge output\r\n- log.debug(\"Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n+ final_output_format = primary_fmt_16\r\n+ log.debug(f\"Asset '{asset_name}': Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n+ else:\r\n+ final_output_format = 'png'\r\n+ log.debug(f\"Asset '{asset_name}': Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n else:\r\n- log.debug(f\"Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold not met)\")\r\n+ log.debug(f\"Asset '{asset_name}': Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold not met)\")\r\n \r\n- # Set format/params based on the determined 
final_output_format\r\n output_format = final_output_format\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n elif output_format == \"png\":\r\n output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\": # Should only happen if highest input was JPG and below threshold\r\n+ elif output_format == \"jpg\":\r\n output_ext = \".jpg\"\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n else:\r\n- log.error(f\"Unsupported final output format '{output_format}' determined for merged map '{output_map_type}'. Skipping save.\")\r\n+ log.error(f\"Asset '{asset_name}': Unsupported final output format '{output_format}' for merged map '{output_map_type}'. Skipping save.\")\r\n continue\r\n \r\n- # --- JPG 8-bit Check (applies regardless of threshold logic) ---\r\n+\r\n+ # --- JPG 8-bit Check ---\r\n if output_format == \"jpg\" and output_bit_depth == 16:\r\n- log.warning(f\"Output format is JPG, but merge rule resulted in 16-bit target. Forcing merged output to 8-bit for JPG save.\")\r\n+ log.warning(f\"Asset '{asset_name}': Output format is JPG, but merge rule resulted in 16-bit target. 
Forcing merged output to 8-bit for JPG save.\")\r\n img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- output_bit_depth = 8 # Correct the recorded bit depth\r\n+ output_bit_depth = 8\r\n \r\n # --- Save Merged Map ---\r\n- image_to_save = img_final_merged # Already converted to target bit depth (or forced to 8bit for JPG)\r\n-\r\n- # Apply float16 conversion if needed for EXR\r\n+ image_to_save = img_final_merged\r\n if needs_float16 and image_to_save.dtype != np.float16:\r\n+ # [ Existing float16 conversion logic ]\r\n if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- else: log.warning(f\"Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n+ else: log.warning(f\"Asset '{asset_name}': Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n \r\n+ # Use base_name (current asset's name)\r\n merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n merged_output_path_temp = self.temp_dir / merged_filename\r\n- log.debug(f\"Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n+ log.debug(f\"Asset '{asset_name}': Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n \r\n- # --- Add save logic with fallback here ---\r\n+ # --- Save with Fallback ---\r\n+ # [ Existing save/fallback logic, add asset_name to logs ]\r\n saved_successfully = False\r\n actual_format_saved = output_format\r\n try:\r\n cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n- log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as 
{output_format}\")\r\n+ log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n saved_successfully = True\r\n except Exception as save_err:\r\n- log.error(f\"Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n- # Try Fallback for merged map (similar to _process_maps fallback)\r\n+ log.error(f\"Asset '{asset_name}': Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n- # ... [ Implement fallback save logic here, converting image_to_save if needed ] ...\r\n+ log.warning(f\"Asset '{asset_name}': Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n+ # ... [ Fallback save logic, add asset_name to logs ] ...\r\n actual_format_saved = fallback_fmt_16\r\n output_ext = f\".{fallback_fmt_16}\"\r\n merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n merged_output_path_temp = self.temp_dir / merged_filename\r\n@@ -1438,34 +1413,37 @@\n img_fallback = None\r\n target_fallback_dtype = np.uint16\r\n \r\n if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n+ elif fallback_fmt_16 == \"tif\": pass\r\n \r\n if image_to_save.dtype == np.float16:\r\n- if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(\"NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n+ if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(f\"Asset '{asset_name}': NaN/Inf in merged float16 data, cannot 
fallback.\"); continue\r\n img_scaled = image_to_save * 65535.0\r\n img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n- else: log.error(f\"Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n+ else: log.error(f\"Asset '{asset_name}': Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n \r\n- try: cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n- # --- End Fallback Logic ---\r\n+ try: cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err: log.error(f\"Asset '{asset_name}': Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n \r\n- # Record details if save successful\r\n+\r\n+ # --- Record details locally ---\r\n if saved_successfully:\r\n- self.merged_maps_details[output_map_type][current_res_key] = {\r\n+ merged_maps_details_asset[output_map_type][current_res_key] = {\r\n \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n }\r\n- if output_map_type not in self.metadata[\"merged_maps\"]: self.metadata[\"merged_maps\"].append(output_map_type)\r\n+ # Note: Adding to metadata[\"merged_maps\"] list happens in the main process loop\r\n \r\n except Exception as merge_res_err:\r\n- log.error(f\"Failed merging '{output_map_type}' at resolution 
'{current_res_key}': {merge_res_err}\", exc_info=True)\r\n- self.merged_maps_details.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n+ log.error(f\"Asset '{asset_name}': Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n+ # Store error locally for this asset\r\n+ merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n \r\n- log.info(\"Finished applying map merging rules.\")\r\n+ log.info(f\"Asset '{asset_name}': Finished applying map merging rules.\")\r\n+ # Return the details for this asset\r\n+ return merged_maps_details_asset\r\n \r\n \r\n def _generate_metadata_file(self):\r\n \"\"\"Gathers all collected metadata and writes it to a JSON file.\"\"\"\r\n" }, { "date": 1745316812741, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1444,49 +1444,100 @@\n # Return the details for this asset\r\n return merged_maps_details_asset\r\n \r\n \r\n- def _generate_metadata_file(self):\r\n- \"\"\"Gathers all collected metadata and writes it to a JSON file.\"\"\"\r\n- # ... 
(Implementation from Response #49) ...\r\n+ def _generate_metadata_file(self, current_asset_metadata: Dict, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], map_details_asset: Dict[str, Dict]) -> Path:\r\n+ \"\"\"\r\n+ Gathers metadata for a specific asset and writes it to a temporary JSON file.\r\n+\r\n+ Args:\r\n+ current_asset_metadata: Base metadata for this asset (name, category, archetype, etc.).\r\n+ processed_maps_details_asset: Details of processed maps for this asset.\r\n+ merged_maps_details_asset: Details of merged maps for this asset.\r\n+ filtered_classified_files_asset: Classified files belonging only to this asset.\r\n+ unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n+ map_details_asset: Dictionary containing details like source bit depth, gloss inversion per map type.\r\n+\r\n+\r\n+ Returns:\r\n+ Path: The path to the generated temporary metadata file.\r\n+ \"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\":\r\n- log.warning(\"Asset name unknown, metadata may be incomplete.\")\r\n+ asset_name = current_asset_metadata.get(\"asset_name\")\r\n+ if not asset_name or asset_name == \"UnknownAssetName\":\r\n+ log.warning(\"Asset name unknown during metadata generation, file may be incomplete or incorrectly named.\")\r\n+ asset_name = \"UnknownAsset_Metadata\" # Fallback for filename\r\n \r\n- log.info(f\"Generating metadata file for '{self.metadata['asset_name']}'...\")\r\n- final_metadata = self.metadata.copy()\r\n+ log.info(f\"Generating metadata file for asset '{asset_name}'...\")\r\n+ # Start with the base metadata passed in for this asset\r\n+ final_metadata = current_asset_metadata.copy()\r\n \r\n+ # Populate map details 
from the specific asset's processing results\r\n final_metadata[\"processed_map_resolutions\"] = {}\r\n- for map_type, res_dict in self.processed_maps_details.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n- if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys) # Basic sort\r\n+ for map_type, res_dict in processed_maps_details_asset.items():\r\n+ keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n+ if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys)\r\n \r\n final_metadata[\"merged_map_resolutions\"] = {}\r\n- for map_type, res_dict in self.merged_maps_details.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n+ for map_type, res_dict in merged_maps_details_asset.items():\r\n+ keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n \r\n+ # Determine maps present based on successful processing for this asset\r\n+ final_metadata[\"maps_present\"] = sorted(list(processed_maps_details_asset.keys()))\r\n+ final_metadata[\"merged_maps\"] = sorted(list(merged_maps_details_asset.keys()))\r\n+\r\n+ # Determine shader features based on this asset's maps\r\n+ features = set()\r\n+ for map_type, details in map_details_asset.items(): # Use map_details_asset passed in\r\n+ if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n+ if details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n+ res_details = processed_maps_details_asset.get(map_type, {})\r\n+ if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n+ final_metadata[\"shader_features\"] = sorted(list(features))\r\n+\r\n+ # Determine source files in this asset's Extra folder\r\n+ # 
Includes:\r\n+ # - Files originally classified as 'Extra' or 'Unrecognised' belonging to this asset.\r\n+ # - Files originally classified as 'Ignored' belonging to this asset.\r\n+ # - All 'unmatched' files (belonging to no specific asset).\r\n+ source_files_in_extra_set = set()\r\n+ for category in ['extra', 'ignored']:\r\n+ for file_info in filtered_classified_files_asset.get(category, []):\r\n+ source_files_in_extra_set.add(str(file_info['source_path']))\r\n+ # Add all unmatched files\r\n+ for file_path in unmatched_files_paths:\r\n+ source_files_in_extra_set.add(str(file_path))\r\n+ final_metadata[\"source_files_in_extra\"] = sorted(list(source_files_in_extra_set))\r\n+\r\n+ # Add image stats and map details specific to this asset\r\n+ final_metadata[\"image_stats_1k\"] = current_asset_metadata.get(\"image_stats_1k\", {}) # Get from passed metadata\r\n+ final_metadata[\"map_details\"] = map_details_asset # Use map_details_asset passed in\r\n+ final_metadata[\"aspect_ratio_change_string\"] = current_asset_metadata.get(\"aspect_ratio_change_string\", \"N/A\") # Get from passed metadata\r\n+\r\n+\r\n # Add processing info\r\n final_metadata[\"_processing_info\"] = {\r\n \"preset_used\": self.config.preset_name,\r\n \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n- # Optionally add core config details used, carefully\r\n+ \"input_source\": str(self.input_path.name), # Add original input source\r\n }\r\n \r\n- # Sort lists\r\n+ # Sort lists just before writing\r\n for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n if key in final_metadata and isinstance(final_metadata[key], list): final_metadata[key].sort()\r\n \r\n- metadata_filename = self.config.metadata_filename\r\n+ # Use asset name in temporary filename to avoid conflicts\r\n+ metadata_filename = f\"{asset_name}_{self.config.metadata_filename}\"\r\n output_path = self.temp_dir / metadata_filename\r\n- log.debug(f\"Writing metadata to: 
{output_path}\")\r\n+ log.debug(f\"Writing metadata for asset '{asset_name}' to temporary file: {output_path}\")\r\n try:\r\n with open(output_path, 'w', encoding='utf-8') as f:\r\n json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n- log.info(f\"Metadata file '{metadata_filename}' generated successfully.\")\r\n- self.metadata_file_path_temp = output_path # Store path for moving\r\n+ log.info(f\"Metadata file '{metadata_filename}' generated successfully for asset '{asset_name}'.\")\r\n+ return output_path # Return the path to the temporary file\r\n except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to write metadata file {output_path}: {e}\") from e\r\n+ raise AssetProcessingError(f\"Failed to write metadata file {output_path} for asset '{asset_name}': {e}\") from e\r\n \r\n \r\n def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n \"\"\"\r\n" }, { "date": 1745316836679, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1123,10 +1123,8 @@\n # Note: Final metadata updates (maps_present, shader_features) are handled in the main process loop\r\n return processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps\r\n \r\n \r\n- def _merge_maps(self):\r\n-\r\n def _merge_maps(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict) -> Dict[str, Dict[str, Dict]]:\r\n \"\"\"\r\n Merges channels from different maps for a specific asset based on rules in configuration.\r\n \r\n" }, { "date": 1745316968307, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1609,75 +1609,136 @@\n name = name.strip('_')\r\n if not name: name = \"invalid_name\"\r\n return name\r\n \r\n- def _organize_output_files(self):\r\n- \"\"\"Moves processed 
files from temp dir to the final output structure.\"\"\"\r\n- # ... (Implementation from Response #51) ...\r\n+ def _organize_output_files(self, current_asset_name: str, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], temp_metadata_path: Path):\r\n+ \"\"\"\r\n+ Moves/copies processed files for a specific asset from the temp dir to the final output structure.\r\n+\r\n+ Args:\r\n+ current_asset_name: The sanitized name of the asset being organized.\r\n+ processed_maps_details_asset: Details of processed maps for this asset.\r\n+ merged_maps_details_asset: Details of merged maps for this asset.\r\n+ filtered_classified_files_asset: Classified files dictionary filtered for this asset.\r\n+ unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n+ temp_metadata_path: Path to the temporary metadata file for this asset.\r\n+ \"\"\"\r\n if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n- if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing.\")\r\n- if not self.metadata.get(\"supplier_name\"): raise AssetProcessingError(\"Supplier name missing.\")\r\n+ if not current_asset_name or current_asset_name == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing for organization.\")\r\n+ supplier_name = self.config.supplier_name # Get supplier name from config\r\n+ if not supplier_name: raise AssetProcessingError(\"Supplier name missing from config.\")\r\n \r\n- supplier_sanitized = self._sanitize_filename(self.metadata[\"supplier_name\"])\r\n- asset_name_sanitized = self._sanitize_filename(self.metadata[\"asset_name\"])\r\n+ supplier_sanitized = self._sanitize_filename(supplier_name)\r\n+ asset_name_sanitized = 
self._sanitize_filename(current_asset_name) # Already sanitized, but ensure consistency\r\n final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- log.info(f\"Organizing output files into: {final_dir}\")\r\n+ log.info(f\"Organizing output files for asset '{asset_name_sanitized}' into: {final_dir}\")\r\n+\r\n try:\r\n- # Check if overwriting is allowed before potentially deleting existing dir\r\n+ # Handle overwrite logic specifically for this asset's directory\r\n if final_dir.exists() and self.overwrite:\r\n- log.warning(f\"Output directory exists and overwrite is True: {final_dir}. Removing existing directory.\")\r\n+ log.warning(f\"Output directory exists for '{asset_name_sanitized}' and overwrite is True. Removing existing directory: {final_dir}\")\r\n try:\r\n shutil.rmtree(final_dir)\r\n except Exception as rm_err:\r\n- raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} during overwrite: {rm_err}\") from rm_err\r\n- elif final_dir.exists() and not self.overwrite:\r\n- # This case should ideally be caught by the skip logic earlier,\r\n- # but adding a warning here as a safeguard.\r\n- log.warning(f\"Output directory exists: {final_dir}. 
Overwriting (unexpected - should have been skipped).\")\r\n+ raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} for asset '{asset_name_sanitized}' during overwrite: {rm_err}\") from rm_err\r\n+ # Note: Skip check should prevent this if overwrite is False, but mkdir handles exist_ok=True\r\n \r\n- final_dir.mkdir(parents=True, exist_ok=True) # Create after potential removal\r\n+ final_dir.mkdir(parents=True, exist_ok=True)\r\n except Exception as e:\r\n- # Catch potential errors during mkdir if rmtree failed partially?\r\n- if not isinstance(e, AssetProcessingError): # Avoid wrapping already specific error\r\n- raise AssetProcessingError(f\"Failed to create final dir {final_dir}: {e}\") from e\r\n+ if not isinstance(e, AssetProcessingError):\r\n+ raise AssetProcessingError(f\"Failed to create final dir {final_dir} for asset '{asset_name_sanitized}': {e}\") from e\r\n else:\r\n- raise # Re-raise the AssetProcessingError from rmtree\r\n+ raise\r\n \r\n+ # --- Helper for moving files ---\r\n+ # Keep track of files successfully moved to avoid copying them later as 'unmatched'\r\n+ moved_source_files = set()\r\n def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n- if not src_rel_path: log.warning(f\"Missing src path for {file_desc}.\"); return\r\n+ if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc}.\"); return\r\n source_abs = self.temp_dir / src_rel_path\r\n+ # Use the original filename from the source path for the destination\r\n dest_abs = dest_dir / src_rel_path.name\r\n try:\r\n if source_abs.exists():\r\n- log.debug(f\"Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n- dest_dir.mkdir(parents=True, exist_ok=True) # Ensure sub-dir exists (for Extra/)\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n+ 
dest_dir.mkdir(parents=True, exist_ok=True)\r\n shutil.move(str(source_abs), str(dest_abs))\r\n- else: log.warning(f\"Source file missing for {file_desc}: {source_abs}\")\r\n- except Exception as e: log.error(f\"Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n+ moved_source_files.add(src_rel_path) # Track successfully moved source files\r\n+ else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc}: {source_abs}\")\r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n \r\n- # Move maps, merged maps, models, metadata\r\n- for details_dict in [self.processed_maps_details, self.merged_maps_details]:\r\n+ # --- Helper for copying files (for unmatched extras) ---\r\n+ def _safe_copy(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n+ if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc} copy.\"); return\r\n+ # Skip copying if this source file was already moved (e.g., it was an 'Extra' for this specific asset)\r\n+ if src_rel_path in moved_source_files:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Skipping copy of {file_desc} '{src_rel_path.name}' as it was already moved.\")\r\n+ return\r\n+ source_abs = self.temp_dir / src_rel_path\r\n+ dest_abs = dest_dir / src_rel_path.name\r\n+ try:\r\n+ if source_abs.exists():\r\n+ # Avoid copying if the exact destination file already exists (e.g., from a previous asset's copy)\r\n+ if dest_abs.exists():\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Destination file already exists for {file_desc} copy: {dest_abs.name}. 
Skipping copy.\")\r\n+ return\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Copying {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n+ dest_dir.mkdir(parents=True, exist_ok=True)\r\n+ shutil.copy2(str(source_abs), str(dest_abs)) # Use copy2 to preserve metadata\r\n+ else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc} copy: {source_abs}\")\r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed copying {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n+\r\n+\r\n+ # --- Move Processed/Merged Maps ---\r\n+ for details_dict in [processed_maps_details_asset, merged_maps_details_asset]:\r\n for map_type, res_dict in details_dict.items():\r\n if 'error' in res_dict: continue\r\n for res_key, details in res_dict.items():\r\n- if isinstance(details, dict) and 'path' in details: _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n- for model_info in self.classified_files.get('models', []): _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n- if self.metadata_file_path_temp: _safe_move(self.metadata_file_path_temp.relative_to(self.temp_dir), final_dir, \"metadata file\")\r\n+ if isinstance(details, dict) and 'path' in details:\r\n+ _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n \r\n- # Move extra/ignored files\r\n+ # --- Move Models specific to this asset ---\r\n+ for model_info in filtered_classified_files_asset.get('models', []):\r\n+ _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n+\r\n+ # --- Move Metadata File ---\r\n+ if temp_metadata_path and temp_metadata_path.exists():\r\n+ final_metadata_path = final_dir / self.config.metadata_filename # Use standard name\r\n+ try:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving metadata file: {temp_metadata_path.name} -> {final_metadata_path.relative_to(self.output_base_path)}\")\r\n+ shutil.move(str(temp_metadata_path), 
str(final_metadata_path))\r\n+ # No need to add metadata path to moved_source_files as it's uniquely generated\r\n+ except Exception as e:\r\n+ log.error(f\"Asset '{asset_name_sanitized}': Failed moving metadata file '{temp_metadata_path.name}': {e}\", exc_info=True)\r\n+ else:\r\n+ log.warning(f\"Asset '{asset_name_sanitized}': Temporary metadata file path missing or file does not exist: {temp_metadata_path}\")\r\n+\r\n+\r\n+ # --- Handle Extra/Ignored/Unmatched Files ---\r\n extra_subdir_name = self.config.extra_files_subdir\r\n extra_dir = final_dir / extra_subdir_name\r\n- files_to_move_extra = self.classified_files.get('extra', []) + self.classified_files.get('ignored', [])\r\n- if files_to_move_extra:\r\n- log.debug(f\"Moving {len(files_to_move_extra)} files to '{extra_subdir_name}/'...\")\r\n- try:\r\n- extra_dir.mkdir(exist_ok=True)\r\n- for file_info in files_to_move_extra: _safe_move(file_info.get('source_path'), extra_dir, f\"extra file ({file_info.get('reason', 'Unknown')})\")\r\n- except Exception as e: log.error(f\"Failed creating/moving to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n+ if filtered_classified_files_asset.get('extra') or filtered_classified_files_asset.get('ignored') or unmatched_files_paths:\r\n+ try:\r\n+ extra_dir.mkdir(parents=True, exist_ok=True)\r\n \r\n- log.info(f\"Finished organizing output for '{asset_name_sanitized}'.\")\r\n+ # Move asset-specific Extra/Ignored files\r\n+ files_to_move_extra = filtered_classified_files_asset.get('extra', []) + filtered_classified_files_asset.get('ignored', [])\r\n+ if files_to_move_extra:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving {len(files_to_move_extra)} asset-specific files to '{extra_subdir_name}/'...\")\r\n+ for file_info in files_to_move_extra:\r\n+ _safe_move(file_info.get('source_path'), extra_dir, f\"extra/ignored file ({file_info.get('reason', 'Unknown')})\")\r\n \r\n+ # Copy unmatched files\r\n+ if unmatched_files_paths:\r\n+ log.debug(f\"Asset 
'{asset_name_sanitized}': Copying {len(unmatched_files_paths)} unmatched files to '{extra_subdir_name}/'...\")\r\n+ for file_path in unmatched_files_paths:\r\n+ _safe_copy(file_path, extra_dir, \"unmatched file\")\r\n \r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed creating/moving/copying to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n+\r\n+ log.info(f\"Finished organizing output for asset '{asset_name_sanitized}'.\")\r\n+\r\n+\r\n def _cleanup_workspace(self):\r\n \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n # ... (Implementation from Response #45) ...\r\n if self.temp_dir and self.temp_dir.exists():\r\n" }, { "date": 1745317171457, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -183,61 +183,165 @@\n }\r\n \r\n log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n \r\n- def process(self) -> str:\r\n+ def process(self) -> Dict[str, List[str]]:\r\n \"\"\"\r\n- Executes the full processing pipeline for the asset.\r\n+ Executes the full processing pipeline for the input path, handling\r\n+ multiple assets within a single input if detected.\r\n+\r\n Returns:\r\n- str: Status (\"processed\", \"skipped\").\r\n+ Dict[str, List[str]]: A dictionary summarizing the status of each\r\n+ detected asset within the input:\r\n+ {\"processed\": [asset_name1, ...],\r\n+ \"skipped\": [asset_name2, ...],\r\n+ \"failed\": [asset_name3, ...]}\r\n \"\"\"\r\n- log.info(f\"Processing asset: {self.input_path.name}\")\r\n+ log.info(f\"Starting processing for input: {self.input_path.name}\")\r\n+ overall_status = {\"processed\": [], \"skipped\": [], \"failed\": []}\r\n+ supplier_name = self.config.supplier_name # Get once\r\n+\r\n try:\r\n self._setup_workspace()\r\n self._extract_input()\r\n- self._inventory_and_classify_files()\r\n- self._determine_base_metadata()\r\n+ self._inventory_and_classify_files() # Classifies all files in 
self.classified_files\r\n \r\n- # --- Check if asset should be skipped ---\r\n- # Ensure asset_name and supplier_name were determined before checking\r\n- asset_name = self.metadata.get(\"asset_name\")\r\n- supplier_name = self.metadata.get(\"supplier_name\")\r\n+ # Determine distinct assets and file mapping\r\n+ distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n+ unmatched_files_paths = [p for p, name in file_to_base_name_map.items() if name is None]\r\n+ if unmatched_files_paths:\r\n+ log.warning(f\"Found {len(unmatched_files_paths)} files not matched to any specific asset base name. They will be copied to each asset's Extra folder.\")\r\n+ log.debug(f\"Unmatched files: {[str(p) for p in unmatched_files_paths]}\")\r\n \r\n- # Only check for skipping if overwrite is False AND we have valid names\r\n- if not self.overwrite and asset_name and asset_name != \"UnknownAssetName\" and supplier_name:\r\n- supplier_sanitized = self._sanitize_filename(supplier_name)\r\n- asset_name_sanitized = self._sanitize_filename(asset_name)\r\n- final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- metadata_file_path = final_dir / self.config.metadata_filename\r\n \r\n- if final_dir.exists() and metadata_file_path.is_file():\r\n- log.info(f\"Output directory and metadata found for '{asset_name_sanitized}' and overwrite is False. Skipping.\")\r\n- # No need to call cleanup here, the finally block will handle it.\r\n- return \"skipped\" # Return status\r\n- elif self.overwrite:\r\n- # Log only if asset name is known, otherwise it's redundant with the initial processing log\r\n- known_asset_name = self.metadata.get('asset_name', self.input_path.name)\r\n- # Avoid logging overwrite message if name is still unknown\r\n- if known_asset_name != \"UnknownAssetName\" and known_asset_name != self.input_path.name:\r\n- log.info(f\"Overwrite flag is set for '{known_asset_name}'. 
Processing will continue even if output exists.\")\r\n- # --- End Skip Check ---\r\n+ # --- Loop through each detected asset ---\r\n+ for current_asset_name in distinct_base_names:\r\n+ log.info(f\"--- Processing detected asset: '{current_asset_name}' ---\")\r\n+ asset_processed = False\r\n+ asset_skipped = False\r\n+ asset_failed = False\r\n+ temp_metadata_path_asset = None # Track metadata file for this asset\r\n \r\n- # Continue with processing if not skipped\r\n- self._process_maps()\r\n- self._merge_maps()\r\n- self._generate_metadata_file()\r\n- self._organize_output_files()\r\n- log.info(f\"Asset processing completed successfully for: {self.metadata.get('asset_name', self.input_path.name)}\")\r\n- return \"processed\" # Return status\r\n+ try:\r\n+ # --- Filter classified files for the current asset ---\r\n+ filtered_classified_files_asset = defaultdict(list)\r\n+ for category, file_list in self.classified_files.items():\r\n+ for file_info in file_list:\r\n+ file_path = file_info.get('source_path')\r\n+ if file_path and file_to_base_name_map.get(file_path) == current_asset_name:\r\n+ filtered_classified_files_asset[category].append(file_info)\r\n+ log.debug(f\"Asset '{current_asset_name}': Filtered files - Maps: {len(filtered_classified_files_asset.get('maps',[]))}, Models: {len(filtered_classified_files_asset.get('models',[]))}, Extra: {len(filtered_classified_files_asset.get('extra',[]))}, Ignored: {len(filtered_classified_files_asset.get('ignored',[]))}\")\r\n+\r\n+ # --- Determine Metadata for this specific asset ---\r\n+ asset_specific_metadata = self._determine_single_asset_metadata(current_asset_name, filtered_classified_files_asset)\r\n+ current_asset_metadata = {\r\n+ \"asset_name\": current_asset_name,\r\n+ \"supplier_name\": supplier_name,\r\n+ \"asset_category\": asset_specific_metadata.get(\"asset_category\", self.config.default_asset_category),\r\n+ \"archetype\": asset_specific_metadata.get(\"archetype\", \"Unknown\"),\r\n+ # Initialize fields 
that will be populated by processing steps\r\n+ \"maps_present\": [],\r\n+ \"merged_maps\": [],\r\n+ \"shader_features\": [],\r\n+ \"source_files_in_extra\": [], # Will be populated in _generate_metadata\r\n+ \"image_stats_1k\": {},\r\n+ \"map_details\": {},\r\n+ \"aspect_ratio_change_string\": \"N/A\"\r\n+ }\r\n+\r\n+ # --- Skip Check for this specific asset ---\r\n+ if not self.overwrite:\r\n+ supplier_sanitized = self._sanitize_filename(supplier_name)\r\n+ asset_name_sanitized = self._sanitize_filename(current_asset_name)\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ metadata_file_path = final_dir / self.config.metadata_filename\r\n+ if final_dir.exists() and metadata_file_path.is_file():\r\n+ log.info(f\"Output directory and metadata found for asset '{asset_name_sanitized}' and overwrite is False. Skipping this asset.\")\r\n+ overall_status[\"skipped\"].append(current_asset_name)\r\n+ asset_skipped = True\r\n+ continue # Skip to the next asset in the loop\r\n+ elif self.overwrite:\r\n+ log.info(f\"Overwrite flag is set. 
Processing asset '{current_asset_name}' even if output exists.\")\r\n+\r\n+ # --- Process Maps for this asset ---\r\n+ processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps = self._process_maps(\r\n+ filtered_maps_list=filtered_classified_files_asset.get('maps', []),\r\n+ current_asset_metadata=current_asset_metadata # Pass base metadata\r\n+ )\r\n+ # Update current metadata with results\r\n+ current_asset_metadata[\"image_stats_1k\"] = image_stats_asset\r\n+ current_asset_metadata[\"aspect_ratio_change_string\"] = aspect_ratio_change_string_asset\r\n+ # Add newly ignored rough maps to the asset's specific ignored list\r\n+ if ignored_rough_maps:\r\n+ filtered_classified_files_asset['ignored'].extend(ignored_rough_maps)\r\n+\r\n+\r\n+ # --- Merge Maps for this asset ---\r\n+ merged_maps_details_asset = self._merge_maps(\r\n+ processed_maps_details_asset=processed_maps_details_asset,\r\n+ filtered_classified_files=filtered_classified_files_asset, # Pass filtered files for original ext lookup\r\n+ current_asset_metadata=current_asset_metadata\r\n+ )\r\n+\r\n+ # --- Generate Metadata for this asset ---\r\n+ # Pass map details collected during _process_maps\r\n+ map_details_asset = {k: v for k, v in self.metadata[\"map_details\"].items() if k in processed_maps_details_asset} # Filter map_details for current asset maps\r\n+ temp_metadata_path_asset = self._generate_metadata_file(\r\n+ current_asset_metadata=current_asset_metadata, # Pass the populated dict\r\n+ processed_maps_details_asset=processed_maps_details_asset,\r\n+ merged_maps_details_asset=merged_maps_details_asset,\r\n+ filtered_classified_files_asset=filtered_classified_files_asset,\r\n+ unmatched_files_paths=unmatched_files_paths, # Pass the list of unmatched files\r\n+ map_details_asset=map_details_asset # Pass the filtered map details\r\n+ )\r\n+\r\n+ # --- Organize Output Files for this asset ---\r\n+ self._organize_output_files(\r\n+ 
current_asset_name=current_asset_name,\r\n+ processed_maps_details_asset=processed_maps_details_asset,\r\n+ merged_maps_details_asset=merged_maps_details_asset,\r\n+ filtered_classified_files_asset=filtered_classified_files_asset,\r\n+ unmatched_files_paths=unmatched_files_paths, # Pass unmatched files for copying\r\n+ temp_metadata_path=temp_metadata_path_asset\r\n+ )\r\n+\r\n+ log.info(f\"--- Asset '{current_asset_name}' processed successfully. ---\")\r\n+ overall_status[\"processed\"].append(current_asset_name)\r\n+ asset_processed = True\r\n+\r\n+ except Exception as asset_err:\r\n+ log.error(f\"--- Failed processing asset '{current_asset_name}': {asset_err} ---\", exc_info=True)\r\n+ overall_status[\"failed\"].append(current_asset_name)\r\n+ asset_failed = True\r\n+ # Continue to the next asset even if one fails\r\n+\r\n+ # --- Determine Final Consolidated Status ---\r\n+ final_status = \"failed\" # Default if nothing else matches\r\n+ if overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"processed\"\r\n+ elif overall_status[\"skipped\"] and not overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"skipped\"\r\n+ elif overall_status[\"processed\"] and overall_status[\"failed\"]:\r\n+ final_status = \"partial_success\" # Indicate some succeeded, some failed\r\n+ elif overall_status[\"processed\"] and overall_status[\"skipped\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"processed\" # Consider processed+skipped as processed overall\r\n+ elif overall_status[\"skipped\"] and overall_status[\"failed\"] and not overall_status[\"processed\"]:\r\n+ final_status = \"failed\" # If only skips and fails, report as failed\r\n+ # Add any other combinations if needed\r\n+\r\n+ log.info(f\"Finished processing input '{self.input_path.name}'. Overall Status: {final_status}. 
Summary: {overall_status}\")\r\n+ # Return the detailed status dictionary instead of just a string\r\n+ # The wrapper function in main.py will interpret this\r\n+ return overall_status\r\n+\r\n except Exception as e:\r\n- # Log error with traceback if it hasn't been logged already\r\n- if not isinstance(e, (AssetProcessingError, ConfigurationError)): # Avoid double logging known types\r\n- log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name}: {e}\")\r\n- # Ensure error is propagated\r\n+ # Catch errors during initial setup (before asset loop)\r\n+ if not isinstance(e, (AssetProcessingError, ConfigurationError)):\r\n+ log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name} during setup: {e}\")\r\n if not isinstance(e, AssetProcessingError):\r\n raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n else:\r\n- raise # Re-raise AssetProcessingError or ConfigurationError\r\n+ raise\r\n finally:\r\n # Ensure cleanup always happens\r\n self._cleanup_workspace()\r\n \r\n" }, { "date": 1745317723188, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -9,8 +9,10 @@\n import re\r\n import time\r\n from pathlib import Path\r\n from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n+from typing import List, Dict, Tuple, Optional # Added for type hinting\r\n+from collections import defaultdict # Added for grouping\r\n \r\n # Attempt to import image processing libraries\r\n try:\r\n import cv2\r\n@@ -115,10 +117,8 @@\n log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n return {\"error\": str(e)}\r\n \r\n \r\n-from collections import defaultdict # Added for grouping\r\n-\r\n # --- Helper function ---\r\n def _get_base_map_type(target_map_string: str) -> str:\r\n \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n match = 
re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n@@ -161,28 +161,14 @@\n self.temp_dir: Path | None = None # Path to the temporary working directory\r\n self.classified_files: dict[str, list[dict]] = {\r\n \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n }\r\n- self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n- self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n- self.metadata_file_path_temp: Path | None = None\r\n+ # These will no longer store instance-wide results, but are kept for potential future use or refactoring\r\n+ # self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n+ # self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n+ # self.metadata_file_path_temp: Path | None = None\r\n+ # self.metadata: dict = {} # Metadata is now handled per-asset within the process loop\r\n \r\n- # Initialize metadata collected during processing\r\n- self.metadata: dict = {\r\n- \"asset_name\": \"Unknown\",\r\n- \"supplier_name\": self.config.supplier_name,\r\n- \"asset_category\": self.config._core_settings.get('DEFAULT_ASSET_CATEGORY', \"Texture\"),\r\n- \"archetype\": \"Unknown\",\r\n- \"maps_present\": [],\r\n- \"merged_maps\": [],\r\n- \"shader_features\": [],\r\n- \"source_files_in_extra\": [],\r\n- \"image_stats_1k\": {},\r\n- \"map_details\": {},\r\n- \"aspect_ratio_change_string\": \"N/A\" # Replaces output_scaling_factors\r\n- # Processing info added in _generate_metadata_file\r\n- }\r\n-\r\n log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n \r\n def process(self) -> Dict[str, List[str]]:\r\n \"\"\"\r\n@@ -219,8 +205,9 @@\n asset_processed = False\r\n asset_skipped = False\r\n asset_failed = False\r\n temp_metadata_path_asset = None # Track metadata file for this asset\r\n+ map_details_asset = {} # Store map details for this asset\r\n \r\n try:\r\n # --- Filter classified files for the current asset ---\r\n filtered_classified_files_asset = defaultdict(list)\r\n@@ -243,9 +230,9 @@\n 
\"merged_maps\": [],\r\n \"shader_features\": [],\r\n \"source_files_in_extra\": [], # Will be populated in _generate_metadata\r\n \"image_stats_1k\": {},\r\n- \"map_details\": {},\r\n+ \"map_details\": {}, # Will be populated by _process_maps\r\n \"aspect_ratio_change_string\": \"N/A\"\r\n }\r\n \r\n # --- Skip Check for this specific asset ---\r\n@@ -272,8 +259,11 @@\n current_asset_metadata[\"aspect_ratio_change_string\"] = aspect_ratio_change_string_asset\r\n # Add newly ignored rough maps to the asset's specific ignored list\r\n if ignored_rough_maps:\r\n filtered_classified_files_asset['ignored'].extend(ignored_rough_maps)\r\n+ # Store map details (like source bit depth) collected during processing\r\n+ # This was previously stored in self.metadata[\"map_details\"]\r\n+ map_details_asset = {k: v for k, v in current_asset_metadata.pop(\"map_details\", {}).items() if k in processed_maps_details_asset}\r\n \r\n \r\n # --- Merge Maps for this asset ---\r\n merged_maps_details_asset = self._merge_maps(\r\n@@ -282,10 +272,8 @@\n current_asset_metadata=current_asset_metadata\r\n )\r\n \r\n # --- Generate Metadata for this asset ---\r\n- # Pass map details collected during _process_maps\r\n- map_details_asset = {k: v for k, v in self.metadata[\"map_details\"].items() if k in processed_maps_details_asset} # Filter map_details for current asset maps\r\n temp_metadata_path_asset = self._generate_metadata_file(\r\n current_asset_metadata=current_asset_metadata, # Pass the populated dict\r\n processed_maps_details_asset=processed_maps_details_asset,\r\n merged_maps_details_asset=merged_maps_details_asset,\r\n@@ -314,8 +302,9 @@\n asset_failed = True\r\n # Continue to the next asset even if one fails\r\n \r\n # --- Determine Final Consolidated Status ---\r\n+ # This logic remains the same, interpreting the overall_status dict\r\n final_status = \"failed\" # Default if nothing else matches\r\n if overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n 
final_status = \"processed\"\r\n elif overall_status[\"skipped\"] and not overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n@@ -647,12 +636,9 @@\n # No need to add to processed_files here, it's the final step\r\n log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n \r\n # --- Final Summary ---\r\n- # Update metadata list of files actually moved to extra (including Unrecognised and Ignored)\r\n- self.metadata[\"source_files_in_extra\"] = sorted([\r\n- str(f['source_path']) for f in self.classified_files['extra'] + self.classified_files['ignored']\r\n- ])\r\n+ # Note: self.metadata[\"source_files_in_extra\"] is now populated per-asset in _generate_metadata_file\r\n log.info(f\"File classification complete.\")\r\n log.debug(\"--- Final Classification Summary (v2) ---\")\r\n map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n@@ -2035,120 +2021,162 @@\n # --- New Detailed Prediction Method ---\r\n def get_detailed_file_predictions(self) -> list[dict] | None:\r\n \"\"\"\r\n Performs extraction and classification to provide a detailed list of all\r\n- files found within the asset and their predicted status/output name.\r\n- Does not perform image processing or file moving.\r\n+ files found within the input and their predicted status/output name,\r\n+ handling multiple potential assets within the input.\r\n \r\n Returns:\r\n list[dict] | None: A list of dictionaries, each representing a file:\r\n- {'original_path': str, 'predicted_name': str | None, 'status': str, 'details': str | None}\r\n+ {'original_path': str,\r\n+ 'predicted_asset_name': str | None,\r\n+ 'predicted_output_name': str | None,\r\n+ 'status': str,\r\n+ 'details': str | None}\r\n Returns None if a critical error occurs during setup/classification.\r\n \"\"\"\r\n- log.info(f\"Getting detailed file predictions for: 
{self.input_path.name}\")\r\n+ log.info(f\"Getting detailed file predictions for input: {self.input_path.name}\")\r\n results = []\r\n- asset_base_name = \"UnknownAssetName\" # Fallback\r\n+ all_files_in_workspace = [] # Keep track of all files found\r\n \r\n try:\r\n # --- Perform necessary setup and classification ---\r\n self._setup_workspace()\r\n self._extract_input()\r\n+ # Run classification - this populates self.classified_files\r\n self._inventory_and_classify_files()\r\n- self._determine_base_metadata() # Needed for base name prediction\r\n- asset_base_name = self.metadata.get(\"asset_name\", asset_base_name)\r\n \r\n+ # --- Determine distinct assets and file mapping ---\r\n+ # This uses the results from _inventory_and_classify_files\r\n+ distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n+ log.debug(f\"Prediction: Determined base names: {distinct_base_names}\")\r\n+ log.debug(f\"Prediction: File to base name map: { {str(k):v for k,v in file_to_base_name_map.items()} }\")\r\n+\r\n # --- Prepare for filename prediction ---\r\n target_pattern = self.config.target_filename_pattern\r\n highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n if self.config.image_resolutions:\r\n highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n \r\n- # --- Process classified files ---\r\n- # Maps\r\n- for map_info in self.classified_files.get(\"maps\", []):\r\n- original_path_str = str(map_info.get(\"source_path\", \"UnknownPath\"))\r\n- map_type = map_info.get(\"map_type\", \"UnknownType\")\r\n- # Predicted name for maps should just be the base asset name\r\n- predicted_name_display = asset_base_name\r\n- # Concise details\r\n- details = f\"[{map_type}]\"\r\n- if map_info.get(\"is_16bit_source\"):\r\n- details += \" (16-bit)\"\r\n+ # --- Process all classified files ---\r\n+ all_classified_files_with_category = []\r\n+ for category, file_list in self.classified_files.items():\r\n+ for 
file_info in file_list:\r\n+ # Add category info for easier processing below\r\n+ file_info['category'] = category\r\n+ all_classified_files_with_category.append(file_info)\r\n+ # Also collect all original paths found by classification\r\n+ if 'source_path' in file_info:\r\n+ all_files_in_workspace.append(file_info['source_path'])\r\n \r\n- # Still try to format the full name internally for error checking, but don't display it\r\n- try:\r\n- predicted_ext = \"png\" # Assumption for format check\r\n- _ = target_pattern.format(\r\n- base_name=asset_base_name,\r\n- map_type=map_type,\r\n- resolution=highest_res_key,\r\n- ext=predicted_ext\r\n- )\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction format error for {original_path_str}: {fmt_err}\")\r\n- predicted_name_display = \"[Format Error]\" # Show error in name field\r\n- details += f\" (Format Key Error: {fmt_err})\"\r\n- except Exception as pred_err:\r\n- log.warning(f\"Prediction error for {original_path_str}: {pred_err}\")\r\n- predicted_name_display = \"[Prediction Error]\" # Show error in name field\r\n- details += f\" (Error: {pred_err})\"\r\n \r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": predicted_name_display, # Use the base name or error\r\n- \"status\": \"Mapped\",\r\n- \"details\": details # Use concise details\r\n- })\r\n+ # --- Generate results for each file ---\r\n+ processed_paths = set() # Track paths already added to results\r\n+ for file_info in all_classified_files_with_category:\r\n+ original_path = file_info.get(\"source_path\")\r\n+ if not original_path or original_path in processed_paths:\r\n+ continue # Skip if path missing or already processed\r\n \r\n- # Models\r\n- for model_info in self.classified_files.get(\"models\", []):\r\n- original_path_str = str(model_info.get(\"source_path\", \"UnknownPath\"))\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": Path(original_path_str).name, # Models 
usually keep original name\r\n- \"status\": \"Model\",\r\n- \"details\": \"[Model]\" # Concise detail\r\n- })\r\n+ original_path_str = str(original_path)\r\n+ processed_paths.add(original_path) # Mark as processed\r\n \r\n- # Extra\r\n- for extra_info in self.classified_files.get(\"extra\", []):\r\n- original_path_str = str(extra_info.get(\"source_path\", \"UnknownPath\"))\r\n- reason = extra_info.get('reason', 'Unknown reason')\r\n- # Determine status and details based on the reason\r\n- if reason == 'Unrecognised': # Corrected string check\r\n- status = \"Unrecognised\"\r\n- details = \"[Unrecognised]\"\r\n+ # Determine predicted asset name and status\r\n+ predicted_asset_name = file_to_base_name_map.get(original_path) # Can be None\r\n+ category = file_info['category'] # maps, models, extra, ignored\r\n+ reason = file_info.get('reason') # Specific reason for extra/ignored\r\n+ status = \"Unknown\"\r\n+ details = None\r\n+ predicted_output_name = None # Usually original name, except for maps\r\n+\r\n+ if category == \"maps\":\r\n+ status = \"Mapped\"\r\n+ map_type = file_info.get(\"map_type\", \"UnknownType\")\r\n+ details = f\"[{map_type}]\"\r\n+ if file_info.get(\"is_16bit_source\"): details += \" (16-bit)\"\r\n+ # Predict map output name using its determined asset name\r\n+ if predicted_asset_name:\r\n+ try:\r\n+ predicted_ext = \"png\" # Assume PNG for prediction simplicity\r\n+ predicted_output_name = target_pattern.format(\r\n+ base_name=predicted_asset_name,\r\n+ map_type=map_type,\r\n+ resolution=highest_res_key,\r\n+ ext=predicted_ext\r\n+ )\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction format error for map {original_path_str}: {fmt_err}\")\r\n+ predicted_output_name = \"[Format Error]\"\r\n+ details += f\" (Format Key Error: {fmt_err})\"\r\n+ except Exception as pred_err:\r\n+ log.warning(f\"Prediction error for map {original_path_str}: {pred_err}\")\r\n+ predicted_output_name = \"[Prediction Error]\"\r\n+ details += f\" (Error: 
{pred_err})\"\r\n+ else:\r\n+ # Should not happen for maps if _determine_base_metadata worked correctly\r\n+ log.warning(f\"Map file '{original_path_str}' has no predicted asset name.\")\r\n+ predicted_output_name = \"[No Asset Name]\"\r\n+\r\n+ elif category == \"models\":\r\n+ status = \"Model\"\r\n+ details = \"[Model]\"\r\n+ predicted_output_name = original_path.name # Models keep original name\r\n+\r\n+ elif category == \"ignored\":\r\n+ status = \"Ignored\"\r\n+ details = f\"Ignored ({reason or 'Unknown reason'})\"\r\n+ predicted_output_name = None # Ignored files have no output\r\n+\r\n+ elif category == \"extra\":\r\n+ if predicted_asset_name is None:\r\n+ # This is an \"Unmatched Extra\" file (includes Unrecognised and explicit Extras without a base name)\r\n+ status = \"Unmatched Extra\"\r\n+ details = f\"[Unmatched Extra ({reason or 'N/A'})]\" # Include original reason if available\r\n+ elif reason == 'Unrecognised':\r\n+ # Unrecognised but belongs to a specific asset\r\n+ status = \"Unrecognised\"\r\n+ details = \"[Unrecognised]\"\r\n+ else:\r\n+ # Explicitly matched an 'extra' pattern and belongs to an asset\r\n+ status = \"Extra\"\r\n+ details = f\"Extra ({reason})\"\r\n+ predicted_output_name = original_path.name # Extra files keep original name\r\n+\r\n else:\r\n- status = \"Extra\"\r\n- details = f\"Extra ({reason})\" # Show the pattern match reason\r\n+ log.warning(f\"Unknown category '{category}' encountered during prediction for {original_path_str}\")\r\n+ status = \"Error\"\r\n+ details = f\"[Unknown Category: {category}]\"\r\n+ predicted_output_name = original_path.name\r\n \r\n+\r\n results.append({\r\n \"original_path\": original_path_str,\r\n- \"predicted_name\": Path(original_path_str).name, # Extra/Unrecognised files keep original name\r\n+ \"predicted_asset_name\": predicted_asset_name, # May be None\r\n+ \"predicted_output_name\": predicted_output_name,\r\n \"status\": status,\r\n \"details\": details\r\n })\r\n \r\n- # Ignored\r\n- for 
ignored_info in self.classified_files.get(\"ignored\", []):\r\n- original_path_str = str(ignored_info.get(\"source_path\", \"UnknownPath\"))\r\n- reason = ignored_info.get('reason', 'Unknown reason')\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_name\": None, # Ignored files have no output name\r\n- \"status\": \"Ignored\",\r\n- \"details\": f\"Ignored ({reason})\" # Keep reason for ignored files\r\n- })\r\n+ # Add any files found during walk but missed by classification (should be rare)\r\n+ # These are likely unmatched as well.\r\n+ for file_path in all_files_in_workspace:\r\n+ if file_path not in processed_paths:\r\n+ log.warning(f\"File found in workspace but not classified: {file_path}. Adding as Unmatched Extra.\")\r\n+ results.append({\r\n+ \"original_path\": str(file_path),\r\n+ \"predicted_asset_name\": None, # Explicitly None as it wasn't mapped\r\n+ \"predicted_output_name\": file_path.name,\r\n+ \"status\": \"Unmatched Extra\",\r\n+ \"details\": \"[Missed Classification]\"\r\n+ })\r\n \r\n- log.info(f\"Detailed prediction complete for {self.input_path.name}. Found {len(results)} files.\")\r\n+\r\n+ log.info(f\"Detailed prediction complete for input '{self.input_path.name}'. 
Found {len(results)} files.\")\r\n+ # Sort results by original path for consistent display\r\n+ results.sort(key=lambda x: x.get(\"original_path\", \"\"))\r\n return results\r\n \r\n except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n- # Log critical errors during the prediction process\r\n log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- # Optionally add a single error entry to results?\r\n- # results.append({\"original_path\": self.input_path.name, \"predicted_name\": None, \"status\": \"Error\", \"details\": str(e)})\r\n- # return results # Or return None to indicate failure\r\n return None # Indicate critical failure\r\n finally:\r\n # Ensure cleanup always happens\r\n self._cleanup_workspace()\r\n" }, { "date": 1745319619115, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -586,8 +586,12 @@\n grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n \r\n final_map_list = []\r\n for base_map_type, candidates in grouped_by_base_type.items():\r\n+ # --- DIAGNOSTIC LOGGING START ---\r\n+ candidate_paths_str = [str(c['source_path']) for c in candidates]\r\n+ log.debug(f\" [DIAGNOSIS] Processing base_map_type: '{base_map_type}'. 
Candidates before sort: {candidate_paths_str}\")\r\n+ # --- DIAGNOSTIC LOGGING END ---\r\n log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n \r\n # --- NEW SORTING LOGIC ---\r\n # Sort candidates based on:\r\n@@ -612,8 +616,11 @@\n else:\r\n # Never assign suffix for types NOT in the list\r\n final_map_type = base_map_type\r\n \r\n+ # --- DIAGNOSTIC LOGGING START ---\r\n+ log.debug(f\" [DIAGNOSIS] Assigning final_map_type '{final_map_type}' to source '{final_candidate['source_path']}' (Index: {i})\")\r\n+ # --- DIAGNOSTIC LOGGING END ---\r\n final_map_list.append({\r\n \"map_type\": final_map_type,\r\n \"source_path\": final_candidate[\"source_path\"],\r\n \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n" }, { "date": 1745319681780, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -217,8 +217,28 @@\n if file_path and file_to_base_name_map.get(file_path) == current_asset_name:\r\n filtered_classified_files_asset[category].append(file_info)\r\n log.debug(f\"Asset '{current_asset_name}': Filtered files - Maps: {len(filtered_classified_files_asset.get('maps',[]))}, Models: {len(filtered_classified_files_asset.get('models',[]))}, Extra: {len(filtered_classified_files_asset.get('extra',[]))}, Ignored: {len(filtered_classified_files_asset.get('ignored',[]))}\")\r\n \r\n+ # --- Assign Suffixes Per-Asset ---\r\n+ log.debug(f\"Asset '{current_asset_name}': Assigning map type suffixes...\")\r\n+ asset_maps = filtered_classified_files_asset.get('maps', [])\r\n+ grouped_asset_maps = defaultdict(list)\r\n+ for map_info in asset_maps:\r\n+ # Group by the base map type stored earlier\r\n+ grouped_asset_maps[map_info['map_type']].append(map_info)\r\n+\r\n+ for base_map_type, maps_in_group in grouped_asset_maps.items():\r\n+ log.debug(f\" Assigning suffixes for base type '{base_map_type}' within asset '{current_asset_name}' ({len(maps_in_group)} 
maps)\")\r\n+ # Sorting is already done by _inventory_and_classify_files, just need to assign suffix\r\n+ respect_variants = base_map_type in self.config.respect_variant_map_types\r\n+ for i, map_info in enumerate(maps_in_group):\r\n+ if respect_variants:\r\n+ final_map_type = f\"{base_map_type}-{i + 1}\"\r\n+ else:\r\n+ final_map_type = base_map_type\r\n+ log.debug(f\" Updating '{map_info['source_path']}' map_type from '{map_info['map_type']}' to '{final_map_type}'\")\r\n+ map_info['map_type'] = final_map_type # Update the map_type in the dictionary\r\n+\r\n # --- Determine Metadata for this specific asset ---\r\n asset_specific_metadata = self._determine_single_asset_metadata(current_asset_name, filtered_classified_files_asset)\r\n current_asset_metadata = {\r\n \"asset_name\": current_asset_name,\r\n@@ -606,23 +626,14 @@\n # --- END NEW SORTING LOGIC ---\r\n \r\n # Removed diagnostic log\r\n \r\n- # Assign suffixes and add to the final map list\r\n- for i, final_candidate in enumerate(candidates): # Use the directly sorted list\r\n- # Determine final map type based on the new rule\r\n- if base_map_type in self.config.respect_variant_map_types: # Check the new config list\r\n- # Always assign suffix for types in the list (if more than one or only one)\r\n- final_map_type = f\"{base_map_type}-{i + 1}\"\r\n- else:\r\n- # Never assign suffix for types NOT in the list\r\n- final_map_type = base_map_type\r\n-\r\n- # --- DIAGNOSTIC LOGGING START ---\r\n- log.debug(f\" [DIAGNOSIS] Assigning final_map_type '{final_map_type}' to source '{final_candidate['source_path']}' (Index: {i})\")\r\n- # --- DIAGNOSTIC LOGGING END ---\r\n+ # Add sorted candidates to the final list, but without assigning the suffix yet.\r\n+ # Suffix assignment will happen per-asset later.\r\n+ for final_candidate in candidates: # Use the directly sorted list\r\n+ # Store the base map type for now.\r\n final_map_list.append({\r\n- \"map_type\": final_map_type,\r\n+ \"map_type\": base_map_type, # Store 
BASE type only\r\n \"source_path\": final_candidate[\"source_path\"],\r\n \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n" }, { "date": 1745319901405, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -638,9 +638,8 @@\n \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n })\r\n processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n- log.debug(f\" Final Map: Type='{final_map_type}', Source='{final_candidate['source_path']}', Keyword='{final_candidate['matched_keyword']}', 16bit={final_candidate['is_16bit_source']}\")\r\n \r\n self.classified_files[\"maps\"] = final_map_list\r\n \r\n # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n" }, { "date": 1745320354335, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -226,16 +226,28 @@\n # Group by the base map type stored earlier\r\n grouped_asset_maps[map_info['map_type']].append(map_info)\r\n \r\n for base_map_type, maps_in_group in grouped_asset_maps.items():\r\n- log.debug(f\" Assigning suffixes for base type '{base_map_type}' within asset '{current_asset_name}' ({len(maps_in_group)} maps)\")\r\n+ # --- DIAGNOSTIC LOGGING START ---\r\n+ num_maps_in_group = len(maps_in_group)\r\n+ map_paths_in_group = [str(m['source_path']) for m in maps_in_group]\r\n+ respect_variants = base_map_type in self.config.respect_variant_map_types\r\n+ log.debug(f\" [DIAGNOSIS] Processing Suffixes for: base_map_type='{base_map_type}', asset='{current_asset_name}'\")\r\n+ log.debug(f\" [DIAGNOSIS] respect_variants ({base_map_type} in {self.config.respect_variant_map_types}?): 
{respect_variants}\")\r\n+ log.debug(f\" [DIAGNOSIS] num_maps_in_group: {num_maps_in_group}\")\r\n+ log.debug(f\" [DIAGNOSIS] maps_in_group paths: {map_paths_in_group}\")\r\n+ # --- DIAGNOSTIC LOGGING END ---\r\n+\r\n # Sorting is already done by _inventory_and_classify_files, just need to assign suffix\r\n- respect_variants = base_map_type in self.config.respect_variant_map_types\r\n+ # respect_variants = base_map_type in self.config.respect_variant_map_types # Moved calculation up for logging\r\n for i, map_info in enumerate(maps_in_group):\r\n if respect_variants:\r\n final_map_type = f\"{base_map_type}-{i + 1}\"\r\n else:\r\n final_map_type = base_map_type\r\n+ # --- DIAGNOSTIC LOGGING START ---\r\n+ log.debug(f\" [DIAGNOSIS] Assigning final_map_type='{final_map_type}' to map: '{map_info['source_path']}' (Index: {i})\")\r\n+ # --- DIAGNOSTIC LOGGING END ---\r\n log.debug(f\" Updating '{map_info['source_path']}' map_type from '{map_info['map_type']}' to '{final_map_type}'\")\r\n map_info['map_type'] = final_map_type # Update the map_type in the dictionary\r\n \r\n # --- Determine Metadata for this specific asset ---\r\n" }, { "date": 1745321003613, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -226,28 +226,16 @@\n # Group by the base map type stored earlier\r\n grouped_asset_maps[map_info['map_type']].append(map_info)\r\n \r\n for base_map_type, maps_in_group in grouped_asset_maps.items():\r\n- # --- DIAGNOSTIC LOGGING START ---\r\n- num_maps_in_group = len(maps_in_group)\r\n- map_paths_in_group = [str(m['source_path']) for m in maps_in_group]\r\n+ log.debug(f\" Assigning suffixes for base type '{base_map_type}' within asset '{current_asset_name}' ({len(maps_in_group)} maps)\")\r\n+ # Sorting is already done by _inventory_and_classify_files, just need to assign suffix\r\n respect_variants = base_map_type in self.config.respect_variant_map_types\r\n- log.debug(f\" [DIAGNOSIS] Processing Suffixes for: 
base_map_type='{base_map_type}', asset='{current_asset_name}'\")\r\n- log.debug(f\" [DIAGNOSIS] respect_variants ({base_map_type} in {self.config.respect_variant_map_types}?): {respect_variants}\")\r\n- log.debug(f\" [DIAGNOSIS] num_maps_in_group: {num_maps_in_group}\")\r\n- log.debug(f\" [DIAGNOSIS] maps_in_group paths: {map_paths_in_group}\")\r\n- # --- DIAGNOSTIC LOGGING END ---\r\n-\r\n- # Sorting is already done by _inventory_and_classify_files, just need to assign suffix\r\n- # respect_variants = base_map_type in self.config.respect_variant_map_types # Moved calculation up for logging\r\n for i, map_info in enumerate(maps_in_group):\r\n if respect_variants:\r\n final_map_type = f\"{base_map_type}-{i + 1}\"\r\n else:\r\n final_map_type = base_map_type\r\n- # --- DIAGNOSTIC LOGGING START ---\r\n- log.debug(f\" [DIAGNOSIS] Assigning final_map_type='{final_map_type}' to map: '{map_info['source_path']}' (Index: {i})\")\r\n- # --- DIAGNOSTIC LOGGING END ---\r\n log.debug(f\" Updating '{map_info['source_path']}' map_type from '{map_info['map_type']}' to '{final_map_type}'\")\r\n map_info['map_type'] = final_map_type # Update the map_type in the dictionary\r\n \r\n # --- Determine Metadata for this specific asset ---\r\n" }, { "date": 1745321021622, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -0,0 +1,2239 @@\n+# asset_processor.py\r\n+\r\n+import os\r\n+import shutil\r\n+import tempfile\r\n+import zipfile\r\n+import logging\r\n+import json\r\n+import re\r\n+import time\r\n+from pathlib import Path\r\n+from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n+from typing import List, Dict, Tuple, Optional # Added for type hinting\r\n+from collections import defaultdict # Added for grouping\r\n+\r\n+# Attempt to import image processing libraries\r\n+try:\r\n+ import cv2\r\n+ import numpy as np\r\n+except ImportError:\r\n+ print(\"ERROR: Missing required libraries. 
Please install opencv-python and numpy:\")\r\n+ print(\"pip install opencv-python numpy\")\r\n+ exit(1) # Exit if essential libraries are missing\r\n+\r\n+# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n+try:\r\n+ import OpenEXR\r\n+ import Imath\r\n+ _HAS_OPENEXR = True\r\n+except ImportError:\r\n+ _HAS_OPENEXR = False\r\n+ # Log this information - basic EXR might still work via OpenCV\r\n+ logging.debug(\"Optional 'OpenEXR' python package not found. EXR saving relies on OpenCV's built-in support.\")\r\n+\r\n+\r\n+# Assuming Configuration class is in configuration.py\r\n+try:\r\n+ from configuration import Configuration, ConfigurationError\r\n+except ImportError:\r\n+ print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n+ print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n+ exit(1)\r\n+\r\n+# Use logger defined in main.py (or configure one here if run standalone)\r\n+log = logging.getLogger(__name__)\r\n+# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\n+if not log.hasHandlers():\r\n+ logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n+\r\n+\r\n+# --- Custom Exception ---\r\n+class AssetProcessingError(Exception):\r\n+ \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n+ pass\r\n+\r\n+# --- Helper Functions ---\r\n+def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n+ \"\"\"Calculates dimensions maintaining aspect ratio, fitting within target_max_dim.\"\"\"\r\n+ if orig_w <= 0 or orig_h <= 0: return (target_max_dim, target_max_dim) # Avoid division by zero\r\n+\r\n+ ratio = orig_w / orig_h\r\n+ if ratio > 1: # Width is dominant\r\n+ target_w = target_max_dim\r\n+ target_h = max(1, round(target_w / ratio)) # Ensure height is at least 1\r\n+ else: # Height is dominant or square\r\n+ target_h = target_max_dim\r\n+ target_w = max(1, 
round(target_h * ratio)) # Ensure width is at least 1\r\n+ return int(target_w), int(target_h)\r\n+\r\n+def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n+ \"\"\"\r\n+ Calculates min, max, mean for a given numpy image array.\r\n+ Handles grayscale and multi-channel images. Converts to float64 for calculation.\r\n+ \"\"\"\r\n+ if image_data is None:\r\n+ log.warning(\"Attempted to calculate stats on None image data.\")\r\n+ return None\r\n+ try:\r\n+ # Use float64 for calculations to avoid potential overflow/precision issues\r\n+ data_float = image_data.astype(np.float64)\r\n+\r\n+ # Normalize data_float based on original dtype before calculating stats\r\n+ if image_data.dtype == np.uint16:\r\n+ log.debug(\"Stats calculation: Normalizing uint16 data to 0-1 range.\")\r\n+ data_float /= 65535.0\r\n+ elif image_data.dtype == np.uint8:\r\n+ log.debug(\"Stats calculation: Normalizing uint8 data to 0-1 range.\")\r\n+ data_float /= 255.0\r\n+ # Assuming float inputs are already in 0-1 range or similar\r\n+\r\n+ log.debug(f\"Stats calculation: data_float dtype: {data_float.dtype}, shape: {data_float.shape}\")\r\n+ # Log a few sample values to check range after normalization\r\n+ if data_float.size > 0:\r\n+ sample_values = data_float.flatten()[:10] # Get first 10 values\r\n+ log.debug(f\"Stats calculation: Sample values (first 10) after normalization: {sample_values.tolist()}\")\r\n+\r\n+\r\n+ if len(data_float.shape) == 2: # Grayscale (H, W)\r\n+ min_val = float(np.min(data_float))\r\n+ max_val = float(np.max(data_float))\r\n+ mean_val = float(np.mean(data_float))\r\n+ stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n+ log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n+ elif len(data_float.shape) == 3: # Color (H, W, C)\r\n+ channels = data_float.shape[2]\r\n+ min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n+ max_val = [float(v) for v in np.max(data_float, 
axis=(0, 1))]\r\n+ mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n+ # The input data_float is now expected to be in RGB order after conversion in _process_maps\r\n+ stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n+ log.debug(f\"Calculated {channels}-Channel Stats (RGB order): Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n+ else:\r\n+ log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n+ return None\r\n+ return stats\r\n+ except Exception as e:\r\n+ log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n+ return {\"error\": str(e)}\r\n+\r\n+\r\n+# --- Helper function ---\r\n+def _get_base_map_type(target_map_string: str) -> str:\r\n+ \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n+ match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n+ if match:\r\n+ return match.group(1).upper()\r\n+ return target_map_string.upper() # Fallback if no number suffix\r\n+\r\n+# --- Asset Processor Class ---\r\n+class AssetProcessor:\r\n+ \"\"\"\r\n+ Handles the processing pipeline for a single asset (ZIP or folder).\r\n+ \"\"\"\r\n+ # Define the list of known grayscale map types (adjust as needed)\r\n+ GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n+\r\n+ def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n+ \"\"\"\r\n+ Initializes the processor for a given input asset.\r\n+\r\n+ Args:\r\n+ input_path: Path to the input ZIP file or folder.\r\n+ config: The loaded Configuration object.\r\n+ output_base_path: The base directory where processed output will be saved.\r\n+ overwrite: If True, forces reprocessing even if output exists.\r\n+ \"\"\"\r\n+ if not isinstance(input_path, Path): input_path = Path(input_path)\r\n+ if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n+ if not 
isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n+\r\n+ if not input_path.exists():\r\n+ raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n+ if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n+ raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n+\r\n+ self.input_path: Path = input_path\r\n+ self.config: Configuration = config\r\n+ self.output_base_path: Path = output_base_path\r\n+ self.overwrite: bool = overwrite # Store the overwrite flag\r\n+\r\n+ self.temp_dir: Path | None = None # Path to the temporary working directory\r\n+ self.classified_files: dict[str, list[dict]] = {\r\n+ \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n+ }\r\n+ # These will no longer store instance-wide results, but are kept for potential future use or refactoring\r\n+ # self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n+ # self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n+ # self.metadata_file_path_temp: Path | None = None\r\n+ # self.metadata: dict = {} # Metadata is now handled per-asset within the process loop\r\n+\r\n+ log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n+\r\n+ def process(self) -> Dict[str, List[str]]:\r\n+ \"\"\"\r\n+ Executes the full processing pipeline for the input path, handling\r\n+ multiple assets within a single input if detected.\r\n+\r\n+ Returns:\r\n+ Dict[str, List[str]]: A dictionary summarizing the status of each\r\n+ detected asset within the input:\r\n+ {\"processed\": [asset_name1, ...],\r\n+ \"skipped\": [asset_name2, ...],\r\n+ \"failed\": [asset_name3, ...]}\r\n+ \"\"\"\r\n+ log.info(f\"Starting processing for input: {self.input_path.name}\")\r\n+ overall_status = {\"processed\": [], \"skipped\": [], \"failed\": []}\r\n+ supplier_name = self.config.supplier_name # Get once\r\n+\r\n+ try:\r\n+ 
self._setup_workspace()\r\n+ self._extract_input()\r\n+ self._inventory_and_classify_files() # Classifies all files in self.classified_files\r\n+\r\n+ # Determine distinct assets and file mapping\r\n+ distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n+ unmatched_files_paths = [p for p, name in file_to_base_name_map.items() if name is None]\r\n+ if unmatched_files_paths:\r\n+ log.warning(f\"Found {len(unmatched_files_paths)} files not matched to any specific asset base name. They will be copied to each asset's Extra folder.\")\r\n+ log.debug(f\"Unmatched files: {[str(p) for p in unmatched_files_paths]}\")\r\n+\r\n+\r\n+ # --- Loop through each detected asset ---\r\n+ for current_asset_name in distinct_base_names:\r\n+ log.info(f\"--- Processing detected asset: '{current_asset_name}' ---\")\r\n+ asset_processed = False\r\n+ asset_skipped = False\r\n+ asset_failed = False\r\n+ temp_metadata_path_asset = None # Track metadata file for this asset\r\n+ map_details_asset = {} # Store map details for this asset\r\n+\r\n+ try:\r\n+ # --- Filter classified files for the current asset ---\r\n+ filtered_classified_files_asset = defaultdict(list)\r\n+ for category, file_list in self.classified_files.items():\r\n+ for file_info in file_list:\r\n+ file_path = file_info.get('source_path')\r\n+ if file_path and file_to_base_name_map.get(file_path) == current_asset_name:\r\n+ filtered_classified_files_asset[category].append(file_info)\r\n+ log.debug(f\"Asset '{current_asset_name}': Filtered files - Maps: {len(filtered_classified_files_asset.get('maps',[]))}, Models: {len(filtered_classified_files_asset.get('models',[]))}, Extra: {len(filtered_classified_files_asset.get('extra',[]))}, Ignored: {len(filtered_classified_files_asset.get('ignored',[]))}\")\r\n+\r\n+ # --- Assign Suffixes Per-Asset ---\r\n+ log.debug(f\"Asset '{current_asset_name}': Assigning map type suffixes...\")\r\n+ asset_maps = filtered_classified_files_asset.get('maps', [])\r\n+ 
grouped_asset_maps = defaultdict(list)\r\n+ for map_info in asset_maps:\r\n+ # Group by the base map type stored earlier\r\n+ grouped_asset_maps[map_info['map_type']].append(map_info)\r\n+\r\n+ for base_map_type, maps_in_group in grouped_asset_maps.items():\r\n+ log.debug(f\" Assigning suffixes for base type '{base_map_type}' within asset '{current_asset_name}' ({len(maps_in_group)} maps)\")\r\n+ # Sorting is already done by _inventory_and_classify_files, just need to assign suffix\r\n+ respect_variants = base_map_type in self.config.respect_variant_map_types\r\n+ for i, map_info in enumerate(maps_in_group):\r\n+ if respect_variants:\r\n+ final_map_type = f\"{base_map_type}-{i + 1}\"\r\n+ else:\r\n+ final_map_type = base_map_type\r\n+ log.debug(f\" Updating '{map_info['source_path']}' map_type from '{map_info['map_type']}' to '{final_map_type}'\")\r\n+ map_info['map_type'] = final_map_type # Update the map_type in the dictionary\r\n+\r\n+ # --- Determine Metadata for this specific asset ---\r\n+ asset_specific_metadata = self._determine_single_asset_metadata(current_asset_name, filtered_classified_files_asset)\r\n+ current_asset_metadata = {\r\n+ \"asset_name\": current_asset_name,\r\n+ \"supplier_name\": supplier_name,\r\n+ \"asset_category\": asset_specific_metadata.get(\"asset_category\", self.config.default_asset_category),\r\n+ \"archetype\": asset_specific_metadata.get(\"archetype\", \"Unknown\"),\r\n+ # Initialize fields that will be populated by processing steps\r\n+ \"maps_present\": [],\r\n+ \"merged_maps\": [],\r\n+ \"shader_features\": [],\r\n+ \"source_files_in_extra\": [], # Will be populated in _generate_metadata\r\n+ \"image_stats_1k\": {},\r\n+ \"map_details\": {}, # Will be populated by _process_maps\r\n+ \"aspect_ratio_change_string\": \"N/A\"\r\n+ }\r\n+\r\n+ # --- Skip Check for this specific asset ---\r\n+ if not self.overwrite:\r\n+ supplier_sanitized = self._sanitize_filename(supplier_name)\r\n+ asset_name_sanitized = 
self._sanitize_filename(current_asset_name)\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ metadata_file_path = final_dir / self.config.metadata_filename\r\n+ if final_dir.exists() and metadata_file_path.is_file():\r\n+ log.info(f\"Output directory and metadata found for asset '{asset_name_sanitized}' and overwrite is False. Skipping this asset.\")\r\n+ overall_status[\"skipped\"].append(current_asset_name)\r\n+ asset_skipped = True\r\n+ continue # Skip to the next asset in the loop\r\n+ elif self.overwrite:\r\n+ log.info(f\"Overwrite flag is set. Processing asset '{current_asset_name}' even if output exists.\")\r\n+\r\n+ # --- Process Maps for this asset ---\r\n+ processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps = self._process_maps(\r\n+ filtered_maps_list=filtered_classified_files_asset.get('maps', []),\r\n+ current_asset_metadata=current_asset_metadata # Pass base metadata\r\n+ )\r\n+ # Update current metadata with results\r\n+ current_asset_metadata[\"image_stats_1k\"] = image_stats_asset\r\n+ current_asset_metadata[\"aspect_ratio_change_string\"] = aspect_ratio_change_string_asset\r\n+ # Add newly ignored rough maps to the asset's specific ignored list\r\n+ if ignored_rough_maps:\r\n+ filtered_classified_files_asset['ignored'].extend(ignored_rough_maps)\r\n+ # Store map details (like source bit depth) collected during processing\r\n+ # This was previously stored in self.metadata[\"map_details\"]\r\n+ map_details_asset = {k: v for k, v in current_asset_metadata.pop(\"map_details\", {}).items() if k in processed_maps_details_asset}\r\n+\r\n+\r\n+ # --- Merge Maps for this asset ---\r\n+ merged_maps_details_asset = self._merge_maps(\r\n+ processed_maps_details_asset=processed_maps_details_asset,\r\n+ filtered_classified_files=filtered_classified_files_asset, # Pass filtered files for original ext lookup\r\n+ current_asset_metadata=current_asset_metadata\r\n+ )\r\n+\r\n+ 
# --- Generate Metadata for this asset ---\r\n+ temp_metadata_path_asset = self._generate_metadata_file(\r\n+ current_asset_metadata=current_asset_metadata, # Pass the populated dict\r\n+ processed_maps_details_asset=processed_maps_details_asset,\r\n+ merged_maps_details_asset=merged_maps_details_asset,\r\n+ filtered_classified_files_asset=filtered_classified_files_asset,\r\n+ unmatched_files_paths=unmatched_files_paths, # Pass the list of unmatched files\r\n+ map_details_asset=map_details_asset # Pass the filtered map details\r\n+ )\r\n+\r\n+ # --- Organize Output Files for this asset ---\r\n+ self._organize_output_files(\r\n+ current_asset_name=current_asset_name,\r\n+ processed_maps_details_asset=processed_maps_details_asset,\r\n+ merged_maps_details_asset=merged_maps_details_asset,\r\n+ filtered_classified_files_asset=filtered_classified_files_asset,\r\n+ unmatched_files_paths=unmatched_files_paths, # Pass unmatched files for copying\r\n+ temp_metadata_path=temp_metadata_path_asset\r\n+ )\r\n+\r\n+ log.info(f\"--- Asset '{current_asset_name}' processed successfully. 
---\")\r\n+ overall_status[\"processed\"].append(current_asset_name)\r\n+ asset_processed = True\r\n+\r\n+ except Exception as asset_err:\r\n+ log.error(f\"--- Failed processing asset '{current_asset_name}': {asset_err} ---\", exc_info=True)\r\n+ overall_status[\"failed\"].append(current_asset_name)\r\n+ asset_failed = True\r\n+ # Continue to the next asset even if one fails\r\n+\r\n+ # --- Determine Final Consolidated Status ---\r\n+ # This logic remains the same, interpreting the overall_status dict\r\n+ final_status = \"failed\" # Default if nothing else matches\r\n+ if overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"processed\"\r\n+ elif overall_status[\"skipped\"] and not overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"skipped\"\r\n+ elif overall_status[\"processed\"] and overall_status[\"failed\"]:\r\n+ final_status = \"partial_success\" # Indicate some succeeded, some failed\r\n+ elif overall_status[\"processed\"] and overall_status[\"skipped\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"processed\" # Consider processed+skipped as processed overall\r\n+ elif overall_status[\"skipped\"] and overall_status[\"failed\"] and not overall_status[\"processed\"]:\r\n+ final_status = \"failed\" # If only skips and fails, report as failed\r\n+ # Add any other combinations if needed\r\n+\r\n+ log.info(f\"Finished processing input '{self.input_path.name}'. Overall Status: {final_status}. 
Summary: {overall_status}\")\r\n+ # Return the detailed status dictionary instead of just a string\r\n+ # The wrapper function in main.py will interpret this\r\n+ return overall_status\r\n+\r\n+ except Exception as e:\r\n+ # Catch errors during initial setup (before asset loop)\r\n+ if not isinstance(e, (AssetProcessingError, ConfigurationError)):\r\n+ log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name} during setup: {e}\")\r\n+ if not isinstance(e, AssetProcessingError):\r\n+ raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n+ else:\r\n+ raise\r\n+ finally:\r\n+ # Ensure cleanup always happens\r\n+ self._cleanup_workspace()\r\n+\r\n+ def _setup_workspace(self):\r\n+ \"\"\"Creates a temporary directory for processing.\"\"\"\r\n+ try:\r\n+ self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n+ log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n+\r\n+ def _extract_input(self):\r\n+ \"\"\"Extracts ZIP or copies folder contents to the temporary workspace.\"\"\"\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n+\r\n+ log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n+ try:\r\n+ if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n+ log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ zip_ref.extractall(self.temp_dir)\r\n+ log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n+ elif self.input_path.is_dir():\r\n+ log.debug(f\"Copying directory contents: {self.input_path}\")\r\n+ for item in self.input_path.iterdir():\r\n+ destination = self.temp_dir / item.name\r\n+ if item.is_dir():\r\n+ # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n+ try:\r\n+ 
shutil.copytree(item, destination, dirs_exist_ok=True)\r\n+ except TypeError: # Fallback for older Python\r\n+ if not destination.exists():\r\n+ shutil.copytree(item, destination)\r\n+ else:\r\n+ log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n+\r\n+ else:\r\n+ shutil.copy2(item, destination)\r\n+ log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n+ except zipfile.BadZipFile:\r\n+ raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n+\r\n+ def _inventory_and_classify_files(self):\r\n+ \"\"\"\r\n+ Scans workspace, classifies files according to preset rules, handling\r\n+ 16-bit prioritization and multiple variants of the same base map type.\r\n+ \"\"\"\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n+\r\n+ log.info(\"Scanning and classifying files...\")\r\n+ log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n+ all_files_rel = []\r\n+ for root, _, files in os.walk(self.temp_dir):\r\n+ root_path = Path(root)\r\n+ for file in files:\r\n+ full_path = root_path / file\r\n+ relative_path = full_path.relative_to(self.temp_dir)\r\n+ all_files_rel.append(relative_path)\r\n+\r\n+ log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n+\r\n+ # --- Initialization ---\r\n+ processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n+ potential_map_candidates = [] # List to store potential map file info\r\n+ # Reset classified files (important if this method is ever called multiple times)\r\n+ self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n+\r\n+\r\n+ # --- Step 1: Identify Explicit 'Extra' Files ---\r\n+ 
log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n+ compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n+ log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path in processed_files: continue\r\n+ for compiled_regex in compiled_extra_regex:\r\n+ if compiled_regex.search(file_rel_path.name):\r\n+ log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n+ self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n+ processed_files.add(file_rel_path)\r\n+ log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n+ break # Stop checking extra patterns for this file\r\n+\r\n+ # --- Step 2: Identify Model Files ---\r\n+ log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n+ compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n+ log.debug(f\" Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path in processed_files: continue\r\n+ for compiled_regex in compiled_model_regex:\r\n+ if compiled_regex.search(file_rel_path.name):\r\n+ log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n+ self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n+ processed_files.add(file_rel_path)\r\n+ log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n+ break # Stop checking model patterns for this file\r\n+\r\n+ # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n+ log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n+ # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n+ 
compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n+\r\n+ for file_rel_path in all_files_rel:\r\n+ # Skip files already classified as Extra or Model\r\n+ if file_rel_path in processed_files:\r\n+ continue\r\n+\r\n+ file_stem = file_rel_path.stem\r\n+ match_found = False\r\n+\r\n+ # Iterate through base types and their associated regex tuples\r\n+ for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n+ if match_found: break # Stop checking types for this file once matched\r\n+\r\n+ # Get the original keywords list for the current rule index\r\n+ # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n+ original_rule = None\r\n+ # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n+ if regex_tuples:\r\n+ current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n+ if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n+ rule_candidate = self.config.map_type_mapping[current_rule_index]\r\n+ # Verify it's the correct rule by checking target_type\r\n+ if rule_candidate.get(\"target_type\") == base_map_type:\r\n+ original_rule = rule_candidate\r\n+ else:\r\n+ log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. Searching...\")\r\n+ # Fallback search if index doesn't match (shouldn't happen ideally)\r\n+ for idx, rule in enumerate(self.config.map_type_mapping):\r\n+ if rule.get(\"target_type\") == base_map_type:\r\n+ original_rule = rule\r\n+ log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n+ break\r\n+\r\n+ original_keywords_list = []\r\n+ if original_rule and 'keywords' in original_rule:\r\n+ original_keywords_list = original_rule['keywords']\r\n+ else:\r\n+ log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. 
Keyword indexing may fail.\")\r\n+\r\n+ for kw_regex, original_keyword, rule_index in regex_tuples:\r\n+ if kw_regex.search(file_stem):\r\n+ log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n+\r\n+ # Find the index of the matched keyword within its rule's list\r\n+ keyword_index_in_rule = -1 # Default if not found\r\n+ if original_keywords_list:\r\n+ try:\r\n+ # Use the original_keyword string directly\r\n+ keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n+ except ValueError:\r\n+ log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? {original_keywords_list}\")\r\n+ else:\r\n+ log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n+\r\n+ # Add candidate only if not already added\r\n+ if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n+ potential_map_candidates.append({\r\n+ 'source_path': file_rel_path,\r\n+ 'matched_keyword': original_keyword,\r\n+ 'base_map_type': base_map_type,\r\n+ 'preset_rule_index': rule_index,\r\n+ 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n+ 'is_16bit_source': False\r\n+ })\r\n+ else:\r\n+ log.warning(f\" '{file_rel_path}' was already added as a candidate? 
Skipping duplicate add.\")\r\n+\r\n+ match_found = True\r\n+ break # Stop checking regex tuples for this base_type once matched\r\n+\r\n+ log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n+\r\n+ # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n+ log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n+ compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n+ for file_rel_path in all_files_rel:\r\n+ # Skip if already processed or already identified as a candidate\r\n+ if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n+ continue\r\n+\r\n+ for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n+ log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n+ match = compiled_regex.search(file_rel_path.name) # Store result\r\n+ if match:\r\n+ log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n+ potential_map_candidates.append({\r\n+ 'source_path': file_rel_path,\r\n+ 'matched_keyword': 'N/A (16bit variant)', # Placeholder keyword\r\n+ 'base_map_type': base_type,\r\n+ 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n+ 'is_16bit_source': True # Mark as 16-bit immediately\r\n+ })\r\n+ log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n+ # Don't add to processed_files yet, let Step 4 handle filtering\r\n+ break # Stop checking bit depth patterns for this file\r\n+\r\n+ log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n+\r\n+ # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n+ log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n+ 
compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n+ candidates_to_keep = []\r\n+ candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n+\r\n+ # Mark 16-bit candidates\r\n+ for candidate in potential_map_candidates:\r\n+ base_type = candidate['base_map_type']\r\n+ # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n+ if base_type in compiled_bit_depth_regex:\r\n+ if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n+ candidate['is_16bit_source'] = True\r\n+ log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n+\r\n+\r\n+ # Identify base types that have a 16-bit version present\r\n+ prioritized_16bit_bases = {\r\n+ candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n+ }\r\n+ log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n+\r\n+ # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n+ for candidate in potential_map_candidates:\r\n+ if candidate['is_16bit_source']:\r\n+ candidates_to_keep.append(candidate)\r\n+ log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+ elif candidate['base_map_type'] not in prioritized_16bit_bases:\r\n+ candidates_to_keep.append(candidate)\r\n+ log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+ else:\r\n+ # This is an 8-bit candidate whose 16-bit counterpart exists\r\n+ candidates_to_ignore.append(candidate)\r\n+ log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+\r\n+ # Add ignored 8-bit files to the main ignored list\r\n+ for ignored_candidate in candidates_to_ignore:\r\n+ self.classified_files[\"ignored\"].append({\r\n+ 'source_path': 
ignored_candidate['source_path'],\r\n+ 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n+ })\r\n+ processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n+\r\n+ log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n+\r\n+ # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n+ log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n+ # from collections import defaultdict # Moved import to top of file\r\n+ grouped_by_base_type = defaultdict(list)\r\n+ for candidate in candidates_to_keep:\r\n+ grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n+\r\n+ final_map_list = []\r\n+ for base_map_type, candidates in grouped_by_base_type.items():\r\n+ # --- DIAGNOSTIC LOGGING START ---\r\n+ candidate_paths_str = [str(c['source_path']) for c in candidates]\r\n+ log.debug(f\" [DIAGNOSIS] Processing base_map_type: '{base_map_type}'. Candidates before sort: {candidate_paths_str}\")\r\n+ # --- DIAGNOSTIC LOGGING END ---\r\n+ log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n+\r\n+ # --- NEW SORTING LOGIC ---\r\n+ # Sort candidates based on:\r\n+ # 1. The index of the rule object in the preset's map_type_mapping list.\r\n+ # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n+ # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n+ candidates.sort(key=lambda c: (\r\n+ c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n+ c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n+ str(c['source_path'])\r\n+ ))\r\n+ # --- END NEW SORTING LOGIC ---\r\n+\r\n+ # Removed diagnostic log\r\n+\r\n+ # Add sorted candidates to the final list, but without assigning the suffix yet.\r\n+ # Suffix assignment will happen per-asset later.\r\n+ for final_candidate in candidates: # Use the directly sorted list\r\n+ # Store the base map type for now.\r\n+ final_map_list.append({\r\n+ \"map_type\": base_map_type, # Store BASE type only\r\n+ \"source_path\": final_candidate[\"source_path\"],\r\n+ \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n+ \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n+ \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n+ })\r\n+ processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n+\r\n+ self.classified_files[\"maps\"] = final_map_list\r\n+\r\n+ # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n+ log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n+ remaining_count = 0\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path not in processed_files:\r\n+ log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n+ self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})\r\n+ remaining_count += 1\r\n+ # No need to add to processed_files here, it's the final step\r\n+ log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n+\r\n+ # --- Final Summary ---\r\n+ # Note: self.metadata[\"source_files_in_extra\"] is now populated per-asset in _generate_metadata_file\r\n+ log.info(f\"File classification complete.\")\r\n+ 
log.debug(\"--- Final Classification Summary (v2) ---\")\r\n+ map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n+ model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n+ extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n+ ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n+ log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n+ log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n+ log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n+ log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n+ log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n+\r\n+\r\n+ def _determine_base_metadata(self) -> Tuple[List[str], Dict[Path, Optional[str]]]:\r\n+ \"\"\"\r\n+ Determines distinct asset base names within the input based on preset rules\r\n+ and maps each relevant source file to its determined base name.\r\n+\r\n+ Returns:\r\n+ Tuple[List[str], Dict[Path, Optional[str]]]:\r\n+ - A list of unique, sanitized base names found.\r\n+ - A dictionary mapping source file relative paths to their determined\r\n+ base name string (or None if no base name could be determined for that file).\r\n+ \"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ log.info(\"Determining distinct base names and file mapping...\")\r\n+\r\n+ # Combine map and model files for base name determination\r\n+ relevant_files = self.classified_files.get('maps', []) + self.classified_files.get('models', [])\r\n+ if not relevant_files:\r\n+ log.warning(\"No map or model files found to determine base name(s).\")\r\n+ # Fallback: Use input path name as a single asset\r\n+ 
input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ sanitized_input_name = self._sanitize_filename(input_name or \"UnknownInput\")\r\n+ # Map all files (maps, models, extra, ignored) to this fallback name\r\n+ all_files_paths = [f['source_path'] for cat in self.classified_files.values() for f in cat if 'source_path' in f]\r\n+ file_to_base_name_map = {f_path: sanitized_input_name for f_path in all_files_paths}\r\n+ log.info(f\"Using input path name '{sanitized_input_name}' as the single asset name.\")\r\n+ return [sanitized_input_name], file_to_base_name_map\r\n+\r\n+ # --- Determine Base Names from Files ---\r\n+ separator = self.config.source_naming_separator\r\n+ indices_dict = self.config.source_naming_indices\r\n+ base_index_raw = indices_dict.get('base_name')\r\n+ base_index = None\r\n+ if base_index_raw is not None:\r\n+ try:\r\n+ base_index = int(base_index_raw)\r\n+ except (ValueError, TypeError):\r\n+ log.warning(f\"Could not convert base_name index '{base_index_raw}' to integer. Base name determination might be inaccurate.\")\r\n+\r\n+ file_to_base_name_map: Dict[Path, Optional[str]] = {}\r\n+ potential_base_names_per_file: Dict[Path, str] = {} # Store potential name for each file path\r\n+\r\n+ if isinstance(base_index, int):\r\n+ log.debug(f\"Attempting base name extraction using separator '{separator}' and index {base_index}.\")\r\n+ for file_info in relevant_files:\r\n+ file_path = file_info['source_path']\r\n+ stem = file_path.stem\r\n+ parts = stem.split(separator)\r\n+ if len(parts) > base_index:\r\n+ extracted_name = parts[base_index]\r\n+ sanitized_name = self._sanitize_filename(extracted_name)\r\n+ if sanitized_name: # Ensure we don't add empty names\r\n+ potential_base_names_per_file[file_path] = sanitized_name\r\n+ log.debug(f\" File '{file_path.name}' -> Potential Base Name: '{sanitized_name}'\")\r\n+ else:\r\n+ log.debug(f\" File '{file_path.name}' -> Extracted empty name at index {base_index}. 
Marking as None.\")\r\n+ file_to_base_name_map[file_path] = None # Explicitly mark as None if extraction yields empty\r\n+ else:\r\n+ log.debug(f\" File '{file_path.name}' -> Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}. Marking as None.\")\r\n+ file_to_base_name_map[file_path] = None # Mark as None if index is invalid for this file\r\n+ else:\r\n+ log.warning(\"Base name index not configured or invalid. Cannot determine distinct assets based on index. Treating as single asset.\")\r\n+ # Fallback to common prefix if no valid index\r\n+ stems = [f['source_path'].stem for f in relevant_files]\r\n+ common_prefix_name = os.path.commonprefix(stems) if stems else \"\"\r\n+ sanitized_common_name = self._sanitize_filename(common_prefix_name or self.input_path.stem or \"UnknownAsset\")\r\n+ log.info(f\"Using common prefix '{sanitized_common_name}' as the single asset name.\")\r\n+ # Map all relevant files to this single name\r\n+ for file_info in relevant_files:\r\n+ potential_base_names_per_file[file_info['source_path']] = sanitized_common_name\r\n+\r\n+ # --- Consolidate Distinct Names and Final Mapping ---\r\n+ distinct_base_names_set = set(potential_base_names_per_file.values())\r\n+ distinct_base_names = sorted(list(distinct_base_names_set)) # Sort for consistent processing order\r\n+\r\n+ # Populate the final map, including files that didn't match the index rule (marked as None earlier)\r\n+ for file_info in relevant_files:\r\n+ file_path = file_info['source_path']\r\n+ if file_path not in file_to_base_name_map: # If not already marked as None\r\n+ file_to_base_name_map[file_path] = potential_base_names_per_file.get(file_path) # Assign determined name or None if somehow missed\r\n+\r\n+ # Add files from 'extra' and 'ignored' to the map, marking them as None for base name\r\n+ for category in ['extra', 'ignored']:\r\n+ for file_info in self.classified_files.get(category, []):\r\n+ file_path = file_info['source_path']\r\n+ if file_path not in 
file_to_base_name_map: # Avoid overwriting if somehow already mapped\r\n+ file_to_base_name_map[file_path] = None\r\n+ log.debug(f\" File '{file_path.name}' (Category: {category}) -> Marked as None (No Base Name).\")\r\n+\r\n+\r\n+ if not distinct_base_names:\r\n+ # This case should be rare due to fallbacks, but handle it.\r\n+ log.warning(\"No distinct base names could be determined. Using input name as fallback.\")\r\n+ input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ fallback_name = self._sanitize_filename(input_name or \"FallbackAsset\")\r\n+ distinct_base_names = [fallback_name]\r\n+ # Remap all files to this single fallback name\r\n+ file_to_base_name_map = {f_path: fallback_name for f_path in file_to_base_name_map.keys()}\r\n+\r\n+\r\n+ log.info(f\"Determined {len(distinct_base_names)} distinct asset base name(s): {distinct_base_names}\")\r\n+ log.debug(f\"File-to-BaseName Map ({len(file_to_base_name_map)} entries): { {str(k): v for k, v in file_to_base_name_map.items()} }\") # Log string paths for readability\r\n+\r\n+ return distinct_base_names, file_to_base_name_map\r\n+\r\n+ def _determine_single_asset_metadata(self, asset_base_name: str, filtered_classified_files: Dict[str, List[Dict]]) -> Dict[str, str]:\r\n+ \"\"\"\r\n+ Determines the asset_category and archetype for a single, specific asset\r\n+ based on its filtered list of classified files.\r\n+\r\n+ Args:\r\n+ asset_base_name: The determined base name for this specific asset.\r\n+ filtered_classified_files: A dictionary containing only the classified\r\n+ files (maps, models, etc.) 
belonging to this asset.\r\n+\r\n+ Returns:\r\n+ A dictionary containing {\"asset_category\": str, \"archetype\": str}.\r\n+ \"\"\"\r\n+ log.debug(f\"Determining category and archetype for asset: '{asset_base_name}'\")\r\n+ determined_category = self.config.default_asset_category # Start with default\r\n+ determined_archetype = \"Unknown\"\r\n+\r\n+ # --- Determine Asset Category ---\r\n+ if filtered_classified_files.get(\"models\"):\r\n+ determined_category = \"Asset\"\r\n+ log.debug(f\" Category set to 'Asset' for '{asset_base_name}' due to model file presence.\")\r\n+ else:\r\n+ # Check for Decal keywords only if not an Asset\r\n+ decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n+ found_decal = False\r\n+ # Check map names first for decal keywords\r\n+ candidate_files = [f['source_path'] for f in filtered_classified_files.get('maps', [])]\r\n+ # Fallback to checking extra files if no maps found for this asset\r\n+ if not candidate_files:\r\n+ candidate_files = [f['source_path'] for f in filtered_classified_files.get('extra', [])]\r\n+\r\n+ if decal_keywords:\r\n+ for file_path in candidate_files:\r\n+ # Check against the specific file's name within this asset's context\r\n+ for keyword in decal_keywords:\r\n+ if keyword.lower() in file_path.name.lower():\r\n+ determined_category = \"Decal\"\r\n+ found_decal = True; break\r\n+ if found_decal: break\r\n+ if found_decal: log.debug(f\" Category set to 'Decal' for '{asset_base_name}' due to keyword match.\")\r\n+ # If not Asset or Decal, it remains the default (e.g., \"Texture\")\r\n+\r\n+ log.debug(f\" Determined Category for '{asset_base_name}': {determined_category}\")\r\n+\r\n+ # --- Determine Archetype (Usage) ---\r\n+ archetype_rules = self.config.archetype_rules\r\n+ # Use stems from maps and models belonging *only* to this asset\r\n+ check_stems = [f['source_path'].stem.lower() for f in filtered_classified_files.get('maps', [])]\r\n+ 
check_stems.extend([f['source_path'].stem.lower() for f in filtered_classified_files.get('models', [])])\r\n+ # Also check the determined base name itself\r\n+ check_stems.append(asset_base_name.lower())\r\n+\r\n+ if check_stems:\r\n+ best_match_archetype = \"Unknown\"\r\n+ # Using simple \"first match wins\" logic as before\r\n+ for rule in archetype_rules:\r\n+ if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n+ arch_name, rules_dict = rule\r\n+ match_any = rules_dict.get(\"match_any\", [])\r\n+ matched_any_keyword = False\r\n+ if match_any:\r\n+ for keyword in match_any:\r\n+ kw_lower = keyword.lower()\r\n+ for stem in check_stems:\r\n+ if kw_lower in stem: # Simple substring check\r\n+ matched_any_keyword = True\r\n+ break # Found a match for this keyword\r\n+ if matched_any_keyword: break # Found a match for this rule's keywords\r\n+\r\n+ if matched_any_keyword:\r\n+ best_match_archetype = arch_name\r\n+ log.debug(f\" Archetype match '{arch_name}' for '{asset_base_name}' based on keywords: {match_any}\")\r\n+ break # First rule match wins\r\n+\r\n+ determined_archetype = best_match_archetype\r\n+\r\n+ log.debug(f\" Determined Archetype for '{asset_base_name}': {determined_archetype}\")\r\n+\r\n+ return {\"asset_category\": determined_category, \"archetype\": determined_archetype}\r\n+\r\n+\r\n+ def _process_maps(self, filtered_maps_list: List[Dict], current_asset_metadata: Dict) -> Tuple[Dict[str, Dict[str, Dict]], Dict[str, Dict], str, List[Dict]]:\r\n+ \"\"\"\r\n+ Loads, processes, resizes, and saves classified map files for a specific asset.\r\n+\r\n+ Args:\r\n+ filtered_maps_list: List of map dictionaries belonging to the current asset.\r\n+ current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n+\r\n+ Returns:\r\n+ Tuple containing:\r\n+ - processed_maps_details_asset: Dict mapping map_type to resolution details.\r\n+ - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n+ - 
aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n+ - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n+ \"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n+ log.info(f\"Processing identified map files for asset '{asset_name}'...\")\r\n+\r\n+ # Initialize results specific to this asset\r\n+ processed_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n+ image_stats_asset: Dict[str, Dict] = {}\r\n+ map_details_asset: Dict[str, Dict] = {} # Store details like source bit depth, gloss inversion\r\n+ aspect_ratio_change_string_asset: str = \"N/A\"\r\n+ ignored_rough_maps: List[Dict] = [] # Store ignored native rough maps\r\n+\r\n+ # --- Settings retrieval ---\r\n+ resolutions = self.config.image_resolutions\r\n+ stats_res_key = self.config.calculate_stats_resolution\r\n+ stats_target_dim = resolutions.get(stats_res_key)\r\n+ if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. 
Stats skipped for '{asset_name}'.\")\r\n+ gloss_keywords = self.config.source_glossiness_keywords\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ base_name = asset_name # Use the asset name passed in\r\n+\r\n+ # --- Pre-process Glossiness -> Roughness ---\r\n+ preprocessed_data = {}\r\n+ derived_from_gloss_flag = {}\r\n+ gloss_map_info_for_rough, native_rough_map_info = None, None\r\n+ # Use the filtered list for this asset\r\n+ for map_info in filtered_maps_list:\r\n+ if map_info['map_type'] == 'ROUGH':\r\n+ is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n+ if is_gloss: gloss_map_info_for_rough = map_info\r\n+ else: native_rough_map_info = map_info\r\n+\r\n+ rough_source_to_use = None\r\n+ if gloss_map_info_for_rough:\r\n+ rough_source_to_use = gloss_map_info_for_rough\r\n+ derived_from_gloss_flag['ROUGH'] = True\r\n+ if native_rough_map_info:\r\n+ log.warning(f\"Asset '{asset_name}': Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n+ # Instead of modifying lists, just add the ignored info to be returned\r\n+ ignored_rough_maps.append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n+ # We still need to ensure the native rough map isn't processed later in the main loop\r\n+ elif native_rough_map_info:\r\n+ rough_source_to_use = native_rough_map_info\r\n+ derived_from_gloss_flag['ROUGH'] = False\r\n+\r\n+ if derived_from_gloss_flag.get('ROUGH'):\r\n+ # Ensure rough_source_to_use is not None before proceeding\r\n+ if rough_source_to_use:\r\n+ source_path = self.temp_dir / rough_source_to_use['source_path']\r\n+ log.info(f\"Asset '{asset_name}': Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n+ try:\r\n+ img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n+ if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n+ original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n+ if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n+ if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n+ elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n+ else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n+ # Store tuple: (inverted_float_data, original_dtype)\r\n+ preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n+ log.debug(f\"Asset '{asset_name}': Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n+ except Exception as e: log.error(f\"Asset '{asset_name}': Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n+ else:\r\n+ log.error(f\"Asset '{asset_name}': Gloss map identified for ROUGH, but source info is missing.\")\r\n+\r\n+\r\n+ # --- Main Processing Loop 
---\r\n+ # Use the filtered list passed to the function\r\n+ maps_to_process = list(filtered_maps_list)\r\n+ for map_info in maps_to_process:\r\n+ map_type = map_info['map_type']\r\n+ source_path_rel = map_info['source_path']\r\n+\r\n+ # Skip processing native rough map if gloss was prioritized and ignored\r\n+ if map_type == 'ROUGH' and any(ignored['source_path'] == source_path_rel for ignored in ignored_rough_maps):\r\n+ log.debug(f\"Asset '{asset_name}': Skipping processing of native rough map '{source_path_rel}' as gloss version was prioritized.\")\r\n+ continue\r\n+\r\n+ original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n+ log.info(f\"-- Asset '{asset_name}': Processing Map Type: {map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n+ img_processed, source_dtype = None, None\r\n+ # Store details locally for this asset\r\n+ current_map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n+\r\n+ try:\r\n+ # --- 1. 
Get/Load Source Data ---\r\n+ if map_type in preprocessed_data:\r\n+ log.debug(f\"Asset '{asset_name}': Using pre-processed data for {map_type}.\")\r\n+ # Unpack tuple: (inverted_float_data, original_dtype)\r\n+ img_processed, source_dtype = preprocessed_data[map_type]\r\n+ else:\r\n+ full_source_path = self.temp_dir / source_path_rel\r\n+ read_flag = cv2.IMREAD_UNCHANGED if map_type.upper() == 'MASK' else (cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED)\r\n+ log.debug(f\"Asset '{asset_name}': Loading source {source_path_rel.name} with flag: {'UNCHANGED' if read_flag == cv2.IMREAD_UNCHANGED else ('GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'DEFAULT')}\")\r\n+ img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n+ if img_loaded is None:\r\n+ raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n+\r\n+ if len(img_loaded.shape) == 3:\r\n+ log.debug(f\"Asset '{asset_name}': Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n+ img_processed = cv2.cvtColor(img_loaded, cv2.COLOR_BGR2RGB)\r\n+ else:\r\n+ img_processed = img_loaded.copy()\r\n+\r\n+ source_dtype = img_loaded.dtype\r\n+ log.debug(f\"Asset '{asset_name}': Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape} (After potential BGR->RGB conversion)\")\r\n+\r\n+ current_map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n+\r\n+ # --- 2. 
Handle Alpha Mask ---\r\n+ if map_type == 'MASK' and img_processed is not None:\r\n+ # [ Existing MASK handling logic remains largely the same, just add asset_name to logs ]\r\n+ log.debug(f\"Asset '{asset_name}': Processing as MASK type.\")\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Initial shape: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n+ shape = img_processed.shape\r\n+ if len(shape) == 3 and shape[2] == 4:\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Extracting alpha channel (4-channel source).\")\r\n+ img_processed = img_processed[:, :, 3]\r\n+ elif len(shape) == 3 and shape[2] == 3:\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Converting BGR to Grayscale (3-channel source).\")\r\n+ img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n+ elif len(shape) == 2:\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Source is already grayscale (2-channel shape).\")\r\n+ else:\r\n+ log.warning(f\"Asset '{asset_name}': MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\")\r\n+\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Shape after channel extraction/conversion: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n+\r\n+ if img_processed.dtype != np.uint8:\r\n+ log.debug(f\"Asset '{asset_name}': Converting mask from {img_processed.dtype} to uint8.\")\r\n+ if img_processed.dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ else: img_processed = img_processed.astype(np.uint8)\r\n+ log.debug(f\"Asset '{asset_name}': MASK processing: Shape after dtype conversion: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n+\r\n+\r\n+ if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n+ orig_h, orig_w = img_processed.shape[:2]\r\n+ # Use local dictionary for this asset's results\r\n+ processed_maps_details_asset.setdefault(map_type, {})\r\n+ max_original_dimension = max(orig_w, orig_h)\r\n+\r\n+ # --- 3. 
Resize & Save Loop ---\r\n+ for res_key, target_dim in resolutions.items():\r\n+ if target_dim > max_original_dimension:\r\n+ log.debug(f\"Asset '{asset_name}': Skipping {res_key} ({target_dim}px) for {map_type}: Target larger than original ({max_original_dimension}px).\")\r\n+ continue\r\n+ log.debug(f\"Asset '{asset_name}': Processing {map_type} for resolution: {res_key}...\")\r\n+ if orig_w <= 0 or orig_h <= 0: log.warning(f\"Asset '{asset_name}': Invalid original dims for {map_type}, skipping resize {res_key}.\"); continue\r\n+ target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n+ interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n+ try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n+ except Exception as resize_err: log.error(f\"Asset '{asset_name}': Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n+\r\n+ # --- 3a. Calculate Stats ---\r\n+ if res_key == stats_res_key and stats_target_dim:\r\n+ log.debug(f\"Asset '{asset_name}': Calculating stats for {map_type} using {res_key} image...\")\r\n+ stats = _calculate_image_stats(img_resized)\r\n+ # Store stats locally for this asset\r\n+ if stats: image_stats_asset[map_type] = stats\r\n+ else: log.warning(f\"Asset '{asset_name}': Stats calculation failed for {map_type} at {res_key}.\")\r\n+\r\n+ # Calculate aspect change string (only once per asset)\r\n+ lowest_res_key = min(resolutions, key=resolutions.get)\r\n+ # Use local variable for check and assignment\r\n+ if aspect_ratio_change_string_asset == \"N/A\" and res_key == lowest_res_key:\r\n+ log.debug(f\"Asset '{asset_name}': Aspect ratio calculation condition met.\")\r\n+ try:\r\n+ aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n+ aspect_ratio_change_string_asset = aspect_string # Store locally\r\n+ log.debug(f\"Asset '{asset_name}': Stored aspect ratio 
change string using lowest res '{res_key}': '{aspect_string}'\")\r\n+ except Exception as aspect_err:\r\n+ log.error(f\"Asset '{asset_name}': Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", exc_info=True)\r\n+ aspect_ratio_change_string_asset = \"Error\" # Indicate calculation failure locally\r\n+\r\n+ # --- 3b. Determine Output Bit Depth & Format ---\r\n+ # [ Existing logic for determining bit depth and format remains the same ]\r\n+ bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n+ current_dtype = img_resized.dtype\r\n+ output_dtype_target, output_bit_depth = None, 8\r\n+ if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n+ elif bit_depth_rule == 'respect':\r\n+ if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ else: output_dtype_target, output_bit_depth = np.uint8, 8\r\n+ else: output_dtype_target, output_bit_depth = np.uint8, 8\r\n+\r\n+ # --- 3c. Determine Output Format ---\r\n+ # [ Existing logic for determining output format remains the same, add asset_name to logs ]\r\n+ output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n+ primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n+ fmt_8bit_config = self.config.get_8bit_output_format()\r\n+ threshold = self.config.resolution_threshold_for_jpg\r\n+ force_lossless = map_type in self.config.force_lossless_map_types\r\n+\r\n+ if force_lossless:\r\n+ log.debug(f\"Asset '{asset_name}': Format forced to lossless for map type '{map_type}'.\")\r\n+ # ... 
(rest of force_lossless logic) ...\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else:\r\n+ if output_format != \"png\":\r\n+ log.warning(f\"Asset '{asset_name}': Primary 16-bit format '{output_format}' not PNG for forced lossless. Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else: # 8-bit lossless\r\n+ output_format = fmt_8bit_config\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else:\r\n+ log.warning(f\"Asset '{asset_name}': Configured 8-bit format '{output_format}' is not PNG. Forced lossless for '{map_type}' will use PNG instead.\")\r\n+ output_format = \"png\"; output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n+\r\n+ elif output_bit_depth == 8 and target_dim >= threshold:\r\n+ output_format = 'jpg'; output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Asset '{asset_name}': Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n+ else:\r\n+ # ... 
(rest of format determination logic, adding asset_name to logs) ...\r\n+ if bit_depth_rule == 'force_8bit':\r\n+ output_format = 'png'; output_ext = '.png'\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Asset '{asset_name}': Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n+ elif original_extension == '.jpg' and output_bit_depth == 8:\r\n+ output_format = 'jpg'; output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Asset '{asset_name}': Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n+ elif original_extension == '.tif':\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ log.debug(f\"Asset '{asset_name}': Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Asset '{asset_name}': Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n+ else:\r\n+ output_format = fallback_fmt_16; output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Asset '{asset_name}': Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n+ else:\r\n+ output_format = 'png'; 
output_ext = '.png'\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Asset '{asset_name}': Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n+ else:\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else:\r\n+ output_format = fallback_fmt_16; output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ log.debug(f\"Asset '{asset_name}': Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n+ else:\r\n+ output_format = fmt_8bit_config; output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ elif output_format == \"jpg\":\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Asset '{asset_name}': Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n+\r\n+\r\n+ img_to_save = img_resized.copy()\r\n+ # --- Apply Dtype Conversion ---\r\n+ # [ Existing dtype conversion logic remains the same ]\r\n+ if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n+ if img_to_save.dtype == np.uint16: 
img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n+ if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n+ if needs_float16 and img_to_save.dtype != np.float16:\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n+\r\n+\r\n+ # --- 3d. Construct Filename & Save ---\r\n+ # Use base_name (which is the current asset's name)\r\n+ filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n+ output_path_temp = self.temp_dir / filename\r\n+ log.debug(f\"Asset '{asset_name}': Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n+ log.debug(f\"Asset '{asset_name}': Saving {map_type} ({res_key}): Final image shape: {img_to_save.shape}, dtype: {img_to_save.dtype}\")\r\n+ saved_successfully, actual_format_saved = False, output_format\r\n+ try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n+ except Exception as save_err:\r\n+ log.error(f\"Asset '{asset_name}': Save failed ({output_format}) for {map_type} {res_key}: {save_err}\")\r\n+ # --- Try Fallback ---\r\n+ if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n+ log.warning(f\"Asset '{asset_name}': Attempting fallback {fallback_fmt_16} for {map_type} 
{res_key}\")\r\n+ # [ Existing fallback logic remains the same, add asset_name to logs ]\r\n+ actual_format_saved = fallback_fmt_16; output_ext = f\".{fallback_fmt_16}\";\r\n+ filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n+ output_path_temp = self.temp_dir / filename\r\n+ save_params_fallback = []\r\n+ img_fallback = None; target_fallback_dtype = np.uint16\r\n+ if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif fallback_fmt_16 == \"tif\": pass\r\n+\r\n+ if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n+ if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n+ log.error(f\"Asset '{asset_name}': Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. Skipping resolution.\")\r\n+ continue\r\n+ img_scaled = img_to_save * 65535.0\r\n+ img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n+ elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save\r\n+ else: log.error(f\"Asset '{asset_name}': Cannot convert {img_to_save.dtype} for fallback.\"); continue\r\n+\r\n+ try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Asset '{asset_name}': Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err: log.error(f\"Asset '{asset_name}': Fallback save failed for {map_type} {res_key}: {fallback_err}\", exc_info=True)\r\n+\r\n+\r\n+ # --- 3e. 
Store Result ---\r\n+ if saved_successfully:\r\n+ # Store in the local dictionary for this asset\r\n+ processed_maps_details_asset[map_type][res_key] = {\r\n+ \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n+ \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n+ \"format\": actual_format_saved\r\n+ }\r\n+ current_map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n+\r\n+ except Exception as map_proc_err:\r\n+ log.error(f\"Asset '{asset_name}': Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n+ # Store error in the local dictionary for this asset\r\n+ processed_maps_details_asset.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n+\r\n+ # Store details locally for this asset\r\n+ map_details_asset[map_type] = current_map_details\r\n+\r\n+ # --- Return results for this specific asset ---\r\n+ log.info(f\"Finished processing map files for asset '{asset_name}'.\")\r\n+ # Note: Final metadata updates (maps_present, shader_features) are handled in the main process loop\r\n+ return processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps\r\n+\r\n+\r\n+ def _merge_maps(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict) -> Dict[str, Dict[str, Dict]]:\r\n+ \"\"\"\r\n+ Merges channels from different maps for a specific asset based on rules in configuration.\r\n+\r\n+ Args:\r\n+ processed_maps_details_asset: Details of successfully processed maps for this asset.\r\n+ filtered_classified_files: Classified files dictionary filtered for this asset.\r\n+ current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n+\r\n+ Returns:\r\n+ Dict[str, Dict[str, Dict]]: Details of successfully merged maps for this asset.\r\n+ \"\"\"\r\n+ if not self.temp_dir: raise 
AssetProcessingError(\"Workspace not setup.\")\r\n+ asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n+\r\n+ merge_rules = self.config.map_merge_rules\r\n+ log.info(f\"Asset '{asset_name}': Applying {len(merge_rules)} map merging rule(s)...\")\r\n+\r\n+ # Initialize results for this asset\r\n+ merged_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n+\r\n+ for rule_index, rule in enumerate(merge_rules):\r\n+ output_map_type = rule.get(\"output_map_type\")\r\n+ inputs_mapping = rule.get(\"inputs\")\r\n+ defaults = rule.get(\"defaults\", {})\r\n+ rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n+\r\n+ if not output_map_type or not inputs_mapping:\r\n+ log.warning(f\"Asset '{asset_name}': Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs'. Rule: {rule}\")\r\n+ continue\r\n+\r\n+ log.info(f\"-- Asset '{asset_name}': Applying merge rule for '{output_map_type}' --\")\r\n+\r\n+ # --- Determine required inputs and their common resolutions for *this asset* ---\r\n+ required_input_types = set(inputs_mapping.values())\r\n+ if not required_input_types:\r\n+ log.warning(f\"Asset '{asset_name}': Skipping merge rule '{output_map_type}': No input map types defined.\")\r\n+ continue\r\n+\r\n+ possible_resolutions_per_input = []\r\n+ for input_type in required_input_types:\r\n+ # Use the processed map details passed for this asset\r\n+ if input_type in processed_maps_details_asset:\r\n+ res_keys = {res for res, details in processed_maps_details_asset[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n+ if not res_keys:\r\n+ log.warning(f\"Asset '{asset_name}': Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n+ possible_resolutions_per_input = []\r\n+ break\r\n+ possible_resolutions_per_input.append(res_keys)\r\n+ else:\r\n+ log.warning(f\"Asset '{asset_name}': Required input map type '{input_type}' 
for merge rule '{output_map_type}' was not processed.\")\r\n+ possible_resolutions_per_input = []\r\n+ break\r\n+\r\n+ if not possible_resolutions_per_input:\r\n+ log.warning(f\"Asset '{asset_name}': Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. Skipping rule.\")\r\n+ continue\r\n+\r\n+ common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n+\r\n+ if not common_resolutions:\r\n+ log.warning(f\"Asset '{asset_name}': No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n+ continue\r\n+ log.debug(f\"Asset '{asset_name}': Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n+\r\n+ # --- Loop through common resolutions ---\r\n+ res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n+ if not res_order:\r\n+ log.warning(f\"Asset '{asset_name}': Common resolutions {common_resolutions} do not match config. 
Skipping merge for '{output_map_type}'.\")\r\n+ continue\r\n+\r\n+ sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ base_name = asset_name # Use current asset's name\r\n+\r\n+ for current_res_key in sorted_res_keys:\r\n+ log.debug(f\"Asset '{asset_name}': Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n+ try:\r\n+ loaded_inputs = {}\r\n+ input_bit_depths = set()\r\n+ input_original_extensions = {}\r\n+\r\n+ # --- Load required input maps for this asset and resolution ---\r\n+ possible_to_load = True\r\n+ target_channels = list(inputs_mapping.keys())\r\n+\r\n+ for map_type in required_input_types:\r\n+ # Use processed_maps_details_asset passed in\r\n+ res_details = processed_maps_details_asset.get(map_type, {}).get(current_res_key)\r\n+ if not res_details or 'path' not in res_details:\r\n+ log.warning(f\"Asset '{asset_name}': Input map '{map_type}' missing details or path for resolution '{current_res_key}'. Cannot merge.\")\r\n+ possible_to_load = False; break\r\n+\r\n+ # Find original extension from the filtered classified data for this asset\r\n+ original_ext = '.png' # Default\r\n+ found_original = False\r\n+ # Use filtered_classified_files passed in\r\n+ for classified_map in filtered_classified_files.get(\"maps\", []):\r\n+ if classified_map['map_type'].startswith(map_type):\r\n+ original_ext = classified_map.get('original_extension', '.png')\r\n+ found_original = True\r\n+ break\r\n+ if not found_original:\r\n+ log.warning(f\"Asset '{asset_name}': Could not find original extension for merge input '{map_type}'. 
Defaulting to '.png'.\")\r\n+\r\n+ input_original_extensions[map_type] = original_ext\r\n+\r\n+ # Load the image\r\n+ input_file_path = self.temp_dir / res_details['path']\r\n+ read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n+ log.debug(f\"Asset '{asset_name}': Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ img = cv2.imread(str(input_file_path), read_flag)\r\n+ if img is None:\r\n+ raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n+ loaded_inputs[map_type] = img\r\n+ input_bit_depths.add(res_details.get('bit_depth', 8))\r\n+\r\n+ if len(img.shape) == 3:\r\n+ log.debug(f\"Asset '{asset_name}': DEBUG: Merge input '{input_file_path.name}' ({map_type}) loaded shape {img.shape}, dtype {img.dtype}.\")\r\n+\r\n+ if not possible_to_load: continue\r\n+\r\n+ # --- Determine dimensions and target_dim ---\r\n+ first_map_type = next(iter(required_input_types))\r\n+ h, w = loaded_inputs[first_map_type].shape[:2]\r\n+ first_res_details = processed_maps_details_asset.get(first_map_type, {}).get(current_res_key)\r\n+ target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n+ num_target_channels = len(target_channels)\r\n+\r\n+ # --- Determine Output Bit Depth ---\r\n+ max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n+ output_bit_depth = 8\r\n+ if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n+ output_bit_depth = 16\r\n+ log.debug(f\"Asset '{asset_name}': Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n+\r\n+ # --- Prepare and Merge Channels ---\r\n+ # [ Existing channel preparation and merging logic remains the same ]\r\n+ merged_channels_float32 = []\r\n+ for 
target_channel in target_channels:\r\n+ source_map_type = inputs_mapping.get(target_channel)\r\n+ channel_data_float32 = None\r\n+ if source_map_type and source_map_type in loaded_inputs:\r\n+ img_input = loaded_inputs[source_map_type]\r\n+ if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n+ elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n+ else: img_float = img_input.astype(np.float32)\r\n+ num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n+ if num_source_channels >= 3:\r\n+ if target_channel == 'R': channel_data_float32 = img_float[:, :, 0]\r\n+ elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n+ elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2]\r\n+ elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n+ elif num_source_channels == 1 or len(img_float.shape) == 2:\r\n+ channel_data_float32 = img_float.reshape(h, w)\r\n+ if channel_data_float32 is None:\r\n+ default_val = defaults.get(target_channel)\r\n+ if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n+ channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n+ merged_channels_float32.append(channel_data_float32)\r\n+\r\n+ if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n+ merged_image_float32 = cv2.merge(merged_channels_float32)\r\n+\r\n+ # --- Final Data Type Conversion ---\r\n+ img_final_merged = None\r\n+ if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n+ else: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+\r\n+ # --- Determine Output Format ---\r\n+ # [ Existing format determination logic remains the same, 
add asset_name to logs ]\r\n+ output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n+ primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n+ fmt_8bit_config = self.config.get_8bit_output_format()\r\n+ threshold = self.config.resolution_threshold_for_jpg\r\n+ force_lossless = output_map_type in self.config.force_lossless_map_types\r\n+\r\n+ if force_lossless:\r\n+ log.debug(f\"Asset '{asset_name}': Format forced to lossless for merged map type '{output_map_type}'.\")\r\n+ # ... (rest of force_lossless logic) ...\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else:\r\n+ if output_format != \"png\":\r\n+ log.warning(f\"Asset '{asset_name}': Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else: # 8-bit lossless\r\n+ output_format = fmt_8bit_config\r\n+ output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\":\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else:\r\n+ log.warning(f\"Asset '{asset_name}': Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n+ output_format = \"png\"; output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n+\r\n+ elif output_bit_depth == 8 and target_dim >= threshold:\r\n+ output_format = 'jpg'; output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Asset '{asset_name}': Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n+ else:\r\n+ # ... (rest of hierarchy logic, add asset_name to logs) ...\r\n+ involved_extensions = set(input_original_extensions.values())\r\n+ log.debug(f\"Asset '{asset_name}': Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n+ highest_format_str = 'jpg'\r\n+ if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n+ elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n+ elif '.png' in involved_extensions: highest_format_str = 'png'\r\n+\r\n+ final_output_format = highest_format_str\r\n+ if highest_format_str == 'tif':\r\n+ if output_bit_depth == 16:\r\n+ final_output_format = primary_fmt_16\r\n+ log.debug(f\"Asset '{asset_name}': Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n+ else:\r\n+ final_output_format = 'png'\r\n+ log.debug(f\"Asset '{asset_name}': Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n+ else:\r\n+ log.debug(f\"Asset '{asset_name}': Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold not met)\")\r\n+\r\n+ output_format = final_output_format\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, 
cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ elif output_format == \"png\":\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ elif output_format == \"jpg\":\r\n+ output_ext = \".jpg\"\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ else:\r\n+ log.error(f\"Asset '{asset_name}': Unsupported final output format '{output_format}' for merged map '{output_map_type}'. Skipping save.\")\r\n+ continue\r\n+\r\n+\r\n+ # --- JPG 8-bit Check ---\r\n+ if output_format == \"jpg\" and output_bit_depth == 16:\r\n+ log.warning(f\"Asset '{asset_name}': Output format is JPG, but merge rule resulted in 16-bit target. Forcing merged output to 8-bit for JPG save.\")\r\n+ img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ output_bit_depth = 8\r\n+\r\n+ # --- Save Merged Map ---\r\n+ image_to_save = img_final_merged\r\n+ if needs_float16 and image_to_save.dtype != np.float16:\r\n+ # [ Existing float16 conversion logic ]\r\n+ if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n+ elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n+ else: log.warning(f\"Asset '{asset_name}': Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n+\r\n+ # Use base_name (current asset's name)\r\n+ merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n+ merged_output_path_temp = self.temp_dir / merged_filename\r\n+ log.debug(f\"Asset '{asset_name}': Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n+\r\n+ # --- Save with Fallback ---\r\n+ # [ Existing save/fallback logic, add 
asset_name to logs ]\r\n+ saved_successfully = False\r\n+ actual_format_saved = output_format\r\n+ try:\r\n+ cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n+ log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n+ saved_successfully = True\r\n+ except Exception as save_err:\r\n+ log.error(f\"Asset '{asset_name}': Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n+ if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n+ log.warning(f\"Asset '{asset_name}': Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n+ # ... [ Fallback save logic, add asset_name to logs ] ...\r\n+ actual_format_saved = fallback_fmt_16\r\n+ output_ext = f\".{fallback_fmt_16}\"\r\n+ merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n+ merged_output_path_temp = self.temp_dir / merged_filename\r\n+ save_params_fallback = []\r\n+ img_fallback = None\r\n+ target_fallback_dtype = np.uint16\r\n+\r\n+ if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif fallback_fmt_16 == \"tif\": pass\r\n+\r\n+ if image_to_save.dtype == np.float16:\r\n+ if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(f\"Asset '{asset_name}': NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n+ img_scaled = image_to_save * 65535.0\r\n+ img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n+ elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n+ else: log.error(f\"Asset '{asset_name}': Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n+\r\n+ try: 
cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err: log.error(f\"Asset '{asset_name}': Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n+\r\n+\r\n+ # --- Record details locally ---\r\n+ if saved_successfully:\r\n+ merged_maps_details_asset[output_map_type][current_res_key] = {\r\n+ \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n+ \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n+ }\r\n+ # Note: Adding to metadata[\"merged_maps\"] list happens in the main process loop\r\n+\r\n+ except Exception as merge_res_err:\r\n+ log.error(f\"Asset '{asset_name}': Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n+ # Store error locally for this asset\r\n+ merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n+\r\n+ log.info(f\"Asset '{asset_name}': Finished applying map merging rules.\")\r\n+ # Return the details for this asset\r\n+ return merged_maps_details_asset\r\n+\r\n+\r\n+ def _generate_metadata_file(self, current_asset_metadata: Dict, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], map_details_asset: Dict[str, Dict]) -> Path:\r\n+ \"\"\"\r\n+ Gathers metadata for a specific asset and writes it to a temporary JSON file.\r\n+\r\n+ Args:\r\n+ current_asset_metadata: Base metadata for this asset (name, category, archetype, etc.).\r\n+ processed_maps_details_asset: Details of processed maps for this asset.\r\n+ merged_maps_details_asset: Details of merged maps for this 
asset.\r\n+ filtered_classified_files_asset: Classified files belonging only to this asset.\r\n+ unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n+ map_details_asset: Dictionary containing details like source bit depth, gloss inversion per map type.\r\n+\r\n+\r\n+ Returns:\r\n+ Path: The path to the generated temporary metadata file.\r\n+ \"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ asset_name = current_asset_metadata.get(\"asset_name\")\r\n+ if not asset_name or asset_name == \"UnknownAssetName\":\r\n+ log.warning(\"Asset name unknown during metadata generation, file may be incomplete or incorrectly named.\")\r\n+ asset_name = \"UnknownAsset_Metadata\" # Fallback for filename\r\n+\r\n+ log.info(f\"Generating metadata file for asset '{asset_name}'...\")\r\n+ # Start with the base metadata passed in for this asset\r\n+ final_metadata = current_asset_metadata.copy()\r\n+\r\n+ # Populate map details from the specific asset's processing results\r\n+ final_metadata[\"processed_map_resolutions\"] = {}\r\n+ for map_type, res_dict in processed_maps_details_asset.items():\r\n+ keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n+ if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys)\r\n+\r\n+ final_metadata[\"merged_map_resolutions\"] = {}\r\n+ for map_type, res_dict in merged_maps_details_asset.items():\r\n+ keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n+ if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n+\r\n+ # Determine maps present based on successful processing for this asset\r\n+ final_metadata[\"maps_present\"] = sorted(list(processed_maps_details_asset.keys()))\r\n+ final_metadata[\"merged_maps\"] = sorted(list(merged_maps_details_asset.keys()))\r\n+\r\n+ # Determine shader features based on this asset's maps\r\n+ features = set()\r\n+ for 
map_type, details in map_details_asset.items(): # Use map_details_asset passed in\r\n+ if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n+ if details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n+ res_details = processed_maps_details_asset.get(map_type, {})\r\n+ if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n+ final_metadata[\"shader_features\"] = sorted(list(features))\r\n+\r\n+ # Determine source files in this asset's Extra folder\r\n+ # Includes:\r\n+ # - Files originally classified as 'Extra' or 'Unrecognised' belonging to this asset.\r\n+ # - Files originally classified as 'Ignored' belonging to this asset.\r\n+ # - All 'unmatched' files (belonging to no specific asset).\r\n+ source_files_in_extra_set = set()\r\n+ for category in ['extra', 'ignored']:\r\n+ for file_info in filtered_classified_files_asset.get(category, []):\r\n+ source_files_in_extra_set.add(str(file_info['source_path']))\r\n+ # Add all unmatched files\r\n+ for file_path in unmatched_files_paths:\r\n+ source_files_in_extra_set.add(str(file_path))\r\n+ final_metadata[\"source_files_in_extra\"] = sorted(list(source_files_in_extra_set))\r\n+\r\n+ # Add image stats and map details specific to this asset\r\n+ final_metadata[\"image_stats_1k\"] = current_asset_metadata.get(\"image_stats_1k\", {}) # Get from passed metadata\r\n+ final_metadata[\"map_details\"] = map_details_asset # Use map_details_asset passed in\r\n+ final_metadata[\"aspect_ratio_change_string\"] = current_asset_metadata.get(\"aspect_ratio_change_string\", \"N/A\") # Get from passed metadata\r\n+\r\n+\r\n+ # Add processing info\r\n+ final_metadata[\"_processing_info\"] = {\r\n+ \"preset_used\": self.config.preset_name,\r\n+ \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n+ \"input_source\": str(self.input_path.name), # Add original input source\r\n+ }\r\n+\r\n+ # 
Sort lists just before writing\r\n+ for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n+ if key in final_metadata and isinstance(final_metadata[key], list): final_metadata[key].sort()\r\n+\r\n+ # Use asset name in temporary filename to avoid conflicts\r\n+ metadata_filename = f\"{asset_name}_{self.config.metadata_filename}\"\r\n+ output_path = self.temp_dir / metadata_filename\r\n+ log.debug(f\"Writing metadata for asset '{asset_name}' to temporary file: {output_path}\")\r\n+ try:\r\n+ with open(output_path, 'w', encoding='utf-8') as f:\r\n+ json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n+ log.info(f\"Metadata file '{metadata_filename}' generated successfully for asset '{asset_name}'.\")\r\n+ return output_path # Return the path to the temporary file\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to write metadata file {output_path} for asset '{asset_name}': {e}\") from e\r\n+\r\n+\r\n+ def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n+ \"\"\"\r\n+ Calculates the aspect ratio change string (e.g., \"EVEN\", \"X133\") based on original prototype logic.\r\n+ Returns the string representation.\r\n+ \"\"\"\r\n+ if original_width <= 0 or original_height <= 0:\r\n+ log.warning(\"Cannot calculate aspect ratio change with zero original dimensions.\")\r\n+ return \"InvalidInput\"\r\n+\r\n+ # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)\r\n+ if resized_width <= 0 or resized_height <= 0:\r\n+ log.warning(\"Cannot calculate aspect ratio change with zero resized dimensions.\")\r\n+ return \"InvalidResize\"\r\n+\r\n+ # Original logic from user feedback\r\n+ width_change_percentage = ((resized_width - original_width) / original_width) * 100\r\n+ height_change_percentage = ((resized_height - original_height) / original_height) * 100\r\n+\r\n+ 
normalized_width_change = width_change_percentage / 100\r\n+ normalized_height_change = height_change_percentage / 100\r\n+\r\n+ normalized_width_change = min(max(normalized_width_change + 1, 0), 2)\r\n+ normalized_height_change = min(max(normalized_height_change + 1, 0), 2)\r\n+\r\n+ # Handle potential zero division if one dimension change is exactly -100% (normalized to 0)\r\n+ # If both are 0, aspect ratio is maintained. If one is 0, the other dominates.\r\n+ if normalized_width_change == 0 and normalized_height_change == 0:\r\n+ closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1\r\n+ elif normalized_width_change == 0:\r\n+ closest_value_to_one = abs(normalized_height_change)\r\n+ elif normalized_height_change == 0:\r\n+ closest_value_to_one = abs(normalized_width_change)\r\n+ else:\r\n+ closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))\r\n+\r\n+ # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0\r\n+ epsilon = 1e-9\r\n+ scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one\r\n+\r\n+ scaled_normalized_width_change = scale_factor * normalized_width_change\r\n+ scaled_normalized_height_change = scale_factor * normalized_height_change\r\n+\r\n+ output_width = round(scaled_normalized_width_change, decimals)\r\n+ output_height = round(scaled_normalized_height_change, decimals)\r\n+\r\n+ # Convert to int if exactly 1.0 after rounding\r\n+ if abs(output_width - 1.0) < epsilon: output_width = 1\r\n+ if abs(output_height - 1.0) < epsilon: output_height = 1\r\n+\r\n+ # Determine output string\r\n+ if original_width == original_height or abs(output_width - output_height) < epsilon:\r\n+ output = \"EVEN\"\r\n+ elif output_width != 1 and output_height == 1:\r\n+ output = f\"X{str(output_width).replace('.', '')}\"\r\n+ elif output_height != 1 and output_width == 1:\r\n+ output = 
f\"Y{str(output_height).replace('.', '')}\"\r\n+ else:\r\n+ # Both changed relative to each other\r\n+ output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n+\r\n+ log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n+ return output\r\n+\r\n+ def _sanitize_filename(self, name: str) -> str:\r\n+ \"\"\"Removes or replaces characters invalid for filenames/directory names.\"\"\"\r\n+ # ... (Implementation from Response #51) ...\r\n+ if not isinstance(name, str): name = str(name)\r\n+ name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n+ name = re.sub(r'_+', '_', name)\r\n+ name = name.strip('_')\r\n+ if not name: name = \"invalid_name\"\r\n+ return name\r\n+\r\n+ def _organize_output_files(self, current_asset_name: str, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], temp_metadata_path: Path):\r\n+ \"\"\"\r\n+ Moves/copies processed files for a specific asset from the temp dir to the final output structure.\r\n+\r\n+ Args:\r\n+ current_asset_name: The sanitized name of the asset being organized.\r\n+ processed_maps_details_asset: Details of processed maps for this asset.\r\n+ merged_maps_details_asset: Details of merged maps for this asset.\r\n+ filtered_classified_files_asset: Classified files dictionary filtered for this asset.\r\n+ unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n+ temp_metadata_path: Path to the temporary metadata file for this asset.\r\n+ \"\"\"\r\n+ if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n+ if not current_asset_name or current_asset_name == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name 
missing for organization.\")\r\n+ supplier_name = self.config.supplier_name # Get supplier name from config\r\n+ if not supplier_name: raise AssetProcessingError(\"Supplier name missing from config.\")\r\n+\r\n+ supplier_sanitized = self._sanitize_filename(supplier_name)\r\n+ asset_name_sanitized = self._sanitize_filename(current_asset_name) # Already sanitized, but ensure consistency\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ log.info(f\"Organizing output files for asset '{asset_name_sanitized}' into: {final_dir}\")\r\n+\r\n+ try:\r\n+ # Handle overwrite logic specifically for this asset's directory\r\n+ if final_dir.exists() and self.overwrite:\r\n+ log.warning(f\"Output directory exists for '{asset_name_sanitized}' and overwrite is True. Removing existing directory: {final_dir}\")\r\n+ try:\r\n+ shutil.rmtree(final_dir)\r\n+ except Exception as rm_err:\r\n+ raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} for asset '{asset_name_sanitized}' during overwrite: {rm_err}\") from rm_err\r\n+ # Note: Skip check should prevent this if overwrite is False, but mkdir handles exist_ok=True\r\n+\r\n+ final_dir.mkdir(parents=True, exist_ok=True)\r\n+ except Exception as e:\r\n+ if not isinstance(e, AssetProcessingError):\r\n+ raise AssetProcessingError(f\"Failed to create final dir {final_dir} for asset '{asset_name_sanitized}': {e}\") from e\r\n+ else:\r\n+ raise\r\n+\r\n+ # --- Helper for moving files ---\r\n+ # Keep track of files successfully moved to avoid copying them later as 'unmatched'\r\n+ moved_source_files = set()\r\n+ def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n+ if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc}.\"); return\r\n+ source_abs = self.temp_dir / src_rel_path\r\n+ # Use the original filename from the source path for the destination\r\n+ dest_abs = dest_dir / src_rel_path.name\r\n+ 
try:\r\n+ if source_abs.exists():\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n+ dest_dir.mkdir(parents=True, exist_ok=True)\r\n+ shutil.move(str(source_abs), str(dest_abs))\r\n+ moved_source_files.add(src_rel_path) # Track successfully moved source files\r\n+ else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc}: {source_abs}\")\r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n+\r\n+ # --- Helper for copying files (for unmatched extras) ---\r\n+ def _safe_copy(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n+ if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc} copy.\"); return\r\n+ # Skip copying if this source file was already moved (e.g., it was an 'Extra' for this specific asset)\r\n+ if src_rel_path in moved_source_files:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Skipping copy of {file_desc} '{src_rel_path.name}' as it was already moved.\")\r\n+ return\r\n+ source_abs = self.temp_dir / src_rel_path\r\n+ dest_abs = dest_dir / src_rel_path.name\r\n+ try:\r\n+ if source_abs.exists():\r\n+ # Avoid copying if the exact destination file already exists (e.g., from a previous asset's copy)\r\n+ if dest_abs.exists():\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Destination file already exists for {file_desc} copy: {dest_abs.name}. 
Skipping copy.\")\r\n+ return\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Copying {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n+ dest_dir.mkdir(parents=True, exist_ok=True)\r\n+ shutil.copy2(str(source_abs), str(dest_abs)) # Use copy2 to preserve metadata\r\n+ else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc} copy: {source_abs}\")\r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed copying {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n+\r\n+\r\n+ # --- Move Processed/Merged Maps ---\r\n+ for details_dict in [processed_maps_details_asset, merged_maps_details_asset]:\r\n+ for map_type, res_dict in details_dict.items():\r\n+ if 'error' in res_dict: continue\r\n+ for res_key, details in res_dict.items():\r\n+ if isinstance(details, dict) and 'path' in details:\r\n+ _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n+\r\n+ # --- Move Models specific to this asset ---\r\n+ for model_info in filtered_classified_files_asset.get('models', []):\r\n+ _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n+\r\n+ # --- Move Metadata File ---\r\n+ if temp_metadata_path and temp_metadata_path.exists():\r\n+ final_metadata_path = final_dir / self.config.metadata_filename # Use standard name\r\n+ try:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving metadata file: {temp_metadata_path.name} -> {final_metadata_path.relative_to(self.output_base_path)}\")\r\n+ shutil.move(str(temp_metadata_path), str(final_metadata_path))\r\n+ # No need to add metadata path to moved_source_files as it's uniquely generated\r\n+ except Exception as e:\r\n+ log.error(f\"Asset '{asset_name_sanitized}': Failed moving metadata file '{temp_metadata_path.name}': {e}\", exc_info=True)\r\n+ else:\r\n+ log.warning(f\"Asset '{asset_name_sanitized}': Temporary metadata file path missing or file does not exist: 
{temp_metadata_path}\")\r\n+\r\n+\r\n+ # --- Handle Extra/Ignored/Unmatched Files ---\r\n+ extra_subdir_name = self.config.extra_files_subdir\r\n+ extra_dir = final_dir / extra_subdir_name\r\n+ if filtered_classified_files_asset.get('extra') or filtered_classified_files_asset.get('ignored') or unmatched_files_paths:\r\n+ try:\r\n+ extra_dir.mkdir(parents=True, exist_ok=True)\r\n+\r\n+ # Move asset-specific Extra/Ignored files\r\n+ files_to_move_extra = filtered_classified_files_asset.get('extra', []) + filtered_classified_files_asset.get('ignored', [])\r\n+ if files_to_move_extra:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving {len(files_to_move_extra)} asset-specific files to '{extra_subdir_name}/'...\")\r\n+ for file_info in files_to_move_extra:\r\n+ _safe_move(file_info.get('source_path'), extra_dir, f\"extra/ignored file ({file_info.get('reason', 'Unknown')})\")\r\n+\r\n+ # Copy unmatched files\r\n+ if unmatched_files_paths:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Copying {len(unmatched_files_paths)} unmatched files to '{extra_subdir_name}/'...\")\r\n+ for file_path in unmatched_files_paths:\r\n+ _safe_copy(file_path, extra_dir, \"unmatched file\")\r\n+\r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed creating/moving/copying to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n+\r\n+ log.info(f\"Finished organizing output for asset '{asset_name_sanitized}'.\")\r\n+\r\n+\r\n+ def _cleanup_workspace(self):\r\n+ \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n+ # ... 
(Implementation from Response #45) ...\r\n+ if self.temp_dir and self.temp_dir.exists():\r\n+ try:\r\n+ log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n+ shutil.rmtree(self.temp_dir)\r\n+ self.temp_dir = None\r\n+ log.debug(\"Temporary workspace cleaned up successfully.\")\r\n+ except Exception as e:\r\n+ log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n+\r\n+ # --- Prediction Method ---\r\n+ def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n+ \"\"\"\r\n+ Predicts the final output structure (supplier, asset name) and attempts\r\n+ to predict output filenames for potential map files based on naming conventions.\r\n+ Does not perform full processing or image loading.\r\n+\r\n+ Returns:\r\n+ tuple[str | None, str | None, dict[str, str] | None]:\r\n+ (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n+ where file_predictions_dict maps input filename -> predicted output filename.\r\n+ Returns None if prediction fails critically.\r\n+ \"\"\"\r\n+ log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n+ try:\r\n+ # 1. Get Supplier Name\r\n+ supplier_name = self.config.supplier_name\r\n+ if not supplier_name:\r\n+ log.warning(\"Supplier name not found in configuration during prediction.\")\r\n+ return None\r\n+\r\n+ # 2. 
List Input Filenames/Stems\r\n+ candidate_stems = set() # Use set for unique stems\r\n+ filenames = []\r\n+ if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n+ try:\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ # Get only filenames, ignore directories\r\n+ filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n+ except zipfile.BadZipFile:\r\n+ log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n+ return None\r\n+ except Exception as zip_err:\r\n+ log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n+ return None # Cannot proceed if we can't list files\r\n+ elif self.input_path.is_dir():\r\n+ try:\r\n+ for item in self.input_path.iterdir():\r\n+ if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n+ filenames.append(item.name)\r\n+ # Note: Not walking subdirs for prediction to keep it fast\r\n+ except Exception as dir_err:\r\n+ log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n+ return None\r\n+\r\n+ if not filenames:\r\n+ log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n+ return None # Return None if no files found\r\n+\r\n+ # 3. 
Lightweight Classification for Stems and Potential Maps\r\n+ map_type_mapping = self.config.map_type_mapping\r\n+ model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n+ separator = self.config.source_naming_separator\r\n+ processed_filenames = set() # Track full filenames processed\r\n+ potential_map_files = {} # Store fname -> potential map_type\r\n+\r\n+ for fname in filenames:\r\n+ if fname in processed_filenames: continue\r\n+\r\n+ fstem = Path(fname).stem\r\n+ fstem_lower = fstem.lower()\r\n+ name_parts = fstem_lower.split(separator)\r\n+\r\n+ # Check map rules first\r\n+ map_matched = False\r\n+ for mapping_rule in map_type_mapping:\r\n+ source_keywords, standard_map_type = mapping_rule\r\n+ if standard_map_type not in self.config.standard_map_types: continue\r\n+ for keyword in source_keywords:\r\n+ kw_lower = keyword.lower().strip('*')\r\n+ if kw_lower in name_parts:\r\n+ is_exact_match = any(part == kw_lower for part in name_parts)\r\n+ if is_exact_match:\r\n+ candidate_stems.add(fstem) # Add unique stem\r\n+ potential_map_files[fname] = standard_map_type # Store potential type\r\n+ processed_filenames.add(fname)\r\n+ map_matched = True\r\n+ break # Found keyword match for this rule\r\n+ if map_matched: break # Found a rule match for this file\r\n+ if map_matched: continue # Move to next filename if identified as map\r\n+\r\n+ # Check model patterns if not a map\r\n+ for pattern in model_patterns:\r\n+ if fnmatch(fname.lower(), pattern.lower()):\r\n+ candidate_stems.add(fstem) # Still add stem for base name determination\r\n+ processed_filenames.add(fname)\r\n+ # Don't add models to potential_map_files\r\n+ break # Found model match\r\n+\r\n+ # Note: Files matching neither maps nor models are ignored for prediction details\r\n+\r\n+ log.debug(f\"[PREDICTION] Potential map files identified: {potential_map_files}\") # DEBUG PREDICTION\r\n+ candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n+ 
log.debug(f\"[PREDICTION] Candidate stems identified: {candidate_stems_list}\") # DEBUG PREDICTION\r\n+ if not candidate_stems_list:\r\n+ log.warning(f\"Prediction: No relevant map/model stems found in {self.input_path.name}. Using input name as fallback.\")\r\n+ # Fallback: Use the input path's name itself if no stems found\r\n+ base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ determined_base_name = base_name_fallback\r\n+ else:\r\n+ # 4. Replicate _determine_base_metadata logic for base name\r\n+ determined_base_name = \"UnknownAssetName\"\r\n+ separator = self.config.source_naming_separator\r\n+ indices_dict = self.config.source_naming_indices\r\n+ base_index_raw = indices_dict.get('base_name')\r\n+ log.debug(f\"[PREDICTION] Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}'\") # DEBUG PREDICTION\r\n+\r\n+ base_index = None\r\n+ if base_index_raw is not None:\r\n+ try:\r\n+ base_index = int(base_index_raw) # Use explicit conversion like in main logic\r\n+ except (ValueError, TypeError):\r\n+ log.warning(f\"[PREDICTION] Could not convert base_name index '{base_index_raw}' to integer.\")\r\n+\r\n+ if isinstance(base_index, int):\r\n+ potential_base_names = set()\r\n+ for stem in candidate_stems_list: # Iterate over the list\r\n+ parts = stem.split(separator)\r\n+ log.debug(f\"[PREDICTION] Processing stem: '{stem}', Parts: {parts}\") # DEBUG PREDICTION\r\n+ if len(parts) > base_index:\r\n+ extracted_name = parts[base_index]\r\n+ potential_base_names.add(extracted_name)\r\n+ log.debug(f\"[PREDICTION] Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG PREDICTION\r\n+ else:\r\n+ log.debug(f\"[PREDICTION] Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG PREDICTION\r\n+ if len(potential_base_names) == 1:\r\n+ determined_base_name = potential_base_names.pop()\r\n+ log.debug(f\"[PREDICTION] 
Determined base name '{determined_base_name}' from structured parts (index {base_index}).\") # DEBUG PREDICTION\r\n+ elif len(potential_base_names) > 1:\r\n+ log.debug(f\"[PREDICTION] Multiple potential base names found from index {base_index}: {potential_base_names}. Falling back to common prefix.\") # DEBUG PREDICTION\r\n+ determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+ # else: Use common prefix below\r\n+\r\n+ if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n+ log.debug(\"[PREDICTION] Falling back to common prefix for base name determination (structured parts failed or no index).\") # DEBUG PREDICTION\r\n+ determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+\r\n+ # 5. Sanitize Names\r\n+ final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n+ log.debug(f\"[PREDICTION] Final determined base name for prediction: '{final_base_name}'\") # DEBUG PREDICTION\r\n+ final_supplier_name = self._sanitize_filename(supplier_name)\r\n+\r\n+ # 6. 
Predict Output Filenames\r\n+ file_predictions = {}\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ # Use highest resolution key as a placeholder for prediction\r\n+ highest_res_key = \"Res?\" # Fallback\r\n+ if self.config.image_resolutions:\r\n+ highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n+\r\n+ for input_fname, map_type in potential_map_files.items():\r\n+ # Assume PNG for prediction, extension might change based on bit depth rules later\r\n+ # but this gives a good idea of the renaming.\r\n+ # A more complex prediction could check bit depth rules.\r\n+ predicted_ext = \"png\" # Simple assumption for preview\r\n+ try:\r\n+ predicted_fname = target_pattern.format(\r\n+ base_name=final_base_name,\r\n+ map_type=map_type,\r\n+ resolution=highest_res_key, # Use placeholder resolution\r\n+ ext=predicted_ext\r\n+ )\r\n+ file_predictions[input_fname] = predicted_fname\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n+ file_predictions[input_fname] = \"[Filename Format Error]\"\r\n+\r\n+\r\n+ log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n+ return final_supplier_name, final_base_name, file_predictions\r\n+\r\n+ except Exception as e:\r\n+ log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n+ return None\r\n+\r\n+\r\n+ # --- New Detailed Prediction Method ---\r\n+ def get_detailed_file_predictions(self) -> list[dict] | None:\r\n+ \"\"\"\r\n+ Performs extraction and classification to provide a detailed list of all\r\n+ files found within the input and their predicted status/output name,\r\n+ handling multiple potential assets within the input.\r\n+\r\n+ Returns:\r\n+ list[dict] | None: A list of dictionaries, each representing a file:\r\n+ {'original_path': str,\r\n+ 'predicted_asset_name': str | None,\r\n+ 'predicted_output_name': str | None,\r\n+ 'status': str,\r\n+ 'details': str | None}\r\n+ Returns None if a critical error occurs during setup/classification.\r\n+ \"\"\"\r\n+ log.info(f\"Getting detailed file predictions for input: {self.input_path.name}\")\r\n+ results = []\r\n+ all_files_in_workspace = [] # Keep track of all files found\r\n+\r\n+ try:\r\n+ # --- Perform necessary setup and classification ---\r\n+ self._setup_workspace()\r\n+ self._extract_input()\r\n+ # Run classification - this populates self.classified_files\r\n+ self._inventory_and_classify_files()\r\n+\r\n+ # --- Determine distinct assets and file mapping ---\r\n+ # This uses the results from _inventory_and_classify_files\r\n+ distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n+ log.debug(f\"Prediction: Determined base names: {distinct_base_names}\")\r\n+ log.debug(f\"Prediction: File to base name map: { {str(k):v for k,v in file_to_base_name_map.items()} }\")\r\n+\r\n+ # --- Apply Suffixes for Prediction 
Preview ---\r\n+ # This logic is similar to the main process method but applied to the classified_files list\r\n+ log.debug(\"Prediction: Applying map type suffixes for preview...\")\r\n+ grouped_classified_maps = defaultdict(list)\r\n+ for map_info in self.classified_files.get('maps', []):\r\n+ # Group by the base map type\r\n+ grouped_classified_maps[map_info['map_type']].append(map_info)\r\n+\r\n+ # Create a new list for maps with updated types for prediction\r\n+ maps_with_predicted_types = []\r\n+ for base_map_type, maps_in_group in grouped_classified_maps.items():\r\n+ respect_variants = base_map_type in self.config.respect_variant_map_types\r\n+ # Sort maps within the group for consistent suffixing (using the same key as in _inventory_and_classify_files)\r\n+ maps_in_group.sort(key=lambda c: (\r\n+ c.get('preset_rule_index', 9999),\r\n+ c.get('keyword_index_in_rule', 9999) if 'keyword_index_in_rule' in c else 9999, # Handle potential missing key\r\n+ str(c['source_path'])\r\n+ ))\r\n+\r\n+ for i, map_info in enumerate(maps_in_group):\r\n+ predicted_map_type = f\"{base_map_type}-{i + 1}\" if respect_variants else base_map_type\r\n+ # Create a copy to avoid modifying the original classified_files list in place\r\n+ map_info_copy = map_info.copy()\r\n+ map_info_copy['predicted_map_type'] = predicted_map_type # Store the predicted type\r\n+ maps_with_predicted_types.append(map_info_copy)\r\n+\r\n+ # Replace the original maps list with the one containing predicted types for the next step\r\n+ # Note: This is a temporary list for prediction generation, not modifying the instance's classified_files permanently\r\n+ # self.classified_files[\"maps\"] = maps_with_predicted_types # Avoid modifying instance state\r\n+\r\n+ # --- Prepare for filename prediction ---\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n+ if self.config.image_resolutions:\r\n+ highest_res_key = 
max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n+\r\n+ # --- Process all classified files (including maps with predicted types) ---\r\n+ all_classified_files_with_category = []\r\n+ # Add maps with predicted types first\r\n+ for map_info in maps_with_predicted_types:\r\n+ map_info['category'] = 'maps' # Ensure category is set\r\n+ all_classified_files_with_category.append(map_info)\r\n+ if 'source_path' in map_info:\r\n+ all_files_in_workspace.append(map_info['source_path'])\r\n+\r\n+ # Add other categories (models, extra, ignored)\r\n+ for category in ['models', 'extra', 'ignored']:\r\n+ for file_info in self.classified_files.get(category, []):\r\n+ file_info['category'] = category\r\n+ all_classified_files_with_category.append(file_info)\r\n+ if 'source_path' in file_info:\r\n+ all_files_in_workspace.append(file_info['source_path'])\r\n+\r\n+\r\n+ # --- Generate results for each file ---\r\n+ processed_paths = set() # Track paths already added to results\r\n+ for file_info in all_classified_files_with_category:\r\n+ original_path = file_info.get(\"source_path\")\r\n+ if not original_path or original_path in processed_paths:\r\n+ continue # Skip if path missing or already processed\r\n+\r\n+ original_path_str = str(original_path)\r\n+ processed_paths.add(original_path) # Mark as processed\r\n+\r\n+ # Determine predicted asset name and status\r\n+ predicted_asset_name = file_to_base_name_map.get(original_path) # Can be None\r\n+ category = file_info['category'] # maps, models, extra, ignored\r\n+ reason = file_info.get('reason') # Specific reason for extra/ignored\r\n+ status = \"Unknown\"\r\n+ details = None\r\n+ predicted_output_name = None # Usually original name, except for maps\r\n+\r\n+ if category == \"maps\":\r\n+ status = \"Mapped\"\r\n+ # Use the predicted_map_type for the preview display\r\n+ map_type_for_preview = file_info.get(\"predicted_map_type\", file_info.get(\"map_type\", \"UnknownType\"))\r\n+ details = 
f\"[{map_type_for_preview}]\"\r\n+ if file_info.get(\"is_16bit_source\"): details += \" (16-bit)\"\r\n+ # Predict map output name using its determined asset name and predicted map type\r\n+ if predicted_asset_name:\r\n+ try:\r\n+ predicted_ext = \"png\" # Assume PNG for prediction simplicity\r\n+ predicted_output_name = target_pattern.format(\r\n+ base_name=predicted_asset_name,\r\n+ map_type=map_type_for_preview, # Use the predicted type here\r\n+ resolution=highest_res_key,\r\n+ ext=predicted_ext\r\n+ )\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction format error for map {original_path_str}: {fmt_err}\")\r\n+ predicted_output_name = \"[Format Error]\"\r\n+ details += f\" (Format Key Error: {fmt_err})\"\r\n+ except Exception as pred_err:\r\n+ log.warning(f\"Prediction error for map {original_path_str}: {pred_err}\")\r\n+ predicted_output_name = \"[Prediction Error]\"\r\n+ details += f\" (Error: {pred_err})\"\r\n+ else:\r\n+ # Should not happen for maps if _determine_base_metadata worked correctly\r\n+ log.warning(f\"Map file '{original_path_str}' has no predicted asset name.\")\r\n+ predicted_output_name = \"[No Asset Name]\"\r\n+\r\n+ elif category == \"models\":\r\n+ status = \"Model\"\r\n+ details = \"[Model]\"\r\n+ predicted_output_name = original_path.name # Models keep original name\r\n+\r\n+ elif category == \"ignored\":\r\n+ status = \"Ignored\"\r\n+ details = f\"Ignored ({reason or 'Unknown reason'})\"\r\n+ predicted_output_name = None # Ignored files have no output\r\n+\r\n+ elif category == \"extra\":\r\n+ if predicted_asset_name is None:\r\n+ # This is an \"Unmatched Extra\" file (includes Unrecognised and explicit Extras without a base name)\r\n+ status = \"Unmatched Extra\"\r\n+ details = f\"[Unmatched Extra ({reason or 'N/A'})]\" # Include original reason if available\r\n+ elif reason == 'Unrecognised':\r\n+ # Unrecognised but belongs to a specific asset\r\n+ status = \"Unrecognised\"\r\n+ details = \"[Unrecognised]\"\r\n+ else:\r\n+ 
# Explicitly matched an 'extra' pattern and belongs to an asset\r\n+ status = \"Extra\"\r\n+ details = f\"Extra ({reason})\"\r\n+ predicted_output_name = original_path.name # Extra files keep original name\r\n+\r\n+ else:\r\n+ log.warning(f\"Unknown category '{category}' encountered during prediction for {original_path_str}\")\r\n+ status = \"Error\"\r\n+ details = f\"[Unknown Category: {category}]\"\r\n+ predicted_output_name = original_path.name\r\n+\r\n+\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_asset_name\": predicted_asset_name, # May be None\r\n+ \"predicted_output_name\": predicted_output_name,\r\n+ \"status\": status,\r\n+ \"details\": details\r\n+ })\r\n+\r\n+ # Add any files found during walk but missed by classification (should be rare)\r\n+ # These are likely unmatched as well.\r\n+ for file_path in all_files_in_workspace:\r\n+ if file_path not in processed_paths:\r\n+ log.warning(f\"File found in workspace but not classified: {file_path}. Adding as Unmatched Extra.\")\r\n+ results.append({\r\n+ \"original_path\": str(file_path),\r\n+ \"predicted_asset_name\": None, # Explicitly None as it wasn't mapped\r\n+ \"predicted_output_name\": file_path.name,\r\n+ \"status\": \"Unmatched Extra\",\r\n+ \"details\": \"[Missed Classification]\"\r\n+ })\r\n+\r\n+\r\n+ log.info(f\"Detailed prediction complete for input '{self.input_path.name}'. 
Found {len(results)} files.\")\r\n+ # Sort results by original path for consistent display\r\n+ results.sort(key=lambda x: x.get(\"original_path\", \"\"))\r\n+ return results\r\n+\r\n+ except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n+ log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n+ return None # Indicate critical failure\r\n+ finally:\r\n+ # Ensure cleanup always happens\r\n+ self._cleanup_workspace()\r\n+\r\n+\r\n+# --- End of AssetProcessor Class ---\n\\ No newline at end of file\n" }, { "date": 1745338833423, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1178,9 +1178,22 @@\n output_path_temp = self.temp_dir / filename\r\n log.debug(f\"Asset '{asset_name}': Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n log.debug(f\"Asset '{asset_name}': Saving {map_type} ({res_key}): Final image shape: {img_to_save.shape}, dtype: {img_to_save.dtype}\")\r\n saved_successfully, actual_format_saved = False, output_format\r\n- try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n+ # --- Conditional RGB -> BGR Conversion before saving ---\r\n+ img_save_final = img_to_save # Default to original\r\n+ is_3_channel = len(img_to_save.shape) == 3 and img_to_save.shape[2] == 3\r\n+ if is_3_channel and not output_format.startswith(\"exr\"):\r\n+ log.debug(f\"Asset '{asset_name}': Converting RGB to BGR for saving {map_type} ({res_key}) as {output_format}\")\r\n+ try:\r\n+ img_save_final = cv2.cvtColor(img_to_save, cv2.COLOR_RGB2BGR)\r\n+ except Exception as cvt_err:\r\n+ log.error(f\"Asset '{asset_name}': Failed RGB->BGR conversion for {map_type} ({res_key}): {cvt_err}. 
Saving original.\")\r\n+ img_save_final = img_to_save # Fallback to original if conversion fails\r\n+\r\n+ try:\r\n+ cv2.imwrite(str(output_path_temp), img_save_final, save_params)\r\n+ saved_successfully = True\r\n except Exception as save_err:\r\n log.error(f\"Asset '{asset_name}': Save failed ({output_format}) for {map_type} {res_key}: {save_err}\")\r\n # --- Try Fallback ---\r\n if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n@@ -1202,12 +1215,28 @@\n img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save\r\n else: log.error(f\"Asset '{asset_name}': Cannot convert {img_to_save.dtype} for fallback.\"); continue\r\n \r\n- try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Asset '{asset_name}': Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Asset '{asset_name}': Fallback save failed for {map_type} {res_key}: {fallback_err}\", exc_info=True)\r\n+ # --- Conditional RGB -> BGR Conversion for fallback ---\r\n+ img_fallback_save_final = img_fallback # Default to original fallback image\r\n+ is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3\r\n+ # Use actual_format_saved for the check here\r\n+ if is_3_channel_fallback and not actual_format_saved.startswith(\"exr\"):\r\n+ log.debug(f\"Asset '{asset_name}': Converting RGB to BGR for fallback saving {map_type} ({res_key}) as {actual_format_saved}\")\r\n+ try:\r\n+ img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)\r\n+ except Exception as cvt_err_fb:\r\n+ log.error(f\"Asset '{asset_name}': Failed RGB->BGR conversion for fallback {map_type} ({res_key}): {cvt_err_fb}. 
Saving original fallback.\")\r\n+ img_fallback_save_final = img_fallback # Fallback to original if conversion fails\r\n \r\n+ try:\r\n+ cv2.imwrite(str(output_path_temp), img_fallback_save_final, save_params_fallback)\r\n+ saved_successfully = True\r\n+ log.info(f\" > Asset '{asset_name}': Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err:\r\n+ log.error(f\"Asset '{asset_name}': Fallback save failed for {map_type} {res_key}: {fallback_err}\", exc_info=True)\r\n \r\n+\r\n # --- 3e. Store Result ---\r\n if saved_successfully:\r\n # Store in the local dictionary for this asset\r\n processed_maps_details_asset[map_type][res_key] = {\r\n@@ -2235,2207 +2264,5 @@\n # Ensure cleanup always happens\r\n self._cleanup_workspace()\r\n \r\n \r\n-# --- End of AssetProcessor Class ---\n-# asset_processor.py\r\n-\r\n-import os\r\n-import shutil\r\n-import tempfile\r\n-import zipfile\r\n-import logging\r\n-import json\r\n-import re\r\n-import time\r\n-from pathlib import Path\r\n-from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n-from typing import List, Dict, Tuple, Optional # Added for type hinting\r\n-from collections import defaultdict # Added for grouping\r\n-\r\n-# Attempt to import image processing libraries\r\n-try:\r\n- import cv2\r\n- import numpy as np\r\n-except ImportError:\r\n- print(\"ERROR: Missing required libraries. Please install opencv-python and numpy:\")\r\n- print(\"pip install opencv-python numpy\")\r\n- exit(1) # Exit if essential libraries are missing\r\n-\r\n-# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n-try:\r\n- import OpenEXR\r\n- import Imath\r\n- _HAS_OPENEXR = True\r\n-except ImportError:\r\n- _HAS_OPENEXR = False\r\n- # Log this information - basic EXR might still work via OpenCV\r\n- logging.debug(\"Optional 'OpenEXR' python package not found. 
EXR saving relies on OpenCV's built-in support.\")\r\n-\r\n-\r\n-# Assuming Configuration class is in configuration.py\r\n-try:\r\n- from configuration import Configuration, ConfigurationError\r\n-except ImportError:\r\n- print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n- print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n- exit(1)\r\n-\r\n-# Use logger defined in main.py (or configure one here if run standalone)\r\n-log = logging.getLogger(__name__)\r\n-# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\n-if not log.hasHandlers():\r\n- logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n-\r\n-\r\n-# --- Custom Exception ---\r\n-class AssetProcessingError(Exception):\r\n- \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n- pass\r\n-\r\n-# --- Helper Functions ---\r\n-def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n- \"\"\"Calculates dimensions maintaining aspect ratio, fitting within target_max_dim.\"\"\"\r\n- if orig_w <= 0 or orig_h <= 0: return (target_max_dim, target_max_dim) # Avoid division by zero\r\n-\r\n- ratio = orig_w / orig_h\r\n- if ratio > 1: # Width is dominant\r\n- target_w = target_max_dim\r\n- target_h = max(1, round(target_w / ratio)) # Ensure height is at least 1\r\n- else: # Height is dominant or square\r\n- target_h = target_max_dim\r\n- target_w = max(1, round(target_h * ratio)) # Ensure width is at least 1\r\n- return int(target_w), int(target_h)\r\n-\r\n-def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n- \"\"\"\r\n- Calculates min, max, mean for a given numpy image array.\r\n- Handles grayscale and multi-channel images. 
Converts to float64 for calculation.\r\n- \"\"\"\r\n- if image_data is None:\r\n- log.warning(\"Attempted to calculate stats on None image data.\")\r\n- return None\r\n- try:\r\n- # Use float64 for calculations to avoid potential overflow/precision issues\r\n- data_float = image_data.astype(np.float64)\r\n-\r\n- # Normalize data_float based on original dtype before calculating stats\r\n- if image_data.dtype == np.uint16:\r\n- log.debug(\"Stats calculation: Normalizing uint16 data to 0-1 range.\")\r\n- data_float /= 65535.0\r\n- elif image_data.dtype == np.uint8:\r\n- log.debug(\"Stats calculation: Normalizing uint8 data to 0-1 range.\")\r\n- data_float /= 255.0\r\n- # Assuming float inputs are already in 0-1 range or similar\r\n-\r\n- log.debug(f\"Stats calculation: data_float dtype: {data_float.dtype}, shape: {data_float.shape}\")\r\n- # Log a few sample values to check range after normalization\r\n- if data_float.size > 0:\r\n- sample_values = data_float.flatten()[:10] # Get first 10 values\r\n- log.debug(f\"Stats calculation: Sample values (first 10) after normalization: {sample_values.tolist()}\")\r\n-\r\n-\r\n- if len(data_float.shape) == 2: # Grayscale (H, W)\r\n- min_val = float(np.min(data_float))\r\n- max_val = float(np.max(data_float))\r\n- mean_val = float(np.mean(data_float))\r\n- stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n- elif len(data_float.shape) == 3: # Color (H, W, C)\r\n- channels = data_float.shape[2]\r\n- min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n- max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n- mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n- # The input data_float is now expected to be in RGB order after conversion in _process_maps\r\n- stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated {channels}-Channel 
Stats (RGB order): Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n- else:\r\n- log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n- return None\r\n- return stats\r\n- except Exception as e:\r\n- log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n- return {\"error\": str(e)}\r\n-\r\n-\r\n-# --- Helper function ---\r\n-def _get_base_map_type(target_map_string: str) -> str:\r\n- \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n- match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n- if match:\r\n- return match.group(1).upper()\r\n- return target_map_string.upper() # Fallback if no number suffix\r\n-\r\n-# --- Asset Processor Class ---\r\n-class AssetProcessor:\r\n- \"\"\"\r\n- Handles the processing pipeline for a single asset (ZIP or folder).\r\n- \"\"\"\r\n- # Define the list of known grayscale map types (adjust as needed)\r\n- GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n-\r\n- def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n- \"\"\"\r\n- Initializes the processor for a given input asset.\r\n-\r\n- Args:\r\n- input_path: Path to the input ZIP file or folder.\r\n- config: The loaded Configuration object.\r\n- output_base_path: The base directory where processed output will be saved.\r\n- overwrite: If True, forces reprocessing even if output exists.\r\n- \"\"\"\r\n- if not isinstance(input_path, Path): input_path = Path(input_path)\r\n- if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n- if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n-\r\n- if not input_path.exists():\r\n- raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n- if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == 
'.zip')):\r\n- raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n-\r\n- self.input_path: Path = input_path\r\n- self.config: Configuration = config\r\n- self.output_base_path: Path = output_base_path\r\n- self.overwrite: bool = overwrite # Store the overwrite flag\r\n-\r\n- self.temp_dir: Path | None = None # Path to the temporary working directory\r\n- self.classified_files: dict[str, list[dict]] = {\r\n- \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n- }\r\n- # These will no longer store instance-wide results, but are kept for potential future use or refactoring\r\n- # self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n- # self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n- # self.metadata_file_path_temp: Path | None = None\r\n- # self.metadata: dict = {} # Metadata is now handled per-asset within the process loop\r\n-\r\n- log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n-\r\n- def process(self) -> Dict[str, List[str]]:\r\n- \"\"\"\r\n- Executes the full processing pipeline for the input path, handling\r\n- multiple assets within a single input if detected.\r\n-\r\n- Returns:\r\n- Dict[str, List[str]]: A dictionary summarizing the status of each\r\n- detected asset within the input:\r\n- {\"processed\": [asset_name1, ...],\r\n- \"skipped\": [asset_name2, ...],\r\n- \"failed\": [asset_name3, ...]}\r\n- \"\"\"\r\n- log.info(f\"Starting processing for input: {self.input_path.name}\")\r\n- overall_status = {\"processed\": [], \"skipped\": [], \"failed\": []}\r\n- supplier_name = self.config.supplier_name # Get once\r\n-\r\n- try:\r\n- self._setup_workspace()\r\n- self._extract_input()\r\n- self._inventory_and_classify_files() # Classifies all files in self.classified_files\r\n-\r\n- # Determine distinct assets and file mapping\r\n- distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n- unmatched_files_paths = [p for p, name in 
file_to_base_name_map.items() if name is None]\r\n- if unmatched_files_paths:\r\n- log.warning(f\"Found {len(unmatched_files_paths)} files not matched to any specific asset base name. They will be copied to each asset's Extra folder.\")\r\n- log.debug(f\"Unmatched files: {[str(p) for p in unmatched_files_paths]}\")\r\n-\r\n-\r\n- # --- Loop through each detected asset ---\r\n- for current_asset_name in distinct_base_names:\r\n- log.info(f\"--- Processing detected asset: '{current_asset_name}' ---\")\r\n- asset_processed = False\r\n- asset_skipped = False\r\n- asset_failed = False\r\n- temp_metadata_path_asset = None # Track metadata file for this asset\r\n- map_details_asset = {} # Store map details for this asset\r\n-\r\n- try:\r\n- # --- Filter classified files for the current asset ---\r\n- filtered_classified_files_asset = defaultdict(list)\r\n- for category, file_list in self.classified_files.items():\r\n- for file_info in file_list:\r\n- file_path = file_info.get('source_path')\r\n- if file_path and file_to_base_name_map.get(file_path) == current_asset_name:\r\n- filtered_classified_files_asset[category].append(file_info)\r\n- log.debug(f\"Asset '{current_asset_name}': Filtered files - Maps: {len(filtered_classified_files_asset.get('maps',[]))}, Models: {len(filtered_classified_files_asset.get('models',[]))}, Extra: {len(filtered_classified_files_asset.get('extra',[]))}, Ignored: {len(filtered_classified_files_asset.get('ignored',[]))}\")\r\n-\r\n- # --- Assign Suffixes Per-Asset ---\r\n- log.debug(f\"Asset '{current_asset_name}': Assigning map type suffixes...\")\r\n- asset_maps = filtered_classified_files_asset.get('maps', [])\r\n- grouped_asset_maps = defaultdict(list)\r\n- for map_info in asset_maps:\r\n- # Group by the base map type stored earlier\r\n- grouped_asset_maps[map_info['map_type']].append(map_info)\r\n-\r\n- for base_map_type, maps_in_group in grouped_asset_maps.items():\r\n- log.debug(f\" Assigning suffixes for base type '{base_map_type}' 
within asset '{current_asset_name}' ({len(maps_in_group)} maps)\")\r\n- # Sorting is already done by _inventory_and_classify_files, just need to assign suffix\r\n- respect_variants = base_map_type in self.config.respect_variant_map_types\r\n- for i, map_info in enumerate(maps_in_group):\r\n- if respect_variants:\r\n- final_map_type = f\"{base_map_type}-{i + 1}\"\r\n- else:\r\n- final_map_type = base_map_type\r\n- log.debug(f\" Updating '{map_info['source_path']}' map_type from '{map_info['map_type']}' to '{final_map_type}'\")\r\n- map_info['map_type'] = final_map_type # Update the map_type in the dictionary\r\n-\r\n- # --- Determine Metadata for this specific asset ---\r\n- asset_specific_metadata = self._determine_single_asset_metadata(current_asset_name, filtered_classified_files_asset)\r\n- current_asset_metadata = {\r\n- \"asset_name\": current_asset_name,\r\n- \"supplier_name\": supplier_name,\r\n- \"asset_category\": asset_specific_metadata.get(\"asset_category\", self.config.default_asset_category),\r\n- \"archetype\": asset_specific_metadata.get(\"archetype\", \"Unknown\"),\r\n- # Initialize fields that will be populated by processing steps\r\n- \"maps_present\": [],\r\n- \"merged_maps\": [],\r\n- \"shader_features\": [],\r\n- \"source_files_in_extra\": [], # Will be populated in _generate_metadata\r\n- \"image_stats_1k\": {},\r\n- \"map_details\": {}, # Will be populated by _process_maps\r\n- \"aspect_ratio_change_string\": \"N/A\"\r\n- }\r\n-\r\n- # --- Skip Check for this specific asset ---\r\n- if not self.overwrite:\r\n- supplier_sanitized = self._sanitize_filename(supplier_name)\r\n- asset_name_sanitized = self._sanitize_filename(current_asset_name)\r\n- final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- metadata_file_path = final_dir / self.config.metadata_filename\r\n- if final_dir.exists() and metadata_file_path.is_file():\r\n- log.info(f\"Output directory and metadata found for asset '{asset_name_sanitized}' and 
overwrite is False. Skipping this asset.\")\r\n- overall_status[\"skipped\"].append(current_asset_name)\r\n- asset_skipped = True\r\n- continue # Skip to the next asset in the loop\r\n- elif self.overwrite:\r\n- log.info(f\"Overwrite flag is set. Processing asset '{current_asset_name}' even if output exists.\")\r\n-\r\n- # --- Process Maps for this asset ---\r\n- processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps = self._process_maps(\r\n- filtered_maps_list=filtered_classified_files_asset.get('maps', []),\r\n- current_asset_metadata=current_asset_metadata # Pass base metadata\r\n- )\r\n- # Update current metadata with results\r\n- current_asset_metadata[\"image_stats_1k\"] = image_stats_asset\r\n- current_asset_metadata[\"aspect_ratio_change_string\"] = aspect_ratio_change_string_asset\r\n- # Add newly ignored rough maps to the asset's specific ignored list\r\n- if ignored_rough_maps:\r\n- filtered_classified_files_asset['ignored'].extend(ignored_rough_maps)\r\n- # Store map details (like source bit depth) collected during processing\r\n- # This was previously stored in self.metadata[\"map_details\"]\r\n- map_details_asset = {k: v for k, v in current_asset_metadata.pop(\"map_details\", {}).items() if k in processed_maps_details_asset}\r\n-\r\n-\r\n- # --- Merge Maps for this asset ---\r\n- merged_maps_details_asset = self._merge_maps(\r\n- processed_maps_details_asset=processed_maps_details_asset,\r\n- filtered_classified_files=filtered_classified_files_asset, # Pass filtered files for original ext lookup\r\n- current_asset_metadata=current_asset_metadata\r\n- )\r\n-\r\n- # --- Generate Metadata for this asset ---\r\n- temp_metadata_path_asset = self._generate_metadata_file(\r\n- current_asset_metadata=current_asset_metadata, # Pass the populated dict\r\n- processed_maps_details_asset=processed_maps_details_asset,\r\n- merged_maps_details_asset=merged_maps_details_asset,\r\n- 
filtered_classified_files_asset=filtered_classified_files_asset,\r\n- unmatched_files_paths=unmatched_files_paths, # Pass the list of unmatched files\r\n- map_details_asset=map_details_asset # Pass the filtered map details\r\n- )\r\n-\r\n- # --- Organize Output Files for this asset ---\r\n- self._organize_output_files(\r\n- current_asset_name=current_asset_name,\r\n- processed_maps_details_asset=processed_maps_details_asset,\r\n- merged_maps_details_asset=merged_maps_details_asset,\r\n- filtered_classified_files_asset=filtered_classified_files_asset,\r\n- unmatched_files_paths=unmatched_files_paths, # Pass unmatched files for copying\r\n- temp_metadata_path=temp_metadata_path_asset\r\n- )\r\n-\r\n- log.info(f\"--- Asset '{current_asset_name}' processed successfully. ---\")\r\n- overall_status[\"processed\"].append(current_asset_name)\r\n- asset_processed = True\r\n-\r\n- except Exception as asset_err:\r\n- log.error(f\"--- Failed processing asset '{current_asset_name}': {asset_err} ---\", exc_info=True)\r\n- overall_status[\"failed\"].append(current_asset_name)\r\n- asset_failed = True\r\n- # Continue to the next asset even if one fails\r\n-\r\n- # --- Determine Final Consolidated Status ---\r\n- # This logic remains the same, interpreting the overall_status dict\r\n- final_status = \"failed\" # Default if nothing else matches\r\n- if overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n- final_status = \"processed\"\r\n- elif overall_status[\"skipped\"] and not overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n- final_status = \"skipped\"\r\n- elif overall_status[\"processed\"] and overall_status[\"failed\"]:\r\n- final_status = \"partial_success\" # Indicate some succeeded, some failed\r\n- elif overall_status[\"processed\"] and overall_status[\"skipped\"] and not overall_status[\"failed\"]:\r\n- final_status = \"processed\" # Consider processed+skipped as processed overall\r\n- elif overall_status[\"skipped\"] and 
overall_status[\"failed\"] and not overall_status[\"processed\"]:\r\n- final_status = \"failed\" # If only skips and fails, report as failed\r\n- # Add any other combinations if needed\r\n-\r\n- log.info(f\"Finished processing input '{self.input_path.name}'. Overall Status: {final_status}. Summary: {overall_status}\")\r\n- # Return the detailed status dictionary instead of just a string\r\n- # The wrapper function in main.py will interpret this\r\n- return overall_status\r\n-\r\n- except Exception as e:\r\n- # Catch errors during initial setup (before asset loop)\r\n- if not isinstance(e, (AssetProcessingError, ConfigurationError)):\r\n- log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name} during setup: {e}\")\r\n- if not isinstance(e, AssetProcessingError):\r\n- raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n- else:\r\n- raise\r\n- finally:\r\n- # Ensure cleanup always happens\r\n- self._cleanup_workspace()\r\n-\r\n- def _setup_workspace(self):\r\n- \"\"\"Creates a temporary directory for processing.\"\"\"\r\n- try:\r\n- self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n- log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n-\r\n- def _extract_input(self):\r\n- \"\"\"Extracts ZIP or copies folder contents to the temporary workspace.\"\"\"\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n-\r\n- log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n- try:\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- zip_ref.extractall(self.temp_dir)\r\n- log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n- elif 
self.input_path.is_dir():\r\n- log.debug(f\"Copying directory contents: {self.input_path}\")\r\n- for item in self.input_path.iterdir():\r\n- destination = self.temp_dir / item.name\r\n- if item.is_dir():\r\n- # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n- try:\r\n- shutil.copytree(item, destination, dirs_exist_ok=True)\r\n- except TypeError: # Fallback for older Python\r\n- if not destination.exists():\r\n- shutil.copytree(item, destination)\r\n- else:\r\n- log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n-\r\n- else:\r\n- shutil.copy2(item, destination)\r\n- log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n- except zipfile.BadZipFile:\r\n- raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n-\r\n- def _inventory_and_classify_files(self):\r\n- \"\"\"\r\n- Scans workspace, classifies files according to preset rules, handling\r\n- 16-bit prioritization and multiple variants of the same base map type.\r\n- \"\"\"\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n-\r\n- log.info(\"Scanning and classifying files...\")\r\n- log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n- all_files_rel = []\r\n- for root, _, files in os.walk(self.temp_dir):\r\n- root_path = Path(root)\r\n- for file in files:\r\n- full_path = root_path / file\r\n- relative_path = full_path.relative_to(self.temp_dir)\r\n- all_files_rel.append(relative_path)\r\n-\r\n- log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n-\r\n- # --- Initialization ---\r\n- processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n- 
potential_map_candidates = [] # List to store potential map file info\r\n- # Reset classified files (important if this method is ever called multiple times)\r\n- self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n-\r\n-\r\n- # --- Step 1: Identify Explicit 'Extra' Files ---\r\n- log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n- compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n- log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path in processed_files: continue\r\n- for compiled_regex in compiled_extra_regex:\r\n- if compiled_regex.search(file_rel_path.name):\r\n- log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n- self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n- processed_files.add(file_rel_path)\r\n- log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n- break # Stop checking extra patterns for this file\r\n-\r\n- # --- Step 2: Identify Model Files ---\r\n- log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n- compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n- log.debug(f\" Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path in processed_files: continue\r\n- for compiled_regex in compiled_model_regex:\r\n- if compiled_regex.search(file_rel_path.name):\r\n- log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n- self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n- processed_files.add(file_rel_path)\r\n- log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n- break # 
Stop checking model patterns for this file\r\n-\r\n- # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n- log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n- # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n- compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n-\r\n- for file_rel_path in all_files_rel:\r\n- # Skip files already classified as Extra or Model\r\n- if file_rel_path in processed_files:\r\n- continue\r\n-\r\n- file_stem = file_rel_path.stem\r\n- match_found = False\r\n-\r\n- # Iterate through base types and their associated regex tuples\r\n- for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n- if match_found: break # Stop checking types for this file once matched\r\n-\r\n- # Get the original keywords list for the current rule index\r\n- # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n- original_rule = None\r\n- # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n- if regex_tuples:\r\n- current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n- if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n- rule_candidate = self.config.map_type_mapping[current_rule_index]\r\n- # Verify it's the correct rule by checking target_type\r\n- if rule_candidate.get(\"target_type\") == base_map_type:\r\n- original_rule = rule_candidate\r\n- else:\r\n- log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. 
Searching...\")\r\n- # Fallback search if index doesn't match (shouldn't happen ideally)\r\n- for idx, rule in enumerate(self.config.map_type_mapping):\r\n- if rule.get(\"target_type\") == base_map_type:\r\n- original_rule = rule\r\n- log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n- break\r\n-\r\n- original_keywords_list = []\r\n- if original_rule and 'keywords' in original_rule:\r\n- original_keywords_list = original_rule['keywords']\r\n- else:\r\n- log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. Keyword indexing may fail.\")\r\n-\r\n- for kw_regex, original_keyword, rule_index in regex_tuples:\r\n- if kw_regex.search(file_stem):\r\n- log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n-\r\n- # Find the index of the matched keyword within its rule's list\r\n- keyword_index_in_rule = -1 # Default if not found\r\n- if original_keywords_list:\r\n- try:\r\n- # Use the original_keyword string directly\r\n- keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n- except ValueError:\r\n- log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? {original_keywords_list}\")\r\n- else:\r\n- log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n-\r\n- # Add candidate only if not already added\r\n- if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n- potential_map_candidates.append({\r\n- 'source_path': file_rel_path,\r\n- 'matched_keyword': original_keyword,\r\n- 'base_map_type': base_map_type,\r\n- 'preset_rule_index': rule_index,\r\n- 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n- 'is_16bit_source': False\r\n- })\r\n- else:\r\n- log.warning(f\" '{file_rel_path}' was already added as a candidate? 
Skipping duplicate add.\")\r\n-\r\n- match_found = True\r\n- break # Stop checking regex tuples for this base_type once matched\r\n-\r\n- log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n-\r\n- # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n- log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n- compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n- for file_rel_path in all_files_rel:\r\n- # Skip if already processed or already identified as a candidate\r\n- if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n- continue\r\n-\r\n- for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n- log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n- match = compiled_regex.search(file_rel_path.name) # Store result\r\n- if match:\r\n- log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n- potential_map_candidates.append({\r\n- 'source_path': file_rel_path,\r\n- 'matched_keyword': 'N/A (16bit variant)', # Placeholder keyword\r\n- 'base_map_type': base_type,\r\n- 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n- 'is_16bit_source': True # Mark as 16-bit immediately\r\n- })\r\n- log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n- # Don't add to processed_files yet, let Step 4 handle filtering\r\n- break # Stop checking bit depth patterns for this file\r\n-\r\n- log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n-\r\n- # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n- log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n- 
compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n- candidates_to_keep = []\r\n- candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n-\r\n- # Mark 16-bit candidates\r\n- for candidate in potential_map_candidates:\r\n- base_type = candidate['base_map_type']\r\n- # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n- if base_type in compiled_bit_depth_regex:\r\n- if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n- candidate['is_16bit_source'] = True\r\n- log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n-\r\n-\r\n- # Identify base types that have a 16-bit version present\r\n- prioritized_16bit_bases = {\r\n- candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n- }\r\n- log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n-\r\n- # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n- for candidate in potential_map_candidates:\r\n- if candidate['is_16bit_source']:\r\n- candidates_to_keep.append(candidate)\r\n- log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} ({candidate['base_map_type']})\")\r\n- elif candidate['base_map_type'] not in prioritized_16bit_bases:\r\n- candidates_to_keep.append(candidate)\r\n- log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n- else:\r\n- # This is an 8-bit candidate whose 16-bit counterpart exists\r\n- candidates_to_ignore.append(candidate)\r\n- log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n-\r\n- # Add ignored 8-bit files to the main ignored list\r\n- for ignored_candidate in candidates_to_ignore:\r\n- self.classified_files[\"ignored\"].append({\r\n- 'source_path': 
ignored_candidate['source_path'],\r\n- 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n- })\r\n- processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n-\r\n- log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n-\r\n- # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n- log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n- # from collections import defaultdict # Moved import to top of file\r\n- grouped_by_base_type = defaultdict(list)\r\n- for candidate in candidates_to_keep:\r\n- grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n-\r\n- final_map_list = []\r\n- for base_map_type, candidates in grouped_by_base_type.items():\r\n- # --- DIAGNOSTIC LOGGING START ---\r\n- candidate_paths_str = [str(c['source_path']) for c in candidates]\r\n- log.debug(f\" [DIAGNOSIS] Processing base_map_type: '{base_map_type}'. Candidates before sort: {candidate_paths_str}\")\r\n- # --- DIAGNOSTIC LOGGING END ---\r\n- log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n-\r\n- # --- NEW SORTING LOGIC ---\r\n- # Sort candidates based on:\r\n- # 1. The index of the rule object in the preset's map_type_mapping list.\r\n- # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n- # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n- candidates.sort(key=lambda c: (\r\n- c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n- c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n- str(c['source_path'])\r\n- ))\r\n- # --- END NEW SORTING LOGIC ---\r\n-\r\n- # Removed diagnostic log\r\n-\r\n- # Add sorted candidates to the final list, but without assigning the suffix yet.\r\n- # Suffix assignment will happen per-asset later.\r\n- for final_candidate in candidates: # Use the directly sorted list\r\n- # Store the base map type for now.\r\n- final_map_list.append({\r\n- \"map_type\": base_map_type, # Store BASE type only\r\n- \"source_path\": final_candidate[\"source_path\"],\r\n- \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n- \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n- \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n- })\r\n- processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n-\r\n- self.classified_files[\"maps\"] = final_map_list\r\n-\r\n- # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n- log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n- remaining_count = 0\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path not in processed_files:\r\n- log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n- self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})\r\n- remaining_count += 1\r\n- # No need to add to processed_files here, it's the final step\r\n- log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n-\r\n- # --- Final Summary ---\r\n- # Note: self.metadata[\"source_files_in_extra\"] is now populated per-asset in _generate_metadata_file\r\n- log.info(f\"File classification complete.\")\r\n- 
log.debug(\"--- Final Classification Summary (v2) ---\")\r\n- map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n- model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n- extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n- ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n- log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n- log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n- log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n- log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n- log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n-\r\n-\r\n- def _determine_base_metadata(self) -> Tuple[List[str], Dict[Path, Optional[str]]]:\r\n- \"\"\"\r\n- Determines distinct asset base names within the input based on preset rules\r\n- and maps each relevant source file to its determined base name.\r\n-\r\n- Returns:\r\n- Tuple[List[str], Dict[Path, Optional[str]]]:\r\n- - A list of unique, sanitized base names found.\r\n- - A dictionary mapping source file relative paths to their determined\r\n- base name string (or None if no base name could be determined for that file).\r\n- \"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- log.info(\"Determining distinct base names and file mapping...\")\r\n-\r\n- # Combine map and model files for base name determination\r\n- relevant_files = self.classified_files.get('maps', []) + self.classified_files.get('models', [])\r\n- if not relevant_files:\r\n- log.warning(\"No map or model files found to determine base name(s).\")\r\n- # Fallback: Use input path name as a single asset\r\n- 
input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n- sanitized_input_name = self._sanitize_filename(input_name or \"UnknownInput\")\r\n- # Map all files (maps, models, extra, ignored) to this fallback name\r\n- all_files_paths = [f['source_path'] for cat in self.classified_files.values() for f in cat if 'source_path' in f]\r\n- file_to_base_name_map = {f_path: sanitized_input_name for f_path in all_files_paths}\r\n- log.info(f\"Using input path name '{sanitized_input_name}' as the single asset name.\")\r\n- return [sanitized_input_name], file_to_base_name_map\r\n-\r\n- # --- Determine Base Names from Files ---\r\n- separator = self.config.source_naming_separator\r\n- indices_dict = self.config.source_naming_indices\r\n- base_index_raw = indices_dict.get('base_name')\r\n- base_index = None\r\n- if base_index_raw is not None:\r\n- try:\r\n- base_index = int(base_index_raw)\r\n- except (ValueError, TypeError):\r\n- log.warning(f\"Could not convert base_name index '{base_index_raw}' to integer. Base name determination might be inaccurate.\")\r\n-\r\n- file_to_base_name_map: Dict[Path, Optional[str]] = {}\r\n- potential_base_names_per_file: Dict[Path, str] = {} # Store potential name for each file path\r\n-\r\n- if isinstance(base_index, int):\r\n- log.debug(f\"Attempting base name extraction using separator '{separator}' and index {base_index}.\")\r\n- for file_info in relevant_files:\r\n- file_path = file_info['source_path']\r\n- stem = file_path.stem\r\n- parts = stem.split(separator)\r\n- if len(parts) > base_index:\r\n- extracted_name = parts[base_index]\r\n- sanitized_name = self._sanitize_filename(extracted_name)\r\n- if sanitized_name: # Ensure we don't add empty names\r\n- potential_base_names_per_file[file_path] = sanitized_name\r\n- log.debug(f\" File '{file_path.name}' -> Potential Base Name: '{sanitized_name}'\")\r\n- else:\r\n- log.debug(f\" File '{file_path.name}' -> Extracted empty name at index {base_index}. 
Marking as None.\")\r\n- file_to_base_name_map[file_path] = None # Explicitly mark as None if extraction yields empty\r\n- else:\r\n- log.debug(f\" File '{file_path.name}' -> Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}. Marking as None.\")\r\n- file_to_base_name_map[file_path] = None # Mark as None if index is invalid for this file\r\n- else:\r\n- log.warning(\"Base name index not configured or invalid. Cannot determine distinct assets based on index. Treating as single asset.\")\r\n- # Fallback to common prefix if no valid index\r\n- stems = [f['source_path'].stem for f in relevant_files]\r\n- common_prefix_name = os.path.commonprefix(stems) if stems else \"\"\r\n- sanitized_common_name = self._sanitize_filename(common_prefix_name or self.input_path.stem or \"UnknownAsset\")\r\n- log.info(f\"Using common prefix '{sanitized_common_name}' as the single asset name.\")\r\n- # Map all relevant files to this single name\r\n- for file_info in relevant_files:\r\n- potential_base_names_per_file[file_info['source_path']] = sanitized_common_name\r\n-\r\n- # --- Consolidate Distinct Names and Final Mapping ---\r\n- distinct_base_names_set = set(potential_base_names_per_file.values())\r\n- distinct_base_names = sorted(list(distinct_base_names_set)) # Sort for consistent processing order\r\n-\r\n- # Populate the final map, including files that didn't match the index rule (marked as None earlier)\r\n- for file_info in relevant_files:\r\n- file_path = file_info['source_path']\r\n- if file_path not in file_to_base_name_map: # If not already marked as None\r\n- file_to_base_name_map[file_path] = potential_base_names_per_file.get(file_path) # Assign determined name or None if somehow missed\r\n-\r\n- # Add files from 'extra' and 'ignored' to the map, marking them as None for base name\r\n- for category in ['extra', 'ignored']:\r\n- for file_info in self.classified_files.get(category, []):\r\n- file_path = file_info['source_path']\r\n- if file_path not in 
file_to_base_name_map: # Avoid overwriting if somehow already mapped\r\n- file_to_base_name_map[file_path] = None\r\n- log.debug(f\" File '{file_path.name}' (Category: {category}) -> Marked as None (No Base Name).\")\r\n-\r\n-\r\n- if not distinct_base_names:\r\n- # This case should be rare due to fallbacks, but handle it.\r\n- log.warning(\"No distinct base names could be determined. Using input name as fallback.\")\r\n- input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n- fallback_name = self._sanitize_filename(input_name or \"FallbackAsset\")\r\n- distinct_base_names = [fallback_name]\r\n- # Remap all files to this single fallback name\r\n- file_to_base_name_map = {f_path: fallback_name for f_path in file_to_base_name_map.keys()}\r\n-\r\n-\r\n- log.info(f\"Determined {len(distinct_base_names)} distinct asset base name(s): {distinct_base_names}\")\r\n- log.debug(f\"File-to-BaseName Map ({len(file_to_base_name_map)} entries): { {str(k): v for k, v in file_to_base_name_map.items()} }\") # Log string paths for readability\r\n-\r\n- return distinct_base_names, file_to_base_name_map\r\n-\r\n- def _determine_single_asset_metadata(self, asset_base_name: str, filtered_classified_files: Dict[str, List[Dict]]) -> Dict[str, str]:\r\n- \"\"\"\r\n- Determines the asset_category and archetype for a single, specific asset\r\n- based on its filtered list of classified files.\r\n-\r\n- Args:\r\n- asset_base_name: The determined base name for this specific asset.\r\n- filtered_classified_files: A dictionary containing only the classified\r\n- files (maps, models, etc.) 
belonging to this asset.\r\n-\r\n- Returns:\r\n- A dictionary containing {\"asset_category\": str, \"archetype\": str}.\r\n- \"\"\"\r\n- log.debug(f\"Determining category and archetype for asset: '{asset_base_name}'\")\r\n- determined_category = self.config.default_asset_category # Start with default\r\n- determined_archetype = \"Unknown\"\r\n-\r\n- # --- Determine Asset Category ---\r\n- if filtered_classified_files.get(\"models\"):\r\n- determined_category = \"Asset\"\r\n- log.debug(f\" Category set to 'Asset' for '{asset_base_name}' due to model file presence.\")\r\n- else:\r\n- # Check for Decal keywords only if not an Asset\r\n- decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n- found_decal = False\r\n- # Check map names first for decal keywords\r\n- candidate_files = [f['source_path'] for f in filtered_classified_files.get('maps', [])]\r\n- # Fallback to checking extra files if no maps found for this asset\r\n- if not candidate_files:\r\n- candidate_files = [f['source_path'] for f in filtered_classified_files.get('extra', [])]\r\n-\r\n- if decal_keywords:\r\n- for file_path in candidate_files:\r\n- # Check against the specific file's name within this asset's context\r\n- for keyword in decal_keywords:\r\n- if keyword.lower() in file_path.name.lower():\r\n- determined_category = \"Decal\"\r\n- found_decal = True; break\r\n- if found_decal: break\r\n- if found_decal: log.debug(f\" Category set to 'Decal' for '{asset_base_name}' due to keyword match.\")\r\n- # If not Asset or Decal, it remains the default (e.g., \"Texture\")\r\n-\r\n- log.debug(f\" Determined Category for '{asset_base_name}': {determined_category}\")\r\n-\r\n- # --- Determine Archetype (Usage) ---\r\n- archetype_rules = self.config.archetype_rules\r\n- # Use stems from maps and models belonging *only* to this asset\r\n- check_stems = [f['source_path'].stem.lower() for f in filtered_classified_files.get('maps', [])]\r\n- 
check_stems.extend([f['source_path'].stem.lower() for f in filtered_classified_files.get('models', [])])\r\n- # Also check the determined base name itself\r\n- check_stems.append(asset_base_name.lower())\r\n-\r\n- if check_stems:\r\n- best_match_archetype = \"Unknown\"\r\n- # Using simple \"first match wins\" logic as before\r\n- for rule in archetype_rules:\r\n- if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n- arch_name, rules_dict = rule\r\n- match_any = rules_dict.get(\"match_any\", [])\r\n- matched_any_keyword = False\r\n- if match_any:\r\n- for keyword in match_any:\r\n- kw_lower = keyword.lower()\r\n- for stem in check_stems:\r\n- if kw_lower in stem: # Simple substring check\r\n- matched_any_keyword = True\r\n- break # Found a match for this keyword\r\n- if matched_any_keyword: break # Found a match for this rule's keywords\r\n-\r\n- if matched_any_keyword:\r\n- best_match_archetype = arch_name\r\n- log.debug(f\" Archetype match '{arch_name}' for '{asset_base_name}' based on keywords: {match_any}\")\r\n- break # First rule match wins\r\n-\r\n- determined_archetype = best_match_archetype\r\n-\r\n- log.debug(f\" Determined Archetype for '{asset_base_name}': {determined_archetype}\")\r\n-\r\n- return {\"asset_category\": determined_category, \"archetype\": determined_archetype}\r\n-\r\n-\r\n- def _process_maps(self, filtered_maps_list: List[Dict], current_asset_metadata: Dict) -> Tuple[Dict[str, Dict[str, Dict]], Dict[str, Dict], str, List[Dict]]:\r\n- \"\"\"\r\n- Loads, processes, resizes, and saves classified map files for a specific asset.\r\n-\r\n- Args:\r\n- filtered_maps_list: List of map dictionaries belonging to the current asset.\r\n- current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n-\r\n- Returns:\r\n- Tuple containing:\r\n- - processed_maps_details_asset: Dict mapping map_type to resolution details.\r\n- - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n- - 
aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n- - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n- \"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n- log.info(f\"Processing identified map files for asset '{asset_name}'...\")\r\n-\r\n- # Initialize results specific to this asset\r\n- processed_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n- image_stats_asset: Dict[str, Dict] = {}\r\n- map_details_asset: Dict[str, Dict] = {} # Store details like source bit depth, gloss inversion\r\n- aspect_ratio_change_string_asset: str = \"N/A\"\r\n- ignored_rough_maps: List[Dict] = [] # Store ignored native rough maps\r\n-\r\n- # --- Settings retrieval ---\r\n- resolutions = self.config.image_resolutions\r\n- stats_res_key = self.config.calculate_stats_resolution\r\n- stats_target_dim = resolutions.get(stats_res_key)\r\n- if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. 
Stats skipped for '{asset_name}'.\")\r\n- gloss_keywords = self.config.source_glossiness_keywords\r\n- target_pattern = self.config.target_filename_pattern\r\n- base_name = asset_name # Use the asset name passed in\r\n-\r\n- # --- Pre-process Glossiness -> Roughness ---\r\n- preprocessed_data = {}\r\n- derived_from_gloss_flag = {}\r\n- gloss_map_info_for_rough, native_rough_map_info = None, None\r\n- # Use the filtered list for this asset\r\n- for map_info in filtered_maps_list:\r\n- if map_info['map_type'] == 'ROUGH':\r\n- is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n- if is_gloss: gloss_map_info_for_rough = map_info\r\n- else: native_rough_map_info = map_info\r\n-\r\n- rough_source_to_use = None\r\n- if gloss_map_info_for_rough:\r\n- rough_source_to_use = gloss_map_info_for_rough\r\n- derived_from_gloss_flag['ROUGH'] = True\r\n- if native_rough_map_info:\r\n- log.warning(f\"Asset '{asset_name}': Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n- # Instead of modifying lists, just add the ignored info to be returned\r\n- ignored_rough_maps.append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n- # We still need to ensure the native rough map isn't processed later in the main loop\r\n- elif native_rough_map_info:\r\n- rough_source_to_use = native_rough_map_info\r\n- derived_from_gloss_flag['ROUGH'] = False\r\n-\r\n- if derived_from_gloss_flag.get('ROUGH'):\r\n- # Ensure rough_source_to_use is not None before proceeding\r\n- if rough_source_to_use:\r\n- source_path = self.temp_dir / rough_source_to_use['source_path']\r\n- log.info(f\"Asset '{asset_name}': Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n- try:\r\n- img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n- if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n- original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n- if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n- if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n- elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n- else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n- # Store tuple: (inverted_float_data, original_dtype)\r\n- preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n- log.debug(f\"Asset '{asset_name}': Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n- except Exception as e: log.error(f\"Asset '{asset_name}': Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n- else:\r\n- log.error(f\"Asset '{asset_name}': Gloss map identified for ROUGH, but source info is missing.\")\r\n-\r\n-\r\n- # --- Main Processing Loop 
---\r\n- # Use the filtered list passed to the function\r\n- maps_to_process = list(filtered_maps_list)\r\n- for map_info in maps_to_process:\r\n- map_type = map_info['map_type']\r\n- source_path_rel = map_info['source_path']\r\n-\r\n- # Skip processing native rough map if gloss was prioritized and ignored\r\n- if map_type == 'ROUGH' and any(ignored['source_path'] == source_path_rel for ignored in ignored_rough_maps):\r\n- log.debug(f\"Asset '{asset_name}': Skipping processing of native rough map '{source_path_rel}' as gloss version was prioritized.\")\r\n- continue\r\n-\r\n- original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n- log.info(f\"-- Asset '{asset_name}': Processing Map Type: {map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n- img_processed, source_dtype = None, None\r\n- # Store details locally for this asset\r\n- current_map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n-\r\n- try:\r\n- # --- 1. 
Get/Load Source Data ---\r\n- if map_type in preprocessed_data:\r\n- log.debug(f\"Asset '{asset_name}': Using pre-processed data for {map_type}.\")\r\n- # Unpack tuple: (inverted_float_data, original_dtype)\r\n- img_processed, source_dtype = preprocessed_data[map_type]\r\n- else:\r\n- full_source_path = self.temp_dir / source_path_rel\r\n- read_flag = cv2.IMREAD_UNCHANGED if map_type.upper() == 'MASK' else (cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED)\r\n- log.debug(f\"Asset '{asset_name}': Loading source {source_path_rel.name} with flag: {'UNCHANGED' if read_flag == cv2.IMREAD_UNCHANGED else ('GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'DEFAULT')}\")\r\n- img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n- if img_loaded is None:\r\n- raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n-\r\n- if len(img_loaded.shape) == 3:\r\n- log.debug(f\"Asset '{asset_name}': Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n- img_processed = cv2.cvtColor(img_loaded, cv2.COLOR_BGR2RGB)\r\n- else:\r\n- img_processed = img_loaded.copy()\r\n-\r\n- source_dtype = img_loaded.dtype\r\n- log.debug(f\"Asset '{asset_name}': Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape} (After potential BGR->RGB conversion)\")\r\n-\r\n- current_map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n-\r\n- # --- 2. 
Handle Alpha Mask ---\r\n- if map_type == 'MASK' and img_processed is not None:\r\n- # [ Existing MASK handling logic remains largely the same, just add asset_name to logs ]\r\n- log.debug(f\"Asset '{asset_name}': Processing as MASK type.\")\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Initial shape: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n- shape = img_processed.shape\r\n- if len(shape) == 3 and shape[2] == 4:\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Extracting alpha channel (4-channel source).\")\r\n- img_processed = img_processed[:, :, 3]\r\n- elif len(shape) == 3 and shape[2] == 3:\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Converting BGR to Grayscale (3-channel source).\")\r\n- img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n- elif len(shape) == 2:\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Source is already grayscale (2-channel shape).\")\r\n- else:\r\n- log.warning(f\"Asset '{asset_name}': MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\")\r\n-\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Shape after channel extraction/conversion: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n-\r\n- if img_processed.dtype != np.uint8:\r\n- log.debug(f\"Asset '{asset_name}': Converting mask from {img_processed.dtype} to uint8.\")\r\n- if img_processed.dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- else: img_processed = img_processed.astype(np.uint8)\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Shape after dtype conversion: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n-\r\n-\r\n- if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n- orig_h, orig_w = img_processed.shape[:2]\r\n- # Use local dictionary for this asset's results\r\n- processed_maps_details_asset.setdefault(map_type, {})\r\n- max_original_dimension = max(orig_w, orig_h)\r\n-\r\n- # --- 3. 
Resize & Save Loop ---\r\n- for res_key, target_dim in resolutions.items():\r\n- if target_dim > max_original_dimension:\r\n- log.debug(f\"Asset '{asset_name}': Skipping {res_key} ({target_dim}px) for {map_type}: Target larger than original ({max_original_dimension}px).\")\r\n- continue\r\n- log.debug(f\"Asset '{asset_name}': Processing {map_type} for resolution: {res_key}...\")\r\n- if orig_w <= 0 or orig_h <= 0: log.warning(f\"Asset '{asset_name}': Invalid original dims for {map_type}, skipping resize {res_key}.\"); continue\r\n- target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n- interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n- try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n- except Exception as resize_err: log.error(f\"Asset '{asset_name}': Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n-\r\n- # --- 3a. Calculate Stats ---\r\n- if res_key == stats_res_key and stats_target_dim:\r\n- log.debug(f\"Asset '{asset_name}': Calculating stats for {map_type} using {res_key} image...\")\r\n- stats = _calculate_image_stats(img_resized)\r\n- # Store stats locally for this asset\r\n- if stats: image_stats_asset[map_type] = stats\r\n- else: log.warning(f\"Asset '{asset_name}': Stats calculation failed for {map_type} at {res_key}.\")\r\n-\r\n- # Calculate aspect change string (only once per asset)\r\n- lowest_res_key = min(resolutions, key=resolutions.get)\r\n- # Use local variable for check and assignment\r\n- if aspect_ratio_change_string_asset == \"N/A\" and res_key == lowest_res_key:\r\n- log.debug(f\"Asset '{asset_name}': Aspect ratio calculation condition met.\")\r\n- try:\r\n- aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n- aspect_ratio_change_string_asset = aspect_string # Store locally\r\n- log.debug(f\"Asset '{asset_name}': Stored aspect ratio 
change string using lowest res '{res_key}': '{aspect_string}'\")\r\n- except Exception as aspect_err:\r\n- log.error(f\"Asset '{asset_name}': Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", exc_info=True)\r\n- aspect_ratio_change_string_asset = \"Error\" # Indicate calculation failure locally\r\n-\r\n- # --- 3b. Determine Output Bit Depth & Format ---\r\n- # [ Existing logic for determining bit depth and format remains the same ]\r\n- bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n- current_dtype = img_resized.dtype\r\n- output_dtype_target, output_bit_depth = None, 8\r\n- if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n- elif bit_depth_rule == 'respect':\r\n- if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8\r\n-\r\n- # --- 3c. Determine Output Format ---\r\n- # [ Existing logic for determining output format remains the same, add asset_name to logs ]\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format()\r\n- threshold = self.config.resolution_threshold_for_jpg\r\n- force_lossless = map_type in self.config.force_lossless_map_types\r\n-\r\n- if force_lossless:\r\n- log.debug(f\"Asset '{asset_name}': Format forced to lossless for map type '{map_type}'.\")\r\n- # ... 
(rest of force_lossless logic) ...\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else:\r\n- if output_format != \"png\":\r\n- log.warning(f\"Asset '{asset_name}': Primary 16-bit format '{output_format}' not PNG for forced lossless. Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # 8-bit lossless\r\n- output_format = fmt_8bit_config\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- log.warning(f\"Asset '{asset_name}': Configured 8-bit format '{output_format}' is not PNG. Forced lossless for '{map_type}' will use PNG instead.\")\r\n- output_format = \"png\"; output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n-\r\n- elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'; output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Asset '{asset_name}': Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- else:\r\n- # ... 
(rest of format determination logic, adding asset_name to logs) ...\r\n- if bit_depth_rule == 'force_8bit':\r\n- output_format = 'png'; output_ext = '.png'\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n- elif original_extension == '.jpg' and output_bit_depth == 8:\r\n- output_format = 'jpg'; output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Asset '{asset_name}': Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n- elif original_extension == '.tif':\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- log.debug(f\"Asset '{asset_name}': Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n- else:\r\n- output_format = fallback_fmt_16; output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- else:\r\n- output_format = 'png'; 
output_ext = '.png'\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n- else:\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- output_format = fallback_fmt_16; output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n- else:\r\n- output_format = fmt_8bit_config; output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\":\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Asset '{asset_name}': Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n-\r\n-\r\n- img_to_save = img_resized.copy()\r\n- # --- Apply Dtype Conversion ---\r\n- # [ Existing dtype conversion logic remains the same ]\r\n- if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n- if img_to_save.dtype == np.uint16: 
img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n- if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n- if needs_float16 and img_to_save.dtype != np.float16:\r\n- if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n-\r\n-\r\n- # --- 3d. Construct Filename & Save ---\r\n- # Use base_name (which is the current asset's name)\r\n- filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n- output_path_temp = self.temp_dir / filename\r\n- log.debug(f\"Asset '{asset_name}': Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n- log.debug(f\"Asset '{asset_name}': Saving {map_type} ({res_key}): Final image shape: {img_to_save.shape}, dtype: {img_to_save.dtype}\")\r\n- saved_successfully, actual_format_saved = False, output_format\r\n- try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n- except Exception as save_err:\r\n- log.error(f\"Asset '{asset_name}': Save failed ({output_format}) for {map_type} {res_key}: {save_err}\")\r\n- # --- Try Fallback ---\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Asset '{asset_name}': Attempting fallback {fallback_fmt_16} for {map_type} 
{res_key}\")\r\n- # [ Existing fallback logic remains the same, add asset_name to logs ]\r\n- actual_format_saved = fallback_fmt_16; output_ext = f\".{fallback_fmt_16}\";\r\n- filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n- output_path_temp = self.temp_dir / filename\r\n- save_params_fallback = []\r\n- img_fallback = None; target_fallback_dtype = np.uint16\r\n- if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif fallback_fmt_16 == \"tif\": pass\r\n-\r\n- if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n- if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n- log.error(f\"Asset '{asset_name}': Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. Skipping resolution.\")\r\n- continue\r\n- img_scaled = img_to_save * 65535.0\r\n- img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save\r\n- else: log.error(f\"Asset '{asset_name}': Cannot convert {img_to_save.dtype} for fallback.\"); continue\r\n-\r\n- try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Asset '{asset_name}': Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Asset '{asset_name}': Fallback save failed for {map_type} {res_key}: {fallback_err}\", exc_info=True)\r\n-\r\n-\r\n- # --- 3e. 
Store Result ---\r\n- if saved_successfully:\r\n- # Store in the local dictionary for this asset\r\n- processed_maps_details_asset[map_type][res_key] = {\r\n- \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n- \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n- \"format\": actual_format_saved\r\n- }\r\n- current_map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n-\r\n- except Exception as map_proc_err:\r\n- log.error(f\"Asset '{asset_name}': Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n- # Store error in the local dictionary for this asset\r\n- processed_maps_details_asset.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n-\r\n- # Store details locally for this asset\r\n- map_details_asset[map_type] = current_map_details\r\n-\r\n- # --- Return results for this specific asset ---\r\n- log.info(f\"Finished processing map files for asset '{asset_name}'.\")\r\n- # Note: Final metadata updates (maps_present, shader_features) are handled in the main process loop\r\n- return processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps\r\n-\r\n-\r\n- def _merge_maps(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict) -> Dict[str, Dict[str, Dict]]:\r\n- \"\"\"\r\n- Merges channels from different maps for a specific asset based on rules in configuration.\r\n-\r\n- Args:\r\n- processed_maps_details_asset: Details of successfully processed maps for this asset.\r\n- filtered_classified_files: Classified files dictionary filtered for this asset.\r\n- current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n-\r\n- Returns:\r\n- Dict[str, Dict[str, Dict]]: Details of successfully merged maps for this asset.\r\n- \"\"\"\r\n- if not self.temp_dir: raise 
AssetProcessingError(\"Workspace not setup.\")\r\n- asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n-\r\n- merge_rules = self.config.map_merge_rules\r\n- log.info(f\"Asset '{asset_name}': Applying {len(merge_rules)} map merging rule(s)...\")\r\n-\r\n- # Initialize results for this asset\r\n- merged_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n-\r\n- for rule_index, rule in enumerate(merge_rules):\r\n- output_map_type = rule.get(\"output_map_type\")\r\n- inputs_mapping = rule.get(\"inputs\")\r\n- defaults = rule.get(\"defaults\", {})\r\n- rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n-\r\n- if not output_map_type or not inputs_mapping:\r\n- log.warning(f\"Asset '{asset_name}': Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs'. Rule: {rule}\")\r\n- continue\r\n-\r\n- log.info(f\"-- Asset '{asset_name}': Applying merge rule for '{output_map_type}' --\")\r\n-\r\n- # --- Determine required inputs and their common resolutions for *this asset* ---\r\n- required_input_types = set(inputs_mapping.values())\r\n- if not required_input_types:\r\n- log.warning(f\"Asset '{asset_name}': Skipping merge rule '{output_map_type}': No input map types defined.\")\r\n- continue\r\n-\r\n- possible_resolutions_per_input = []\r\n- for input_type in required_input_types:\r\n- # Use the processed map details passed for this asset\r\n- if input_type in processed_maps_details_asset:\r\n- res_keys = {res for res, details in processed_maps_details_asset[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n- if not res_keys:\r\n- log.warning(f\"Asset '{asset_name}': Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n- possible_resolutions_per_input = []\r\n- break\r\n- possible_resolutions_per_input.append(res_keys)\r\n- else:\r\n- log.warning(f\"Asset '{asset_name}': Required input map type '{input_type}' 
for merge rule '{output_map_type}' was not processed.\")\r\n- possible_resolutions_per_input = []\r\n- break\r\n-\r\n- if not possible_resolutions_per_input:\r\n- log.warning(f\"Asset '{asset_name}': Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. Skipping rule.\")\r\n- continue\r\n-\r\n- common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n-\r\n- if not common_resolutions:\r\n- log.warning(f\"Asset '{asset_name}': No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n- continue\r\n- log.debug(f\"Asset '{asset_name}': Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n-\r\n- # --- Loop through common resolutions ---\r\n- res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n- if not res_order:\r\n- log.warning(f\"Asset '{asset_name}': Common resolutions {common_resolutions} do not match config. 
Skipping merge for '{output_map_type}'.\")\r\n- continue\r\n-\r\n- sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n- target_pattern = self.config.target_filename_pattern\r\n- base_name = asset_name # Use current asset's name\r\n-\r\n- for current_res_key in sorted_res_keys:\r\n- log.debug(f\"Asset '{asset_name}': Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n- try:\r\n- loaded_inputs = {}\r\n- input_bit_depths = set()\r\n- input_original_extensions = {}\r\n-\r\n- # --- Load required input maps for this asset and resolution ---\r\n- possible_to_load = True\r\n- target_channels = list(inputs_mapping.keys())\r\n-\r\n- for map_type in required_input_types:\r\n- # Use processed_maps_details_asset passed in\r\n- res_details = processed_maps_details_asset.get(map_type, {}).get(current_res_key)\r\n- if not res_details or 'path' not in res_details:\r\n- log.warning(f\"Asset '{asset_name}': Input map '{map_type}' missing details or path for resolution '{current_res_key}'. Cannot merge.\")\r\n- possible_to_load = False; break\r\n-\r\n- # Find original extension from the filtered classified data for this asset\r\n- original_ext = '.png' # Default\r\n- found_original = False\r\n- # Use filtered_classified_files passed in\r\n- for classified_map in filtered_classified_files.get(\"maps\", []):\r\n- if classified_map['map_type'].startswith(map_type):\r\n- original_ext = classified_map.get('original_extension', '.png')\r\n- found_original = True\r\n- break\r\n- if not found_original:\r\n- log.warning(f\"Asset '{asset_name}': Could not find original extension for merge input '{map_type}'. 
Defaulting to '.png'.\")\r\n-\r\n- input_original_extensions[map_type] = original_ext\r\n-\r\n- # Load the image\r\n- input_file_path = self.temp_dir / res_details['path']\r\n- read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- log.debug(f\"Asset '{asset_name}': Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n- img = cv2.imread(str(input_file_path), read_flag)\r\n- if img is None:\r\n- raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n- loaded_inputs[map_type] = img\r\n- input_bit_depths.add(res_details.get('bit_depth', 8))\r\n-\r\n- if len(img.shape) == 3:\r\n- log.debug(f\"Asset '{asset_name}': DEBUG: Merge input '{input_file_path.name}' ({map_type}) loaded shape {img.shape}, dtype {img.dtype}.\")\r\n-\r\n- if not possible_to_load: continue\r\n-\r\n- # --- Determine dimensions and target_dim ---\r\n- first_map_type = next(iter(required_input_types))\r\n- h, w = loaded_inputs[first_map_type].shape[:2]\r\n- first_res_details = processed_maps_details_asset.get(first_map_type, {}).get(current_res_key)\r\n- target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n- num_target_channels = len(target_channels)\r\n-\r\n- # --- Determine Output Bit Depth ---\r\n- max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n- output_bit_depth = 8\r\n- if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n- output_bit_depth = 16\r\n- log.debug(f\"Asset '{asset_name}': Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n-\r\n- # --- Prepare and Merge Channels ---\r\n- # [ Existing channel preparation and merging logic remains the same ]\r\n- merged_channels_float32 = []\r\n- for 
target_channel in target_channels:\r\n- source_map_type = inputs_mapping.get(target_channel)\r\n- channel_data_float32 = None\r\n- if source_map_type and source_map_type in loaded_inputs:\r\n- img_input = loaded_inputs[source_map_type]\r\n- if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n- elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n- else: img_float = img_input.astype(np.float32)\r\n- num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n- if num_source_channels >= 3:\r\n- if target_channel == 'R': channel_data_float32 = img_float[:, :, 0]\r\n- elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n- elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2]\r\n- elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n- elif num_source_channels == 1 or len(img_float.shape) == 2:\r\n- channel_data_float32 = img_float.reshape(h, w)\r\n- if channel_data_float32 is None:\r\n- default_val = defaults.get(target_channel)\r\n- if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n- channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n- merged_channels_float32.append(channel_data_float32)\r\n-\r\n- if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n- merged_image_float32 = cv2.merge(merged_channels_float32)\r\n-\r\n- # --- Final Data Type Conversion ---\r\n- img_final_merged = None\r\n- if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n- else: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n-\r\n- # --- Determine Output Format ---\r\n- # [ Existing format determination logic remains the same, 
add asset_name to logs ]\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format()\r\n- threshold = self.config.resolution_threshold_for_jpg\r\n- force_lossless = output_map_type in self.config.force_lossless_map_types\r\n-\r\n- if force_lossless:\r\n- log.debug(f\"Asset '{asset_name}': Format forced to lossless for merged map type '{output_map_type}'.\")\r\n- # ... (rest of force_lossless logic) ...\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else:\r\n- if output_format != \"png\":\r\n- log.warning(f\"Asset '{asset_name}': Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # 8-bit lossless\r\n- output_format = fmt_8bit_config\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- log.warning(f\"Asset '{asset_name}': Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n- output_format = \"png\"; output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n-\r\n- elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'; output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Asset '{asset_name}': Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- else:\r\n- # ... (rest of hierarchy logic, add asset_name to logs) ...\r\n- involved_extensions = set(input_original_extensions.values())\r\n- log.debug(f\"Asset '{asset_name}': Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n- highest_format_str = 'jpg'\r\n- if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n- elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n- elif '.png' in involved_extensions: highest_format_str = 'png'\r\n-\r\n- final_output_format = highest_format_str\r\n- if highest_format_str == 'tif':\r\n- if output_bit_depth == 16:\r\n- final_output_format = primary_fmt_16\r\n- log.debug(f\"Asset '{asset_name}': Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n- else:\r\n- final_output_format = 'png'\r\n- log.debug(f\"Asset '{asset_name}': Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n- else:\r\n- log.debug(f\"Asset '{asset_name}': Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold not met)\")\r\n-\r\n- output_format = final_output_format\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, 
cv2.IMWRITE_EXR_TYPE_HALF])\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\":\r\n- output_ext = \".jpg\"\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- else:\r\n- log.error(f\"Asset '{asset_name}': Unsupported final output format '{output_format}' for merged map '{output_map_type}'. Skipping save.\")\r\n- continue\r\n-\r\n-\r\n- # --- JPG 8-bit Check ---\r\n- if output_format == \"jpg\" and output_bit_depth == 16:\r\n- log.warning(f\"Asset '{asset_name}': Output format is JPG, but merge rule resulted in 16-bit target. Forcing merged output to 8-bit for JPG save.\")\r\n- img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- output_bit_depth = 8\r\n-\r\n- # --- Save Merged Map ---\r\n- image_to_save = img_final_merged\r\n- if needs_float16 and image_to_save.dtype != np.float16:\r\n- # [ Existing float16 conversion logic ]\r\n- if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- else: log.warning(f\"Asset '{asset_name}': Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n-\r\n- # Use base_name (current asset's name)\r\n- merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n- merged_output_path_temp = self.temp_dir / merged_filename\r\n- log.debug(f\"Asset '{asset_name}': Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n-\r\n- # --- Save with Fallback ---\r\n- # [ Existing save/fallback logic, add 
asset_name to logs ]\r\n- saved_successfully = False\r\n- actual_format_saved = output_format\r\n- try:\r\n- cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n- log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n- saved_successfully = True\r\n- except Exception as save_err:\r\n- log.error(f\"Asset '{asset_name}': Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Asset '{asset_name}': Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n- # ... [ Fallback save logic, add asset_name to logs ] ...\r\n- actual_format_saved = fallback_fmt_16\r\n- output_ext = f\".{fallback_fmt_16}\"\r\n- merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n- merged_output_path_temp = self.temp_dir / merged_filename\r\n- save_params_fallback = []\r\n- img_fallback = None\r\n- target_fallback_dtype = np.uint16\r\n-\r\n- if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif fallback_fmt_16 == \"tif\": pass\r\n-\r\n- if image_to_save.dtype == np.float16:\r\n- if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(f\"Asset '{asset_name}': NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n- img_scaled = image_to_save * 65535.0\r\n- img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n- else: log.error(f\"Asset '{asset_name}': Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n-\r\n- try: 
cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Asset '{asset_name}': Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n-\r\n-\r\n- # --- Record details locally ---\r\n- if saved_successfully:\r\n- merged_maps_details_asset[output_map_type][current_res_key] = {\r\n- \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n- \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n- }\r\n- # Note: Adding to metadata[\"merged_maps\"] list happens in the main process loop\r\n-\r\n- except Exception as merge_res_err:\r\n- log.error(f\"Asset '{asset_name}': Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n- # Store error locally for this asset\r\n- merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n-\r\n- log.info(f\"Asset '{asset_name}': Finished applying map merging rules.\")\r\n- # Return the details for this asset\r\n- return merged_maps_details_asset\r\n-\r\n-\r\n- def _generate_metadata_file(self, current_asset_metadata: Dict, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], map_details_asset: Dict[str, Dict]) -> Path:\r\n- \"\"\"\r\n- Gathers metadata for a specific asset and writes it to a temporary JSON file.\r\n-\r\n- Args:\r\n- current_asset_metadata: Base metadata for this asset (name, category, archetype, etc.).\r\n- processed_maps_details_asset: Details of processed maps for this asset.\r\n- merged_maps_details_asset: Details of merged maps for this 
asset.\r\n- filtered_classified_files_asset: Classified files belonging only to this asset.\r\n- unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n- map_details_asset: Dictionary containing details like source bit depth, gloss inversion per map type.\r\n-\r\n-\r\n- Returns:\r\n- Path: The path to the generated temporary metadata file.\r\n- \"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- asset_name = current_asset_metadata.get(\"asset_name\")\r\n- if not asset_name or asset_name == \"UnknownAssetName\":\r\n- log.warning(\"Asset name unknown during metadata generation, file may be incomplete or incorrectly named.\")\r\n- asset_name = \"UnknownAsset_Metadata\" # Fallback for filename\r\n-\r\n- log.info(f\"Generating metadata file for asset '{asset_name}'...\")\r\n- # Start with the base metadata passed in for this asset\r\n- final_metadata = current_asset_metadata.copy()\r\n-\r\n- # Populate map details from the specific asset's processing results\r\n- final_metadata[\"processed_map_resolutions\"] = {}\r\n- for map_type, res_dict in processed_maps_details_asset.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n- if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys)\r\n-\r\n- final_metadata[\"merged_map_resolutions\"] = {}\r\n- for map_type, res_dict in merged_maps_details_asset.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n- if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n-\r\n- # Determine maps present based on successful processing for this asset\r\n- final_metadata[\"maps_present\"] = sorted(list(processed_maps_details_asset.keys()))\r\n- final_metadata[\"merged_maps\"] = sorted(list(merged_maps_details_asset.keys()))\r\n-\r\n- # Determine shader features based on this asset's maps\r\n- features = set()\r\n- for 
map_type, details in map_details_asset.items(): # Use map_details_asset passed in\r\n- if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n- if details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n- res_details = processed_maps_details_asset.get(map_type, {})\r\n- if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n- final_metadata[\"shader_features\"] = sorted(list(features))\r\n-\r\n- # Determine source files in this asset's Extra folder\r\n- # Includes:\r\n- # - Files originally classified as 'Extra' or 'Unrecognised' belonging to this asset.\r\n- # - Files originally classified as 'Ignored' belonging to this asset.\r\n- # - All 'unmatched' files (belonging to no specific asset).\r\n- source_files_in_extra_set = set()\r\n- for category in ['extra', 'ignored']:\r\n- for file_info in filtered_classified_files_asset.get(category, []):\r\n- source_files_in_extra_set.add(str(file_info['source_path']))\r\n- # Add all unmatched files\r\n- for file_path in unmatched_files_paths:\r\n- source_files_in_extra_set.add(str(file_path))\r\n- final_metadata[\"source_files_in_extra\"] = sorted(list(source_files_in_extra_set))\r\n-\r\n- # Add image stats and map details specific to this asset\r\n- final_metadata[\"image_stats_1k\"] = current_asset_metadata.get(\"image_stats_1k\", {}) # Get from passed metadata\r\n- final_metadata[\"map_details\"] = map_details_asset # Use map_details_asset passed in\r\n- final_metadata[\"aspect_ratio_change_string\"] = current_asset_metadata.get(\"aspect_ratio_change_string\", \"N/A\") # Get from passed metadata\r\n-\r\n-\r\n- # Add processing info\r\n- final_metadata[\"_processing_info\"] = {\r\n- \"preset_used\": self.config.preset_name,\r\n- \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n- \"input_source\": str(self.input_path.name), # Add original input source\r\n- }\r\n-\r\n- # 
Sort lists just before writing\r\n- for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n- if key in final_metadata and isinstance(final_metadata[key], list): final_metadata[key].sort()\r\n-\r\n- # Use asset name in temporary filename to avoid conflicts\r\n- metadata_filename = f\"{asset_name}_{self.config.metadata_filename}\"\r\n- output_path = self.temp_dir / metadata_filename\r\n- log.debug(f\"Writing metadata for asset '{asset_name}' to temporary file: {output_path}\")\r\n- try:\r\n- with open(output_path, 'w', encoding='utf-8') as f:\r\n- json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n- log.info(f\"Metadata file '{metadata_filename}' generated successfully for asset '{asset_name}'.\")\r\n- return output_path # Return the path to the temporary file\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to write metadata file {output_path} for asset '{asset_name}': {e}\") from e\r\n-\r\n-\r\n- def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n- \"\"\"\r\n- Calculates the aspect ratio change string (e.g., \"EVEN\", \"X133\") based on original prototype logic.\r\n- Returns the string representation.\r\n- \"\"\"\r\n- if original_width <= 0 or original_height <= 0:\r\n- log.warning(\"Cannot calculate aspect ratio change with zero original dimensions.\")\r\n- return \"InvalidInput\"\r\n-\r\n- # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)\r\n- if resized_width <= 0 or resized_height <= 0:\r\n- log.warning(\"Cannot calculate aspect ratio change with zero resized dimensions.\")\r\n- return \"InvalidResize\"\r\n-\r\n- # Original logic from user feedback\r\n- width_change_percentage = ((resized_width - original_width) / original_width) * 100\r\n- height_change_percentage = ((resized_height - original_height) / original_height) * 100\r\n-\r\n- 
normalized_width_change = width_change_percentage / 100\r\n- normalized_height_change = height_change_percentage / 100\r\n-\r\n- normalized_width_change = min(max(normalized_width_change + 1, 0), 2)\r\n- normalized_height_change = min(max(normalized_height_change + 1, 0), 2)\r\n-\r\n- # Handle potential zero division if one dimension change is exactly -100% (normalized to 0)\r\n- # If both are 0, aspect ratio is maintained. If one is 0, the other dominates.\r\n- if normalized_width_change == 0 and normalized_height_change == 0:\r\n- closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1\r\n- elif normalized_width_change == 0:\r\n- closest_value_to_one = abs(normalized_height_change)\r\n- elif normalized_height_change == 0:\r\n- closest_value_to_one = abs(normalized_width_change)\r\n- else:\r\n- closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))\r\n-\r\n- # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0\r\n- epsilon = 1e-9\r\n- scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one\r\n-\r\n- scaled_normalized_width_change = scale_factor * normalized_width_change\r\n- scaled_normalized_height_change = scale_factor * normalized_height_change\r\n-\r\n- output_width = round(scaled_normalized_width_change, decimals)\r\n- output_height = round(scaled_normalized_height_change, decimals)\r\n-\r\n- # Convert to int if exactly 1.0 after rounding\r\n- if abs(output_width - 1.0) < epsilon: output_width = 1\r\n- if abs(output_height - 1.0) < epsilon: output_height = 1\r\n-\r\n- # Determine output string\r\n- if original_width == original_height or abs(output_width - output_height) < epsilon:\r\n- output = \"EVEN\"\r\n- elif output_width != 1 and output_height == 1:\r\n- output = f\"X{str(output_width).replace('.', '')}\"\r\n- elif output_height != 1 and output_width == 1:\r\n- output = 
f\"Y{str(output_height).replace('.', '')}\"\r\n- else:\r\n- # Both changed relative to each other\r\n- output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n-\r\n- log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n- return output\r\n-\r\n- def _sanitize_filename(self, name: str) -> str:\r\n- \"\"\"Removes or replaces characters invalid for filenames/directory names.\"\"\"\r\n- # ... (Implementation from Response #51) ...\r\n- if not isinstance(name, str): name = str(name)\r\n- name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n- name = re.sub(r'_+', '_', name)\r\n- name = name.strip('_')\r\n- if not name: name = \"invalid_name\"\r\n- return name\r\n-\r\n- def _organize_output_files(self, current_asset_name: str, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], temp_metadata_path: Path):\r\n- \"\"\"\r\n- Moves/copies processed files for a specific asset from the temp dir to the final output structure.\r\n-\r\n- Args:\r\n- current_asset_name: The sanitized name of the asset being organized.\r\n- processed_maps_details_asset: Details of processed maps for this asset.\r\n- merged_maps_details_asset: Details of merged maps for this asset.\r\n- filtered_classified_files_asset: Classified files dictionary filtered for this asset.\r\n- unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n- temp_metadata_path: Path to the temporary metadata file for this asset.\r\n- \"\"\"\r\n- if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n- if not current_asset_name or current_asset_name == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name 
missing for organization.\")\r\n- supplier_name = self.config.supplier_name # Get supplier name from config\r\n- if not supplier_name: raise AssetProcessingError(\"Supplier name missing from config.\")\r\n-\r\n- supplier_sanitized = self._sanitize_filename(supplier_name)\r\n- asset_name_sanitized = self._sanitize_filename(current_asset_name) # Already sanitized, but ensure consistency\r\n- final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- log.info(f\"Organizing output files for asset '{asset_name_sanitized}' into: {final_dir}\")\r\n-\r\n- try:\r\n- # Handle overwrite logic specifically for this asset's directory\r\n- if final_dir.exists() and self.overwrite:\r\n- log.warning(f\"Output directory exists for '{asset_name_sanitized}' and overwrite is True. Removing existing directory: {final_dir}\")\r\n- try:\r\n- shutil.rmtree(final_dir)\r\n- except Exception as rm_err:\r\n- raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} for asset '{asset_name_sanitized}' during overwrite: {rm_err}\") from rm_err\r\n- # Note: Skip check should prevent this if overwrite is False, but mkdir handles exist_ok=True\r\n-\r\n- final_dir.mkdir(parents=True, exist_ok=True)\r\n- except Exception as e:\r\n- if not isinstance(e, AssetProcessingError):\r\n- raise AssetProcessingError(f\"Failed to create final dir {final_dir} for asset '{asset_name_sanitized}': {e}\") from e\r\n- else:\r\n- raise\r\n-\r\n- # --- Helper for moving files ---\r\n- # Keep track of files successfully moved to avoid copying them later as 'unmatched'\r\n- moved_source_files = set()\r\n- def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n- if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc}.\"); return\r\n- source_abs = self.temp_dir / src_rel_path\r\n- # Use the original filename from the source path for the destination\r\n- dest_abs = dest_dir / src_rel_path.name\r\n- 
try:\r\n- if source_abs.exists():\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n- dest_dir.mkdir(parents=True, exist_ok=True)\r\n- shutil.move(str(source_abs), str(dest_abs))\r\n- moved_source_files.add(src_rel_path) # Track successfully moved source files\r\n- else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc}: {source_abs}\")\r\n- except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n-\r\n- # --- Helper for copying files (for unmatched extras) ---\r\n- def _safe_copy(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n- if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc} copy.\"); return\r\n- # Skip copying if this source file was already moved (e.g., it was an 'Extra' for this specific asset)\r\n- if src_rel_path in moved_source_files:\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Skipping copy of {file_desc} '{src_rel_path.name}' as it was already moved.\")\r\n- return\r\n- source_abs = self.temp_dir / src_rel_path\r\n- dest_abs = dest_dir / src_rel_path.name\r\n- try:\r\n- if source_abs.exists():\r\n- # Avoid copying if the exact destination file already exists (e.g., from a previous asset's copy)\r\n- if dest_abs.exists():\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Destination file already exists for {file_desc} copy: {dest_abs.name}. 
Skipping copy.\")\r\n- return\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Copying {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n- dest_dir.mkdir(parents=True, exist_ok=True)\r\n- shutil.copy2(str(source_abs), str(dest_abs)) # Use copy2 to preserve metadata\r\n- else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc} copy: {source_abs}\")\r\n- except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed copying {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n-\r\n-\r\n- # --- Move Processed/Merged Maps ---\r\n- for details_dict in [processed_maps_details_asset, merged_maps_details_asset]:\r\n- for map_type, res_dict in details_dict.items():\r\n- if 'error' in res_dict: continue\r\n- for res_key, details in res_dict.items():\r\n- if isinstance(details, dict) and 'path' in details:\r\n- _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n-\r\n- # --- Move Models specific to this asset ---\r\n- for model_info in filtered_classified_files_asset.get('models', []):\r\n- _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n-\r\n- # --- Move Metadata File ---\r\n- if temp_metadata_path and temp_metadata_path.exists():\r\n- final_metadata_path = final_dir / self.config.metadata_filename # Use standard name\r\n- try:\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Moving metadata file: {temp_metadata_path.name} -> {final_metadata_path.relative_to(self.output_base_path)}\")\r\n- shutil.move(str(temp_metadata_path), str(final_metadata_path))\r\n- # No need to add metadata path to moved_source_files as it's uniquely generated\r\n- except Exception as e:\r\n- log.error(f\"Asset '{asset_name_sanitized}': Failed moving metadata file '{temp_metadata_path.name}': {e}\", exc_info=True)\r\n- else:\r\n- log.warning(f\"Asset '{asset_name_sanitized}': Temporary metadata file path missing or file does not exist: 
{temp_metadata_path}\")\r\n-\r\n-\r\n- # --- Handle Extra/Ignored/Unmatched Files ---\r\n- extra_subdir_name = self.config.extra_files_subdir\r\n- extra_dir = final_dir / extra_subdir_name\r\n- if filtered_classified_files_asset.get('extra') or filtered_classified_files_asset.get('ignored') or unmatched_files_paths:\r\n- try:\r\n- extra_dir.mkdir(parents=True, exist_ok=True)\r\n-\r\n- # Move asset-specific Extra/Ignored files\r\n- files_to_move_extra = filtered_classified_files_asset.get('extra', []) + filtered_classified_files_asset.get('ignored', [])\r\n- if files_to_move_extra:\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Moving {len(files_to_move_extra)} asset-specific files to '{extra_subdir_name}/'...\")\r\n- for file_info in files_to_move_extra:\r\n- _safe_move(file_info.get('source_path'), extra_dir, f\"extra/ignored file ({file_info.get('reason', 'Unknown')})\")\r\n-\r\n- # Copy unmatched files\r\n- if unmatched_files_paths:\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Copying {len(unmatched_files_paths)} unmatched files to '{extra_subdir_name}/'...\")\r\n- for file_path in unmatched_files_paths:\r\n- _safe_copy(file_path, extra_dir, \"unmatched file\")\r\n-\r\n- except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed creating/moving/copying to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n-\r\n- log.info(f\"Finished organizing output for asset '{asset_name_sanitized}'.\")\r\n-\r\n-\r\n- def _cleanup_workspace(self):\r\n- \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n- # ... 
(Implementation from Response #45) ...\r\n- if self.temp_dir and self.temp_dir.exists():\r\n- try:\r\n- log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n- shutil.rmtree(self.temp_dir)\r\n- self.temp_dir = None\r\n- log.debug(\"Temporary workspace cleaned up successfully.\")\r\n- except Exception as e:\r\n- log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n-\r\n- # --- Prediction Method ---\r\n- def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n- \"\"\"\r\n- Predicts the final output structure (supplier, asset name) and attempts\r\n- to predict output filenames for potential map files based on naming conventions.\r\n- Does not perform full processing or image loading.\r\n-\r\n- Returns:\r\n- tuple[str | None, str | None, dict[str, str] | None]:\r\n- (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n- where file_predictions_dict maps input filename -> predicted output filename.\r\n- Returns None if prediction fails critically.\r\n- \"\"\"\r\n- log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n- try:\r\n- # 1. Get Supplier Name\r\n- supplier_name = self.config.supplier_name\r\n- if not supplier_name:\r\n- log.warning(\"Supplier name not found in configuration during prediction.\")\r\n- return None\r\n-\r\n- # 2. 
List Input Filenames/Stems\r\n- candidate_stems = set() # Use set for unique stems\r\n- filenames = []\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- try:\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- # Get only filenames, ignore directories\r\n- filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n- except zipfile.BadZipFile:\r\n- log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n- return None\r\n- except Exception as zip_err:\r\n- log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n- return None # Cannot proceed if we can't list files\r\n- elif self.input_path.is_dir():\r\n- try:\r\n- for item in self.input_path.iterdir():\r\n- if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n- filenames.append(item.name)\r\n- # Note: Not walking subdirs for prediction to keep it fast\r\n- except Exception as dir_err:\r\n- log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n- return None\r\n-\r\n- if not filenames:\r\n- log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n- return None # Return None if no files found\r\n-\r\n- # 3. 
Lightweight Classification for Stems and Potential Maps\r\n- map_type_mapping = self.config.map_type_mapping\r\n- model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n- separator = self.config.source_naming_separator\r\n- processed_filenames = set() # Track full filenames processed\r\n- potential_map_files = {} # Store fname -> potential map_type\r\n-\r\n- for fname in filenames:\r\n- if fname in processed_filenames: continue\r\n-\r\n- fstem = Path(fname).stem\r\n- fstem_lower = fstem.lower()\r\n- name_parts = fstem_lower.split(separator)\r\n-\r\n- # Check map rules first\r\n- map_matched = False\r\n- for mapping_rule in map_type_mapping:\r\n- source_keywords, standard_map_type = mapping_rule\r\n- if standard_map_type not in self.config.standard_map_types: continue\r\n- for keyword in source_keywords:\r\n- kw_lower = keyword.lower().strip('*')\r\n- if kw_lower in name_parts:\r\n- is_exact_match = any(part == kw_lower for part in name_parts)\r\n- if is_exact_match:\r\n- candidate_stems.add(fstem) # Add unique stem\r\n- potential_map_files[fname] = standard_map_type # Store potential type\r\n- processed_filenames.add(fname)\r\n- map_matched = True\r\n- break # Found keyword match for this rule\r\n- if map_matched: break # Found a rule match for this file\r\n- if map_matched: continue # Move to next filename if identified as map\r\n-\r\n- # Check model patterns if not a map\r\n- for pattern in model_patterns:\r\n- if fnmatch(fname.lower(), pattern.lower()):\r\n- candidate_stems.add(fstem) # Still add stem for base name determination\r\n- processed_filenames.add(fname)\r\n- # Don't add models to potential_map_files\r\n- break # Found model match\r\n-\r\n- # Note: Files matching neither maps nor models are ignored for prediction details\r\n-\r\n- log.debug(f\"[PREDICTION] Potential map files identified: {potential_map_files}\") # DEBUG PREDICTION\r\n- candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n- 
log.debug(f\"[PREDICTION] Candidate stems identified: {candidate_stems_list}\") # DEBUG PREDICTION\r\n- if not candidate_stems_list:\r\n- log.warning(f\"Prediction: No relevant map/model stems found in {self.input_path.name}. Using input name as fallback.\")\r\n- # Fallback: Use the input path's name itself if no stems found\r\n- base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n- determined_base_name = base_name_fallback\r\n- else:\r\n- # 4. Replicate _determine_base_metadata logic for base name\r\n- determined_base_name = \"UnknownAssetName\"\r\n- separator = self.config.source_naming_separator\r\n- indices_dict = self.config.source_naming_indices\r\n- base_index_raw = indices_dict.get('base_name')\r\n- log.debug(f\"[PREDICTION] Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}'\") # DEBUG PREDICTION\r\n-\r\n- base_index = None\r\n- if base_index_raw is not None:\r\n- try:\r\n- base_index = int(base_index_raw) # Use explicit conversion like in main logic\r\n- except (ValueError, TypeError):\r\n- log.warning(f\"[PREDICTION] Could not convert base_name index '{base_index_raw}' to integer.\")\r\n-\r\n- if isinstance(base_index, int):\r\n- potential_base_names = set()\r\n- for stem in candidate_stems_list: # Iterate over the list\r\n- parts = stem.split(separator)\r\n- log.debug(f\"[PREDICTION] Processing stem: '{stem}', Parts: {parts}\") # DEBUG PREDICTION\r\n- if len(parts) > base_index:\r\n- extracted_name = parts[base_index]\r\n- potential_base_names.add(extracted_name)\r\n- log.debug(f\"[PREDICTION] Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG PREDICTION\r\n- else:\r\n- log.debug(f\"[PREDICTION] Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG PREDICTION\r\n- if len(potential_base_names) == 1:\r\n- determined_base_name = potential_base_names.pop()\r\n- log.debug(f\"[PREDICTION] 
Determined base name '{determined_base_name}' from structured parts (index {base_index}).\") # DEBUG PREDICTION\r\n- elif len(potential_base_names) > 1:\r\n- log.debug(f\"[PREDICTION] Multiple potential base names found from index {base_index}: {potential_base_names}. Falling back to common prefix.\") # DEBUG PREDICTION\r\n- determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n- # else: Use common prefix below\r\n-\r\n- if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n- log.debug(\"[PREDICTION] Falling back to common prefix for base name determination (structured parts failed or no index).\") # DEBUG PREDICTION\r\n- determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n-\r\n- # 5. Sanitize Names\r\n- final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n- log.debug(f\"[PREDICTION] Final determined base name for prediction: '{final_base_name}'\") # DEBUG PREDICTION\r\n- final_supplier_name = self._sanitize_filename(supplier_name)\r\n-\r\n- # 6. 
Predict Output Filenames\r\n- file_predictions = {}\r\n- target_pattern = self.config.target_filename_pattern\r\n- # Use highest resolution key as a placeholder for prediction\r\n- highest_res_key = \"Res?\" # Fallback\r\n- if self.config.image_resolutions:\r\n- highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n-\r\n- for input_fname, map_type in potential_map_files.items():\r\n- # Assume PNG for prediction, extension might change based on bit depth rules later\r\n- # but this gives a good idea of the renaming.\r\n- # A more complex prediction could check bit depth rules.\r\n- predicted_ext = \"png\" # Simple assumption for preview\r\n- try:\r\n- predicted_fname = target_pattern.format(\r\n- base_name=final_base_name,\r\n- map_type=map_type,\r\n- resolution=highest_res_key, # Use placeholder resolution\r\n- ext=predicted_ext\r\n- )\r\n- file_predictions[input_fname] = predicted_fname\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n- file_predictions[input_fname] = \"[Filename Format Error]\"\r\n-\r\n-\r\n- log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n- return final_supplier_name, final_base_name, file_predictions\r\n-\r\n- except Exception as e:\r\n- log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- return None\r\n-\r\n-\r\n- # --- New Detailed Prediction Method ---\r\n- def get_detailed_file_predictions(self) -> list[dict] | None:\r\n- \"\"\"\r\n- Performs extraction and classification to provide a detailed list of all\r\n- files found within the input and their predicted status/output name,\r\n- handling multiple potential assets within the input.\r\n-\r\n- Returns:\r\n- list[dict] | None: A list of dictionaries, each representing a file:\r\n- {'original_path': str,\r\n- 'predicted_asset_name': str | None,\r\n- 'predicted_output_name': str | None,\r\n- 'status': str,\r\n- 'details': str | None}\r\n- Returns None if a critical error occurs during setup/classification.\r\n- \"\"\"\r\n- log.info(f\"Getting detailed file predictions for input: {self.input_path.name}\")\r\n- results = []\r\n- all_files_in_workspace = [] # Keep track of all files found\r\n-\r\n- try:\r\n- # --- Perform necessary setup and classification ---\r\n- self._setup_workspace()\r\n- self._extract_input()\r\n- # Run classification - this populates self.classified_files\r\n- self._inventory_and_classify_files()\r\n-\r\n- # --- Determine distinct assets and file mapping ---\r\n- # This uses the results from _inventory_and_classify_files\r\n- distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n- log.debug(f\"Prediction: Determined base names: {distinct_base_names}\")\r\n- log.debug(f\"Prediction: File to base name map: { {str(k):v for k,v in file_to_base_name_map.items()} }\")\r\n-\r\n- # --- Prepare for filename prediction 
---\r\n- target_pattern = self.config.target_filename_pattern\r\n- highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n- if self.config.image_resolutions:\r\n- highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n-\r\n- # --- Process all classified files ---\r\n- all_classified_files_with_category = []\r\n- for category, file_list in self.classified_files.items():\r\n- for file_info in file_list:\r\n- # Add category info for easier processing below\r\n- file_info['category'] = category\r\n- all_classified_files_with_category.append(file_info)\r\n- # Also collect all original paths found by classification\r\n- if 'source_path' in file_info:\r\n- all_files_in_workspace.append(file_info['source_path'])\r\n-\r\n-\r\n- # --- Generate results for each file ---\r\n- processed_paths = set() # Track paths already added to results\r\n- for file_info in all_classified_files_with_category:\r\n- original_path = file_info.get(\"source_path\")\r\n- if not original_path or original_path in processed_paths:\r\n- continue # Skip if path missing or already processed\r\n-\r\n- original_path_str = str(original_path)\r\n- processed_paths.add(original_path) # Mark as processed\r\n-\r\n- # Determine predicted asset name and status\r\n- predicted_asset_name = file_to_base_name_map.get(original_path) # Can be None\r\n- category = file_info['category'] # maps, models, extra, ignored\r\n- reason = file_info.get('reason') # Specific reason for extra/ignored\r\n- status = \"Unknown\"\r\n- details = None\r\n- predicted_output_name = None # Usually original name, except for maps\r\n-\r\n- if category == \"maps\":\r\n- status = \"Mapped\"\r\n- map_type = file_info.get(\"map_type\", \"UnknownType\")\r\n- details = f\"[{map_type}]\"\r\n- if file_info.get(\"is_16bit_source\"): details += \" (16-bit)\"\r\n- # Predict map output name using its determined asset name\r\n- if predicted_asset_name:\r\n- try:\r\n- predicted_ext = \"png\" # Assume 
PNG for prediction simplicity\r\n- predicted_output_name = target_pattern.format(\r\n- base_name=predicted_asset_name,\r\n- map_type=map_type,\r\n- resolution=highest_res_key,\r\n- ext=predicted_ext\r\n- )\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction format error for map {original_path_str}: {fmt_err}\")\r\n- predicted_output_name = \"[Format Error]\"\r\n- details += f\" (Format Key Error: {fmt_err})\"\r\n- except Exception as pred_err:\r\n- log.warning(f\"Prediction error for map {original_path_str}: {pred_err}\")\r\n- predicted_output_name = \"[Prediction Error]\"\r\n- details += f\" (Error: {pred_err})\"\r\n- else:\r\n- # Should not happen for maps if _determine_base_metadata worked correctly\r\n- log.warning(f\"Map file '{original_path_str}' has no predicted asset name.\")\r\n- predicted_output_name = \"[No Asset Name]\"\r\n-\r\n- elif category == \"models\":\r\n- status = \"Model\"\r\n- details = \"[Model]\"\r\n- predicted_output_name = original_path.name # Models keep original name\r\n-\r\n- elif category == \"ignored\":\r\n- status = \"Ignored\"\r\n- details = f\"Ignored ({reason or 'Unknown reason'})\"\r\n- predicted_output_name = None # Ignored files have no output\r\n-\r\n- elif category == \"extra\":\r\n- if predicted_asset_name is None:\r\n- # This is an \"Unmatched Extra\" file (includes Unrecognised and explicit Extras without a base name)\r\n- status = \"Unmatched Extra\"\r\n- details = f\"[Unmatched Extra ({reason or 'N/A'})]\" # Include original reason if available\r\n- elif reason == 'Unrecognised':\r\n- # Unrecognised but belongs to a specific asset\r\n- status = \"Unrecognised\"\r\n- details = \"[Unrecognised]\"\r\n- else:\r\n- # Explicitly matched an 'extra' pattern and belongs to an asset\r\n- status = \"Extra\"\r\n- details = f\"Extra ({reason})\"\r\n- predicted_output_name = original_path.name # Extra files keep original name\r\n-\r\n- else:\r\n- log.warning(f\"Unknown category '{category}' encountered during prediction 
for {original_path_str}\")\r\n- status = \"Error\"\r\n- details = f\"[Unknown Category: {category}]\"\r\n- predicted_output_name = original_path.name\r\n-\r\n-\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_asset_name\": predicted_asset_name, # May be None\r\n- \"predicted_output_name\": predicted_output_name,\r\n- \"status\": status,\r\n- \"details\": details\r\n- })\r\n-\r\n- # Add any files found during walk but missed by classification (should be rare)\r\n- # These are likely unmatched as well.\r\n- for file_path in all_files_in_workspace:\r\n- if file_path not in processed_paths:\r\n- log.warning(f\"File found in workspace but not classified: {file_path}. Adding as Unmatched Extra.\")\r\n- results.append({\r\n- \"original_path\": str(file_path),\r\n- \"predicted_asset_name\": None, # Explicitly None as it wasn't mapped\r\n- \"predicted_output_name\": file_path.name,\r\n- \"status\": \"Unmatched Extra\",\r\n- \"details\": \"[Missed Classification]\"\r\n- })\r\n-\r\n-\r\n- log.info(f\"Detailed prediction complete for input '{self.input_path.name}'. 
Found {len(results)} files.\")\r\n- # Sort results by original path for consistent display\r\n- results.sort(key=lambda x: x.get(\"original_path\", \"\"))\r\n- return results\r\n-\r\n- except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n- log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- return None # Indicate critical failure\r\n- finally:\r\n- # Ensure cleanup always happens\r\n- self._cleanup_workspace()\r\n-\r\n-\r\n # --- End of AssetProcessor Class ---\n\\ No newline at end of file\n" }, { "date": 1745338857037, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1529,10 +1529,22 @@\n # --- Save with Fallback ---\r\n # [ Existing save/fallback logic, add asset_name to logs ]\r\n saved_successfully = False\r\n actual_format_saved = output_format\r\n+\r\n+ # --- Conditional RGB -> BGR Conversion before saving ---\r\n+ img_save_final = image_to_save # Default to original\r\n+ is_3_channel = len(image_to_save.shape) == 3 and image_to_save.shape[2] == 3\r\n+ if is_3_channel and not output_format.startswith(\"exr\"):\r\n+ log.debug(f\"Asset '{asset_name}': Converting RGB to BGR for saving merged {output_map_type} ({current_res_key}) as {output_format}\")\r\n+ try:\r\n+ img_save_final = cv2.cvtColor(image_to_save, cv2.COLOR_RGB2BGR)\r\n+ except Exception as cvt_err:\r\n+ log.error(f\"Asset '{asset_name}': Failed RGB->BGR conversion for merged {output_map_type} ({current_res_key}): {cvt_err}. 
Saving original.\")\r\n+ img_save_final = image_to_save # Fallback to original if conversion fails\r\n+\r\n try:\r\n- cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n+ cv2.imwrite(str(merged_output_path_temp), img_save_final, save_params)\r\n log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n saved_successfully = True\r\n except Exception as save_err:\r\n log.error(f\"Asset '{asset_name}': Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n@@ -1556,12 +1568,28 @@\n img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n else: log.error(f\"Asset '{asset_name}': Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n \r\n- try: cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err: log.error(f\"Asset '{asset_name}': Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n+ # --- Conditional RGB -> BGR Conversion for fallback ---\r\n+ img_fallback_save_final = img_fallback # Default to original fallback image\r\n+ is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3\r\n+ # Use actual_format_saved for the check here\r\n+ if is_3_channel_fallback and not actual_format_saved.startswith(\"exr\"):\r\n+ log.debug(f\"Asset '{asset_name}': Converting RGB to BGR for fallback saving merged {output_map_type} ({current_res_key}) as {actual_format_saved}\")\r\n+ try:\r\n+ img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)\r\n+ except Exception as cvt_err_fb:\r\n+ log.error(f\"Asset '{asset_name}': Failed 
RGB->BGR conversion for fallback merged {output_map_type} ({current_res_key}): {cvt_err_fb}. Saving original fallback.\")\r\n+ img_fallback_save_final = img_fallback # Fallback to original if conversion fails\r\n \r\n+ try:\r\n+ cv2.imwrite(str(merged_output_path_temp), img_fallback_save_final, save_params_fallback)\r\n+ saved_successfully = True\r\n+ log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n+ except Exception as fallback_err:\r\n+ log.error(f\"Asset '{asset_name}': Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n \r\n+\r\n # --- Record details locally ---\r\n if saved_successfully:\r\n merged_maps_details_asset[output_map_type][current_res_key] = {\r\n \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n" }, { "date": 1745340381031, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1,7 +1,8 @@\n # asset_processor.py\r\n \r\n import os\r\n+import math\r\n import shutil\r\n import tempfile\r\n import zipfile\r\n import logging\r\n@@ -125,8 +126,30 @@\n if match:\r\n return match.group(1).upper()\r\n return target_map_string.upper() # Fallback if no number suffix\r\n \r\n+\r\n+def _is_power_of_two(n: int) -> bool:\r\n+ \"\"\"Checks if a number is a power of two.\"\"\"\r\n+ return (n > 0) and (n & (n - 1) == 0)\r\n+\r\n+def get_nearest_pot(value: int) -> int:\r\n+ \"\"\"Finds the nearest power of two to the given value.\"\"\"\r\n+ if value <= 0:\r\n+ return 1 # Or raise error, POT must be positive\r\n+ if _is_power_of_two(value):\r\n+ return value\r\n+\r\n+ # Calculate the powers of two below and above the value\r\n+ lower_pot = 1 << (value.bit_length() - 1)\r\n+ upper_pot = 1 << value.bit_length()\r\n+\r\n+ # Determine which power of two is closer\r\n+ if (value - lower_pot) < (upper_pot - value):\r\n+ return lower_pot\r\n+ else:\r\n+ return 
upper_pot\r\n+\r\n # --- Asset Processor Class ---\r\n class AssetProcessor:\r\n \"\"\"\r\n Handles the processing pipeline for a single asset (ZIP or folder).\r\n" }, { "date": 1745340409961, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -55,20 +55,36 @@\n pass\r\n \r\n # --- Helper Functions ---\r\n def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n- \"\"\"Calculates dimensions maintaining aspect ratio, fitting within target_max_dim.\"\"\"\r\n- if orig_w <= 0 or orig_h <= 0: return (target_max_dim, target_max_dim) # Avoid division by zero\r\n+ \"\"\"\r\n+ Calculates target dimensions by first scaling to fit target_max_dim\r\n+ while maintaining aspect ratio, then finding the nearest power-of-two\r\n+ value for each resulting dimension (Stretch/Squash to POT).\r\n+ \"\"\"\r\n+ if orig_w <= 0 or orig_h <= 0:\r\n+ # Fallback to target_max_dim if original dimensions are invalid\r\n+ pot_dim = get_nearest_pot(target_max_dim)\r\n+ log.warning(f\"Invalid original dimensions ({orig_w}x{orig_h}). 
Falling back to nearest POT of target_max_dim: {pot_dim}x{pot_dim}\")\r\n+ return (pot_dim, pot_dim)\r\n \r\n+ # Step 1: Calculate intermediate dimensions maintaining aspect ratio\r\n ratio = orig_w / orig_h\r\n if ratio > 1: # Width is dominant\r\n- target_w = target_max_dim\r\n- target_h = max(1, round(target_w / ratio)) # Ensure height is at least 1\r\n+ scaled_w = target_max_dim\r\n+ scaled_h = max(1, round(scaled_w / ratio))\r\n else: # Height is dominant or square\r\n- target_h = target_max_dim\r\n- target_w = max(1, round(target_h * ratio)) # Ensure width is at least 1\r\n- return int(target_w), int(target_h)\r\n+ scaled_h = target_max_dim\r\n+ scaled_w = max(1, round(scaled_h * ratio))\r\n \r\n+ # Step 2: Find the nearest power of two for each scaled dimension\r\n+ pot_w = get_nearest_pot(scaled_w)\r\n+ pot_h = get_nearest_pot(scaled_h)\r\n+\r\n+ log.debug(f\"POT Calc: Orig=({orig_w}x{orig_h}), MaxDim={target_max_dim} -> Scaled=({scaled_w}x{scaled_h}) -> POT=({pot_w}x{pot_h})\")\r\n+\r\n+ return int(pot_w), int(pot_h)\r\n+\r\n def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n \"\"\"\r\n Calculates min, max, mean for a given numpy image array.\r\n Handles grayscale and multi-channel images. 
Converts to float64 for calculation.\r\n" }, { "date": 1745343337790, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -208,8 +208,381 @@\n # self.metadata: dict = {} # Metadata is now handled per-asset within the process loop\r\n \r\n log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n \r\n+\r\n+ # --- New Helper Function: Load and Transform Source ---\r\n+ def _load_and_transform_source(self, source_path_rel: Path, map_type: str, target_resolution_key: str, is_gloss_source: bool, cache: dict) -> Tuple[Optional[np.ndarray], Optional[np.dtype]]:\r\n+ \"\"\"\r\n+ Loads a source image file, performs initial prep (BGR->RGB, Gloss->Rough),\r\n+ resizes it to the target resolution, and caches the result.\r\n+\r\n+ Args:\r\n+ source_path_rel: Relative path to the source file within the temp directory.\r\n+ map_type: The standard map type (e.g., \"NRM\", \"ROUGH\").\r\n+ target_resolution_key: The key for the target resolution (e.g., \"4K\").\r\n+ is_gloss_source: Boolean indicating if this source should be treated as gloss for inversion.\r\n+ cache: The dictionary used for caching loaded/resized data.\r\n+\r\n+ Returns:\r\n+ Tuple containing:\r\n+ - Resized NumPy array (float32) or None if loading/processing fails.\r\n+ - Original source NumPy dtype or None if loading fails.\r\n+ \"\"\"\r\n+ if not self.temp_dir:\r\n+ log.error(\"Temporary directory not set in _load_and_transform_source.\")\r\n+ return None, None\r\n+\r\n+ cache_key = (source_path_rel, target_resolution_key)\r\n+ if cache_key in cache:\r\n+ log.debug(f\"CACHE HIT: Returning cached data for {source_path_rel} at {target_resolution_key}\")\r\n+ return cache[cache_key] # Return tuple (image_data, source_dtype)\r\n+\r\n+ log.debug(f\"CACHE MISS: Loading and transforming {source_path_rel} for {target_resolution_key}\")\r\n+ full_source_path = self.temp_dir / source_path_rel\r\n+ img_prepared = None\r\n+ source_dtype = None\r\n+\r\n+ 
try:\r\n+ # --- 1. Load Source Image ---\r\n+ # Determine read flag (Grayscale for specific types, unchanged otherwise)\r\n+ read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n+ # Special case for MASK: always load unchanged first to check alpha\r\n+ if map_type.upper() == 'MASK': read_flag = cv2.IMREAD_UNCHANGED\r\n+\r\n+ log.debug(f\"Loading source {full_source_path.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n+ if img_loaded is None:\r\n+ raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n+ source_dtype = img_loaded.dtype\r\n+ log.debug(f\"Loaded source {full_source_path.name}, dtype: {source_dtype}, shape: {img_loaded.shape}\")\r\n+\r\n+ # --- 2. Initial Preparation (BGR->RGB, Gloss Inversion, MASK handling) ---\r\n+ img_prepared = img_loaded # Start with loaded image\r\n+\r\n+ # BGR -> RGB conversion (only for 3-channel images)\r\n+ if len(img_prepared.shape) == 3 and img_prepared.shape[2] >= 3: # Check for 3 or 4 channels\r\n+ # Ensure it's not already grayscale before attempting conversion\r\n+ if read_flag != cv2.IMREAD_GRAYSCALE:\r\n+ log.debug(f\"Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n+ # Handle 4-channel (BGRA) by converting to RGB first\r\n+ if img_prepared.shape[2] == 4:\r\n+ img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGRA2RGB)\r\n+ else: # 3-channel (BGR)\r\n+ img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGR2RGB)\r\n+ else:\r\n+ log.debug(f\"Skipping BGR->RGB conversion for {source_path_rel.name} as it was loaded grayscale.\")\r\n+ elif len(img_prepared.shape) == 2:\r\n+ log.debug(f\"Image {source_path_rel.name} is grayscale, no BGR->RGB conversion needed.\")\r\n+ else:\r\n+ log.warning(f\"Unexpected image shape {img_prepared.shape} for {source_path_rel.name} after 
loading.\")\r\n+\r\n+\r\n+ # Gloss -> Roughness Inversion\r\n+ if map_type == 'ROUGH' and is_gloss_source:\r\n+ log.info(f\"Performing Gloss->Roughness inversion for {source_path_rel.name}\")\r\n+ # Ensure grayscale before inversion\r\n+ if len(img_prepared.shape) == 3:\r\n+ img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY) # Use RGB2GRAY as it's already converted\r\n+\r\n+ # Normalize based on original source dtype before inversion\r\n+ if source_dtype == np.uint16:\r\n+ img_float = 1.0 - (img_prepared.astype(np.float32) / 65535.0)\r\n+ elif source_dtype == np.uint8:\r\n+ img_float = 1.0 - (img_prepared.astype(np.float32) / 255.0)\r\n+ else: # Assuming float input is already 0-1 range\r\n+ img_float = 1.0 - img_prepared.astype(np.float32)\r\n+ img_prepared = np.clip(img_float, 0.0, 1.0) # Result is float32\r\n+ log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {source_dtype}\")\r\n+\r\n+ # MASK Handling (Extract alpha or convert) - Ensure it happens after potential BGR->RGB\r\n+ if map_type == 'MASK':\r\n+ log.debug(f\"Processing as MASK type for {source_path_rel.name}.\")\r\n+ shape = img_prepared.shape\r\n+ if len(shape) == 3 and shape[2] == 4:\r\n+ log.debug(\"MASK processing: Extracting alpha channel (4-channel source).\")\r\n+ img_prepared = img_prepared[:, :, 3] # Alpha is usually the 4th channel (index 3)\r\n+ elif len(shape) == 3 and shape[2] == 3:\r\n+ log.debug(\"MASK processing: Converting RGB to Grayscale (3-channel source).\")\r\n+ img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY)\r\n+ elif len(shape) == 2:\r\n+ log.debug(\"MASK processing: Source is already grayscale.\")\r\n+ else:\r\n+ log.warning(f\"MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\")\r\n+ # MASK should ideally be uint8 for saving later, but keep float for now if inverted?\r\n+ # Let _save_image handle final conversion based on format rules.\r\n+\r\n+ # Ensure data is float32 for resizing if it came from gloss inversion\r\n+ if isinstance(img_prepared, np.ndarray) and img_prepared.dtype != np.float32 and map_type == 'ROUGH' and is_gloss_source:\r\n+ img_prepared = img_prepared.astype(np.float32)\r\n+ elif isinstance(img_prepared, np.ndarray) and img_prepared.dtype not in [np.uint8, np.uint16, np.float32, np.float16]:\r\n+ # Convert other potential types (like bool) to float32 for resizing compatibility\r\n+ log.warning(f\"Converting unexpected dtype {img_prepared.dtype} to float32 before resizing.\")\r\n+ img_prepared = img_prepared.astype(np.float32)\r\n+\r\n+\r\n+ # --- 3. Resize ---\r\n+ if img_prepared is None: raise AssetProcessingError(\"Image data is None after initial prep.\")\r\n+ orig_h, orig_w = img_prepared.shape[:2]\r\n+ target_dim_px = self.config.image_resolutions.get(target_resolution_key)\r\n+ if not target_dim_px:\r\n+ raise AssetProcessingError(f\"Target resolution key '{target_resolution_key}' not found in config.\")\r\n+\r\n+ # Avoid upscaling check\r\n+ max_original_dimension = max(orig_w, orig_h)\r\n+ if target_dim_px > max_original_dimension:\r\n+ log.warning(f\"Target dimension {target_dim_px}px is larger than original {max_original_dimension}px for {source_path_rel}. 
Skipping resize for {target_resolution_key}.\")\r\n+ # Store None in cache for this specific resolution to avoid retrying\r\n+ cache[cache_key] = (None, source_dtype)\r\n+ return None, source_dtype # Indicate resize was skipped\r\n+\r\n+ if orig_w <= 0 or orig_h <= 0:\r\n+ raise AssetProcessingError(f\"Invalid original dimensions ({orig_w}x{orig_h}) for {source_path_rel}.\")\r\n+\r\n+ target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim_px)\r\n+ interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n+ log.debug(f\"Resizing {source_path_rel.name} from ({orig_w}x{orig_h}) to ({target_w}x{target_h}) for {target_resolution_key}\")\r\n+ img_resized = cv2.resize(img_prepared, (target_w, target_h), interpolation=interpolation)\r\n+\r\n+ # --- 4. Cache and Return ---\r\n+ # Ensure result is float32 if it came from gloss inversion, otherwise keep resized dtype\r\n+ final_data_to_cache = img_resized\r\n+ if map_type == 'ROUGH' and is_gloss_source and final_data_to_cache.dtype != np.float32:\r\n+ final_data_to_cache = final_data_to_cache.astype(np.float32)\r\n+\r\n+ log.debug(f\"CACHING result for {cache_key}. 
Shape: {final_data_to_cache.shape}, Dtype: {final_data_to_cache.dtype}\")\r\n+ cache[cache_key] = (final_data_to_cache, source_dtype)\r\n+ return final_data_to_cache, source_dtype\r\n+\r\n+ except Exception as e:\r\n+ log.error(f\"Error in _load_and_transform_source for {source_path_rel} at {target_resolution_key}: {e}\", exc_info=True)\r\n+ # Cache None to prevent retrying on error for this specific key\r\n+ cache[cache_key] = (None, None)\r\n+ return None, None\r\n+\r\n+\r\n+ # --- New Helper Function: Save Image ---\r\n+ def _save_image(self, image_data: np.ndarray, map_type: str, resolution_key: str, asset_base_name: str, source_info: dict, output_bit_depth_rule: str, temp_dir: Path) -> Optional[Dict]:\r\n+ \"\"\"\r\n+ Handles saving an image NumPy array to a temporary file, including determining\r\n+ format, bit depth, performing final conversions, and fallback logic.\r\n+\r\n+ Args:\r\n+ image_data: NumPy array containing the image data to save.\r\n+ map_type: The standard map type being saved (e.g., \"COL\", \"NRMRGH\").\r\n+ resolution_key: The resolution key (e.g., \"4K\").\r\n+ asset_base_name: The sanitized base name of the asset.\r\n+ source_info: Dictionary containing details about the source(s), e.g.,\r\n+ {'original_extension': '.tif', 'source_bit_depth': 16, 'involved_extensions': {'.tif', '.png'}}\r\n+ output_bit_depth_rule: Rule for determining output bit depth ('respect', 'force_8bit', 'force_16bit', 'respect_inputs').\r\n+ temp_dir: The temporary directory path to save the file in.\r\n+\r\n+ Returns:\r\n+ A dictionary containing details of the saved file (path, width, height,\r\n+ bit_depth, format) or None if saving failed.\r\n+ \"\"\"\r\n+ if image_data is None:\r\n+ log.error(f\"Cannot save image for {map_type} ({resolution_key}): image_data is None.\")\r\n+ return None\r\n+ if not temp_dir or not temp_dir.exists():\r\n+ log.error(f\"Cannot save image for {map_type} ({resolution_key}): temp_dir is invalid.\")\r\n+ return None\r\n+\r\n+ 
try:\r\n+ h, w = image_data.shape[:2]\r\n+ current_dtype = image_data.dtype\r\n+ log.debug(f\"Saving {map_type} ({resolution_key}) for asset '{asset_base_name}'. Input shape: {image_data.shape}, dtype: {current_dtype}\")\r\n+\r\n+ # --- 1. Determine Output Bit Depth ---\r\n+ source_bpc = source_info.get('source_bit_depth', 8) # Default to 8 if missing\r\n+ max_input_bpc = source_info.get('max_input_bit_depth', source_bpc) # For 'respect_inputs' merge rule\r\n+ output_dtype_target, output_bit_depth = np.uint8, 8 # Default\r\n+\r\n+ if output_bit_depth_rule == 'force_8bit':\r\n+ output_dtype_target, output_bit_depth = np.uint8, 8\r\n+ elif output_bit_depth_rule == 'force_16bit':\r\n+ output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ elif output_bit_depth_rule == 'respect': # For individual maps\r\n+ if source_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ # Handle float source? Assume 16-bit output if source was float? Needs clarification.\r\n+ # For now, stick to uint8/16 based on source_bpc.\r\n+ elif output_bit_depth_rule == 'respect_inputs': # For merged maps\r\n+ if max_input_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ else: # Default to 8-bit if rule is unknown\r\n+ log.warning(f\"Unknown output_bit_depth_rule '{output_bit_depth_rule}'. Defaulting to 8-bit.\")\r\n+ output_dtype_target, output_bit_depth = np.uint8, 8\r\n+\r\n+ log.debug(f\"Target output bit depth: {output_bit_depth}-bit (dtype: {output_dtype_target.__name__}) based on rule '{output_bit_depth_rule}'\")\r\n+\r\n+ # --- 2. 
Determine Output Format ---\r\n+ output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n+ primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n+ fmt_8bit_config = self.config.get_8bit_output_format()\r\n+ threshold = self.config.resolution_threshold_for_jpg\r\n+ force_lossless = map_type in self.config.force_lossless_map_types\r\n+ original_extension = source_info.get('original_extension', '.png') # Primary source ext\r\n+ involved_extensions = source_info.get('involved_extensions', {original_extension}) # For merges\r\n+ target_dim_px = self.config.image_resolutions.get(resolution_key, 0) # Get target dimension size\r\n+\r\n+ # Apply format determination logic (similar to old _process_maps/_merge_maps)\r\n+ if force_lossless:\r\n+ log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: # Assume PNG if primary 16-bit isn't EXR\r\n+ if output_format != \"png\": log.warning(f\"Primary 16-bit format '{output_format}' not PNG/EXR for forced lossless. 
Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16 if fallback_fmt_16 == \"png\" else \"png\" # Ensure PNG\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else: # 8-bit lossless -> PNG\r\n+ output_format = \"png\"; output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n+\r\n+ elif output_bit_depth == 8 and target_dim_px >= threshold:\r\n+ output_format = 'jpg'; output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {resolution_key} due to resolution threshold ({target_dim_px} >= {threshold}).\")\r\n+ else:\r\n+ # Determine highest format involved (for merges) or use original (for individuals)\r\n+ highest_format_str = 'jpg' # Default lowest\r\n+ relevant_extensions = involved_extensions if map_type in self.config.map_merge_rules else {original_extension}\r\n+ if '.exr' in relevant_extensions: highest_format_str = 'exr'\r\n+ elif '.tif' in relevant_extensions: highest_format_str = 'tif'\r\n+ elif '.png' in relevant_extensions: highest_format_str = 'png'\r\n+\r\n+ if highest_format_str == 'exr':\r\n+ if output_bit_depth == 16: output_format, output_ext, needs_float16 = \"exr\", \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif highest_format_str == 'tif':\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"): output_ext, needs_float16 = \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, 
cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: output_format = \"png\"; output_ext = \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif highest_format_str == 'png':\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"): output_ext, needs_float16 = \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: output_format = \"png\"; output_ext = \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ else: # Default to configured 8-bit format if highest was JPG or unknown\r\n+ output_format = fmt_8bit_config; output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\": save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif output_format == \"jpg\": save_params.extend([cv2.IMWRITE_JPEG_QUALITY, self.config.jpg_quality])\r\n+\r\n+ # Final check: JPG must be 8-bit\r\n+ if output_format == \"jpg\" and output_bit_depth == 16:\r\n+ log.warning(f\"Output format is JPG, but target bit depth is 16. Forcing 8-bit for {map_type} ({resolution_key}).\")\r\n+ output_dtype_target, output_bit_depth = np.uint8, 8\r\n+\r\n+ log.debug(f\"Determined save format: {output_format}, ext: {output_ext}, bit_depth: {output_bit_depth}, needs_float16: {needs_float16}\")\r\n+\r\n+ # --- 3. 
Final Data Type Conversion ---\r\n+ img_to_save = image_data.copy() # Work on a copy\r\n+ if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n+ log.debug(f\"Converting image data from {img_to_save.dtype} to uint8 for saving.\")\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ else: img_to_save = img_to_save.astype(np.uint8) # Direct cast for other types (e.g., bool)\r\n+ elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n+ log.debug(f\"Converting image data from {img_to_save.dtype} to uint16 for saving.\")\r\n+ if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257 # Proper 8->16 bit scaling\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n+ else: img_to_save = img_to_save.astype(np.uint16)\r\n+ if needs_float16 and img_to_save.dtype != np.float16:\r\n+ log.debug(f\"Converting image data from {img_to_save.dtype} to float16 for EXR saving.\")\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n+ else: log.warning(f\"Cannot convert {img_to_save.dtype} to float16 for EXR save.\"); return None\r\n+\r\n+ # --- 4. 
Final Color Space Conversion (RGB -> BGR for non-EXR) ---\r\n+ img_save_final = img_to_save\r\n+ is_3_channel = len(img_to_save.shape) == 3 and img_to_save.shape[2] == 3\r\n+ if is_3_channel and not output_format.startswith(\"exr\"):\r\n+ log.debug(f\"Converting RGB to BGR for saving {map_type} ({resolution_key}) as {output_format}\")\r\n+ try:\r\n+ img_save_final = cv2.cvtColor(img_to_save, cv2.COLOR_RGB2BGR)\r\n+ except Exception as cvt_err:\r\n+ log.error(f\"Failed RGB->BGR conversion before save for {map_type} ({resolution_key}): {cvt_err}. Saving original RGB.\")\r\n+ img_save_final = img_to_save # Fallback\r\n+\r\n+ # --- 5. Construct Filename & Save ---\r\n+ filename = self.config.target_filename_pattern.format(\r\n+ base_name=asset_base_name,\r\n+ map_type=map_type,\r\n+ resolution=resolution_key,\r\n+ ext=output_ext.lstrip('.')\r\n+ )\r\n+ output_path_temp = temp_dir / filename\r\n+ log.debug(f\"Attempting to save: {output_path_temp.name} (Format: {output_format}, Dtype: {img_save_final.dtype})\")\r\n+\r\n+ saved_successfully = False\r\n+ actual_format_saved = output_format\r\n+ try:\r\n+ cv2.imwrite(str(output_path_temp), img_save_final, save_params)\r\n+ saved_successfully = True\r\n+ log.info(f\" > Saved {map_type} ({resolution_key}, {output_bit_depth}-bit) as {output_format}\")\r\n+ except Exception as save_err:\r\n+ log.error(f\"Save failed ({output_format}) for {map_type} {resolution_key}: {save_err}\")\r\n+ # --- Try Fallback ---\r\n+ if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format and fallback_fmt_16 == \"png\":\r\n+ log.warning(f\"Attempting fallback PNG save for {map_type} {resolution_key}\")\r\n+ actual_format_saved = \"png\"; output_ext = \".png\";\r\n+ filename = self.config.target_filename_pattern.format(base_name=asset_base_name, map_type=map_type, resolution=resolution_key, ext=\"png\")\r\n+ output_path_temp = temp_dir / filename\r\n+ save_params_fallback = [cv2.IMWRITE_PNG_COMPRESSION, 
self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)]\r\n+ img_fallback = None; target_fallback_dtype = np.uint16\r\n+\r\n+ # Convert original data (before float16 conversion) to uint16 for PNG fallback\r\n+ if img_to_save.dtype == np.float16: # This means original was likely float or uint16/8 converted to float16\r\n+ # Need to get back to uint16 - use the pre-float16 converted data if possible?\r\n+ # Safest is to convert the float16 back to uint16\r\n+ img_scaled = np.clip(img_to_save.astype(np.float32) * 65535.0, 0, 65535)\r\n+ img_fallback = img_scaled.astype(target_fallback_dtype)\r\n+ elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already uint16\r\n+ else: log.error(f\"Cannot convert {img_to_save.dtype} for PNG fallback.\"); return None\r\n+\r\n+ # --- Conditional RGB -> BGR Conversion for fallback ---\r\n+ img_fallback_save_final = img_fallback\r\n+ is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3\r\n+ if is_3_channel_fallback: # PNG is non-EXR\r\n+ log.debug(f\"Converting RGB to BGR for fallback PNG save {map_type} ({resolution_key})\")\r\n+ try: img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)\r\n+ except Exception as cvt_err_fb: log.error(f\"Failed RGB->BGR conversion for fallback PNG: {cvt_err_fb}. Saving original.\")\r\n+\r\n+ try:\r\n+ cv2.imwrite(str(output_path_temp), img_fallback_save_final, save_params_fallback)\r\n+ saved_successfully = True\r\n+ log.info(f\" > Saved {map_type} ({resolution_key}) using fallback PNG\")\r\n+ except Exception as fallback_err:\r\n+ log.error(f\"Fallback PNG save failed for {map_type} {resolution_key}: {fallback_err}\", exc_info=True)\r\n+ else:\r\n+ log.error(f\"No suitable fallback available or applicable for failed save of {map_type} ({resolution_key}) as {output_format}.\")\r\n+\r\n+\r\n+ # --- 6. 
Return Result ---\r\n+ if saved_successfully:\r\n+ return {\r\n+ \"path\": output_path_temp.relative_to(self.temp_dir), # Store relative path\r\n+ \"resolution\": resolution_key,\r\n+ \"width\": w, \"height\": h,\r\n+ \"bit_depth\": output_bit_depth,\r\n+ \"format\": actual_format_saved\r\n+ }\r\n+ else:\r\n+ return None # Indicate save failure\r\n+\r\n+ except Exception as e:\r\n+ log.error(f\"Unexpected error in _save_image for {map_type} ({resolution_key}): {e}\", exc_info=True)\r\n+ return None\r\n+\r\n def process(self) -> Dict[str, List[str]]:\r\n \"\"\"\r\n Executes the full processing pipeline for the input path, handling\r\n multiple assets within a single input if detected.\r\n" }, { "date": 1745343399847, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -596,8 +596,9 @@\n \"\"\"\r\n log.info(f\"Starting processing for input: {self.input_path.name}\")\r\n overall_status = {\"processed\": [], \"skipped\": [], \"failed\": []}\r\n supplier_name = self.config.supplier_name # Get once\r\n+ loaded_data_cache = {} # Initialize cache for this process call\r\n \r\n try:\r\n self._setup_workspace()\r\n self._extract_input()\r\n@@ -680,12 +681,13 @@\n continue # Skip to the next asset in the loop\r\n elif self.overwrite:\r\n log.info(f\"Overwrite flag is set. 
Processing asset '{current_asset_name}' even if output exists.\")\r\n \r\n- # --- Process Maps for this asset ---\r\n- processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps = self._process_maps(\r\n+ # --- Process Individual Maps for this asset ---\r\n+ processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps = self._process_individual_maps(\r\n filtered_maps_list=filtered_classified_files_asset.get('maps', []),\r\n- current_asset_metadata=current_asset_metadata # Pass base metadata\r\n+ current_asset_metadata=current_asset_metadata, # Pass base metadata\r\n+ loaded_data_cache=loaded_data_cache # Pass cache\r\n )\r\n # Update current metadata with results\r\n current_asset_metadata[\"image_stats_1k\"] = image_stats_asset\r\n current_asset_metadata[\"aspect_ratio_change_string\"] = aspect_ratio_change_string_asset\r\n@@ -696,13 +698,14 @@\n # This was previously stored in self.metadata[\"map_details\"]\r\n map_details_asset = {k: v for k, v in current_asset_metadata.pop(\"map_details\", {}).items() if k in processed_maps_details_asset}\r\n \r\n \r\n- # --- Merge Maps for this asset ---\r\n- merged_maps_details_asset = self._merge_maps(\r\n- processed_maps_details_asset=processed_maps_details_asset,\r\n- filtered_classified_files=filtered_classified_files_asset, # Pass filtered files for original ext lookup\r\n- current_asset_metadata=current_asset_metadata\r\n+ # --- Merge Maps from Source for this asset ---\r\n+ merged_maps_details_asset = self._merge_maps_from_source(\r\n+ processed_maps_details_asset=processed_maps_details_asset, # Still needed for source info lookup? Or pass classified files? 
Check impl.\r\n+ filtered_classified_files=filtered_classified_files_asset,\r\n+ current_asset_metadata=current_asset_metadata,\r\n+ loaded_data_cache=loaded_data_cache # Pass cache\r\n )\r\n \r\n # --- Generate Metadata for this asset ---\r\n temp_metadata_path_asset = self._generate_metadata_file(\r\n@@ -1264,14 +1267,26 @@\n \r\n return {\"asset_category\": determined_category, \"archetype\": determined_archetype}\r\n \r\n \r\n- def _process_maps(self, filtered_maps_list: List[Dict], current_asset_metadata: Dict) -> Tuple[Dict[str, Dict[str, Dict]], Dict[str, Dict], str, List[Dict]]:\r\n+ def _process_individual_maps(self, filtered_maps_list: List[Dict], current_asset_metadata: Dict, loaded_data_cache: dict) -> Tuple[Dict[str, Dict[str, Dict]], Dict[str, Dict], str, List[Dict]]:\r\n \"\"\"\r\n- Loads, processes, resizes, and saves classified map files for a specific asset.\r\n+ Processes, resizes, and saves classified map files for a specific asset\r\n+ that are NOT used as inputs for merge rules. 
Uses helper functions.\r\n \r\n Args:\r\n filtered_maps_list: List of map dictionaries belonging to the current asset.\r\n+ current_asset_metadata: Metadata dictionary for the current asset.\r\n+ loaded_data_cache: Cache dictionary for loaded/resized source data.\r\n+\r\n+ Returns:\r\n+ Tuple containing:\r\n+ - processed_maps_details_asset: Dict mapping map_type to resolution details.\r\n+ - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n+ - aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n+ - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n+ \"\"\"\r\n+ filtered_maps_list: List of map dictionaries belonging to the current asset.\r\n current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n \r\n Returns:\r\n Tuple containing:\r\n@@ -1671,16 +1686,18 @@\n # Note: Final metadata updates (maps_present, shader_features) are handled in the main process loop\r\n return processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps\r\n \r\n \r\n- def _merge_maps(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict) -> Dict[str, Dict[str, Dict]]:\r\n+ def _merge_maps_from_source(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict, loaded_data_cache: dict) -> Dict[str, Dict[str, Dict]]:\r\n \"\"\"\r\n- Merges channels from different maps for a specific asset based on rules in configuration.\r\n+ Merges channels from different SOURCE maps for a specific asset based on rules\r\n+ in configuration, using helper functions for loading and saving.\r\n \r\n Args:\r\n- processed_maps_details_asset: Details of successfully processed maps for this asset.\r\n- filtered_classified_files: Classified files dictionary filtered for 
this asset.\r\n- current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n+ processed_maps_details_asset: Details of successfully processed maps (used for source info lookup).\r\n+ filtered_classified_files: Classified files dictionary filtered for this asset (used for source path lookup).\r\n+ current_asset_metadata: Metadata dictionary for the current asset.\r\n+ loaded_data_cache: Cache dictionary for loaded/resized source data.\r\n \r\n Returns:\r\n Dict[str, Dict[str, Dict]]: Details of successfully merged maps for this asset.\r\n \"\"\"\r\n" }, { "date": 1745343426422, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1284,17 +1284,8 @@\n - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n - aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n \"\"\"\r\n- filtered_maps_list: List of map dictionaries belonging to the current asset.\r\n- current_asset_metadata: Metadata dictionary for the current asset (contains asset_name).\r\n-\r\n- Returns:\r\n- Tuple containing:\r\n- - processed_maps_details_asset: Dict mapping map_type to resolution details.\r\n- - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n- - aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n- - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n \"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n log.info(f\"Processing identified map files for asset '{asset_name}'...\")\r\n" }, { "date": 1745343446917, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1683,15 +1683,8 @@\n Merges 
channels from different SOURCE maps for a specific asset based on rules\r\n in configuration, using helper functions for loading and saving.\r\n \r\n Args:\r\n- processed_maps_details_asset: Details of successfully processed maps (used for source info lookup).\r\n- filtered_classified_files: Classified files dictionary filtered for this asset (used for source path lookup).\r\n- current_asset_metadata: Metadata dictionary for the current asset.\r\n- loaded_data_cache: Cache dictionary for loaded/resized source data.\r\n-\r\n- Returns:\r\n- Dict[str, Dict[str, Dict]]: Details of successfully merged maps for this asset.\r\n \"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n \r\n" }, { "date": 1745343902455, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1284,9 +1284,8 @@\n - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n - aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n \"\"\"\r\n- \"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n log.info(f\"Processing identified map files for asset '{asset_name}'...\")\r\n \r\n" }, { "date": 1745344444689, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -0,0 +1,2418 @@\n+# asset_processor.py\r\n+\r\n+import os\r\n+import math\r\n+import shutil\r\n+import tempfile\r\n+import zipfile\r\n+import logging\r\n+import json\r\n+import re\r\n+import time\r\n+from pathlib import Path\r\n+from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n+from typing import List, Dict, Tuple, Optional # Added for type 
hinting\r\n+from collections import defaultdict # Added for grouping\r\n+\r\n+# Attempt to import image processing libraries\r\n+try:\r\n+ import cv2\r\n+ import numpy as np\r\n+except ImportError:\r\n+ print(\"ERROR: Missing required libraries. Please install opencv-python and numpy:\")\r\n+ print(\"pip install opencv-python numpy\")\r\n+ exit(1) # Exit if essential libraries are missing\r\n+\r\n+# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n+try:\r\n+ import OpenEXR\r\n+ import Imath\r\n+ _HAS_OPENEXR = True\r\n+except ImportError:\r\n+ _HAS_OPENEXR = False\r\n+ # Log this information - basic EXR might still work via OpenCV\r\n+ logging.debug(\"Optional 'OpenEXR' python package not found. EXR saving relies on OpenCV's built-in support.\")\r\n+\r\n+\r\n+# Assuming Configuration class is in configuration.py\r\n+try:\r\n+ from configuration import Configuration, ConfigurationError\r\n+except ImportError:\r\n+ print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n+ print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n+ exit(1)\r\n+\r\n+# Use logger defined in main.py (or configure one here if run standalone)\r\n+log = logging.getLogger(__name__)\r\n+# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\n+if not log.hasHandlers():\r\n+ logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n+\r\n+\r\n+# --- Custom Exception ---\r\n+class AssetProcessingError(Exception):\r\n+ \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n+ pass\r\n+\r\n+# --- Helper Functions ---\r\n+def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n+ \"\"\"\r\n+ Calculates target dimensions by first scaling to fit target_max_dim\r\n+ while maintaining aspect ratio, then finding the nearest power-of-two\r\n+ value for each resulting dimension (Stretch/Squash to POT).\r\n+ 
\"\"\"\r\n+ if orig_w <= 0 or orig_h <= 0:\r\n+ # Fallback to target_max_dim if original dimensions are invalid\r\n+ pot_dim = get_nearest_pot(target_max_dim)\r\n+ log.warning(f\"Invalid original dimensions ({orig_w}x{orig_h}). Falling back to nearest POT of target_max_dim: {pot_dim}x{pot_dim}\")\r\n+ return (pot_dim, pot_dim)\r\n+\r\n+ # Step 1: Calculate intermediate dimensions maintaining aspect ratio\r\n+ ratio = orig_w / orig_h\r\n+ if ratio > 1: # Width is dominant\r\n+ scaled_w = target_max_dim\r\n+ scaled_h = max(1, round(scaled_w / ratio))\r\n+ else: # Height is dominant or square\r\n+ scaled_h = target_max_dim\r\n+ scaled_w = max(1, round(scaled_h * ratio))\r\n+\r\n+ # Step 2: Find the nearest power of two for each scaled dimension\r\n+ pot_w = get_nearest_pot(scaled_w)\r\n+ pot_h = get_nearest_pot(scaled_h)\r\n+\r\n+ log.debug(f\"POT Calc: Orig=({orig_w}x{orig_h}), MaxDim={target_max_dim} -> Scaled=({scaled_w}x{scaled_h}) -> POT=({pot_w}x{pot_h})\")\r\n+\r\n+ return int(pot_w), int(pot_h)\r\n+\r\n+def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n+ \"\"\"\r\n+ Calculates min, max, mean for a given numpy image array.\r\n+ Handles grayscale and multi-channel images. 
Converts to float64 for calculation.\r\n+ \"\"\"\r\n+ if image_data is None:\r\n+ log.warning(\"Attempted to calculate stats on None image data.\")\r\n+ return None\r\n+ try:\r\n+ # Use float64 for calculations to avoid potential overflow/precision issues\r\n+ data_float = image_data.astype(np.float64)\r\n+\r\n+ # Normalize data_float based on original dtype before calculating stats\r\n+ if image_data.dtype == np.uint16:\r\n+ log.debug(\"Stats calculation: Normalizing uint16 data to 0-1 range.\")\r\n+ data_float /= 65535.0\r\n+ elif image_data.dtype == np.uint8:\r\n+ log.debug(\"Stats calculation: Normalizing uint8 data to 0-1 range.\")\r\n+ data_float /= 255.0\r\n+ # Assuming float inputs are already in 0-1 range or similar\r\n+\r\n+ log.debug(f\"Stats calculation: data_float dtype: {data_float.dtype}, shape: {data_float.shape}\")\r\n+ # Log a few sample values to check range after normalization\r\n+ if data_float.size > 0:\r\n+ sample_values = data_float.flatten()[:10] # Get first 10 values\r\n+ log.debug(f\"Stats calculation: Sample values (first 10) after normalization: {sample_values.tolist()}\")\r\n+\r\n+\r\n+ if len(data_float.shape) == 2: # Grayscale (H, W)\r\n+ min_val = float(np.min(data_float))\r\n+ max_val = float(np.max(data_float))\r\n+ mean_val = float(np.mean(data_float))\r\n+ stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n+ log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n+ elif len(data_float.shape) == 3: # Color (H, W, C)\r\n+ channels = data_float.shape[2]\r\n+ min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n+ max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n+ mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n+ # The input data_float is now expected to be in RGB order after conversion in _process_maps\r\n+ stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n+ log.debug(f\"Calculated {channels}-Channel 
Stats (RGB order): Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n+ else:\r\n+ log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n+ return None\r\n+ return stats\r\n+ except Exception as e:\r\n+ log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n+ return {\"error\": str(e)}\r\n+\r\n+\r\n+# --- Helper function ---\r\n+def _get_base_map_type(target_map_string: str) -> str:\r\n+ \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n+ match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n+ if match:\r\n+ return match.group(1).upper()\r\n+ return target_map_string.upper() # Fallback if no number suffix\r\n+\r\n+\r\n+def _is_power_of_two(n: int) -> bool:\r\n+ \"\"\"Checks if a number is a power of two.\"\"\"\r\n+ return (n > 0) and (n & (n - 1) == 0)\r\n+\r\n+def get_nearest_pot(value: int) -> int:\r\n+ \"\"\"Finds the nearest power of two to the given value.\"\"\"\r\n+ if value <= 0:\r\n+ return 1 # Or raise error, POT must be positive\r\n+ if _is_power_of_two(value):\r\n+ return value\r\n+\r\n+ # Calculate the powers of two below and above the value\r\n+ lower_pot = 1 << (value.bit_length() - 1)\r\n+ upper_pot = 1 << value.bit_length()\r\n+\r\n+ # Determine which power of two is closer\r\n+ if (value - lower_pot) < (upper_pot - value):\r\n+ return lower_pot\r\n+ else:\r\n+ return upper_pot\r\n+\r\n+# --- Asset Processor Class ---\r\n+class AssetProcessor:\r\n+ \"\"\"\r\n+ Handles the processing pipeline for a single asset (ZIP or folder).\r\n+ \"\"\"\r\n+ # Define the list of known grayscale map types (adjust as needed)\r\n+ GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n+\r\n+ def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n+ \"\"\"\r\n+ Initializes the processor for a given input asset.\r\n+\r\n+ Args:\r\n+ input_path: Path to the input 
ZIP file or folder.\r\n+ config: The loaded Configuration object.\r\n+ output_base_path: The base directory where processed output will be saved.\r\n+ overwrite: If True, forces reprocessing even if output exists.\r\n+ \"\"\"\r\n+ if not isinstance(input_path, Path): input_path = Path(input_path)\r\n+ if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n+ if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n+\r\n+ if not input_path.exists():\r\n+ raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n+ if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n+ raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n+\r\n+ self.input_path: Path = input_path\r\n+ self.config: Configuration = config\r\n+ self.output_base_path: Path = output_base_path\r\n+ self.overwrite: bool = overwrite # Store the overwrite flag\r\n+\r\n+ self.temp_dir: Path | None = None # Path to the temporary working directory\r\n+ self.classified_files: dict[str, list[dict]] = {\r\n+ \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n+ }\r\n+ # These will no longer store instance-wide results, but are kept for potential future use or refactoring\r\n+ # self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n+ # self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n+ # self.metadata_file_path_temp: Path | None = None\r\n+ # self.metadata: dict = {} # Metadata is now handled per-asset within the process loop\r\n+\r\n+ log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n+\r\n+\r\n+ # --- New Helper Function: Load and Transform Source ---\r\n+ def _load_and_transform_source(self, source_path_rel: Path, map_type: str, target_resolution_key: str, is_gloss_source: bool, cache: dict) -> Tuple[Optional[np.ndarray], Optional[np.dtype]]:\r\n+ \"\"\"\r\n+ Loads a source 
image file, performs initial prep (BGR->RGB, Gloss->Rough),\r\n+ resizes it to the target resolution, and caches the result.\r\n+\r\n+ Args:\r\n+ source_path_rel: Relative path to the source file within the temp directory.\r\n+ map_type: The standard map type (e.g., \"NRM\", \"ROUGH\").\r\n+ target_resolution_key: The key for the target resolution (e.g., \"4K\").\r\n+ is_gloss_source: Boolean indicating if this source should be treated as gloss for inversion.\r\n+ cache: The dictionary used for caching loaded/resized data.\r\n+\r\n+ Returns:\r\n+ Tuple containing:\r\n+ - Resized NumPy array (float32) or None if loading/processing fails.\r\n+ - Original source NumPy dtype or None if loading fails.\r\n+ \"\"\"\r\n+ if not self.temp_dir:\r\n+ log.error(\"Temporary directory not set in _load_and_transform_source.\")\r\n+ return None, None\r\n+\r\n+ cache_key = (source_path_rel, target_resolution_key)\r\n+ if cache_key in cache:\r\n+ log.debug(f\"CACHE HIT: Returning cached data for {source_path_rel} at {target_resolution_key}\")\r\n+ return cache[cache_key] # Return tuple (image_data, source_dtype)\r\n+\r\n+ log.debug(f\"CACHE MISS: Loading and transforming {source_path_rel} for {target_resolution_key}\")\r\n+ full_source_path = self.temp_dir / source_path_rel\r\n+ img_prepared = None\r\n+ source_dtype = None\r\n+\r\n+ try:\r\n+ # --- 1. 
Load Source Image ---\r\n+ # Determine read flag (Grayscale for specific types, unchanged otherwise)\r\n+ read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n+ # Special case for MASK: always load unchanged first to check alpha\r\n+ if map_type.upper() == 'MASK': read_flag = cv2.IMREAD_UNCHANGED\r\n+\r\n+ log.debug(f\"Loading source {full_source_path.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n+ img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n+ if img_loaded is None:\r\n+ raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n+ source_dtype = img_loaded.dtype\r\n+ log.debug(f\"Loaded source {full_source_path.name}, dtype: {source_dtype}, shape: {img_loaded.shape}\")\r\n+\r\n+ # --- 2. Initial Preparation (BGR->RGB, Gloss Inversion, MASK handling) ---\r\n+ img_prepared = img_loaded # Start with loaded image\r\n+\r\n+ # BGR -> RGB conversion (only for 3-channel images)\r\n+ if len(img_prepared.shape) == 3 and img_prepared.shape[2] >= 3: # Check for 3 or 4 channels\r\n+ # Ensure it's not already grayscale before attempting conversion\r\n+ if read_flag != cv2.IMREAD_GRAYSCALE:\r\n+ log.debug(f\"Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n+ # Handle 4-channel (BGRA) by converting to RGB first\r\n+ if img_prepared.shape[2] == 4:\r\n+ img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGRA2RGB)\r\n+ else: # 3-channel (BGR)\r\n+ img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGR2RGB)\r\n+ else:\r\n+ log.debug(f\"Skipping BGR->RGB conversion for {source_path_rel.name} as it was loaded grayscale.\")\r\n+ elif len(img_prepared.shape) == 2:\r\n+ log.debug(f\"Image {source_path_rel.name} is grayscale, no BGR->RGB conversion needed.\")\r\n+ else:\r\n+ log.warning(f\"Unexpected image shape {img_prepared.shape} for {source_path_rel.name} after loading.\")\r\n+\r\n+\r\n+ # 
Gloss -> Roughness Inversion\r\n+ if map_type == 'ROUGH' and is_gloss_source:\r\n+ log.info(f\"Performing Gloss->Roughness inversion for {source_path_rel.name}\")\r\n+ # Ensure grayscale before inversion\r\n+ if len(img_prepared.shape) == 3:\r\n+ img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY) # Use RGB2GRAY as it's already converted\r\n+\r\n+ # Normalize based on original source dtype before inversion\r\n+ if source_dtype == np.uint16:\r\n+ img_float = 1.0 - (img_prepared.astype(np.float32) / 65535.0)\r\n+ elif source_dtype == np.uint8:\r\n+ img_float = 1.0 - (img_prepared.astype(np.float32) / 255.0)\r\n+ else: # Assuming float input is already 0-1 range\r\n+ img_float = 1.0 - img_prepared.astype(np.float32)\r\n+ img_prepared = np.clip(img_float, 0.0, 1.0) # Result is float32\r\n+ log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {source_dtype}\")\r\n+\r\n+ # MASK Handling (Extract alpha or convert) - Ensure it happens after potential BGR->RGB\r\n+ if map_type == 'MASK':\r\n+ log.debug(f\"Processing as MASK type for {source_path_rel.name}.\")\r\n+ shape = img_prepared.shape\r\n+ if len(shape) == 3 and shape[2] == 4:\r\n+ log.debug(\"MASK processing: Extracting alpha channel (4-channel source).\")\r\n+ img_prepared = img_prepared[:, :, 3] # Alpha is usually the 4th channel (index 3)\r\n+ elif len(shape) == 3 and shape[2] == 3:\r\n+ log.debug(\"MASK processing: Converting RGB to Grayscale (3-channel source).\")\r\n+ img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY)\r\n+ elif len(shape) == 2:\r\n+ log.debug(\"MASK processing: Source is already grayscale.\")\r\n+ else:\r\n+ log.warning(f\"MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\")\r\n+ # MASK should ideally be uint8 for saving later, but keep float for now if inverted?\r\n+ # Let _save_image handle final conversion based on format rules.\r\n+\r\n+ # Ensure data is float32 for resizing if it came from gloss inversion\r\n+ if isinstance(img_prepared, np.ndarray) and img_prepared.dtype != np.float32 and map_type == 'ROUGH' and is_gloss_source:\r\n+ img_prepared = img_prepared.astype(np.float32)\r\n+ elif isinstance(img_prepared, np.ndarray) and img_prepared.dtype not in [np.uint8, np.uint16, np.float32, np.float16]:\r\n+ # Convert other potential types (like bool) to float32 for resizing compatibility\r\n+ log.warning(f\"Converting unexpected dtype {img_prepared.dtype} to float32 before resizing.\")\r\n+ img_prepared = img_prepared.astype(np.float32)\r\n+\r\n+\r\n+ # --- 3. Resize ---\r\n+ if img_prepared is None: raise AssetProcessingError(\"Image data is None after initial prep.\")\r\n+ orig_h, orig_w = img_prepared.shape[:2]\r\n+ target_dim_px = self.config.image_resolutions.get(target_resolution_key)\r\n+ if not target_dim_px:\r\n+ raise AssetProcessingError(f\"Target resolution key '{target_resolution_key}' not found in config.\")\r\n+\r\n+ # Avoid upscaling check\r\n+ max_original_dimension = max(orig_w, orig_h)\r\n+ if target_dim_px > max_original_dimension:\r\n+ log.warning(f\"Target dimension {target_dim_px}px is larger than original {max_original_dimension}px for {source_path_rel}. 
Skipping resize for {target_resolution_key}.\")\r\n+ # Store None in cache for this specific resolution to avoid retrying\r\n+ cache[cache_key] = (None, source_dtype)\r\n+ return None, source_dtype # Indicate resize was skipped\r\n+\r\n+ if orig_w <= 0 or orig_h <= 0:\r\n+ raise AssetProcessingError(f\"Invalid original dimensions ({orig_w}x{orig_h}) for {source_path_rel}.\")\r\n+\r\n+ target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim_px)\r\n+ interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n+ log.debug(f\"Resizing {source_path_rel.name} from ({orig_w}x{orig_h}) to ({target_w}x{target_h}) for {target_resolution_key}\")\r\n+ img_resized = cv2.resize(img_prepared, (target_w, target_h), interpolation=interpolation)\r\n+\r\n+ # --- 4. Cache and Return ---\r\n+ # Ensure result is float32 if it came from gloss inversion, otherwise keep resized dtype\r\n+ final_data_to_cache = img_resized\r\n+ if map_type == 'ROUGH' and is_gloss_source and final_data_to_cache.dtype != np.float32:\r\n+ final_data_to_cache = final_data_to_cache.astype(np.float32)\r\n+\r\n+ log.debug(f\"CACHING result for {cache_key}. 
Shape: {final_data_to_cache.shape}, Dtype: {final_data_to_cache.dtype}\")\r\n+ cache[cache_key] = (final_data_to_cache, source_dtype)\r\n+ return final_data_to_cache, source_dtype\r\n+\r\n+ except Exception as e:\r\n+ log.error(f\"Error in _load_and_transform_source for {source_path_rel} at {target_resolution_key}: {e}\", exc_info=True)\r\n+ # Cache None to prevent retrying on error for this specific key\r\n+ cache[cache_key] = (None, None)\r\n+ return None, None\r\n+\r\n+\r\n+ # --- New Helper Function: Save Image ---\r\n+ def _save_image(self, image_data: np.ndarray, map_type: str, resolution_key: str, asset_base_name: str, source_info: dict, output_bit_depth_rule: str, temp_dir: Path) -> Optional[Dict]:\r\n+ \"\"\"\r\n+ Handles saving an image NumPy array to a temporary file, including determining\r\n+ format, bit depth, performing final conversions, and fallback logic.\r\n+\r\n+ Args:\r\n+ image_data: NumPy array containing the image data to save.\r\n+ map_type: The standard map type being saved (e.g., \"COL\", \"NRMRGH\").\r\n+ resolution_key: The resolution key (e.g., \"4K\").\r\n+ asset_base_name: The sanitized base name of the asset.\r\n+ source_info: Dictionary containing details about the source(s), e.g.,\r\n+ {'original_extension': '.tif', 'source_bit_depth': 16, 'involved_extensions': {'.tif', '.png'}}\r\n+ output_bit_depth_rule: Rule for determining output bit depth ('respect', 'force_8bit', 'force_16bit', 'respect_inputs').\r\n+ temp_dir: The temporary directory path to save the file in.\r\n+\r\n+ Returns:\r\n+ A dictionary containing details of the saved file (path, width, height,\r\n+ bit_depth, format) or None if saving failed.\r\n+ \"\"\"\r\n+ if image_data is None:\r\n+ log.error(f\"Cannot save image for {map_type} ({resolution_key}): image_data is None.\")\r\n+ return None\r\n+ if not temp_dir or not temp_dir.exists():\r\n+ log.error(f\"Cannot save image for {map_type} ({resolution_key}): temp_dir is invalid.\")\r\n+ return None\r\n+\r\n+ 
try:\r\n+ h, w = image_data.shape[:2]\r\n+ current_dtype = image_data.dtype\r\n+ log.debug(f\"Saving {map_type} ({resolution_key}) for asset '{asset_base_name}'. Input shape: {image_data.shape}, dtype: {current_dtype}\")\r\n+\r\n+ # --- 1. Determine Output Bit Depth ---\r\n+ source_bpc = source_info.get('source_bit_depth', 8) # Default to 8 if missing\r\n+ max_input_bpc = source_info.get('max_input_bit_depth', source_bpc) # For 'respect_inputs' merge rule\r\n+ output_dtype_target, output_bit_depth = np.uint8, 8 # Default\r\n+\r\n+ if output_bit_depth_rule == 'force_8bit':\r\n+ output_dtype_target, output_bit_depth = np.uint8, 8\r\n+ elif output_bit_depth_rule == 'force_16bit':\r\n+ output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ elif output_bit_depth_rule == 'respect': # For individual maps\r\n+ if source_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ # Handle float source? Assume 16-bit output if source was float? Needs clarification.\r\n+ # For now, stick to uint8/16 based on source_bpc.\r\n+ elif output_bit_depth_rule == 'respect_inputs': # For merged maps\r\n+ if max_input_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n+ else: # Default to 8-bit if rule is unknown\r\n+ log.warning(f\"Unknown output_bit_depth_rule '{output_bit_depth_rule}'. Defaulting to 8-bit.\")\r\n+ output_dtype_target, output_bit_depth = np.uint8, 8\r\n+\r\n+ log.debug(f\"Target output bit depth: {output_bit_depth}-bit (dtype: {output_dtype_target.__name__}) based on rule '{output_bit_depth_rule}'\")\r\n+\r\n+ # --- 2. 
Determine Output Format ---\r\n+ output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n+ primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n+ fmt_8bit_config = self.config.get_8bit_output_format()\r\n+ threshold = self.config.resolution_threshold_for_jpg\r\n+ force_lossless = map_type in self.config.force_lossless_map_types\r\n+ original_extension = source_info.get('original_extension', '.png') # Primary source ext\r\n+ involved_extensions = source_info.get('involved_extensions', {original_extension}) # For merges\r\n+ target_dim_px = self.config.image_resolutions.get(resolution_key, 0) # Get target dimension size\r\n+\r\n+ # Apply format determination logic (similar to old _process_maps/_merge_maps)\r\n+ if force_lossless:\r\n+ log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"):\r\n+ output_ext, needs_float16 = \".exr\", True\r\n+ save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: # Assume PNG if primary 16-bit isn't EXR\r\n+ if output_format != \"png\": log.warning(f\"Primary 16-bit format '{output_format}' not PNG/EXR for forced lossless. 
Using fallback '{fallback_fmt_16}'.\")\r\n+ output_format = fallback_fmt_16 if fallback_fmt_16 == \"png\" else \"png\" # Ensure PNG\r\n+ output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n+ else: # 8-bit lossless -> PNG\r\n+ output_format = \"png\"; output_ext = \".png\"\r\n+ png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n+ save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n+\r\n+ elif output_bit_depth == 8 and target_dim_px >= threshold:\r\n+ output_format = 'jpg'; output_ext = '.jpg'\r\n+ jpg_quality = self.config.jpg_quality\r\n+ save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n+ log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {resolution_key} due to resolution threshold ({target_dim_px} >= {threshold}).\")\r\n+ else:\r\n+ # Determine highest format involved (for merges) or use original (for individuals)\r\n+ highest_format_str = 'jpg' # Default lowest\r\n+ relevant_extensions = involved_extensions if map_type in self.config.map_merge_rules else {original_extension}\r\n+ if '.exr' in relevant_extensions: highest_format_str = 'exr'\r\n+ elif '.tif' in relevant_extensions: highest_format_str = 'tif'\r\n+ elif '.png' in relevant_extensions: highest_format_str = 'png'\r\n+\r\n+ if highest_format_str == 'exr':\r\n+ if output_bit_depth == 16: output_format, output_ext, needs_float16 = \"exr\", \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif highest_format_str == 'tif':\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"): output_ext, needs_float16 = \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, 
cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: output_format = \"png\"; output_ext = \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif highest_format_str == 'png':\r\n+ if output_bit_depth == 16:\r\n+ output_format = primary_fmt_16\r\n+ if output_format.startswith(\"exr\"): output_ext, needs_float16 = \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n+ else: output_format = \"png\"; output_ext = \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ else: # Default to configured 8-bit format if highest was JPG or unknown\r\n+ output_format = fmt_8bit_config; output_ext = f\".{output_format}\"\r\n+ if output_format == \"png\": save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n+ elif output_format == \"jpg\": save_params.extend([cv2.IMWRITE_JPEG_QUALITY, self.config.jpg_quality])\r\n+\r\n+ # Final check: JPG must be 8-bit\r\n+ if output_format == \"jpg\" and output_bit_depth == 16:\r\n+ log.warning(f\"Output format is JPG, but target bit depth is 16. Forcing 8-bit for {map_type} ({resolution_key}).\")\r\n+ output_dtype_target, output_bit_depth = np.uint8, 8\r\n+\r\n+ log.debug(f\"Determined save format: {output_format}, ext: {output_ext}, bit_depth: {output_bit_depth}, needs_float16: {needs_float16}\")\r\n+\r\n+ # --- 3. 
Final Data Type Conversion ---\r\n+ img_to_save = image_data.copy() # Work on a copy\r\n+ if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n+ log.debug(f\"Converting image data from {img_to_save.dtype} to uint8 for saving.\")\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n+ else: img_to_save = img_to_save.astype(np.uint8) # Direct cast for other types (e.g., bool)\r\n+ elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n+ log.debug(f\"Converting image data from {img_to_save.dtype} to uint16 for saving.\")\r\n+ if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257 # Proper 8->16 bit scaling\r\n+ elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n+ else: img_to_save = img_to_save.astype(np.uint16)\r\n+ if needs_float16 and img_to_save.dtype != np.float16:\r\n+ log.debug(f\"Converting image data from {img_to_save.dtype} to float16 for EXR saving.\")\r\n+ if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n+ elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n+ else: log.warning(f\"Cannot convert {img_to_save.dtype} to float16 for EXR save.\"); return None\r\n+\r\n+ # --- 4. 
Final Color Space Conversion (RGB -> BGR for non-EXR) ---\r\n+ img_save_final = img_to_save\r\n+ is_3_channel = len(img_to_save.shape) == 3 and img_to_save.shape[2] == 3\r\n+ if is_3_channel and not output_format.startswith(\"exr\"):\r\n+ log.debug(f\"Converting RGB to BGR for saving {map_type} ({resolution_key}) as {output_format}\")\r\n+ try:\r\n+ img_save_final = cv2.cvtColor(img_to_save, cv2.COLOR_RGB2BGR)\r\n+ except Exception as cvt_err:\r\n+ log.error(f\"Failed RGB->BGR conversion before save for {map_type} ({resolution_key}): {cvt_err}. Saving original RGB.\")\r\n+ img_save_final = img_to_save # Fallback\r\n+\r\n+ # --- 5. Construct Filename & Save ---\r\n+ filename = self.config.target_filename_pattern.format(\r\n+ base_name=asset_base_name,\r\n+ map_type=map_type,\r\n+ resolution=resolution_key,\r\n+ ext=output_ext.lstrip('.')\r\n+ )\r\n+ output_path_temp = temp_dir / filename\r\n+ log.debug(f\"Attempting to save: {output_path_temp.name} (Format: {output_format}, Dtype: {img_save_final.dtype})\")\r\n+\r\n+ saved_successfully = False\r\n+ actual_format_saved = output_format\r\n+ try:\r\n+ cv2.imwrite(str(output_path_temp), img_save_final, save_params)\r\n+ saved_successfully = True\r\n+ log.info(f\" > Saved {map_type} ({resolution_key}, {output_bit_depth}-bit) as {output_format}\")\r\n+ except Exception as save_err:\r\n+ log.error(f\"Save failed ({output_format}) for {map_type} {resolution_key}: {save_err}\")\r\n+ # --- Try Fallback ---\r\n+ if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format and fallback_fmt_16 == \"png\":\r\n+ log.warning(f\"Attempting fallback PNG save for {map_type} {resolution_key}\")\r\n+ actual_format_saved = \"png\"; output_ext = \".png\";\r\n+ filename = self.config.target_filename_pattern.format(base_name=asset_base_name, map_type=map_type, resolution=resolution_key, ext=\"png\")\r\n+ output_path_temp = temp_dir / filename\r\n+ save_params_fallback = [cv2.IMWRITE_PNG_COMPRESSION, 
self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)]\r\n+ img_fallback = None; target_fallback_dtype = np.uint16\r\n+\r\n+ # Convert original data (before float16 conversion) to uint16 for PNG fallback\r\n+ if img_to_save.dtype == np.float16: # This means original was likely float or uint16/8 converted to float16\r\n+ # Need to get back to uint16 - use the pre-float16 converted data if possible?\r\n+ # Safest is to convert the float16 back to uint16\r\n+ img_scaled = np.clip(img_to_save.astype(np.float32) * 65535.0, 0, 65535)\r\n+ img_fallback = img_scaled.astype(target_fallback_dtype)\r\n+ elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already uint16\r\n+ else: log.error(f\"Cannot convert {img_to_save.dtype} for PNG fallback.\"); return None\r\n+\r\n+ # --- Conditional RGB -> BGR Conversion for fallback ---\r\n+ img_fallback_save_final = img_fallback\r\n+ is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3\r\n+ if is_3_channel_fallback: # PNG is non-EXR\r\n+ log.debug(f\"Converting RGB to BGR for fallback PNG save {map_type} ({resolution_key})\")\r\n+ try: img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)\r\n+ except Exception as cvt_err_fb: log.error(f\"Failed RGB->BGR conversion for fallback PNG: {cvt_err_fb}. Saving original.\")\r\n+\r\n+ try:\r\n+ cv2.imwrite(str(output_path_temp), img_fallback_save_final, save_params_fallback)\r\n+ saved_successfully = True\r\n+ log.info(f\" > Saved {map_type} ({resolution_key}) using fallback PNG\")\r\n+ except Exception as fallback_err:\r\n+ log.error(f\"Fallback PNG save failed for {map_type} {resolution_key}: {fallback_err}\", exc_info=True)\r\n+ else:\r\n+ log.error(f\"No suitable fallback available or applicable for failed save of {map_type} ({resolution_key}) as {output_format}.\")\r\n+\r\n+\r\n+ # --- 6. 
Return Result ---\r\n+ if saved_successfully:\r\n+ return {\r\n+ \"path\": output_path_temp.relative_to(self.temp_dir), # Store relative path\r\n+ \"resolution\": resolution_key,\r\n+ \"width\": w, \"height\": h,\r\n+ \"bit_depth\": output_bit_depth,\r\n+ \"format\": actual_format_saved\r\n+ }\r\n+ else:\r\n+ return None # Indicate save failure\r\n+\r\n+ except Exception as e:\r\n+ log.error(f\"Unexpected error in _save_image for {map_type} ({resolution_key}): {e}\", exc_info=True)\r\n+ return None\r\n+\r\n+ def process(self) -> Dict[str, List[str]]:\r\n+ \"\"\"\r\n+ Executes the full processing pipeline for the input path, handling\r\n+ multiple assets within a single input if detected.\r\n+\r\n+ Returns:\r\n+ Dict[str, List[str]]: A dictionary summarizing the status of each\r\n+ detected asset within the input:\r\n+ {\"processed\": [asset_name1, ...],\r\n+ \"skipped\": [asset_name2, ...],\r\n+ \"failed\": [asset_name3, ...]}\r\n+ \"\"\"\r\n+ log.info(f\"Starting processing for input: {self.input_path.name}\")\r\n+ overall_status = {\"processed\": [], \"skipped\": [], \"failed\": []}\r\n+ supplier_name = self.config.supplier_name # Get once\r\n+ loaded_data_cache = {} # Initialize cache for this process call\r\n+\r\n+ try:\r\n+ self._setup_workspace()\r\n+ self._extract_input()\r\n+ self._inventory_and_classify_files() # Classifies all files in self.classified_files\r\n+\r\n+ # Determine distinct assets and file mapping\r\n+ distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n+ unmatched_files_paths = [p for p, name in file_to_base_name_map.items() if name is None]\r\n+ if unmatched_files_paths:\r\n+ log.warning(f\"Found {len(unmatched_files_paths)} files not matched to any specific asset base name. 
They will be copied to each asset's Extra folder.\")\r\n+ log.debug(f\"Unmatched files: {[str(p) for p in unmatched_files_paths]}\")\r\n+\r\n+\r\n+ # --- Loop through each detected asset ---\r\n+ for current_asset_name in distinct_base_names:\r\n+ log.info(f\"--- Processing detected asset: '{current_asset_name}' ---\")\r\n+ asset_processed = False\r\n+ asset_skipped = False\r\n+ asset_failed = False\r\n+ temp_metadata_path_asset = None # Track metadata file for this asset\r\n+ map_details_asset = {} # Store map details for this asset\r\n+\r\n+ try:\r\n+ # --- Filter classified files for the current asset ---\r\n+ filtered_classified_files_asset = defaultdict(list)\r\n+ for category, file_list in self.classified_files.items():\r\n+ for file_info in file_list:\r\n+ file_path = file_info.get('source_path')\r\n+ if file_path and file_to_base_name_map.get(file_path) == current_asset_name:\r\n+ filtered_classified_files_asset[category].append(file_info)\r\n+ log.debug(f\"Asset '{current_asset_name}': Filtered files - Maps: {len(filtered_classified_files_asset.get('maps',[]))}, Models: {len(filtered_classified_files_asset.get('models',[]))}, Extra: {len(filtered_classified_files_asset.get('extra',[]))}, Ignored: {len(filtered_classified_files_asset.get('ignored',[]))}\")\r\n+\r\n+ # --- Assign Suffixes Per-Asset ---\r\n+ log.debug(f\"Asset '{current_asset_name}': Assigning map type suffixes...\")\r\n+ asset_maps = filtered_classified_files_asset.get('maps', [])\r\n+ grouped_asset_maps = defaultdict(list)\r\n+ for map_info in asset_maps:\r\n+ # Group by the base map type stored earlier\r\n+ grouped_asset_maps[map_info['map_type']].append(map_info)\r\n+\r\n+ for base_map_type, maps_in_group in grouped_asset_maps.items():\r\n+ log.debug(f\" Assigning suffixes for base type '{base_map_type}' within asset '{current_asset_name}' ({len(maps_in_group)} maps)\")\r\n+ # Sorting is already done by _inventory_and_classify_files, just need to assign suffix\r\n+ respect_variants = 
base_map_type in self.config.respect_variant_map_types\r\n+ for i, map_info in enumerate(maps_in_group):\r\n+ if respect_variants:\r\n+ final_map_type = f\"{base_map_type}-{i + 1}\"\r\n+ else:\r\n+ final_map_type = base_map_type\r\n+ log.debug(f\" Updating '{map_info['source_path']}' map_type from '{map_info['map_type']}' to '{final_map_type}'\")\r\n+ map_info['map_type'] = final_map_type # Update the map_type in the dictionary\r\n+\r\n+ # --- Determine Metadata for this specific asset ---\r\n+ asset_specific_metadata = self._determine_single_asset_metadata(current_asset_name, filtered_classified_files_asset)\r\n+ current_asset_metadata = {\r\n+ \"asset_name\": current_asset_name,\r\n+ \"supplier_name\": supplier_name,\r\n+ \"asset_category\": asset_specific_metadata.get(\"asset_category\", self.config.default_asset_category),\r\n+ \"archetype\": asset_specific_metadata.get(\"archetype\", \"Unknown\"),\r\n+ # Initialize fields that will be populated by processing steps\r\n+ \"maps_present\": [],\r\n+ \"merged_maps\": [],\r\n+ \"shader_features\": [],\r\n+ \"source_files_in_extra\": [], # Will be populated in _generate_metadata\r\n+ \"image_stats_1k\": {},\r\n+ \"map_details\": {}, # Will be populated by _process_maps\r\n+ \"aspect_ratio_change_string\": \"N/A\"\r\n+ }\r\n+\r\n+ # --- Skip Check for this specific asset ---\r\n+ if not self.overwrite:\r\n+ supplier_sanitized = self._sanitize_filename(supplier_name)\r\n+ asset_name_sanitized = self._sanitize_filename(current_asset_name)\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ metadata_file_path = final_dir / self.config.metadata_filename\r\n+ if final_dir.exists() and metadata_file_path.is_file():\r\n+ log.info(f\"Output directory and metadata found for asset '{asset_name_sanitized}' and overwrite is False. 
Skipping this asset.\")\r\n+ overall_status[\"skipped\"].append(current_asset_name)\r\n+ asset_skipped = True\r\n+ continue # Skip to the next asset in the loop\r\n+ elif self.overwrite:\r\n+ log.info(f\"Overwrite flag is set. Processing asset '{current_asset_name}' even if output exists.\")\r\n+\r\n+ # --- Process Individual Maps for this asset ---\r\n+ processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps = self._process_individual_maps(\r\n+ filtered_maps_list=filtered_classified_files_asset.get('maps', []),\r\n+ current_asset_metadata=current_asset_metadata, # Pass base metadata\r\n+ loaded_data_cache=loaded_data_cache # Pass cache\r\n+ )\r\n+ # Update current metadata with results\r\n+ current_asset_metadata[\"image_stats_1k\"] = image_stats_asset\r\n+ current_asset_metadata[\"aspect_ratio_change_string\"] = aspect_ratio_change_string_asset\r\n+ # Add newly ignored rough maps to the asset's specific ignored list\r\n+ if ignored_rough_maps:\r\n+ filtered_classified_files_asset['ignored'].extend(ignored_rough_maps)\r\n+ # Store map details (like source bit depth) collected during processing\r\n+ # This was previously stored in self.metadata[\"map_details\"]\r\n+ map_details_asset = {k: v for k, v in current_asset_metadata.pop(\"map_details\", {}).items() if k in processed_maps_details_asset}\r\n+\r\n+\r\n+ # --- Merge Maps from Source for this asset ---\r\n+ merged_maps_details_asset = self._merge_maps_from_source(\r\n+ processed_maps_details_asset=processed_maps_details_asset, # Still needed for source info lookup? Or pass classified files? 
Check impl.\r\n+ filtered_classified_files=filtered_classified_files_asset,\r\n+ current_asset_metadata=current_asset_metadata,\r\n+ loaded_data_cache=loaded_data_cache # Pass cache\r\n+ )\r\n+\r\n+ # --- Generate Metadata for this asset ---\r\n+ temp_metadata_path_asset = self._generate_metadata_file(\r\n+ current_asset_metadata=current_asset_metadata, # Pass the populated dict\r\n+ processed_maps_details_asset=processed_maps_details_asset,\r\n+ merged_maps_details_asset=merged_maps_details_asset,\r\n+ filtered_classified_files_asset=filtered_classified_files_asset,\r\n+ unmatched_files_paths=unmatched_files_paths, # Pass the list of unmatched files\r\n+ map_details_asset=map_details_asset # Pass the filtered map details\r\n+ )\r\n+\r\n+ # --- Organize Output Files for this asset ---\r\n+ self._organize_output_files(\r\n+ current_asset_name=current_asset_name,\r\n+ processed_maps_details_asset=processed_maps_details_asset,\r\n+ merged_maps_details_asset=merged_maps_details_asset,\r\n+ filtered_classified_files_asset=filtered_classified_files_asset,\r\n+ unmatched_files_paths=unmatched_files_paths, # Pass unmatched files for copying\r\n+ temp_metadata_path=temp_metadata_path_asset\r\n+ )\r\n+\r\n+ log.info(f\"--- Asset '{current_asset_name}' processed successfully. 
---\")\r\n+ overall_status[\"processed\"].append(current_asset_name)\r\n+ asset_processed = True\r\n+\r\n+ except Exception as asset_err:\r\n+ log.error(f\"--- Failed processing asset '{current_asset_name}': {asset_err} ---\", exc_info=True)\r\n+ overall_status[\"failed\"].append(current_asset_name)\r\n+ asset_failed = True\r\n+ # Continue to the next asset even if one fails\r\n+\r\n+ # --- Determine Final Consolidated Status ---\r\n+ # This logic remains the same, interpreting the overall_status dict\r\n+ final_status = \"failed\" # Default if nothing else matches\r\n+ if overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"processed\"\r\n+ elif overall_status[\"skipped\"] and not overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"skipped\"\r\n+ elif overall_status[\"processed\"] and overall_status[\"failed\"]:\r\n+ final_status = \"partial_success\" # Indicate some succeeded, some failed\r\n+ elif overall_status[\"processed\"] and overall_status[\"skipped\"] and not overall_status[\"failed\"]:\r\n+ final_status = \"processed\" # Consider processed+skipped as processed overall\r\n+ elif overall_status[\"skipped\"] and overall_status[\"failed\"] and not overall_status[\"processed\"]:\r\n+ final_status = \"failed\" # If only skips and fails, report as failed\r\n+ # Add any other combinations if needed\r\n+\r\n+ log.info(f\"Finished processing input '{self.input_path.name}'. Overall Status: {final_status}. 
Summary: {overall_status}\")\r\n+ # Return the detailed status dictionary instead of just a string\r\n+ # The wrapper function in main.py will interpret this\r\n+ return overall_status\r\n+\r\n+ except Exception as e:\r\n+ # Catch errors during initial setup (before asset loop)\r\n+ if not isinstance(e, (AssetProcessingError, ConfigurationError)):\r\n+ log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name} during setup: {e}\")\r\n+ if not isinstance(e, AssetProcessingError):\r\n+ raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n+ else:\r\n+ raise\r\n+ finally:\r\n+ # Ensure cleanup always happens\r\n+ self._cleanup_workspace()\r\n+\r\n+ def _setup_workspace(self):\r\n+ \"\"\"Creates a temporary directory for processing.\"\"\"\r\n+ try:\r\n+ self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n+ log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n+\r\n+ def _extract_input(self):\r\n+ \"\"\"Extracts ZIP or copies folder contents to the temporary workspace.\"\"\"\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n+\r\n+ log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n+ try:\r\n+ if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n+ log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ zip_ref.extractall(self.temp_dir)\r\n+ log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n+ elif self.input_path.is_dir():\r\n+ log.debug(f\"Copying directory contents: {self.input_path}\")\r\n+ for item in self.input_path.iterdir():\r\n+ destination = self.temp_dir / item.name\r\n+ if item.is_dir():\r\n+ # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n+ try:\r\n+ 
shutil.copytree(item, destination, dirs_exist_ok=True)\r\n+ except TypeError: # Fallback for older Python\r\n+ if not destination.exists():\r\n+ shutil.copytree(item, destination)\r\n+ else:\r\n+ log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n+\r\n+ else:\r\n+ shutil.copy2(item, destination)\r\n+ log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n+ except zipfile.BadZipFile:\r\n+ raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n+ except Exception as e:\r\n+ raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n+\r\n+ def _inventory_and_classify_files(self):\r\n+ \"\"\"\r\n+ Scans workspace, classifies files according to preset rules, handling\r\n+ 16-bit prioritization and multiple variants of the same base map type.\r\n+ \"\"\"\r\n+ if not self.temp_dir:\r\n+ raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n+\r\n+ log.info(\"Scanning and classifying files...\")\r\n+ log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n+ all_files_rel = []\r\n+ for root, _, files in os.walk(self.temp_dir):\r\n+ root_path = Path(root)\r\n+ for file in files:\r\n+ full_path = root_path / file\r\n+ relative_path = full_path.relative_to(self.temp_dir)\r\n+ all_files_rel.append(relative_path)\r\n+\r\n+ log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n+\r\n+ # --- Initialization ---\r\n+ processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n+ potential_map_candidates = [] # List to store potential map file info\r\n+ # Reset classified files (important if this method is ever called multiple times)\r\n+ self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n+\r\n+\r\n+ # --- Step 1: Identify Explicit 'Extra' Files ---\r\n+ 
log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n+ compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n+ log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path in processed_files: continue\r\n+ for compiled_regex in compiled_extra_regex:\r\n+ if compiled_regex.search(file_rel_path.name):\r\n+ log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n+ self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n+ processed_files.add(file_rel_path)\r\n+ log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n+ break # Stop checking extra patterns for this file\r\n+\r\n+ # --- Step 2: Identify Model Files ---\r\n+ log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n+ compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n+ log.debug(f\" Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path in processed_files: continue\r\n+ for compiled_regex in compiled_model_regex:\r\n+ if compiled_regex.search(file_rel_path.name):\r\n+ log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n+ self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n+ processed_files.add(file_rel_path)\r\n+ log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n+ break # Stop checking model patterns for this file\r\n+\r\n+ # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n+ log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n+ # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n+ 
compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n+\r\n+ for file_rel_path in all_files_rel:\r\n+ # Skip files already classified as Extra or Model\r\n+ if file_rel_path in processed_files:\r\n+ continue\r\n+\r\n+ file_stem = file_rel_path.stem\r\n+ match_found = False\r\n+\r\n+ # Iterate through base types and their associated regex tuples\r\n+ for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n+ if match_found: break # Stop checking types for this file once matched\r\n+\r\n+ # Get the original keywords list for the current rule index\r\n+ # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n+ original_rule = None\r\n+ # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n+ if regex_tuples:\r\n+ current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n+ if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n+ rule_candidate = self.config.map_type_mapping[current_rule_index]\r\n+ # Verify it's the correct rule by checking target_type\r\n+ if rule_candidate.get(\"target_type\") == base_map_type:\r\n+ original_rule = rule_candidate\r\n+ else:\r\n+ log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. Searching...\")\r\n+ # Fallback search if index doesn't match (shouldn't happen ideally)\r\n+ for idx, rule in enumerate(self.config.map_type_mapping):\r\n+ if rule.get(\"target_type\") == base_map_type:\r\n+ original_rule = rule\r\n+ log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n+ break\r\n+\r\n+ original_keywords_list = []\r\n+ if original_rule and 'keywords' in original_rule:\r\n+ original_keywords_list = original_rule['keywords']\r\n+ else:\r\n+ log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. 
Keyword indexing may fail.\")\r\n+\r\n+ for kw_regex, original_keyword, rule_index in regex_tuples:\r\n+ if kw_regex.search(file_stem):\r\n+ log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n+\r\n+ # Find the index of the matched keyword within its rule's list\r\n+ keyword_index_in_rule = -1 # Default if not found\r\n+ if original_keywords_list:\r\n+ try:\r\n+ # Use the original_keyword string directly\r\n+ keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n+ except ValueError:\r\n+ log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? {original_keywords_list}\")\r\n+ else:\r\n+ log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n+\r\n+ # Add candidate only if not already added\r\n+ if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n+ potential_map_candidates.append({\r\n+ 'source_path': file_rel_path,\r\n+ 'matched_keyword': original_keyword,\r\n+ 'base_map_type': base_map_type,\r\n+ 'preset_rule_index': rule_index,\r\n+ 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n+ 'is_16bit_source': False\r\n+ })\r\n+ else:\r\n+ log.warning(f\" '{file_rel_path}' was already added as a candidate? 
Skipping duplicate add.\")\r\n+\r\n+ match_found = True\r\n+ break # Stop checking regex tuples for this base_type once matched\r\n+\r\n+ log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n+\r\n+ # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n+ log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n+ compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n+ for file_rel_path in all_files_rel:\r\n+ # Skip if already processed or already identified as a candidate\r\n+ if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n+ continue\r\n+\r\n+ for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n+ log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n+ match = compiled_regex.search(file_rel_path.name) # Store result\r\n+ if match:\r\n+ log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n+ potential_map_candidates.append({\r\n+ 'source_path': file_rel_path,\r\n+ 'matched_keyword': 'N/A (16bit variant)', # Placeholder keyword\r\n+ 'base_map_type': base_type,\r\n+ 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n+ 'is_16bit_source': True # Mark as 16-bit immediately\r\n+ })\r\n+ log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n+ # Don't add to processed_files yet, let Step 4 handle filtering\r\n+ break # Stop checking bit depth patterns for this file\r\n+\r\n+ log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n+\r\n+ # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n+ log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n+ 
compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n+ candidates_to_keep = []\r\n+ candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n+\r\n+ # Mark 16-bit candidates\r\n+ for candidate in potential_map_candidates:\r\n+ base_type = candidate['base_map_type']\r\n+ # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n+ if base_type in compiled_bit_depth_regex:\r\n+ if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n+ candidate['is_16bit_source'] = True\r\n+ log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n+\r\n+\r\n+ # Identify base types that have a 16-bit version present\r\n+ prioritized_16bit_bases = {\r\n+ candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n+ }\r\n+ log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n+\r\n+ # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n+ for candidate in potential_map_candidates:\r\n+ if candidate['is_16bit_source']:\r\n+ candidates_to_keep.append(candidate)\r\n+ log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+ elif candidate['base_map_type'] not in prioritized_16bit_bases:\r\n+ candidates_to_keep.append(candidate)\r\n+ log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+ else:\r\n+ # This is an 8-bit candidate whose 16-bit counterpart exists\r\n+ candidates_to_ignore.append(candidate)\r\n+ log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n+\r\n+ # Add ignored 8-bit files to the main ignored list\r\n+ for ignored_candidate in candidates_to_ignore:\r\n+ self.classified_files[\"ignored\"].append({\r\n+ 'source_path': 
ignored_candidate['source_path'],\r\n+ 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n+ })\r\n+ processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n+\r\n+ log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n+\r\n+ # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n+ log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n+ # from collections import defaultdict # Moved import to top of file\r\n+ grouped_by_base_type = defaultdict(list)\r\n+ for candidate in candidates_to_keep:\r\n+ grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n+\r\n+ final_map_list = []\r\n+ for base_map_type, candidates in grouped_by_base_type.items():\r\n+ # --- DIAGNOSTIC LOGGING START ---\r\n+ candidate_paths_str = [str(c['source_path']) for c in candidates]\r\n+ log.debug(f\" [DIAGNOSIS] Processing base_map_type: '{base_map_type}'. Candidates before sort: {candidate_paths_str}\")\r\n+ # --- DIAGNOSTIC LOGGING END ---\r\n+ log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n+\r\n+ # --- NEW SORTING LOGIC ---\r\n+ # Sort candidates based on:\r\n+ # 1. The index of the rule object in the preset's map_type_mapping list.\r\n+ # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n+ # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n+ candidates.sort(key=lambda c: (\r\n+ c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n+ c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n+ str(c['source_path'])\r\n+ ))\r\n+ # --- END NEW SORTING LOGIC ---\r\n+\r\n+ # Removed diagnostic log\r\n+\r\n+ # Add sorted candidates to the final list, but without assigning the suffix yet.\r\n+ # Suffix assignment will happen per-asset later.\r\n+ for final_candidate in candidates: # Use the directly sorted list\r\n+ # Store the base map type for now.\r\n+ final_map_list.append({\r\n+ \"map_type\": base_map_type, # Store BASE type only\r\n+ \"source_path\": final_candidate[\"source_path\"],\r\n+ \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n+ \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n+ \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n+ })\r\n+ processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n+\r\n+ self.classified_files[\"maps\"] = final_map_list\r\n+\r\n+ # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n+ log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n+ remaining_count = 0\r\n+ for file_rel_path in all_files_rel:\r\n+ if file_rel_path not in processed_files:\r\n+ log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n+ self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})\r\n+ remaining_count += 1\r\n+ # No need to add to processed_files here, it's the final step\r\n+ log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n+\r\n+ # --- Final Summary ---\r\n+ # Note: self.metadata[\"source_files_in_extra\"] is now populated per-asset in _generate_metadata_file\r\n+ log.info(f\"File classification complete.\")\r\n+ 
log.debug(\"--- Final Classification Summary (v2) ---\")\r\n+ map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n+ model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n+ extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n+ ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n+ log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n+ log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n+ log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n+ log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n+ log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n+\r\n+\r\n+ def _determine_base_metadata(self) -> Tuple[List[str], Dict[Path, Optional[str]]]:\r\n+ \"\"\"\r\n+ Determines distinct asset base names within the input based on preset rules\r\n+ and maps each relevant source file to its determined base name.\r\n+\r\n+ Returns:\r\n+ Tuple[List[str], Dict[Path, Optional[str]]]:\r\n+ - A list of unique, sanitized base names found.\r\n+ - A dictionary mapping source file relative paths to their determined\r\n+ base name string (or None if no base name could be determined for that file).\r\n+ \"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ log.info(\"Determining distinct base names and file mapping...\")\r\n+\r\n+ # Combine map and model files for base name determination\r\n+ relevant_files = self.classified_files.get('maps', []) + self.classified_files.get('models', [])\r\n+ if not relevant_files:\r\n+ log.warning(\"No map or model files found to determine base name(s).\")\r\n+ # Fallback: Use input path name as a single asset\r\n+ 
input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ sanitized_input_name = self._sanitize_filename(input_name or \"UnknownInput\")\r\n+ # Map all files (maps, models, extra, ignored) to this fallback name\r\n+ all_files_paths = [f['source_path'] for cat in self.classified_files.values() for f in cat if 'source_path' in f]\r\n+ file_to_base_name_map = {f_path: sanitized_input_name for f_path in all_files_paths}\r\n+ log.info(f\"Using input path name '{sanitized_input_name}' as the single asset name.\")\r\n+ return [sanitized_input_name], file_to_base_name_map\r\n+\r\n+ # --- Determine Base Names from Files ---\r\n+ separator = self.config.source_naming_separator\r\n+ indices_dict = self.config.source_naming_indices\r\n+ base_index_raw = indices_dict.get('base_name')\r\n+ base_index = None\r\n+ if base_index_raw is not None:\r\n+ try:\r\n+ base_index = int(base_index_raw)\r\n+ except (ValueError, TypeError):\r\n+ log.warning(f\"Could not convert base_name index '{base_index_raw}' to integer. Base name determination might be inaccurate.\")\r\n+\r\n+ file_to_base_name_map: Dict[Path, Optional[str]] = {}\r\n+ potential_base_names_per_file: Dict[Path, str] = {} # Store potential name for each file path\r\n+\r\n+ if isinstance(base_index, int):\r\n+ log.debug(f\"Attempting base name extraction using separator '{separator}' and index {base_index}.\")\r\n+ for file_info in relevant_files:\r\n+ file_path = file_info['source_path']\r\n+ stem = file_path.stem\r\n+ parts = stem.split(separator)\r\n+ if len(parts) > base_index:\r\n+ extracted_name = parts[base_index]\r\n+ sanitized_name = self._sanitize_filename(extracted_name)\r\n+ if sanitized_name: # Ensure we don't add empty names\r\n+ potential_base_names_per_file[file_path] = sanitized_name\r\n+ log.debug(f\" File '{file_path.name}' -> Potential Base Name: '{sanitized_name}'\")\r\n+ else:\r\n+ log.debug(f\" File '{file_path.name}' -> Extracted empty name at index {base_index}. 
Marking as None.\")\r\n+ file_to_base_name_map[file_path] = None # Explicitly mark as None if extraction yields empty\r\n+ else:\r\n+ log.debug(f\" File '{file_path.name}' -> Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}. Marking as None.\")\r\n+ file_to_base_name_map[file_path] = None # Mark as None if index is invalid for this file\r\n+ else:\r\n+ log.warning(\"Base name index not configured or invalid. Cannot determine distinct assets based on index. Treating as single asset.\")\r\n+ # Fallback to common prefix if no valid index\r\n+ stems = [f['source_path'].stem for f in relevant_files]\r\n+ common_prefix_name = os.path.commonprefix(stems) if stems else \"\"\r\n+ sanitized_common_name = self._sanitize_filename(common_prefix_name or self.input_path.stem or \"UnknownAsset\")\r\n+ log.info(f\"Using common prefix '{sanitized_common_name}' as the single asset name.\")\r\n+ # Map all relevant files to this single name\r\n+ for file_info in relevant_files:\r\n+ potential_base_names_per_file[file_info['source_path']] = sanitized_common_name\r\n+\r\n+ # --- Consolidate Distinct Names and Final Mapping ---\r\n+ distinct_base_names_set = set(potential_base_names_per_file.values())\r\n+ distinct_base_names = sorted(list(distinct_base_names_set)) # Sort for consistent processing order\r\n+\r\n+ # Populate the final map, including files that didn't match the index rule (marked as None earlier)\r\n+ for file_info in relevant_files:\r\n+ file_path = file_info['source_path']\r\n+ if file_path not in file_to_base_name_map: # If not already marked as None\r\n+ file_to_base_name_map[file_path] = potential_base_names_per_file.get(file_path) # Assign determined name or None if somehow missed\r\n+\r\n+ # Add files from 'extra' and 'ignored' to the map, marking them as None for base name\r\n+ for category in ['extra', 'ignored']:\r\n+ for file_info in self.classified_files.get(category, []):\r\n+ file_path = file_info['source_path']\r\n+ if file_path not in 
file_to_base_name_map: # Avoid overwriting if somehow already mapped\r\n+ file_to_base_name_map[file_path] = None\r\n+ log.debug(f\" File '{file_path.name}' (Category: {category}) -> Marked as None (No Base Name).\")\r\n+\r\n+\r\n+ if not distinct_base_names:\r\n+ # This case should be rare due to fallbacks, but handle it.\r\n+ log.warning(\"No distinct base names could be determined. Using input name as fallback.\")\r\n+ input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ fallback_name = self._sanitize_filename(input_name or \"FallbackAsset\")\r\n+ distinct_base_names = [fallback_name]\r\n+ # Remap all files to this single fallback name\r\n+ file_to_base_name_map = {f_path: fallback_name for f_path in file_to_base_name_map.keys()}\r\n+\r\n+\r\n+ log.info(f\"Determined {len(distinct_base_names)} distinct asset base name(s): {distinct_base_names}\")\r\n+ log.debug(f\"File-to-BaseName Map ({len(file_to_base_name_map)} entries): { {str(k): v for k, v in file_to_base_name_map.items()} }\") # Log string paths for readability\r\n+\r\n+ return distinct_base_names, file_to_base_name_map\r\n+\r\n+ def _determine_single_asset_metadata(self, asset_base_name: str, filtered_classified_files: Dict[str, List[Dict]]) -> Dict[str, str]:\r\n+ \"\"\"\r\n+ Determines the asset_category and archetype for a single, specific asset\r\n+ based on its filtered list of classified files.\r\n+\r\n+ Args:\r\n+ asset_base_name: The determined base name for this specific asset.\r\n+ filtered_classified_files: A dictionary containing only the classified\r\n+ files (maps, models, etc.) 
belonging to this asset.\r\n+\r\n+ Returns:\r\n+ A dictionary containing {\"asset_category\": str, \"archetype\": str}.\r\n+ \"\"\"\r\n+ log.debug(f\"Determining category and archetype for asset: '{asset_base_name}'\")\r\n+ determined_category = self.config.default_asset_category # Start with default\r\n+ determined_archetype = \"Unknown\"\r\n+\r\n+ # --- Determine Asset Category ---\r\n+ if filtered_classified_files.get(\"models\"):\r\n+ determined_category = \"Asset\"\r\n+ log.debug(f\" Category set to 'Asset' for '{asset_base_name}' due to model file presence.\")\r\n+ else:\r\n+ # Check for Decal keywords only if not an Asset\r\n+ decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n+ found_decal = False\r\n+ # Check map names first for decal keywords\r\n+ candidate_files = [f['source_path'] for f in filtered_classified_files.get('maps', [])]\r\n+ # Fallback to checking extra files if no maps found for this asset\r\n+ if not candidate_files:\r\n+ candidate_files = [f['source_path'] for f in filtered_classified_files.get('extra', [])]\r\n+\r\n+ if decal_keywords:\r\n+ for file_path in candidate_files:\r\n+ # Check against the specific file's name within this asset's context\r\n+ for keyword in decal_keywords:\r\n+ if keyword.lower() in file_path.name.lower():\r\n+ determined_category = \"Decal\"\r\n+ found_decal = True; break\r\n+ if found_decal: break\r\n+ if found_decal: log.debug(f\" Category set to 'Decal' for '{asset_base_name}' due to keyword match.\")\r\n+ # If not Asset or Decal, it remains the default (e.g., \"Texture\")\r\n+\r\n+ log.debug(f\" Determined Category for '{asset_base_name}': {determined_category}\")\r\n+\r\n+ # --- Determine Archetype (Usage) ---\r\n+ archetype_rules = self.config.archetype_rules\r\n+ # Use stems from maps and models belonging *only* to this asset\r\n+ check_stems = [f['source_path'].stem.lower() for f in filtered_classified_files.get('maps', [])]\r\n+ 
check_stems.extend([f['source_path'].stem.lower() for f in filtered_classified_files.get('models', [])])\r\n+ # Also check the determined base name itself\r\n+ check_stems.append(asset_base_name.lower())\r\n+\r\n+ if check_stems:\r\n+ best_match_archetype = \"Unknown\"\r\n+ # Using simple \"first match wins\" logic as before\r\n+ for rule in archetype_rules:\r\n+ if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n+ arch_name, rules_dict = rule\r\n+ match_any = rules_dict.get(\"match_any\", [])\r\n+ matched_any_keyword = False\r\n+ if match_any:\r\n+ for keyword in match_any:\r\n+ kw_lower = keyword.lower()\r\n+ for stem in check_stems:\r\n+ if kw_lower in stem: # Simple substring check\r\n+ matched_any_keyword = True\r\n+ break # Found a match for this keyword\r\n+ if matched_any_keyword: break # Found a match for this rule's keywords\r\n+\r\n+ if matched_any_keyword:\r\n+ best_match_archetype = arch_name\r\n+ log.debug(f\" Archetype match '{arch_name}' for '{asset_base_name}' based on keywords: {match_any}\")\r\n+ break # First rule match wins\r\n+\r\n+ determined_archetype = best_match_archetype\r\n+\r\n+ log.debug(f\" Determined Archetype for '{asset_base_name}': {determined_archetype}\")\r\n+\r\n+ return {\"asset_category\": determined_category, \"archetype\": determined_archetype}\r\n+\r\n+\r\n+ def _process_individual_maps(self, filtered_maps_list: List[Dict], current_asset_metadata: Dict, loaded_data_cache: dict) -> Tuple[Dict[str, Dict[str, Dict]], Dict[str, Dict], str, List[Dict]]:\r\n+ \"\"\"\r\n+ Processes, resizes, and saves classified map files for a specific asset\r\n+ that are NOT used as inputs for merge rules. 
Uses helper functions.\r\n+\r\n+ Args:\r\n+ filtered_maps_list: List of map dictionaries belonging to the current asset.\r\n+ current_asset_metadata: Metadata dictionary for the current asset.\r\n+ loaded_data_cache: Cache dictionary for loaded/resized source data.\r\n+\r\n+ Returns:\r\n+ Tuple containing:\r\n+ - processed_maps_details_asset: Dict mapping map_type to resolution details.\r\n+ - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n+ - aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n+ - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n+ \"\"\"\r\n+ if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n+ asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n+ log.info(f\"Processing individual map files for asset '{asset_name}'...\")\r\n+\r\n+ # Initialize results specific to this asset\r\n+ processed_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n+ image_stats_asset: Dict[str, Dict] = {}\r\n+ map_details_asset: Dict[str, Dict] = {} # Store details like source bit depth, gloss inversion\r\n+ aspect_ratio_change_string_asset: str = \"N/A\"\r\n+ ignored_rough_maps: List[Dict] = [] # Store ignored native rough maps\r\n+\r\n+ # --- Settings retrieval ---\r\n+ resolutions = self.config.image_resolutions\r\n+ stats_res_key = self.config.calculate_stats_resolution\r\n+ stats_target_dim = resolutions.get(stats_res_key)\r\n+ if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. 
Stats skipped for '{asset_name}'.\")\r\n+ gloss_keywords = self.config.source_glossiness_keywords\r\n+ # target_pattern = self.config.target_filename_pattern # Not needed here, handled by _save_image\r\n+ base_name = asset_name # Use the asset name passed in\r\n+\r\n+ # --- Pre-process Glossiness -> Roughness ---\r\n+ # This logic needs to stay here to determine which ROUGH source to use\r\n+ # and potentially ignore the native one.\r\n+ derived_from_gloss_flag = {}\r\n+ gloss_map_info_for_rough, native_rough_map_info = None, None\r\n+ for map_info in filtered_maps_list:\r\n+ # Use the final assigned map_type (e.g., ROUGH, ROUGH-1)\r\n+ if map_info['map_type'].startswith('ROUGH'):\r\n+ is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n+ if is_gloss:\r\n+ # If multiple gloss sources map to ROUGH variants, prioritize the first one?\r\n+ # For now, assume only one gloss source maps to ROUGH variants.\r\n+ if gloss_map_info_for_rough is None: gloss_map_info_for_rough = map_info\r\n+ else:\r\n+ # If multiple native rough sources map to ROUGH variants, prioritize the first one?\r\n+ if native_rough_map_info is None: native_rough_map_info = map_info\r\n+\r\n+ rough_source_to_use_info = None # Store the map_info dict of the source to use\r\n+ if gloss_map_info_for_rough:\r\n+ rough_source_to_use_info = gloss_map_info_for_rough\r\n+ derived_from_gloss_flag['ROUGH'] = True # Apply to all ROUGH variants if derived from gloss\r\n+ if native_rough_map_info:\r\n+ log.warning(f\"Asset '{asset_name}': Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found for ROUGH maps. 
Prioritizing Gloss.\")\r\n+ ignored_rough_maps.append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n+ elif native_rough_map_info:\r\n+ rough_source_to_use_info = native_rough_map_info\r\n+ derived_from_gloss_flag['ROUGH'] = False\r\n+\r\n+ # --- Identify maps used in merge rules ---\r\n+ merge_input_map_types = set()\r\n+ for rule in self.config.map_merge_rules:\r\n+ inputs_mapping = rule.get(\"inputs\", {})\r\n+ for source_map_type in inputs_mapping.values():\r\n+ # Use the base type for checking against merge rules\r\n+ base_type = _get_base_map_type(source_map_type)\r\n+ merge_input_map_types.add(base_type)\r\n+ log.debug(f\"Map types used as input for merge rules: {merge_input_map_types}\")\r\n+\r\n+ # --- Filter maps to process individually ---\r\n+ maps_to_process_individually = []\r\n+ for map_info in filtered_maps_list:\r\n+ base_map_type = _get_base_map_type(map_info['map_type'])\r\n+ # Skip if this base map type is used in *any* merge rule input\r\n+ if base_map_type in merge_input_map_types:\r\n+ log.debug(f\"Skipping individual processing for {map_info['map_type']} ({map_info['source_path']}) as its base type '{base_map_type}' is used in merge rules.\")\r\n+ continue\r\n+ # Skip native rough map if gloss was prioritized\r\n+ if map_info['map_type'].startswith('ROUGH') and any(ignored['source_path'] == map_info['source_path'] for ignored in ignored_rough_maps):\r\n+ log.debug(f\"Skipping individual processing of native rough map '{map_info['source_path']}' as gloss version was prioritized.\")\r\n+ continue\r\n+ maps_to_process_individually.append(map_info)\r\n+\r\n+ log.info(f\"Processing {len(maps_to_process_individually)} maps individually for asset '{asset_name}'...\")\r\n+\r\n+ # --- Aspect Ratio Calculation Setup ---\r\n+ # We need original dimensions once per asset for aspect ratio.\r\n+ # Find the first map to process to get its dimensions.\r\n+ first_map_info_for_aspect = next((m for m in 
maps_to_process_individually), None)\r\n+ orig_w_aspect, orig_h_aspect = None, None\r\n+ if first_map_info_for_aspect:\r\n+ # Load just to get dimensions (might hit cache if used later)\r\n+ # Use the first resolution key as a representative target for loading\r\n+ first_res_key = next(iter(resolutions))\r\n+ temp_img_for_dims, _ = self._load_and_transform_source(\r\n+ first_map_info_for_aspect['source_path'],\r\n+ first_map_info_for_aspect['map_type'],\r\n+ first_res_key,\r\n+ False, # is_gloss_source doesn't matter for dims\r\n+ loaded_data_cache # Use the main cache\r\n+ )\r\n+ if temp_img_for_dims is not None:\r\n+ orig_h_aspect, orig_w_aspect = temp_img_for_dims.shape[:2]\r\n+ log.debug(f\"Got original dimensions ({orig_w_aspect}x{orig_h_aspect}) for aspect ratio calculation from {first_map_info_for_aspect['source_path']}\")\r\n+ else:\r\n+ log.warning(f\"Could not load image {first_map_info_for_aspect['source_path']} to get original dimensions for aspect ratio.\")\r\n+ else:\r\n+ log.warning(\"No maps found to process individually, cannot calculate aspect ratio string.\")\r\n+\r\n+\r\n+ # --- Process Each Individual Map ---\r\n+ for map_info in maps_to_process_individually:\r\n+ map_type = map_info['map_type'] # Final type (e.g., COL-1)\r\n+ source_path_rel = map_info['source_path']\r\n+ original_extension = map_info.get('original_extension', '.png')\r\n+ # Determine if this specific map type should use gloss inversion logic\r\n+ # If ROUGH-1, ROUGH-2 etc derive from gloss, they all use inversion\r\n+ is_gloss_source_for_this_map = map_type.startswith('ROUGH') and derived_from_gloss_flag.get('ROUGH', False)\r\n+\r\n+ log.info(f\"-- Asset '{asset_name}': Processing Individual Map: {map_type} (Source: {source_path_rel.name}) --\")\r\n+ current_map_details = {\"derived_from_gloss\": is_gloss_source_for_this_map}\r\n+ source_bit_depth_found = None # Track if we've found the bit depth for this map type\r\n+\r\n+ try:\r\n+ # --- Loop through target resolutions 
---\r\n+ for res_key, target_dim_px in resolutions.items():\r\n+ log.debug(f\"Processing {map_type} for resolution: {res_key}...\")\r\n+\r\n+ # --- 1. Load and Transform Source (using helper + cache) ---\r\n+ img_resized, source_dtype = self._load_and_transform_source(\r\n+ source_path_rel=source_path_rel,\r\n+ map_type=map_type, # Pass the specific map type (e.g., ROUGH-1)\r\n+ target_resolution_key=res_key,\r\n+ is_gloss_source=is_gloss_source_for_this_map,\r\n+ cache=loaded_data_cache\r\n+ )\r\n+\r\n+ if img_resized is None:\r\n+ log.warning(f\"Failed to load/transform source {source_path_rel} for {res_key}. Skipping resolution.\")\r\n+ continue # Skip this resolution\r\n+\r\n+ # Store source bit depth once found\r\n+ if source_dtype is not None and source_bit_depth_found is None:\r\n+ source_bit_depth_found = 16 if source_dtype == np.uint16 else (8 if source_dtype == np.uint8 else 8) # Default non-uint to 8\r\n+ current_map_details[\"source_bit_depth\"] = source_bit_depth_found\r\n+ log.debug(f\"Stored source bit depth for {map_type}: {source_bit_depth_found}\")\r\n+\r\n+ # --- 2. Calculate Stats (if applicable) ---\r\n+ if res_key == stats_res_key and stats_target_dim:\r\n+ log.debug(f\"Calculating stats for {map_type} using {res_key} image...\")\r\n+ stats = _calculate_image_stats(img_resized)\r\n+ if stats: image_stats_asset[map_type] = stats\r\n+ else: log.warning(f\"Stats calculation failed for {map_type} at {res_key}.\")\r\n+\r\n+ # --- 3. 
Calculate Aspect Ratio Change String (once per asset, using pre-calculated dims) ---\r\n+ if aspect_ratio_change_string_asset == \"N/A\" and orig_w_aspect is not None and orig_h_aspect is not None:\r\n+ target_w_aspect, target_h_aspect = img_resized.shape[1], img_resized.shape[0] # Use current resized dims\r\n+ try:\r\n+ aspect_string = self._normalize_aspect_ratio_change(orig_w_aspect, orig_h_aspect, target_w_aspect, target_h_aspect)\r\n+ aspect_ratio_change_string_asset = aspect_string\r\n+ log.debug(f\"Stored aspect ratio change string using {res_key}: '{aspect_string}'\")\r\n+ except Exception as aspect_err:\r\n+ log.error(f\"Failed to calculate aspect ratio change string using {res_key}: {aspect_err}\", exc_info=True)\r\n+ aspect_ratio_change_string_asset = \"Error\"\r\n+ elif aspect_ratio_change_string_asset == \"N/A\":\r\n+ # This case happens if we couldn't get original dims\r\n+ aspect_ratio_change_string_asset = \"Unknown\" # Set to unknown instead of recalculating\r\n+\r\n+\r\n+ # --- 4. Save Image (using helper) ---\r\n+ source_info = {\r\n+ 'original_extension': original_extension,\r\n+ 'source_bit_depth': source_bit_depth_found or 8, # Use found depth or default\r\n+ 'involved_extensions': {original_extension} # Only self for individual maps\r\n+ }\r\n+ bit_depth_rule = self.config.get_bit_depth_rule(map_type) # Get rule for this specific map type\r\n+\r\n+ save_result = self._save_image(\r\n+ image_data=img_resized,\r\n+ map_type=map_type,\r\n+ resolution_key=res_key,\r\n+ asset_base_name=base_name,\r\n+ source_info=source_info,\r\n+ output_bit_depth_rule=bit_depth_rule,\r\n+ temp_dir=self.temp_dir\r\n+ )\r\n+\r\n+ # --- 5. 
    def _merge_maps_from_source(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict, loaded_data_cache: dict) -> Dict[str, Dict[str, Dict]]:
        """
        Merges channels from different SOURCE maps for a specific asset based on rules
        in configuration, using helper functions for loading and saving.

        Each rule in ``self.config.map_merge_rules`` describes an output map type,
        a channel->input-map mapping (e.g. {"R": "AO", "G": "ROUGH", "B": "METAL"}),
        optional per-channel defaults, and an output bit-depth policy.

        Args:
            processed_maps_details_asset: Details of processed maps (used to find common resolutions).
            filtered_classified_files: Classified files dictionary filtered for this asset (used to find source paths).
            current_asset_metadata: Metadata dictionary for the current asset.
            loaded_data_cache: Cache dictionary for loaded/resized source data.

        Returns:
            Dict[str, Dict[str, Dict]]: Details of the merged maps created for this asset,
            keyed by output map type then resolution key; save failures are recorded
            under 'error_<res>' keys instead of raising.

        Raises:
            AssetProcessingError: If the temporary workspace has not been set up.
        """
        if not self.temp_dir: raise AssetProcessingError("Workspace not setup.")
        asset_name = current_asset_metadata.get("asset_name", "UnknownAsset")
        gloss_keywords = self.config.source_glossiness_keywords # Get gloss keywords

        merge_rules = self.config.map_merge_rules
        log.info(f"Asset '{asset_name}': Applying {len(merge_rules)} map merging rule(s) from source...")

        # Initialize results for this asset
        merged_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)

        for rule_index, rule in enumerate(merge_rules):
            output_map_type = rule.get("output_map_type")
            inputs_mapping = rule.get("inputs") # e.g., {"R": "AO", "G": "ROUGH", "B": "METAL"}
            defaults = rule.get("defaults", {})
            rule_bit_depth = rule.get("output_bit_depth", "respect_inputs")

            if not output_map_type or not inputs_mapping:
                log.warning(f"Asset '{asset_name}': Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs'. Rule: {rule}")
                continue

            log.info(f"-- Asset '{asset_name}': Applying merge rule for '{output_map_type}' --")

            # --- Find required SOURCE files and their details for this asset ---
            required_input_sources = {} # map_type -> {'source_path': Path, 'original_extension': str, 'is_gloss_source': bool}
            possible_to_find_sources = True
            for input_type in set(inputs_mapping.values()): # e.g., {"AO", "ROUGH", "METAL"}
                found_source_for_type = False
                # Search in the filtered classified maps for this asset
                for classified_map in filtered_classified_files.get("maps", []):
                    # Check if the classified map's type matches the required input type.
                    # startswith handles variants (e.g., ROUGH-1 should match ROUGH).
                    if classified_map['map_type'].startswith(input_type):
                        source_path_rel = classified_map.get('source_path')
                        if not source_path_rel: continue # Skip if path is missing

                        # Determine if this source is gloss (only relevant if input_type is ROUGH)
                        is_gloss = False
                        if input_type == 'ROUGH':
                            is_gloss = any(kw.lower() in source_path_rel.stem.lower() for kw in gloss_keywords)
                            # Prioritize gloss source if both exist (logic from _process_individual_maps)
                            native_rough_exists = any(m['map_type'].startswith('ROUGH') and not any(gk.lower() in m['source_path'].stem.lower() for gk in gloss_keywords) for m in filtered_classified_files.get("maps", []))
                            if is_gloss and native_rough_exists:
                                log.debug(f"Merge input '{input_type}': Prioritizing gloss source '{source_path_rel}' over native rough.")
                            elif not is_gloss and native_rough_exists and any(m['map_type'].startswith('ROUGH') and any(gk.lower() in m['source_path'].stem.lower() for gk in gloss_keywords) for m in filtered_classified_files.get("maps", [])):
                                log.debug(f"Merge input '{input_type}': Skipping native rough source '{source_path_rel}' because gloss source exists.")
                                continue # Skip this native rough source

                        required_input_sources[input_type] = {
                            'source_path': source_path_rel,
                            'original_extension': classified_map.get('original_extension', '.png'),
                            'is_gloss_source': is_gloss
                        }
                        found_source_for_type = True
                        log.debug(f"Found source for merge input '{input_type}': {source_path_rel} (Gloss: {is_gloss})")
                        break # Found the first matching source for this input type
                if not found_source_for_type:
                    log.warning(f"Asset '{asset_name}': Required source file for input map type '{input_type}' not found in classified files. Cannot perform merge for '{output_map_type}'.")
                    possible_to_find_sources = False
                    break

            if not possible_to_find_sources:
                continue # Skip this merge rule

            # --- Determine common resolutions based on *processed* maps (as a proxy for available sizes) ---
            # This assumes _process_individual_maps ran first and populated processed_maps_details_asset
            possible_resolutions_per_input = []
            for input_type in set(inputs_mapping.values()):
                if input_type in processed_maps_details_asset:
                    res_keys = {res for res, details in processed_maps_details_asset[input_type].items() if isinstance(details, dict) and 'error' not in details}
                    if not res_keys:
                        log.warning(f"Asset '{asset_name}': Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions (needed for size check).")
                        possible_resolutions_per_input = []
                        break
                    possible_resolutions_per_input.append(res_keys)
                else:
                    # This case might happen if the input map is *only* used for merging.
                    # We need a way to determine available resolutions without relying on prior processing.
                    # For now, we'll rely on the check above ensuring the source exists.
                    # We'll load the source at *all* target resolutions and let _load_and_transform_source handle skipping if upscale is needed.
                    log.debug(f"Input map type '{input_type}' for merge rule '{output_map_type}' might not have been processed individually. Will attempt loading source for all target resolutions.")
                    # Add all configured resolutions as possibilities for this input
                    possible_resolutions_per_input.append(set(self.config.image_resolutions.keys()))


            if not possible_resolutions_per_input:
                log.warning(f"Asset '{asset_name}': Cannot determine common resolutions for '{output_map_type}'. Skipping rule.")
                continue

            common_resolutions = set.intersection(*possible_resolutions_per_input)

            if not common_resolutions:
                log.warning(f"Asset '{asset_name}': No common resolutions found among required inputs {set(inputs_mapping.values())} for merge rule '{output_map_type}'. Skipping rule.")
                continue
            log.debug(f"Asset '{asset_name}': Common resolutions for '{output_map_type}': {common_resolutions}")

            # --- Loop through common resolutions ---
            res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}
            if not res_order:
                log.warning(f"Asset '{asset_name}': Common resolutions {common_resolutions} do not match config. Skipping merge for '{output_map_type}'.")
                continue

            # Process largest resolution first (descending by configured pixel size)
            sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)
            base_name = asset_name # Use current asset's name

            for current_res_key in sorted_res_keys:
                log.debug(f"Asset '{asset_name}': Merging '{output_map_type}' for resolution: {current_res_key}")
                try:
                    loaded_inputs_data = {} # map_type -> loaded numpy array
                    source_info_for_save = {'involved_extensions': set(), 'max_input_bit_depth': 8}

                    # --- Load required SOURCE maps using helper ---
                    possible_to_load = True
                    target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B']

                    for map_type in set(inputs_mapping.values()): # e.g., {"AO", "ROUGH", "METAL"}
                        source_details = required_input_sources.get(map_type)
                        if not source_details:
                            log.error(f"Internal Error: Source details missing for '{map_type}' during merge load.")
                            possible_to_load = False; break

                        source_path_rel = source_details['source_path']
                        is_gloss = source_details['is_gloss_source']
                        original_ext = source_details['original_extension']
                        source_info_for_save['involved_extensions'].add(original_ext)

                        log.debug(f"Loading source '{source_path_rel}' for merge input '{map_type}' at {current_res_key} (Gloss: {is_gloss})")
                        img_resized, source_dtype = self._load_and_transform_source(
                            source_path_rel=source_path_rel,
                            map_type=map_type, # Pass the base map type (e.g., ROUGH)
                            target_resolution_key=current_res_key,
                            is_gloss_source=is_gloss,
                            cache=loaded_data_cache
                        )

                        if img_resized is None:
                            log.warning(f"Asset '{asset_name}': Failed to load/transform source '{source_path_rel}' for merge input '{map_type}' at {current_res_key}. Skipping resolution.")
                            possible_to_load = False; break

                        loaded_inputs_data[map_type] = img_resized

                        # Track max source bit depth
                        if source_dtype == np.uint16:
                            source_info_for_save['max_input_bit_depth'] = max(source_info_for_save['max_input_bit_depth'], 16)
                        # Add other dtype checks if needed (e.g., float32 -> 16?)

                    if not possible_to_load: continue

                    # --- Determine dimensions ---
                    # All loaded inputs should have the same dimensions for this resolution
                    first_map_type = next(iter(loaded_inputs_data))
                    h, w = loaded_inputs_data[first_map_type].shape[:2]
                    num_target_channels = len(target_channels)

                    # --- Prepare and Merge Channels ---
                    merged_channels_float32 = []
                    for target_channel in target_channels: # e.g., 'R', 'G', 'B'
                        source_map_type = inputs_mapping.get(target_channel) # e.g., "AO", "ROUGH", "METAL"
                        channel_data_float32 = None

                        if source_map_type and source_map_type in loaded_inputs_data:
                            img_input = loaded_inputs_data[source_map_type] # Get the loaded NumPy array

                            # Ensure input is float32 0-1 range for merging
                            if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0
                            elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0
                            elif img_input.dtype == np.float16: img_float = img_input.astype(np.float32) # Assume float16 is 0-1
                            else: img_float = img_input.astype(np.float32) # Assume other floats are 0-1

                            num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1

                            # Extract the correct channel
                            if num_source_channels >= 3:
                                if target_channel == 'R': channel_data_float32 = img_float[:, :, 0]
                                elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]
                                elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2]
                                elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]
                                else: log.warning(f"Target channel '{target_channel}' invalid for 3/4 channel source '{source_map_type}'.")
                            elif num_source_channels == 1 or len(img_float.shape) == 2:
                                # If source is grayscale, use it for R, G, B, or A target channels
                                channel_data_float32 = img_float.reshape(h, w)
                            else:
                                log.warning(f"Unexpected shape {img_float.shape} for source '{source_map_type}'.")

                        # Apply default if channel data couldn't be extracted
                        if channel_data_float32 is None:
                            default_val = defaults.get(target_channel)
                            if default_val is None:
                                raise AssetProcessingError(f"Missing input/default for target channel '{target_channel}' in merge rule '{output_map_type}'.")
                            log.debug(f"Using default value {default_val} for target channel '{target_channel}' in '{output_map_type}'.")
                            channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)

                        merged_channels_float32.append(channel_data_float32)

                    if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels:
                        raise AssetProcessingError(f"Channel count mismatch during merge for '{output_map_type}'. Expected {num_target_channels}, got {len(merged_channels_float32)}.")

                    merged_image_float32 = cv2.merge(merged_channels_float32)
                    log.debug(f"Merged channels for '{output_map_type}' ({current_res_key}). Result shape: {merged_image_float32.shape}, dtype: {merged_image_float32.dtype}")

                    # --- Save Merged Map using Helper ---
                    save_result = self._save_image(
                        image_data=merged_image_float32, # Pass the merged float32 data
                        map_type=output_map_type,
                        resolution_key=current_res_key,
                        asset_base_name=base_name,
                        source_info=source_info_for_save, # Pass collected source info
                        output_bit_depth_rule=rule_bit_depth, # Pass the rule's requirement
                        temp_dir=self.temp_dir
                    )

                    # --- Record details locally ---
                    if save_result:
                        merged_maps_details_asset[output_map_type][current_res_key] = save_result
                    else:
                        log.error(f"Asset '{asset_name}': Failed to save merged map '{output_map_type}' at resolution '{current_res_key}'.")
                        merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = "Save failed via helper"


                except Exception as merge_res_err:
                    log.error(f"Asset '{asset_name}': Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}", exc_info=True)
                    # Store error locally for this asset
                    merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)

        log.info(f"Asset '{asset_name}': Finished applying map merging rules.")
        # Return the details for this asset
        return merged_maps_details_asset
    def _generate_metadata_file(self, current_asset_metadata: Dict, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], map_details_asset: Dict[str, Dict]) -> Path:
        """
        Gathers metadata for a specific asset and writes it to a temporary JSON file.

        Args:
            current_asset_metadata: Base metadata for this asset (name, category, archetype, etc.).
            processed_maps_details_asset: Details of processed maps for this asset.
            merged_maps_details_asset: Details of merged maps for this asset.
            filtered_classified_files_asset: Classified files belonging only to this asset.
            unmatched_files_paths: List of relative paths for files not matched to any base name.
            map_details_asset: Dictionary containing details like source bit depth, gloss inversion per map type.

        Returns:
            Path: The path to the generated temporary metadata file.

        Raises:
            AssetProcessingError: If the workspace is not set up or the JSON file cannot be written.
        """
        if not self.temp_dir: raise AssetProcessingError("Workspace not setup.")
        asset_name = current_asset_metadata.get("asset_name")
        if not asset_name or asset_name == "UnknownAssetName":
            log.warning("Asset name unknown during metadata generation, file may be incomplete or incorrectly named.")
            asset_name = "UnknownAsset_Metadata" # Fallback for filename

        log.info(f"Generating metadata file for asset '{asset_name}'...")
        # Start with the base metadata passed in for this asset
        final_metadata = current_asset_metadata.copy()

        # Populate map details from the specific asset's processing results.
        # Resolution keys whose details dict contains 'error' are excluded.
        final_metadata["processed_map_resolutions"] = {}
        for map_type, res_dict in processed_maps_details_asset.items():
            keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]
            if keys: final_metadata["processed_map_resolutions"][map_type] = sorted(keys)

        final_metadata["merged_map_resolutions"] = {}
        for map_type, res_dict in merged_maps_details_asset.items():
            keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]
            if keys: final_metadata["merged_map_resolutions"][map_type] = sorted(keys)

        # Determine maps present based on successful processing for this asset.
        # NOTE(review): this lists every map type key, including ones whose entries
        # all errored out — confirm whether error-only types should be excluded.
        final_metadata["maps_present"] = sorted(list(processed_maps_details_asset.keys()))
        final_metadata["merged_maps"] = sorted(list(merged_maps_details_asset.keys()))

        # Determine shader features based on this asset's maps
        features = set()
        for map_type, details in map_details_asset.items(): # Use map_details_asset passed in
            if map_type in ["SSS", "FUZZ", "MASK"]: features.add(map_type)
            if details.get("derived_from_gloss"): features.add("InvertedGloss")
            res_details = processed_maps_details_asset.get(map_type, {})
            if any(res_info.get("bit_depth") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f"16bit_{map_type}")
        final_metadata["shader_features"] = sorted(list(features))

        # Determine source files in this asset's Extra folder
        # Includes:
        # - Files originally classified as 'Extra' or 'Unrecognised' belonging to this asset.
        # - Files originally classified as 'Ignored' belonging to this asset.
        # - All 'unmatched' files (belonging to no specific asset).
        source_files_in_extra_set = set()
        for category in ['extra', 'ignored']:
            for file_info in filtered_classified_files_asset.get(category, []):
                source_files_in_extra_set.add(str(file_info['source_path']))
        # Add all unmatched files
        for file_path in unmatched_files_paths:
            source_files_in_extra_set.add(str(file_path))
        final_metadata["source_files_in_extra"] = sorted(list(source_files_in_extra_set))

        # Add image stats and map details specific to this asset
        final_metadata["image_stats_1k"] = current_asset_metadata.get("image_stats_1k", {}) # Get from passed metadata
        final_metadata["map_details"] = map_details_asset # Use map_details_asset passed in
        final_metadata["aspect_ratio_change_string"] = current_asset_metadata.get("aspect_ratio_change_string", "N/A") # Get from passed metadata


        # Add processing info
        final_metadata["_processing_info"] = {
            "preset_used": self.config.preset_name,
            "timestamp_utc": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "input_source": str(self.input_path.name), # Add original input source
        }

        # Sort lists just before writing (defensive; most were already sorted above)
        for key in ["maps_present", "merged_maps", "shader_features", "source_files_in_extra"]:
            if key in final_metadata and isinstance(final_metadata[key], list): final_metadata[key].sort()

        # Use asset name in temporary filename to avoid conflicts
        metadata_filename = f"{asset_name}_{self.config.metadata_filename}"
        output_path = self.temp_dir / metadata_filename
        log.debug(f"Writing metadata for asset '{asset_name}' to temporary file: {output_path}")
        try:
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)
            log.info(f"Metadata file '{metadata_filename}' generated successfully for asset '{asset_name}'.")
            return output_path # Return the path to the temporary file
        except Exception as e:
            raise AssetProcessingError(f"Failed to write metadata file {output_path} for asset '{asset_name}': {e}") from e
    def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):
        """
        Calculates the aspect ratio change string (e.g., "EVEN", "X133") based on original prototype logic.

        The result encodes which axis was stretched relative to the other:
        "EVEN" when the ratio is unchanged, "X<n>"/"Y<n>" when only one axis
        changed relative to the other, and "X<n>Y<n>" when both did. The
        decimal point is stripped from the rounded factors (1.33 -> "133").

        Returns the string representation, or "InvalidInput"/"InvalidResize"
        for non-positive dimensions.
        """
        if original_width <= 0 or original_height <= 0:
            log.warning("Cannot calculate aspect ratio change with zero original dimensions.")
            return "InvalidInput"

        # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)
        if resized_width <= 0 or resized_height <= 0:
            log.warning("Cannot calculate aspect ratio change with zero resized dimensions.")
            return "InvalidResize"

        # Original logic from user feedback
        width_change_percentage = ((resized_width - original_width) / original_width) * 100
        height_change_percentage = ((resized_height - original_height) / original_height) * 100

        normalized_width_change = width_change_percentage / 100
        normalized_height_change = height_change_percentage / 100

        # Shift so "unchanged" maps to 1.0, then clamp each axis to [0, 2]
        normalized_width_change = min(max(normalized_width_change + 1, 0), 2)
        normalized_height_change = min(max(normalized_height_change + 1, 0), 2)

        # Handle potential zero division if one dimension change is exactly -100% (normalized to 0)
        # If both are 0, aspect ratio is maintained. If one is 0, the other dominates.
        if normalized_width_change == 0 and normalized_height_change == 0:
            closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1
        elif normalized_width_change == 0:
            closest_value_to_one = abs(normalized_height_change)
        elif normalized_height_change == 0:
            closest_value_to_one = abs(normalized_width_change)
        else:
            closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))

        # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0
        epsilon = 1e-9
        scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one

        # Rescale so the less-changed axis lands on 1.0 and the other expresses
        # the relative stretch.
        scaled_normalized_width_change = scale_factor * normalized_width_change
        scaled_normalized_height_change = scale_factor * normalized_height_change

        output_width = round(scaled_normalized_width_change, decimals)
        output_height = round(scaled_normalized_height_change, decimals)

        # Convert to int if exactly 1.0 after rounding
        if abs(output_width - 1.0) < epsilon: output_width = 1
        if abs(output_height - 1.0) < epsilon: output_height = 1

        # Determine output string.
        # NOTE(review): a square original always yields "EVEN" here even if the
        # resized image is not square — confirm this is the intended prototype behavior.
        if original_width == original_height or abs(output_width - output_height) < epsilon:
            output = "EVEN"
        elif output_width != 1 and output_height == 1:
            output = f"X{str(output_width).replace('.', '')}"
        elif output_height != 1 and output_width == 1:
            output = f"Y{str(output_height).replace('.', '')}"
        else:
            # Both changed relative to each other
            output = f"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}"

        log.debug(f"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'")
        return output
1:\r\n+ output = f\"X{str(output_width).replace('.', '')}\"\r\n+ elif output_height != 1 and output_width == 1:\r\n+ output = f\"Y{str(output_height).replace('.', '')}\"\r\n+ else:\r\n+ # Both changed relative to each other\r\n+ output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n+\r\n+ log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n+ return output\r\n+\r\n+ def _sanitize_filename(self, name: str) -> str:\r\n+ \"\"\"Removes or replaces characters invalid for filenames/directory names.\"\"\"\r\n+ # ... (Implementation from Response #51) ...\r\n+ if not isinstance(name, str): name = str(name)\r\n+ name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n+ name = re.sub(r'_+', '_', name)\r\n+ name = name.strip('_')\r\n+ if not name: name = \"invalid_name\"\r\n+ return name\r\n+\r\n+ def _organize_output_files(self, current_asset_name: str, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], temp_metadata_path: Path):\r\n+ \"\"\"\r\n+ Moves/copies processed files for a specific asset from the temp dir to the final output structure.\r\n+\r\n+ Args:\r\n+ current_asset_name: The sanitized name of the asset being organized.\r\n+ processed_maps_details_asset: Details of processed maps for this asset.\r\n+ merged_maps_details_asset: Details of merged maps for this asset.\r\n+ filtered_classified_files_asset: Classified files dictionary filtered for this asset.\r\n+ unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n+ temp_metadata_path: Path to the temporary metadata file for this asset.\r\n+ \"\"\"\r\n+ if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace 
missing.\")\r\n+ if not current_asset_name or current_asset_name == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing for organization.\")\r\n+ supplier_name = self.config.supplier_name # Get supplier name from config\r\n+ if not supplier_name: raise AssetProcessingError(\"Supplier name missing from config.\")\r\n+\r\n+ supplier_sanitized = self._sanitize_filename(supplier_name)\r\n+ asset_name_sanitized = self._sanitize_filename(current_asset_name) # Already sanitized, but ensure consistency\r\n+ final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n+ log.info(f\"Organizing output files for asset '{asset_name_sanitized}' into: {final_dir}\")\r\n+\r\n+ try:\r\n+ # Handle overwrite logic specifically for this asset's directory\r\n+ if final_dir.exists() and self.overwrite:\r\n+ log.warning(f\"Output directory exists for '{asset_name_sanitized}' and overwrite is True. Removing existing directory: {final_dir}\")\r\n+ try:\r\n+ shutil.rmtree(final_dir)\r\n+ except Exception as rm_err:\r\n+ raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} for asset '{asset_name_sanitized}' during overwrite: {rm_err}\") from rm_err\r\n+ # Note: Skip check should prevent this if overwrite is False, but mkdir handles exist_ok=True\r\n+\r\n+ final_dir.mkdir(parents=True, exist_ok=True)\r\n+ except Exception as e:\r\n+ if not isinstance(e, AssetProcessingError):\r\n+ raise AssetProcessingError(f\"Failed to create final dir {final_dir} for asset '{asset_name_sanitized}': {e}\") from e\r\n+ else:\r\n+ raise\r\n+\r\n+ # --- Helper for moving files ---\r\n+ # Keep track of files successfully moved to avoid copying them later as 'unmatched'\r\n+ moved_source_files = set()\r\n+ def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n+ if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc}.\"); return\r\n+ source_abs = self.temp_dir / 
src_rel_path\r\n+ # Use the original filename from the source path for the destination\r\n+ dest_abs = dest_dir / src_rel_path.name\r\n+ try:\r\n+ if source_abs.exists():\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n+ dest_dir.mkdir(parents=True, exist_ok=True)\r\n+ shutil.move(str(source_abs), str(dest_abs))\r\n+ moved_source_files.add(src_rel_path) # Track successfully moved source files\r\n+ else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc}: {source_abs}\")\r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n+\r\n+ # --- Helper for copying files (for unmatched extras) ---\r\n+ def _safe_copy(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n+ if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc} copy.\"); return\r\n+ # Skip copying if this source file was already moved (e.g., it was an 'Extra' for this specific asset)\r\n+ if src_rel_path in moved_source_files:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Skipping copy of {file_desc} '{src_rel_path.name}' as it was already moved.\")\r\n+ return\r\n+ source_abs = self.temp_dir / src_rel_path\r\n+ dest_abs = dest_dir / src_rel_path.name\r\n+ try:\r\n+ if source_abs.exists():\r\n+ # Avoid copying if the exact destination file already exists (e.g., from a previous asset's copy)\r\n+ if dest_abs.exists():\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Destination file already exists for {file_desc} copy: {dest_abs.name}. 
Skipping copy.\")\r\n+ return\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Copying {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n+ dest_dir.mkdir(parents=True, exist_ok=True)\r\n+ shutil.copy2(str(source_abs), str(dest_abs)) # Use copy2 to preserve metadata\r\n+ else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc} copy: {source_abs}\")\r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed copying {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n+\r\n+\r\n+ # --- Move Processed/Merged Maps ---\r\n+ for details_dict in [processed_maps_details_asset, merged_maps_details_asset]:\r\n+ for map_type, res_dict in details_dict.items():\r\n+ if 'error' in res_dict: continue\r\n+ for res_key, details in res_dict.items():\r\n+ if isinstance(details, dict) and 'path' in details:\r\n+ _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n+\r\n+ # --- Move Models specific to this asset ---\r\n+ for model_info in filtered_classified_files_asset.get('models', []):\r\n+ _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n+\r\n+ # --- Move Metadata File ---\r\n+ if temp_metadata_path and temp_metadata_path.exists():\r\n+ final_metadata_path = final_dir / self.config.metadata_filename # Use standard name\r\n+ try:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving metadata file: {temp_metadata_path.name} -> {final_metadata_path.relative_to(self.output_base_path)}\")\r\n+ shutil.move(str(temp_metadata_path), str(final_metadata_path))\r\n+ # No need to add metadata path to moved_source_files as it's uniquely generated\r\n+ except Exception as e:\r\n+ log.error(f\"Asset '{asset_name_sanitized}': Failed moving metadata file '{temp_metadata_path.name}': {e}\", exc_info=True)\r\n+ else:\r\n+ log.warning(f\"Asset '{asset_name_sanitized}': Temporary metadata file path missing or file does not exist: 
{temp_metadata_path}\")\r\n+\r\n+\r\n+ # --- Handle Extra/Ignored/Unmatched Files ---\r\n+ extra_subdir_name = self.config.extra_files_subdir\r\n+ extra_dir = final_dir / extra_subdir_name\r\n+ if filtered_classified_files_asset.get('extra') or filtered_classified_files_asset.get('ignored') or unmatched_files_paths:\r\n+ try:\r\n+ extra_dir.mkdir(parents=True, exist_ok=True)\r\n+\r\n+ # Move asset-specific Extra/Ignored files\r\n+ files_to_move_extra = filtered_classified_files_asset.get('extra', []) + filtered_classified_files_asset.get('ignored', [])\r\n+ if files_to_move_extra:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Moving {len(files_to_move_extra)} asset-specific files to '{extra_subdir_name}/'...\")\r\n+ for file_info in files_to_move_extra:\r\n+ _safe_move(file_info.get('source_path'), extra_dir, f\"extra/ignored file ({file_info.get('reason', 'Unknown')})\")\r\n+\r\n+ # Copy unmatched files\r\n+ if unmatched_files_paths:\r\n+ log.debug(f\"Asset '{asset_name_sanitized}': Copying {len(unmatched_files_paths)} unmatched files to '{extra_subdir_name}/'...\")\r\n+ for file_path in unmatched_files_paths:\r\n+ _safe_copy(file_path, extra_dir, \"unmatched file\")\r\n+\r\n+ except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed creating/moving/copying to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n+\r\n+ log.info(f\"Finished organizing output for asset '{asset_name_sanitized}'.\")\r\n+\r\n+\r\n+ def _cleanup_workspace(self):\r\n+ \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n+ # ... 
(Implementation from Response #45) ...\r\n+ if self.temp_dir and self.temp_dir.exists():\r\n+ try:\r\n+ log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n+ shutil.rmtree(self.temp_dir)\r\n+ self.temp_dir = None\r\n+ log.debug(\"Temporary workspace cleaned up successfully.\")\r\n+ except Exception as e:\r\n+ log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n+\r\n+ # --- Prediction Method ---\r\n+ def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n+ \"\"\"\r\n+ Predicts the final output structure (supplier, asset name) and attempts\r\n+ to predict output filenames for potential map files based on naming conventions.\r\n+ Does not perform full processing or image loading.\r\n+\r\n+ Returns:\r\n+ tuple[str | None, str | None, dict[str, str] | None]:\r\n+ (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n+ where file_predictions_dict maps input filename -> predicted output filename.\r\n+ Returns None if prediction fails critically.\r\n+ \"\"\"\r\n+ log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n+ try:\r\n+ # 1. Get Supplier Name\r\n+ supplier_name = self.config.supplier_name\r\n+ if not supplier_name:\r\n+ log.warning(\"Supplier name not found in configuration during prediction.\")\r\n+ return None\r\n+\r\n+ # 2. 
List Input Filenames/Stems\r\n+ candidate_stems = set() # Use set for unique stems\r\n+ filenames = []\r\n+ if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n+ try:\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ # Get only filenames, ignore directories\r\n+ filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n+ except zipfile.BadZipFile:\r\n+ log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n+ return None\r\n+ except Exception as zip_err:\r\n+ log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n+ return None # Cannot proceed if we can't list files\r\n+ elif self.input_path.is_dir():\r\n+ try:\r\n+ for item in self.input_path.iterdir():\r\n+ if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n+ filenames.append(item.name)\r\n+ # Note: Not walking subdirs for prediction to keep it fast\r\n+ except Exception as dir_err:\r\n+ log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n+ return None\r\n+\r\n+ if not filenames:\r\n+ log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n+ return None # Return None if no files found\r\n+\r\n+ # 3. 
Lightweight Classification for Stems and Potential Maps\r\n+ map_type_mapping = self.config.map_type_mapping\r\n+ model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n+ separator = self.config.source_naming_separator\r\n+ processed_filenames = set() # Track full filenames processed\r\n+ potential_map_files = {} # Store fname -> potential map_type\r\n+\r\n+ for fname in filenames:\r\n+ if fname in processed_filenames: continue\r\n+\r\n+ fstem = Path(fname).stem\r\n+ fstem_lower = fstem.lower()\r\n+ name_parts = fstem_lower.split(separator)\r\n+\r\n+ # Check map rules first\r\n+ map_matched = False\r\n+ for mapping_rule in map_type_mapping:\r\n+ source_keywords, standard_map_type = mapping_rule\r\n+ if standard_map_type not in self.config.standard_map_types: continue\r\n+ for keyword in source_keywords:\r\n+ kw_lower = keyword.lower().strip('*')\r\n+ if kw_lower in name_parts:\r\n+ is_exact_match = any(part == kw_lower for part in name_parts)\r\n+ if is_exact_match:\r\n+ candidate_stems.add(fstem) # Add unique stem\r\n+ potential_map_files[fname] = standard_map_type # Store potential type\r\n+ processed_filenames.add(fname)\r\n+ map_matched = True\r\n+ break # Found keyword match for this rule\r\n+ if map_matched: break # Found a rule match for this file\r\n+ if map_matched: continue # Move to next filename if identified as map\r\n+\r\n+ # Check model patterns if not a map\r\n+ for pattern in model_patterns:\r\n+ if fnmatch(fname.lower(), pattern.lower()):\r\n+ candidate_stems.add(fstem) # Still add stem for base name determination\r\n+ processed_filenames.add(fname)\r\n+ # Don't add models to potential_map_files\r\n+ break # Found model match\r\n+\r\n+ # Note: Files matching neither maps nor models are ignored for prediction details\r\n+\r\n+ log.debug(f\"[PREDICTION] Potential map files identified: {potential_map_files}\") # DEBUG PREDICTION\r\n+ candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n+ 
log.debug(f\"[PREDICTION] Candidate stems identified: {candidate_stems_list}\") # DEBUG PREDICTION\r\n+ if not candidate_stems_list:\r\n+ log.warning(f\"Prediction: No relevant map/model stems found in {self.input_path.name}. Using input name as fallback.\")\r\n+ # Fallback: Use the input path's name itself if no stems found\r\n+ base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n+ determined_base_name = base_name_fallback\r\n+ else:\r\n+ # 4. Replicate _determine_base_metadata logic for base name\r\n+ determined_base_name = \"UnknownAssetName\"\r\n+ separator = self.config.source_naming_separator\r\n+ indices_dict = self.config.source_naming_indices\r\n+ base_index_raw = indices_dict.get('base_name')\r\n+ log.debug(f\"[PREDICTION] Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}'\") # DEBUG PREDICTION\r\n+\r\n+ base_index = None\r\n+ if base_index_raw is not None:\r\n+ try:\r\n+ base_index = int(base_index_raw) # Use explicit conversion like in main logic\r\n+ except (ValueError, TypeError):\r\n+ log.warning(f\"[PREDICTION] Could not convert base_name index '{base_index_raw}' to integer.\")\r\n+\r\n+ if isinstance(base_index, int):\r\n+ potential_base_names = set()\r\n+ for stem in candidate_stems_list: # Iterate over the list\r\n+ parts = stem.split(separator)\r\n+ log.debug(f\"[PREDICTION] Processing stem: '{stem}', Parts: {parts}\") # DEBUG PREDICTION\r\n+ if len(parts) > base_index:\r\n+ extracted_name = parts[base_index]\r\n+ potential_base_names.add(extracted_name)\r\n+ log.debug(f\"[PREDICTION] Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG PREDICTION\r\n+ else:\r\n+ log.debug(f\"[PREDICTION] Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG PREDICTION\r\n+ if len(potential_base_names) == 1:\r\n+ determined_base_name = potential_base_names.pop()\r\n+ log.debug(f\"[PREDICTION] 
Determined base name '{determined_base_name}' from structured parts (index {base_index}).\") # DEBUG PREDICTION\r\n+ elif len(potential_base_names) > 1:\r\n+ log.debug(f\"[PREDICTION] Multiple potential base names found from index {base_index}: {potential_base_names}. Falling back to common prefix.\") # DEBUG PREDICTION\r\n+ determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+ # else: Use common prefix below\r\n+\r\n+ if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n+ log.debug(\"[PREDICTION] Falling back to common prefix for base name determination (structured parts failed or no index).\") # DEBUG PREDICTION\r\n+ determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n+ determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n+\r\n+ # 5. Sanitize Names\r\n+ final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n+ log.debug(f\"[PREDICTION] Final determined base name for prediction: '{final_base_name}'\") # DEBUG PREDICTION\r\n+ final_supplier_name = self._sanitize_filename(supplier_name)\r\n+\r\n+ # 6. 
Predict Output Filenames\r\n+ file_predictions = {}\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ # Use highest resolution key as a placeholder for prediction\r\n+ highest_res_key = \"Res?\" # Fallback\r\n+ if self.config.image_resolutions:\r\n+ highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n+\r\n+ for input_fname, map_type in potential_map_files.items():\r\n+ # Assume PNG for prediction, extension might change based on bit depth rules later\r\n+ # but this gives a good idea of the renaming.\r\n+ # A more complex prediction could check bit depth rules.\r\n+ predicted_ext = \"png\" # Simple assumption for preview\r\n+ try:\r\n+ predicted_fname = target_pattern.format(\r\n+ base_name=final_base_name,\r\n+ map_type=map_type,\r\n+ resolution=highest_res_key, # Use placeholder resolution\r\n+ ext=predicted_ext\r\n+ )\r\n+ file_predictions[input_fname] = predicted_fname\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n+ file_predictions[input_fname] = \"[Filename Format Error]\"\r\n+\r\n+\r\n+ log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n+ return final_supplier_name, final_base_name, file_predictions\r\n+\r\n+ except Exception as e:\r\n+ log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n+ return None\r\n+\r\n+\r\n+ # --- New Detailed Prediction Method ---\r\n+ def get_detailed_file_predictions(self) -> list[dict] | None:\r\n+ \"\"\"\r\n+ Performs extraction and classification to provide a detailed list of all\r\n+ files found within the input and their predicted status/output name,\r\n+ handling multiple potential assets within the input.\r\n+\r\n+ Returns:\r\n+ list[dict] | None: A list of dictionaries, each representing a file:\r\n+ {'original_path': str,\r\n+ 'predicted_asset_name': str | None,\r\n+ 'predicted_output_name': str | None,\r\n+ 'status': str,\r\n+ 'details': str | None}\r\n+ Returns None if a critical error occurs during setup/classification.\r\n+ \"\"\"\r\n+ log.info(f\"Getting detailed file predictions for input: {self.input_path.name}\")\r\n+ results = []\r\n+ all_files_in_workspace = [] # Keep track of all files found\r\n+\r\n+ try:\r\n+ # --- Perform necessary setup and classification ---\r\n+ self._setup_workspace()\r\n+ self._extract_input()\r\n+ # Run classification - this populates self.classified_files\r\n+ self._inventory_and_classify_files()\r\n+\r\n+ # --- Determine distinct assets and file mapping ---\r\n+ # This uses the results from _inventory_and_classify_files\r\n+ distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n+ log.debug(f\"Prediction: Determined base names: {distinct_base_names}\")\r\n+ log.debug(f\"Prediction: File to base name map: { {str(k):v for k,v in file_to_base_name_map.items()} }\")\r\n+\r\n+ # --- Apply Suffixes for Prediction 
Preview ---\r\n+ # This logic is similar to the main process method but applied to the classified_files list\r\n+ log.debug(\"Prediction: Applying map type suffixes for preview...\")\r\n+ grouped_classified_maps = defaultdict(list)\r\n+ for map_info in self.classified_files.get('maps', []):\r\n+ # Group by the base map type\r\n+ grouped_classified_maps[map_info['map_type']].append(map_info)\r\n+\r\n+ # Create a new list for maps with updated types for prediction\r\n+ maps_with_predicted_types = []\r\n+ for base_map_type, maps_in_group in grouped_classified_maps.items():\r\n+ respect_variants = base_map_type in self.config.respect_variant_map_types\r\n+ # Sort maps within the group for consistent suffixing (using the same key as in _inventory_and_classify_files)\r\n+ maps_in_group.sort(key=lambda c: (\r\n+ c.get('preset_rule_index', 9999),\r\n+ c.get('keyword_index_in_rule', 9999) if 'keyword_index_in_rule' in c else 9999, # Handle potential missing key\r\n+ str(c['source_path'])\r\n+ ))\r\n+\r\n+ for i, map_info in enumerate(maps_in_group):\r\n+ predicted_map_type = f\"{base_map_type}-{i + 1}\" if respect_variants else base_map_type\r\n+ # Create a copy to avoid modifying the original classified_files list in place\r\n+ map_info_copy = map_info.copy()\r\n+ map_info_copy['predicted_map_type'] = predicted_map_type # Store the predicted type\r\n+ maps_with_predicted_types.append(map_info_copy)\r\n+\r\n+ # Replace the original maps list with the one containing predicted types for the next step\r\n+ # Note: This is a temporary list for prediction generation, not modifying the instance's classified_files permanently\r\n+ # self.classified_files[\"maps\"] = maps_with_predicted_types # Avoid modifying instance state\r\n+\r\n+ # --- Prepare for filename prediction ---\r\n+ target_pattern = self.config.target_filename_pattern\r\n+ highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n+ if self.config.image_resolutions:\r\n+ highest_res_key = 
max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n+\r\n+ # --- Process all classified files (including maps with predicted types) ---\r\n+ all_classified_files_with_category = []\r\n+ # Add maps with predicted types first\r\n+ for map_info in maps_with_predicted_types:\r\n+ map_info['category'] = 'maps' # Ensure category is set\r\n+ all_classified_files_with_category.append(map_info)\r\n+ if 'source_path' in map_info:\r\n+ all_files_in_workspace.append(map_info['source_path'])\r\n+\r\n+ # Add other categories (models, extra, ignored)\r\n+ for category in ['models', 'extra', 'ignored']:\r\n+ for file_info in self.classified_files.get(category, []):\r\n+ file_info['category'] = category\r\n+ all_classified_files_with_category.append(file_info)\r\n+ if 'source_path' in file_info:\r\n+ all_files_in_workspace.append(file_info['source_path'])\r\n+\r\n+\r\n+ # --- Generate results for each file ---\r\n+ processed_paths = set() # Track paths already added to results\r\n+ for file_info in all_classified_files_with_category:\r\n+ original_path = file_info.get(\"source_path\")\r\n+ if not original_path or original_path in processed_paths:\r\n+ continue # Skip if path missing or already processed\r\n+\r\n+ original_path_str = str(original_path)\r\n+ processed_paths.add(original_path) # Mark as processed\r\n+\r\n+ # Determine predicted asset name and status\r\n+ predicted_asset_name = file_to_base_name_map.get(original_path) # Can be None\r\n+ category = file_info['category'] # maps, models, extra, ignored\r\n+ reason = file_info.get('reason') # Specific reason for extra/ignored\r\n+ status = \"Unknown\"\r\n+ details = None\r\n+ predicted_output_name = None # Usually original name, except for maps\r\n+\r\n+ if category == \"maps\":\r\n+ status = \"Mapped\"\r\n+ # Use the predicted_map_type for the preview display\r\n+ map_type_for_preview = file_info.get(\"predicted_map_type\", file_info.get(\"map_type\", \"UnknownType\"))\r\n+ details = 
f\"[{map_type_for_preview}]\"\r\n+ if file_info.get(\"is_16bit_source\"): details += \" (16-bit)\"\r\n+ # Predict map output name using its determined asset name and predicted map type\r\n+ if predicted_asset_name:\r\n+ try:\r\n+ predicted_ext = \"png\" # Assume PNG for prediction simplicity\r\n+ predicted_output_name = target_pattern.format(\r\n+ base_name=predicted_asset_name,\r\n+ map_type=map_type_for_preview, # Use the predicted type here\r\n+ resolution=highest_res_key,\r\n+ ext=predicted_ext\r\n+ )\r\n+ except KeyError as fmt_err:\r\n+ log.warning(f\"Prediction format error for map {original_path_str}: {fmt_err}\")\r\n+ predicted_output_name = \"[Format Error]\"\r\n+ details += f\" (Format Key Error: {fmt_err})\"\r\n+ except Exception as pred_err:\r\n+ log.warning(f\"Prediction error for map {original_path_str}: {pred_err}\")\r\n+ predicted_output_name = \"[Prediction Error]\"\r\n+ details += f\" (Error: {pred_err})\"\r\n+ else:\r\n+ # Should not happen for maps if _determine_base_metadata worked correctly\r\n+ log.warning(f\"Map file '{original_path_str}' has no predicted asset name.\")\r\n+ predicted_output_name = \"[No Asset Name]\"\r\n+\r\n+ elif category == \"models\":\r\n+ status = \"Model\"\r\n+ details = \"[Model]\"\r\n+ predicted_output_name = original_path.name # Models keep original name\r\n+\r\n+ elif category == \"ignored\":\r\n+ status = \"Ignored\"\r\n+ details = f\"Ignored ({reason or 'Unknown reason'})\"\r\n+ predicted_output_name = None # Ignored files have no output\r\n+\r\n+ elif category == \"extra\":\r\n+ if predicted_asset_name is None:\r\n+ # This is an \"Unmatched Extra\" file (includes Unrecognised and explicit Extras without a base name)\r\n+ status = \"Unmatched Extra\"\r\n+ details = f\"[Unmatched Extra ({reason or 'N/A'})]\" # Include original reason if available\r\n+ elif reason == 'Unrecognised':\r\n+ # Unrecognised but belongs to a specific asset\r\n+ status = \"Unrecognised\"\r\n+ details = \"[Unrecognised]\"\r\n+ else:\r\n+ 
# Explicitly matched an 'extra' pattern and belongs to an asset\r\n+ status = \"Extra\"\r\n+ details = f\"Extra ({reason})\"\r\n+ predicted_output_name = original_path.name # Extra files keep original name\r\n+\r\n+ else:\r\n+ log.warning(f\"Unknown category '{category}' encountered during prediction for {original_path_str}\")\r\n+ status = \"Error\"\r\n+ details = f\"[Unknown Category: {category}]\"\r\n+ predicted_output_name = original_path.name\r\n+\r\n+\r\n+ results.append({\r\n+ \"original_path\": original_path_str,\r\n+ \"predicted_asset_name\": predicted_asset_name, # May be None\r\n+ \"predicted_output_name\": predicted_output_name,\r\n+ \"status\": status,\r\n+ \"details\": details\r\n+ })\r\n+\r\n+ # Add any files found during walk but missed by classification (should be rare)\r\n+ # These are likely unmatched as well.\r\n+ for file_path in all_files_in_workspace:\r\n+ if file_path not in processed_paths:\r\n+ log.warning(f\"File found in workspace but not classified: {file_path}. Adding as Unmatched Extra.\")\r\n+ results.append({\r\n+ \"original_path\": str(file_path),\r\n+ \"predicted_asset_name\": None, # Explicitly None as it wasn't mapped\r\n+ \"predicted_output_name\": file_path.name,\r\n+ \"status\": \"Unmatched Extra\",\r\n+ \"details\": \"[Missed Classification]\"\r\n+ })\r\n+\r\n+\r\n+ log.info(f\"Detailed prediction complete for input '{self.input_path.name}'. 
Found {len(results)} files.\")\r\n+ # Sort results by original path for consistent display\r\n+ results.sort(key=lambda x: x.get(\"original_path\", \"\"))\r\n+ return results\r\n+\r\n+ except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n+ log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n+ return None # Indicate critical failure\r\n+ finally:\r\n+ # Ensure cleanup always happens\r\n+ self._cleanup_workspace()\r\n+\r\n+\r\n+# --- End of AssetProcessor Class ---\n\\ No newline at end of file\n" }, { "date": 1745348574217, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -293,24 +293,8 @@\n img_float = 1.0 - img_prepared.astype(np.float32)\r\n img_prepared = np.clip(img_float, 0.0, 1.0) # Result is float32\r\n log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {source_dtype}\")\r\n \r\n- # MASK Handling (Extract alpha or convert) - Ensure it happens after potential BGR->RGB\r\n- if map_type == 'MASK':\r\n- log.debug(f\"Processing as MASK type for {source_path_rel.name}.\")\r\n- shape = img_prepared.shape\r\n- if len(shape) == 3 and shape[2] == 4:\r\n- log.debug(\"MASK processing: Extracting alpha channel (4-channel source).\")\r\n- img_prepared = img_prepared[:, :, 3] # Alpha is usually the 4th channel (index 3)\r\n- elif len(shape) == 3 and shape[2] == 3:\r\n- log.debug(\"MASK processing: Converting RGB to Grayscale (3-channel source).\")\r\n- img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY)\r\n- elif len(shape) == 2:\r\n- log.debug(\"MASK processing: Source is already grayscale.\")\r\n- else:\r\n- log.warning(f\"MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\")\r\n- # MASK should ideally be uint8 for saving later, but keep float for now if inverted?\r\n- # Let _save_image handle final conversion based on format rules.\r\n \r\n # Ensure data is float32 for resizing if it came from gloss inversion\r\n if isinstance(img_prepared, np.ndarray) and img_prepared.dtype != np.float32 and map_type == 'ROUGH' and is_gloss_source:\r\n img_prepared = img_prepared.astype(np.float32)\r\n@@ -1283,8 +1267,9 @@\n - processed_maps_details_asset: Dict mapping map_type to resolution details.\r\n - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n - aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n+\r\n \"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n log.info(f\"Processing individual map files for asset '{asset_name}'...\")\r\n@@ -2414,2713 +2399,5 @@\n # Ensure cleanup always happens\r\n self._cleanup_workspace()\r\n \r\n \r\n-# --- End of AssetProcessor Class ---\n-# asset_processor.py\r\n-\r\n-import os\r\n-import math\r\n-import shutil\r\n-import tempfile\r\n-import zipfile\r\n-import logging\r\n-import json\r\n-import re\r\n-import time\r\n-from pathlib import Path\r\n-from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n-from typing import List, Dict, Tuple, Optional # Added for type hinting\r\n-from collections import defaultdict # Added for grouping\r\n-\r\n-# Attempt to import image processing libraries\r\n-try:\r\n- import cv2\r\n- import numpy as np\r\n-except ImportError:\r\n- print(\"ERROR: Missing required libraries. 
Please install opencv-python and numpy:\")\r\n- print(\"pip install opencv-python numpy\")\r\n- exit(1) # Exit if essential libraries are missing\r\n-\r\n-# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n-try:\r\n- import OpenEXR\r\n- import Imath\r\n- _HAS_OPENEXR = True\r\n-except ImportError:\r\n- _HAS_OPENEXR = False\r\n- # Log this information - basic EXR might still work via OpenCV\r\n- logging.debug(\"Optional 'OpenEXR' python package not found. EXR saving relies on OpenCV's built-in support.\")\r\n-\r\n-\r\n-# Assuming Configuration class is in configuration.py\r\n-try:\r\n- from configuration import Configuration, ConfigurationError\r\n-except ImportError:\r\n- print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n- print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n- exit(1)\r\n-\r\n-# Use logger defined in main.py (or configure one here if run standalone)\r\n-log = logging.getLogger(__name__)\r\n-# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\n-if not log.hasHandlers():\r\n- logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n-\r\n-\r\n-# --- Custom Exception ---\r\n-class AssetProcessingError(Exception):\r\n- \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n- pass\r\n-\r\n-# --- Helper Functions ---\r\n-def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n- \"\"\"\r\n- Calculates target dimensions by first scaling to fit target_max_dim\r\n- while maintaining aspect ratio, then finding the nearest power-of-two\r\n- value for each resulting dimension (Stretch/Squash to POT).\r\n- \"\"\"\r\n- if orig_w <= 0 or orig_h <= 0:\r\n- # Fallback to target_max_dim if original dimensions are invalid\r\n- pot_dim = get_nearest_pot(target_max_dim)\r\n- log.warning(f\"Invalid original dimensions ({orig_w}x{orig_h}). 
Falling back to nearest POT of target_max_dim: {pot_dim}x{pot_dim}\")\r\n- return (pot_dim, pot_dim)\r\n-\r\n- # Step 1: Calculate intermediate dimensions maintaining aspect ratio\r\n- ratio = orig_w / orig_h\r\n- if ratio > 1: # Width is dominant\r\n- scaled_w = target_max_dim\r\n- scaled_h = max(1, round(scaled_w / ratio))\r\n- else: # Height is dominant or square\r\n- scaled_h = target_max_dim\r\n- scaled_w = max(1, round(scaled_h * ratio))\r\n-\r\n- # Step 2: Find the nearest power of two for each scaled dimension\r\n- pot_w = get_nearest_pot(scaled_w)\r\n- pot_h = get_nearest_pot(scaled_h)\r\n-\r\n- log.debug(f\"POT Calc: Orig=({orig_w}x{orig_h}), MaxDim={target_max_dim} -> Scaled=({scaled_w}x{scaled_h}) -> POT=({pot_w}x{pot_h})\")\r\n-\r\n- return int(pot_w), int(pot_h)\r\n-\r\n-def _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n- \"\"\"\r\n- Calculates min, max, mean for a given numpy image array.\r\n- Handles grayscale and multi-channel images. Converts to float64 for calculation.\r\n- \"\"\"\r\n- if image_data is None:\r\n- log.warning(\"Attempted to calculate stats on None image data.\")\r\n- return None\r\n- try:\r\n- # Use float64 for calculations to avoid potential overflow/precision issues\r\n- data_float = image_data.astype(np.float64)\r\n-\r\n- # Normalize data_float based on original dtype before calculating stats\r\n- if image_data.dtype == np.uint16:\r\n- log.debug(\"Stats calculation: Normalizing uint16 data to 0-1 range.\")\r\n- data_float /= 65535.0\r\n- elif image_data.dtype == np.uint8:\r\n- log.debug(\"Stats calculation: Normalizing uint8 data to 0-1 range.\")\r\n- data_float /= 255.0\r\n- # Assuming float inputs are already in 0-1 range or similar\r\n-\r\n- log.debug(f\"Stats calculation: data_float dtype: {data_float.dtype}, shape: {data_float.shape}\")\r\n- # Log a few sample values to check range after normalization\r\n- if data_float.size > 0:\r\n- sample_values = data_float.flatten()[:10] # Get first 10 values\r\n- 
log.debug(f\"Stats calculation: Sample values (first 10) after normalization: {sample_values.tolist()}\")\r\n-\r\n-\r\n- if len(data_float.shape) == 2: # Grayscale (H, W)\r\n- min_val = float(np.min(data_float))\r\n- max_val = float(np.max(data_float))\r\n- mean_val = float(np.mean(data_float))\r\n- stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n- elif len(data_float.shape) == 3: # Color (H, W, C)\r\n- channels = data_float.shape[2]\r\n- min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n- max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n- mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n- # The input data_float is now expected to be in RGB order after conversion in _process_maps\r\n- stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n- log.debug(f\"Calculated {channels}-Channel Stats (RGB order): Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n- else:\r\n- log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n- return None\r\n- return stats\r\n- except Exception as e:\r\n- log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n- return {\"error\": str(e)}\r\n-\r\n-\r\n-# --- Helper function ---\r\n-def _get_base_map_type(target_map_string: str) -> str:\r\n- \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n- match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n- if match:\r\n- return match.group(1).upper()\r\n- return target_map_string.upper() # Fallback if no number suffix\r\n-\r\n-\r\n-def _is_power_of_two(n: int) -> bool:\r\n- \"\"\"Checks if a number is a power of two.\"\"\"\r\n- return (n > 0) and (n & (n - 1) == 0)\r\n-\r\n-def get_nearest_pot(value: int) -> int:\r\n- \"\"\"Finds the nearest power of two to the given value.\"\"\"\r\n- 
if value <= 0:\r\n- return 1 # Or raise error, POT must be positive\r\n- if _is_power_of_two(value):\r\n- return value\r\n-\r\n- # Calculate the powers of two below and above the value\r\n- lower_pot = 1 << (value.bit_length() - 1)\r\n- upper_pot = 1 << value.bit_length()\r\n-\r\n- # Determine which power of two is closer\r\n- if (value - lower_pot) < (upper_pot - value):\r\n- return lower_pot\r\n- else:\r\n- return upper_pot\r\n-\r\n-# --- Asset Processor Class ---\r\n-class AssetProcessor:\r\n- \"\"\"\r\n- Handles the processing pipeline for a single asset (ZIP or folder).\r\n- \"\"\"\r\n- # Define the list of known grayscale map types (adjust as needed)\r\n- GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n-\r\n- def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n- \"\"\"\r\n- Initializes the processor for a given input asset.\r\n-\r\n- Args:\r\n- input_path: Path to the input ZIP file or folder.\r\n- config: The loaded Configuration object.\r\n- output_base_path: The base directory where processed output will be saved.\r\n- overwrite: If True, forces reprocessing even if output exists.\r\n- \"\"\"\r\n- if not isinstance(input_path, Path): input_path = Path(input_path)\r\n- if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n- if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n-\r\n- if not input_path.exists():\r\n- raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n- if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n- raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n-\r\n- self.input_path: Path = input_path\r\n- self.config: Configuration = config\r\n- self.output_base_path: Path = output_base_path\r\n- self.overwrite: bool = overwrite # Store the overwrite 
flag\r\n-\r\n- self.temp_dir: Path | None = None # Path to the temporary working directory\r\n- self.classified_files: dict[str, list[dict]] = {\r\n- \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n- }\r\n- # These will no longer store instance-wide results, but are kept for potential future use or refactoring\r\n- # self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n- # self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n- # self.metadata_file_path_temp: Path | None = None\r\n- # self.metadata: dict = {} # Metadata is now handled per-asset within the process loop\r\n-\r\n- log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n-\r\n-\r\n- # --- New Helper Function: Load and Transform Source ---\r\n- def _load_and_transform_source(self, source_path_rel: Path, map_type: str, target_resolution_key: str, is_gloss_source: bool, cache: dict) -> Tuple[Optional[np.ndarray], Optional[np.dtype]]:\r\n- \"\"\"\r\n- Loads a source image file, performs initial prep (BGR->RGB, Gloss->Rough),\r\n- resizes it to the target resolution, and caches the result.\r\n-\r\n- Args:\r\n- source_path_rel: Relative path to the source file within the temp directory.\r\n- map_type: The standard map type (e.g., \"NRM\", \"ROUGH\").\r\n- target_resolution_key: The key for the target resolution (e.g., \"4K\").\r\n- is_gloss_source: Boolean indicating if this source should be treated as gloss for inversion.\r\n- cache: The dictionary used for caching loaded/resized data.\r\n-\r\n- Returns:\r\n- Tuple containing:\r\n- - Resized NumPy array (float32) or None if loading/processing fails.\r\n- - Original source NumPy dtype or None if loading fails.\r\n- \"\"\"\r\n- if not self.temp_dir:\r\n- log.error(\"Temporary directory not set in _load_and_transform_source.\")\r\n- return None, None\r\n-\r\n- cache_key = (source_path_rel, target_resolution_key)\r\n- if cache_key in cache:\r\n- log.debug(f\"CACHE HIT: Returning cached data for 
{source_path_rel} at {target_resolution_key}\")\r\n- return cache[cache_key] # Return tuple (image_data, source_dtype)\r\n-\r\n- log.debug(f\"CACHE MISS: Loading and transforming {source_path_rel} for {target_resolution_key}\")\r\n- full_source_path = self.temp_dir / source_path_rel\r\n- img_prepared = None\r\n- source_dtype = None\r\n-\r\n- try:\r\n- # --- 1. Load Source Image ---\r\n- # Determine read flag (Grayscale for specific types, unchanged otherwise)\r\n- read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- # Special case for MASK: always load unchanged first to check alpha\r\n- if map_type.upper() == 'MASK': read_flag = cv2.IMREAD_UNCHANGED\r\n-\r\n- log.debug(f\"Loading source {full_source_path.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n- img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n- if img_loaded is None:\r\n- raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n- source_dtype = img_loaded.dtype\r\n- log.debug(f\"Loaded source {full_source_path.name}, dtype: {source_dtype}, shape: {img_loaded.shape}\")\r\n-\r\n- # --- 2. 
Initial Preparation (BGR->RGB, Gloss Inversion, MASK handling) ---\r\n- img_prepared = img_loaded # Start with loaded image\r\n-\r\n- # BGR -> RGB conversion (only for 3-channel images)\r\n- if len(img_prepared.shape) == 3 and img_prepared.shape[2] >= 3: # Check for 3 or 4 channels\r\n- # Ensure it's not already grayscale before attempting conversion\r\n- if read_flag != cv2.IMREAD_GRAYSCALE:\r\n- log.debug(f\"Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n- # Handle 4-channel (BGRA) by converting to RGB first\r\n- if img_prepared.shape[2] == 4:\r\n- img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGRA2RGB)\r\n- else: # 3-channel (BGR)\r\n- img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGR2RGB)\r\n- else:\r\n- log.debug(f\"Skipping BGR->RGB conversion for {source_path_rel.name} as it was loaded grayscale.\")\r\n- elif len(img_prepared.shape) == 2:\r\n- log.debug(f\"Image {source_path_rel.name} is grayscale, no BGR->RGB conversion needed.\")\r\n- else:\r\n- log.warning(f\"Unexpected image shape {img_prepared.shape} for {source_path_rel.name} after loading.\")\r\n-\r\n-\r\n- # Gloss -> Roughness Inversion\r\n- if map_type == 'ROUGH' and is_gloss_source:\r\n- log.info(f\"Performing Gloss->Roughness inversion for {source_path_rel.name}\")\r\n- # Ensure grayscale before inversion\r\n- if len(img_prepared.shape) == 3:\r\n- img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY) # Use RGB2GRAY as it's already converted\r\n-\r\n- # Normalize based on original source dtype before inversion\r\n- if source_dtype == np.uint16:\r\n- img_float = 1.0 - (img_prepared.astype(np.float32) / 65535.0)\r\n- elif source_dtype == np.uint8:\r\n- img_float = 1.0 - (img_prepared.astype(np.float32) / 255.0)\r\n- else: # Assuming float input is already 0-1 range\r\n- img_float = 1.0 - img_prepared.astype(np.float32)\r\n- img_prepared = np.clip(img_float, 0.0, 1.0) # Result is float32\r\n- log.debug(f\"Inverted gloss map stored as float32 for ROUGH, 
original dtype: {source_dtype}\")\r\n-\r\n- # MASK Handling (Extract alpha or convert) - Ensure it happens after potential BGR->RGB\r\n- if map_type == 'MASK':\r\n- log.debug(f\"Processing as MASK type for {source_path_rel.name}.\")\r\n- shape = img_prepared.shape\r\n- if len(shape) == 3 and shape[2] == 4:\r\n- log.debug(\"MASK processing: Extracting alpha channel (4-channel source).\")\r\n- img_prepared = img_prepared[:, :, 3] # Alpha is usually the 4th channel (index 3)\r\n- elif len(shape) == 3 and shape[2] == 3:\r\n- log.debug(\"MASK processing: Converting RGB to Grayscale (3-channel source).\")\r\n- img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY)\r\n- elif len(shape) == 2:\r\n- log.debug(\"MASK processing: Source is already grayscale.\")\r\n- else:\r\n- log.warning(f\"MASK processing: Unexpected source shape {shape}. Cannot reliably extract mask.\")\r\n- # MASK should ideally be uint8 for saving later, but keep float for now if inverted?\r\n- # Let _save_image handle final conversion based on format rules.\r\n-\r\n- # Ensure data is float32 for resizing if it came from gloss inversion\r\n- if isinstance(img_prepared, np.ndarray) and img_prepared.dtype != np.float32 and map_type == 'ROUGH' and is_gloss_source:\r\n- img_prepared = img_prepared.astype(np.float32)\r\n- elif isinstance(img_prepared, np.ndarray) and img_prepared.dtype not in [np.uint8, np.uint16, np.float32, np.float16]:\r\n- # Convert other potential types (like bool) to float32 for resizing compatibility\r\n- log.warning(f\"Converting unexpected dtype {img_prepared.dtype} to float32 before resizing.\")\r\n- img_prepared = img_prepared.astype(np.float32)\r\n-\r\n-\r\n- # --- 3. 
Resize ---\r\n- if img_prepared is None: raise AssetProcessingError(\"Image data is None after initial prep.\")\r\n- orig_h, orig_w = img_prepared.shape[:2]\r\n- target_dim_px = self.config.image_resolutions.get(target_resolution_key)\r\n- if not target_dim_px:\r\n- raise AssetProcessingError(f\"Target resolution key '{target_resolution_key}' not found in config.\")\r\n-\r\n- # Avoid upscaling check\r\n- max_original_dimension = max(orig_w, orig_h)\r\n- if target_dim_px > max_original_dimension:\r\n- log.warning(f\"Target dimension {target_dim_px}px is larger than original {max_original_dimension}px for {source_path_rel}. Skipping resize for {target_resolution_key}.\")\r\n- # Store None in cache for this specific resolution to avoid retrying\r\n- cache[cache_key] = (None, source_dtype)\r\n- return None, source_dtype # Indicate resize was skipped\r\n-\r\n- if orig_w <= 0 or orig_h <= 0:\r\n- raise AssetProcessingError(f\"Invalid original dimensions ({orig_w}x{orig_h}) for {source_path_rel}.\")\r\n-\r\n- target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim_px)\r\n- interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n- log.debug(f\"Resizing {source_path_rel.name} from ({orig_w}x{orig_h}) to ({target_w}x{target_h}) for {target_resolution_key}\")\r\n- img_resized = cv2.resize(img_prepared, (target_w, target_h), interpolation=interpolation)\r\n-\r\n- # --- 4. Cache and Return ---\r\n- # Ensure result is float32 if it came from gloss inversion, otherwise keep resized dtype\r\n- final_data_to_cache = img_resized\r\n- if map_type == 'ROUGH' and is_gloss_source and final_data_to_cache.dtype != np.float32:\r\n- final_data_to_cache = final_data_to_cache.astype(np.float32)\r\n-\r\n- log.debug(f\"CACHING result for {cache_key}. 
Shape: {final_data_to_cache.shape}, Dtype: {final_data_to_cache.dtype}\")\r\n- cache[cache_key] = (final_data_to_cache, source_dtype)\r\n- return final_data_to_cache, source_dtype\r\n-\r\n- except Exception as e:\r\n- log.error(f\"Error in _load_and_transform_source for {source_path_rel} at {target_resolution_key}: {e}\", exc_info=True)\r\n- # Cache None to prevent retrying on error for this specific key\r\n- cache[cache_key] = (None, None)\r\n- return None, None\r\n-\r\n-\r\n- # --- New Helper Function: Save Image ---\r\n- def _save_image(self, image_data: np.ndarray, map_type: str, resolution_key: str, asset_base_name: str, source_info: dict, output_bit_depth_rule: str, temp_dir: Path) -> Optional[Dict]:\r\n- \"\"\"\r\n- Handles saving an image NumPy array to a temporary file, including determining\r\n- format, bit depth, performing final conversions, and fallback logic.\r\n-\r\n- Args:\r\n- image_data: NumPy array containing the image data to save.\r\n- map_type: The standard map type being saved (e.g., \"COL\", \"NRMRGH\").\r\n- resolution_key: The resolution key (e.g., \"4K\").\r\n- asset_base_name: The sanitized base name of the asset.\r\n- source_info: Dictionary containing details about the source(s), e.g.,\r\n- {'original_extension': '.tif', 'source_bit_depth': 16, 'involved_extensions': {'.tif', '.png'}}\r\n- output_bit_depth_rule: Rule for determining output bit depth ('respect', 'force_8bit', 'force_16bit', 'respect_inputs').\r\n- temp_dir: The temporary directory path to save the file in.\r\n-\r\n- Returns:\r\n- A dictionary containing details of the saved file (path, width, height,\r\n- bit_depth, format) or None if saving failed.\r\n- \"\"\"\r\n- if image_data is None:\r\n- log.error(f\"Cannot save image for {map_type} ({resolution_key}): image_data is None.\")\r\n- return None\r\n- if not temp_dir or not temp_dir.exists():\r\n- log.error(f\"Cannot save image for {map_type} ({resolution_key}): temp_dir is invalid.\")\r\n- return None\r\n-\r\n- 
try:\r\n- h, w = image_data.shape[:2]\r\n- current_dtype = image_data.dtype\r\n- log.debug(f\"Saving {map_type} ({resolution_key}) for asset '{asset_base_name}'. Input shape: {image_data.shape}, dtype: {current_dtype}\")\r\n-\r\n- # --- 1. Determine Output Bit Depth ---\r\n- source_bpc = source_info.get('source_bit_depth', 8) # Default to 8 if missing\r\n- max_input_bpc = source_info.get('max_input_bit_depth', source_bpc) # For 'respect_inputs' merge rule\r\n- output_dtype_target, output_bit_depth = np.uint8, 8 # Default\r\n-\r\n- if output_bit_depth_rule == 'force_8bit':\r\n- output_dtype_target, output_bit_depth = np.uint8, 8\r\n- elif output_bit_depth_rule == 'force_16bit':\r\n- output_dtype_target, output_bit_depth = np.uint16, 16\r\n- elif output_bit_depth_rule == 'respect': # For individual maps\r\n- if source_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- # Handle float source? Assume 16-bit output if source was float? Needs clarification.\r\n- # For now, stick to uint8/16 based on source_bpc.\r\n- elif output_bit_depth_rule == 'respect_inputs': # For merged maps\r\n- if max_input_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- else: # Default to 8-bit if rule is unknown\r\n- log.warning(f\"Unknown output_bit_depth_rule '{output_bit_depth_rule}'. Defaulting to 8-bit.\")\r\n- output_dtype_target, output_bit_depth = np.uint8, 8\r\n-\r\n- log.debug(f\"Target output bit depth: {output_bit_depth}-bit (dtype: {output_dtype_target.__name__}) based on rule '{output_bit_depth_rule}'\")\r\n-\r\n- # --- 2. 
Determine Output Format ---\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format()\r\n- threshold = self.config.resolution_threshold_for_jpg\r\n- force_lossless = map_type in self.config.force_lossless_map_types\r\n- original_extension = source_info.get('original_extension', '.png') # Primary source ext\r\n- involved_extensions = source_info.get('involved_extensions', {original_extension}) # For merges\r\n- target_dim_px = self.config.image_resolutions.get(resolution_key, 0) # Get target dimension size\r\n-\r\n- # Apply format determination logic (similar to old _process_maps/_merge_maps)\r\n- if force_lossless:\r\n- log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else: # Assume PNG if primary 16-bit isn't EXR\r\n- if output_format != \"png\": log.warning(f\"Primary 16-bit format '{output_format}' not PNG/EXR for forced lossless. 
Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16 if fallback_fmt_16 == \"png\" else \"png\" # Ensure PNG\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # 8-bit lossless -> PNG\r\n- output_format = \"png\"; output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n-\r\n- elif output_bit_depth == 8 and target_dim_px >= threshold:\r\n- output_format = 'jpg'; output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {resolution_key} due to resolution threshold ({target_dim_px} >= {threshold}).\")\r\n- else:\r\n- # Determine highest format involved (for merges) or use original (for individuals)\r\n- highest_format_str = 'jpg' # Default lowest\r\n- relevant_extensions = involved_extensions if map_type in self.config.map_merge_rules else {original_extension}\r\n- if '.exr' in relevant_extensions: highest_format_str = 'exr'\r\n- elif '.tif' in relevant_extensions: highest_format_str = 'tif'\r\n- elif '.png' in relevant_extensions: highest_format_str = 'png'\r\n-\r\n- if highest_format_str == 'exr':\r\n- if output_bit_depth == 16: output_format, output_ext, needs_float16 = \"exr\", \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif highest_format_str == 'tif':\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"): output_ext, needs_float16 = \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, 
cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else: output_format = \"png\"; output_ext = \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif highest_format_str == 'png':\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"): output_ext, needs_float16 = \".exr\", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else: output_format = \"png\"; output_ext = \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- else: output_format, output_ext = \"png\", \".png\"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- else: # Default to configured 8-bit format if highest was JPG or unknown\r\n- output_format = fmt_8bit_config; output_ext = f\".{output_format}\"\r\n- if output_format == \"png\": save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif output_format == \"jpg\": save_params.extend([cv2.IMWRITE_JPEG_QUALITY, self.config.jpg_quality])\r\n-\r\n- # Final check: JPG must be 8-bit\r\n- if output_format == \"jpg\" and output_bit_depth == 16:\r\n- log.warning(f\"Output format is JPG, but target bit depth is 16. Forcing 8-bit for {map_type} ({resolution_key}).\")\r\n- output_dtype_target, output_bit_depth = np.uint8, 8\r\n-\r\n- log.debug(f\"Determined save format: {output_format}, ext: {output_ext}, bit_depth: {output_bit_depth}, needs_float16: {needs_float16}\")\r\n-\r\n- # --- 3. 
Final Data Type Conversion ---\r\n- img_to_save = image_data.copy() # Work on a copy\r\n- if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n- log.debug(f\"Converting image data from {img_to_save.dtype} to uint8 for saving.\")\r\n- if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- else: img_to_save = img_to_save.astype(np.uint8) # Direct cast for other types (e.g., bool)\r\n- elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n- log.debug(f\"Converting image data from {img_to_save.dtype} to uint16 for saving.\")\r\n- if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257 # Proper 8->16 bit scaling\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n- else: img_to_save = img_to_save.astype(np.uint16)\r\n- if needs_float16 and img_to_save.dtype != np.float16:\r\n- log.debug(f\"Converting image data from {img_to_save.dtype} to float16 for EXR saving.\")\r\n- if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n- else: log.warning(f\"Cannot convert {img_to_save.dtype} to float16 for EXR save.\"); return None\r\n-\r\n- # --- 4. 
Final Color Space Conversion (RGB -> BGR for non-EXR) ---\r\n- img_save_final = img_to_save\r\n- is_3_channel = len(img_to_save.shape) == 3 and img_to_save.shape[2] == 3\r\n- if is_3_channel and not output_format.startswith(\"exr\"):\r\n- log.debug(f\"Converting RGB to BGR for saving {map_type} ({resolution_key}) as {output_format}\")\r\n- try:\r\n- img_save_final = cv2.cvtColor(img_to_save, cv2.COLOR_RGB2BGR)\r\n- except Exception as cvt_err:\r\n- log.error(f\"Failed RGB->BGR conversion before save for {map_type} ({resolution_key}): {cvt_err}. Saving original RGB.\")\r\n- img_save_final = img_to_save # Fallback\r\n-\r\n- # --- 5. Construct Filename & Save ---\r\n- filename = self.config.target_filename_pattern.format(\r\n- base_name=asset_base_name,\r\n- map_type=map_type,\r\n- resolution=resolution_key,\r\n- ext=output_ext.lstrip('.')\r\n- )\r\n- output_path_temp = temp_dir / filename\r\n- log.debug(f\"Attempting to save: {output_path_temp.name} (Format: {output_format}, Dtype: {img_save_final.dtype})\")\r\n-\r\n- saved_successfully = False\r\n- actual_format_saved = output_format\r\n- try:\r\n- cv2.imwrite(str(output_path_temp), img_save_final, save_params)\r\n- saved_successfully = True\r\n- log.info(f\" > Saved {map_type} ({resolution_key}, {output_bit_depth}-bit) as {output_format}\")\r\n- except Exception as save_err:\r\n- log.error(f\"Save failed ({output_format}) for {map_type} {resolution_key}: {save_err}\")\r\n- # --- Try Fallback ---\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format and fallback_fmt_16 == \"png\":\r\n- log.warning(f\"Attempting fallback PNG save for {map_type} {resolution_key}\")\r\n- actual_format_saved = \"png\"; output_ext = \".png\";\r\n- filename = self.config.target_filename_pattern.format(base_name=asset_base_name, map_type=map_type, resolution=resolution_key, ext=\"png\")\r\n- output_path_temp = temp_dir / filename\r\n- save_params_fallback = [cv2.IMWRITE_PNG_COMPRESSION, 
self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)]\r\n- img_fallback = None; target_fallback_dtype = np.uint16\r\n-\r\n- # Convert original data (before float16 conversion) to uint16 for PNG fallback\r\n- if img_to_save.dtype == np.float16: # This means original was likely float or uint16/8 converted to float16\r\n- # Need to get back to uint16 - use the pre-float16 converted data if possible?\r\n- # Safest is to convert the float16 back to uint16\r\n- img_scaled = np.clip(img_to_save.astype(np.float32) * 65535.0, 0, 65535)\r\n- img_fallback = img_scaled.astype(target_fallback_dtype)\r\n- elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already uint16\r\n- else: log.error(f\"Cannot convert {img_to_save.dtype} for PNG fallback.\"); return None\r\n-\r\n- # --- Conditional RGB -> BGR Conversion for fallback ---\r\n- img_fallback_save_final = img_fallback\r\n- is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3\r\n- if is_3_channel_fallback: # PNG is non-EXR\r\n- log.debug(f\"Converting RGB to BGR for fallback PNG save {map_type} ({resolution_key})\")\r\n- try: img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)\r\n- except Exception as cvt_err_fb: log.error(f\"Failed RGB->BGR conversion for fallback PNG: {cvt_err_fb}. Saving original.\")\r\n-\r\n- try:\r\n- cv2.imwrite(str(output_path_temp), img_fallback_save_final, save_params_fallback)\r\n- saved_successfully = True\r\n- log.info(f\" > Saved {map_type} ({resolution_key}) using fallback PNG\")\r\n- except Exception as fallback_err:\r\n- log.error(f\"Fallback PNG save failed for {map_type} {resolution_key}: {fallback_err}\", exc_info=True)\r\n- else:\r\n- log.error(f\"No suitable fallback available or applicable for failed save of {map_type} ({resolution_key}) as {output_format}.\")\r\n-\r\n-\r\n- # --- 6. 
Return Result ---\r\n- if saved_successfully:\r\n- return {\r\n- \"path\": output_path_temp.relative_to(self.temp_dir), # Store relative path\r\n- \"resolution\": resolution_key,\r\n- \"width\": w, \"height\": h,\r\n- \"bit_depth\": output_bit_depth,\r\n- \"format\": actual_format_saved\r\n- }\r\n- else:\r\n- return None # Indicate save failure\r\n-\r\n- except Exception as e:\r\n- log.error(f\"Unexpected error in _save_image for {map_type} ({resolution_key}): {e}\", exc_info=True)\r\n- return None\r\n-\r\n- def process(self) -> Dict[str, List[str]]:\r\n- \"\"\"\r\n- Executes the full processing pipeline for the input path, handling\r\n- multiple assets within a single input if detected.\r\n-\r\n- Returns:\r\n- Dict[str, List[str]]: A dictionary summarizing the status of each\r\n- detected asset within the input:\r\n- {\"processed\": [asset_name1, ...],\r\n- \"skipped\": [asset_name2, ...],\r\n- \"failed\": [asset_name3, ...]}\r\n- \"\"\"\r\n- log.info(f\"Starting processing for input: {self.input_path.name}\")\r\n- overall_status = {\"processed\": [], \"skipped\": [], \"failed\": []}\r\n- supplier_name = self.config.supplier_name # Get once\r\n- loaded_data_cache = {} # Initialize cache for this process call\r\n-\r\n- try:\r\n- self._setup_workspace()\r\n- self._extract_input()\r\n- self._inventory_and_classify_files() # Classifies all files in self.classified_files\r\n-\r\n- # Determine distinct assets and file mapping\r\n- distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n- unmatched_files_paths = [p for p, name in file_to_base_name_map.items() if name is None]\r\n- if unmatched_files_paths:\r\n- log.warning(f\"Found {len(unmatched_files_paths)} files not matched to any specific asset base name. 
They will be copied to each asset's Extra folder.\")\r\n- log.debug(f\"Unmatched files: {[str(p) for p in unmatched_files_paths]}\")\r\n-\r\n-\r\n- # --- Loop through each detected asset ---\r\n- for current_asset_name in distinct_base_names:\r\n- log.info(f\"--- Processing detected asset: '{current_asset_name}' ---\")\r\n- asset_processed = False\r\n- asset_skipped = False\r\n- asset_failed = False\r\n- temp_metadata_path_asset = None # Track metadata file for this asset\r\n- map_details_asset = {} # Store map details for this asset\r\n-\r\n- try:\r\n- # --- Filter classified files for the current asset ---\r\n- filtered_classified_files_asset = defaultdict(list)\r\n- for category, file_list in self.classified_files.items():\r\n- for file_info in file_list:\r\n- file_path = file_info.get('source_path')\r\n- if file_path and file_to_base_name_map.get(file_path) == current_asset_name:\r\n- filtered_classified_files_asset[category].append(file_info)\r\n- log.debug(f\"Asset '{current_asset_name}': Filtered files - Maps: {len(filtered_classified_files_asset.get('maps',[]))}, Models: {len(filtered_classified_files_asset.get('models',[]))}, Extra: {len(filtered_classified_files_asset.get('extra',[]))}, Ignored: {len(filtered_classified_files_asset.get('ignored',[]))}\")\r\n-\r\n- # --- Assign Suffixes Per-Asset ---\r\n- log.debug(f\"Asset '{current_asset_name}': Assigning map type suffixes...\")\r\n- asset_maps = filtered_classified_files_asset.get('maps', [])\r\n- grouped_asset_maps = defaultdict(list)\r\n- for map_info in asset_maps:\r\n- # Group by the base map type stored earlier\r\n- grouped_asset_maps[map_info['map_type']].append(map_info)\r\n-\r\n- for base_map_type, maps_in_group in grouped_asset_maps.items():\r\n- log.debug(f\" Assigning suffixes for base type '{base_map_type}' within asset '{current_asset_name}' ({len(maps_in_group)} maps)\")\r\n- # Sorting is already done by _inventory_and_classify_files, just need to assign suffix\r\n- respect_variants = 
base_map_type in self.config.respect_variant_map_types\r\n- for i, map_info in enumerate(maps_in_group):\r\n- if respect_variants:\r\n- final_map_type = f\"{base_map_type}-{i + 1}\"\r\n- else:\r\n- final_map_type = base_map_type\r\n- log.debug(f\" Updating '{map_info['source_path']}' map_type from '{map_info['map_type']}' to '{final_map_type}'\")\r\n- map_info['map_type'] = final_map_type # Update the map_type in the dictionary\r\n-\r\n- # --- Determine Metadata for this specific asset ---\r\n- asset_specific_metadata = self._determine_single_asset_metadata(current_asset_name, filtered_classified_files_asset)\r\n- current_asset_metadata = {\r\n- \"asset_name\": current_asset_name,\r\n- \"supplier_name\": supplier_name,\r\n- \"asset_category\": asset_specific_metadata.get(\"asset_category\", self.config.default_asset_category),\r\n- \"archetype\": asset_specific_metadata.get(\"archetype\", \"Unknown\"),\r\n- # Initialize fields that will be populated by processing steps\r\n- \"maps_present\": [],\r\n- \"merged_maps\": [],\r\n- \"shader_features\": [],\r\n- \"source_files_in_extra\": [], # Will be populated in _generate_metadata\r\n- \"image_stats_1k\": {},\r\n- \"map_details\": {}, # Will be populated by _process_maps\r\n- \"aspect_ratio_change_string\": \"N/A\"\r\n- }\r\n-\r\n- # --- Skip Check for this specific asset ---\r\n- if not self.overwrite:\r\n- supplier_sanitized = self._sanitize_filename(supplier_name)\r\n- asset_name_sanitized = self._sanitize_filename(current_asset_name)\r\n- final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n- metadata_file_path = final_dir / self.config.metadata_filename\r\n- if final_dir.exists() and metadata_file_path.is_file():\r\n- log.info(f\"Output directory and metadata found for asset '{asset_name_sanitized}' and overwrite is False. 
Skipping this asset.\")\r\n- overall_status[\"skipped\"].append(current_asset_name)\r\n- asset_skipped = True\r\n- continue # Skip to the next asset in the loop\r\n- elif self.overwrite:\r\n- log.info(f\"Overwrite flag is set. Processing asset '{current_asset_name}' even if output exists.\")\r\n-\r\n- # --- Process Individual Maps for this asset ---\r\n- processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps = self._process_individual_maps(\r\n- filtered_maps_list=filtered_classified_files_asset.get('maps', []),\r\n- current_asset_metadata=current_asset_metadata, # Pass base metadata\r\n- loaded_data_cache=loaded_data_cache # Pass cache\r\n- )\r\n- # Update current metadata with results\r\n- current_asset_metadata[\"image_stats_1k\"] = image_stats_asset\r\n- current_asset_metadata[\"aspect_ratio_change_string\"] = aspect_ratio_change_string_asset\r\n- # Add newly ignored rough maps to the asset's specific ignored list\r\n- if ignored_rough_maps:\r\n- filtered_classified_files_asset['ignored'].extend(ignored_rough_maps)\r\n- # Store map details (like source bit depth) collected during processing\r\n- # This was previously stored in self.metadata[\"map_details\"]\r\n- map_details_asset = {k: v for k, v in current_asset_metadata.pop(\"map_details\", {}).items() if k in processed_maps_details_asset}\r\n-\r\n-\r\n- # --- Merge Maps from Source for this asset ---\r\n- merged_maps_details_asset = self._merge_maps_from_source(\r\n- processed_maps_details_asset=processed_maps_details_asset, # Still needed for source info lookup? Or pass classified files? 
Check impl.\r\n- filtered_classified_files=filtered_classified_files_asset,\r\n- current_asset_metadata=current_asset_metadata,\r\n- loaded_data_cache=loaded_data_cache # Pass cache\r\n- )\r\n-\r\n- # --- Generate Metadata for this asset ---\r\n- temp_metadata_path_asset = self._generate_metadata_file(\r\n- current_asset_metadata=current_asset_metadata, # Pass the populated dict\r\n- processed_maps_details_asset=processed_maps_details_asset,\r\n- merged_maps_details_asset=merged_maps_details_asset,\r\n- filtered_classified_files_asset=filtered_classified_files_asset,\r\n- unmatched_files_paths=unmatched_files_paths, # Pass the list of unmatched files\r\n- map_details_asset=map_details_asset # Pass the filtered map details\r\n- )\r\n-\r\n- # --- Organize Output Files for this asset ---\r\n- self._organize_output_files(\r\n- current_asset_name=current_asset_name,\r\n- processed_maps_details_asset=processed_maps_details_asset,\r\n- merged_maps_details_asset=merged_maps_details_asset,\r\n- filtered_classified_files_asset=filtered_classified_files_asset,\r\n- unmatched_files_paths=unmatched_files_paths, # Pass unmatched files for copying\r\n- temp_metadata_path=temp_metadata_path_asset\r\n- )\r\n-\r\n- log.info(f\"--- Asset '{current_asset_name}' processed successfully. 
---\")\r\n- overall_status[\"processed\"].append(current_asset_name)\r\n- asset_processed = True\r\n-\r\n- except Exception as asset_err:\r\n- log.error(f\"--- Failed processing asset '{current_asset_name}': {asset_err} ---\", exc_info=True)\r\n- overall_status[\"failed\"].append(current_asset_name)\r\n- asset_failed = True\r\n- # Continue to the next asset even if one fails\r\n-\r\n- # --- Determine Final Consolidated Status ---\r\n- # This logic remains the same, interpreting the overall_status dict\r\n- final_status = \"failed\" # Default if nothing else matches\r\n- if overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n- final_status = \"processed\"\r\n- elif overall_status[\"skipped\"] and not overall_status[\"processed\"] and not overall_status[\"failed\"]:\r\n- final_status = \"skipped\"\r\n- elif overall_status[\"processed\"] and overall_status[\"failed\"]:\r\n- final_status = \"partial_success\" # Indicate some succeeded, some failed\r\n- elif overall_status[\"processed\"] and overall_status[\"skipped\"] and not overall_status[\"failed\"]:\r\n- final_status = \"processed\" # Consider processed+skipped as processed overall\r\n- elif overall_status[\"skipped\"] and overall_status[\"failed\"] and not overall_status[\"processed\"]:\r\n- final_status = \"failed\" # If only skips and fails, report as failed\r\n- # Add any other combinations if needed\r\n-\r\n- log.info(f\"Finished processing input '{self.input_path.name}'. Overall Status: {final_status}. 
Summary: {overall_status}\")\r\n- # Return the detailed status dictionary instead of just a string\r\n- # The wrapper function in main.py will interpret this\r\n- return overall_status\r\n-\r\n- except Exception as e:\r\n- # Catch errors during initial setup (before asset loop)\r\n- if not isinstance(e, (AssetProcessingError, ConfigurationError)):\r\n- log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name} during setup: {e}\")\r\n- if not isinstance(e, AssetProcessingError):\r\n- raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n- else:\r\n- raise\r\n- finally:\r\n- # Ensure cleanup always happens\r\n- self._cleanup_workspace()\r\n-\r\n- def _setup_workspace(self):\r\n- \"\"\"Creates a temporary directory for processing.\"\"\"\r\n- try:\r\n- self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n- log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n-\r\n- def _extract_input(self):\r\n- \"\"\"Extracts ZIP or copies folder contents to the temporary workspace.\"\"\"\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n-\r\n- log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n- try:\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- zip_ref.extractall(self.temp_dir)\r\n- log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n- elif self.input_path.is_dir():\r\n- log.debug(f\"Copying directory contents: {self.input_path}\")\r\n- for item in self.input_path.iterdir():\r\n- destination = self.temp_dir / item.name\r\n- if item.is_dir():\r\n- # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n- try:\r\n- 
shutil.copytree(item, destination, dirs_exist_ok=True)\r\n- except TypeError: # Fallback for older Python\r\n- if not destination.exists():\r\n- shutil.copytree(item, destination)\r\n- else:\r\n- log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n-\r\n- else:\r\n- shutil.copy2(item, destination)\r\n- log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n- except zipfile.BadZipFile:\r\n- raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n- except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n-\r\n- def _inventory_and_classify_files(self):\r\n- \"\"\"\r\n- Scans workspace, classifies files according to preset rules, handling\r\n- 16-bit prioritization and multiple variants of the same base map type.\r\n- \"\"\"\r\n- if not self.temp_dir:\r\n- raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n-\r\n- log.info(\"Scanning and classifying files...\")\r\n- log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n- all_files_rel = []\r\n- for root, _, files in os.walk(self.temp_dir):\r\n- root_path = Path(root)\r\n- for file in files:\r\n- full_path = root_path / file\r\n- relative_path = full_path.relative_to(self.temp_dir)\r\n- all_files_rel.append(relative_path)\r\n-\r\n- log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n-\r\n- # --- Initialization ---\r\n- processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n- potential_map_candidates = [] # List to store potential map file info\r\n- # Reset classified files (important if this method is ever called multiple times)\r\n- self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n-\r\n-\r\n- # --- Step 1: Identify Explicit 'Extra' Files ---\r\n- 
log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n- compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n- log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path in processed_files: continue\r\n- for compiled_regex in compiled_extra_regex:\r\n- if compiled_regex.search(file_rel_path.name):\r\n- log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n- self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n- processed_files.add(file_rel_path)\r\n- log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n- break # Stop checking extra patterns for this file\r\n-\r\n- # --- Step 2: Identify Model Files ---\r\n- log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n- compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n- log.debug(f\" Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path in processed_files: continue\r\n- for compiled_regex in compiled_model_regex:\r\n- if compiled_regex.search(file_rel_path.name):\r\n- log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n- self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n- processed_files.add(file_rel_path)\r\n- log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n- break # Stop checking model patterns for this file\r\n-\r\n- # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n- log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n- # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n- 
compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n-\r\n- for file_rel_path in all_files_rel:\r\n- # Skip files already classified as Extra or Model\r\n- if file_rel_path in processed_files:\r\n- continue\r\n-\r\n- file_stem = file_rel_path.stem\r\n- match_found = False\r\n-\r\n- # Iterate through base types and their associated regex tuples\r\n- for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n- if match_found: break # Stop checking types for this file once matched\r\n-\r\n- # Get the original keywords list for the current rule index\r\n- # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n- original_rule = None\r\n- # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n- if regex_tuples:\r\n- current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n- if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n- rule_candidate = self.config.map_type_mapping[current_rule_index]\r\n- # Verify it's the correct rule by checking target_type\r\n- if rule_candidate.get(\"target_type\") == base_map_type:\r\n- original_rule = rule_candidate\r\n- else:\r\n- log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. Searching...\")\r\n- # Fallback search if index doesn't match (shouldn't happen ideally)\r\n- for idx, rule in enumerate(self.config.map_type_mapping):\r\n- if rule.get(\"target_type\") == base_map_type:\r\n- original_rule = rule\r\n- log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n- break\r\n-\r\n- original_keywords_list = []\r\n- if original_rule and 'keywords' in original_rule:\r\n- original_keywords_list = original_rule['keywords']\r\n- else:\r\n- log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. 
Keyword indexing may fail.\")\r\n-\r\n- for kw_regex, original_keyword, rule_index in regex_tuples:\r\n- if kw_regex.search(file_stem):\r\n- log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n-\r\n- # Find the index of the matched keyword within its rule's list\r\n- keyword_index_in_rule = -1 # Default if not found\r\n- if original_keywords_list:\r\n- try:\r\n- # Use the original_keyword string directly\r\n- keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n- except ValueError:\r\n- log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? {original_keywords_list}\")\r\n- else:\r\n- log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n-\r\n- # Add candidate only if not already added\r\n- if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n- potential_map_candidates.append({\r\n- 'source_path': file_rel_path,\r\n- 'matched_keyword': original_keyword,\r\n- 'base_map_type': base_map_type,\r\n- 'preset_rule_index': rule_index,\r\n- 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n- 'is_16bit_source': False\r\n- })\r\n- else:\r\n- log.warning(f\" '{file_rel_path}' was already added as a candidate? 
Skipping duplicate add.\")\r\n-\r\n- match_found = True\r\n- break # Stop checking regex tuples for this base_type once matched\r\n-\r\n- log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n-\r\n- # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n- log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n- compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n- for file_rel_path in all_files_rel:\r\n- # Skip if already processed or already identified as a candidate\r\n- if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n- continue\r\n-\r\n- for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n- log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n- match = compiled_regex.search(file_rel_path.name) # Store result\r\n- if match:\r\n- log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n- potential_map_candidates.append({\r\n- 'source_path': file_rel_path,\r\n- 'matched_keyword': 'N/A (16bit variant)', # Placeholder keyword\r\n- 'base_map_type': base_type,\r\n- 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n- 'is_16bit_source': True # Mark as 16-bit immediately\r\n- })\r\n- log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n- # Don't add to processed_files yet, let Step 4 handle filtering\r\n- break # Stop checking bit depth patterns for this file\r\n-\r\n- log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n-\r\n- # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n- log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n- 
compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n- candidates_to_keep = []\r\n- candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n-\r\n- # Mark 16-bit candidates\r\n- for candidate in potential_map_candidates:\r\n- base_type = candidate['base_map_type']\r\n- # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n- if base_type in compiled_bit_depth_regex:\r\n- if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n- candidate['is_16bit_source'] = True\r\n- log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n-\r\n-\r\n- # Identify base types that have a 16-bit version present\r\n- prioritized_16bit_bases = {\r\n- candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n- }\r\n- log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n-\r\n- # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n- for candidate in potential_map_candidates:\r\n- if candidate['is_16bit_source']:\r\n- candidates_to_keep.append(candidate)\r\n- log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} ({candidate['base_map_type']})\")\r\n- elif candidate['base_map_type'] not in prioritized_16bit_bases:\r\n- candidates_to_keep.append(candidate)\r\n- log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n- else:\r\n- # This is an 8-bit candidate whose 16-bit counterpart exists\r\n- candidates_to_ignore.append(candidate)\r\n- log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n-\r\n- # Add ignored 8-bit files to the main ignored list\r\n- for ignored_candidate in candidates_to_ignore:\r\n- self.classified_files[\"ignored\"].append({\r\n- 'source_path': 
ignored_candidate['source_path'],\r\n- 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n- })\r\n- processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n-\r\n- log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n-\r\n- # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n- log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n- # from collections import defaultdict # Moved import to top of file\r\n- grouped_by_base_type = defaultdict(list)\r\n- for candidate in candidates_to_keep:\r\n- grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n-\r\n- final_map_list = []\r\n- for base_map_type, candidates in grouped_by_base_type.items():\r\n- # --- DIAGNOSTIC LOGGING START ---\r\n- candidate_paths_str = [str(c['source_path']) for c in candidates]\r\n- log.debug(f\" [DIAGNOSIS] Processing base_map_type: '{base_map_type}'. Candidates before sort: {candidate_paths_str}\")\r\n- # --- DIAGNOSTIC LOGGING END ---\r\n- log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n-\r\n- # --- NEW SORTING LOGIC ---\r\n- # Sort candidates based on:\r\n- # 1. The index of the rule object in the preset's map_type_mapping list.\r\n- # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n- # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n- candidates.sort(key=lambda c: (\r\n- c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n- c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n- str(c['source_path'])\r\n- ))\r\n- # --- END NEW SORTING LOGIC ---\r\n-\r\n- # Removed diagnostic log\r\n-\r\n- # Add sorted candidates to the final list, but without assigning the suffix yet.\r\n- # Suffix assignment will happen per-asset later.\r\n- for final_candidate in candidates: # Use the directly sorted list\r\n- # Store the base map type for now.\r\n- final_map_list.append({\r\n- \"map_type\": base_map_type, # Store BASE type only\r\n- \"source_path\": final_candidate[\"source_path\"],\r\n- \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n- \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n- \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n- })\r\n- processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n-\r\n- self.classified_files[\"maps\"] = final_map_list\r\n-\r\n- # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n- log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n- remaining_count = 0\r\n- for file_rel_path in all_files_rel:\r\n- if file_rel_path not in processed_files:\r\n- log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n- self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})\r\n- remaining_count += 1\r\n- # No need to add to processed_files here, it's the final step\r\n- log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n-\r\n- # --- Final Summary ---\r\n- # Note: self.metadata[\"source_files_in_extra\"] is now populated per-asset in _generate_metadata_file\r\n- log.info(f\"File classification complete.\")\r\n- 
log.debug(\"--- Final Classification Summary (v2) ---\")\r\n- map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n- model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n- extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n- ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n- log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n- log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n- log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n- log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n- log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n-\r\n-\r\n- def _determine_base_metadata(self) -> Tuple[List[str], Dict[Path, Optional[str]]]:\r\n- \"\"\"\r\n- Determines distinct asset base names within the input based on preset rules\r\n- and maps each relevant source file to its determined base name.\r\n-\r\n- Returns:\r\n- Tuple[List[str], Dict[Path, Optional[str]]]:\r\n- - A list of unique, sanitized base names found.\r\n- - A dictionary mapping source file relative paths to their determined\r\n- base name string (or None if no base name could be determined for that file).\r\n- \"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- log.info(\"Determining distinct base names and file mapping...\")\r\n-\r\n- # Combine map and model files for base name determination\r\n- relevant_files = self.classified_files.get('maps', []) + self.classified_files.get('models', [])\r\n- if not relevant_files:\r\n- log.warning(\"No map or model files found to determine base name(s).\")\r\n- # Fallback: Use input path name as a single asset\r\n- 
input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n- sanitized_input_name = self._sanitize_filename(input_name or \"UnknownInput\")\r\n- # Map all files (maps, models, extra, ignored) to this fallback name\r\n- all_files_paths = [f['source_path'] for cat in self.classified_files.values() for f in cat if 'source_path' in f]\r\n- file_to_base_name_map = {f_path: sanitized_input_name for f_path in all_files_paths}\r\n- log.info(f\"Using input path name '{sanitized_input_name}' as the single asset name.\")\r\n- return [sanitized_input_name], file_to_base_name_map\r\n-\r\n- # --- Determine Base Names from Files ---\r\n- separator = self.config.source_naming_separator\r\n- indices_dict = self.config.source_naming_indices\r\n- base_index_raw = indices_dict.get('base_name')\r\n- base_index = None\r\n- if base_index_raw is not None:\r\n- try:\r\n- base_index = int(base_index_raw)\r\n- except (ValueError, TypeError):\r\n- log.warning(f\"Could not convert base_name index '{base_index_raw}' to integer. Base name determination might be inaccurate.\")\r\n-\r\n- file_to_base_name_map: Dict[Path, Optional[str]] = {}\r\n- potential_base_names_per_file: Dict[Path, str] = {} # Store potential name for each file path\r\n-\r\n- if isinstance(base_index, int):\r\n- log.debug(f\"Attempting base name extraction using separator '{separator}' and index {base_index}.\")\r\n- for file_info in relevant_files:\r\n- file_path = file_info['source_path']\r\n- stem = file_path.stem\r\n- parts = stem.split(separator)\r\n- if len(parts) > base_index:\r\n- extracted_name = parts[base_index]\r\n- sanitized_name = self._sanitize_filename(extracted_name)\r\n- if sanitized_name: # Ensure we don't add empty names\r\n- potential_base_names_per_file[file_path] = sanitized_name\r\n- log.debug(f\" File '{file_path.name}' -> Potential Base Name: '{sanitized_name}'\")\r\n- else:\r\n- log.debug(f\" File '{file_path.name}' -> Extracted empty name at index {base_index}. 
Marking as None.\")\r\n- file_to_base_name_map[file_path] = None # Explicitly mark as None if extraction yields empty\r\n- else:\r\n- log.debug(f\" File '{file_path.name}' -> Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}. Marking as None.\")\r\n- file_to_base_name_map[file_path] = None # Mark as None if index is invalid for this file\r\n- else:\r\n- log.warning(\"Base name index not configured or invalid. Cannot determine distinct assets based on index. Treating as single asset.\")\r\n- # Fallback to common prefix if no valid index\r\n- stems = [f['source_path'].stem for f in relevant_files]\r\n- common_prefix_name = os.path.commonprefix(stems) if stems else \"\"\r\n- sanitized_common_name = self._sanitize_filename(common_prefix_name or self.input_path.stem or \"UnknownAsset\")\r\n- log.info(f\"Using common prefix '{sanitized_common_name}' as the single asset name.\")\r\n- # Map all relevant files to this single name\r\n- for file_info in relevant_files:\r\n- potential_base_names_per_file[file_info['source_path']] = sanitized_common_name\r\n-\r\n- # --- Consolidate Distinct Names and Final Mapping ---\r\n- distinct_base_names_set = set(potential_base_names_per_file.values())\r\n- distinct_base_names = sorted(list(distinct_base_names_set)) # Sort for consistent processing order\r\n-\r\n- # Populate the final map, including files that didn't match the index rule (marked as None earlier)\r\n- for file_info in relevant_files:\r\n- file_path = file_info['source_path']\r\n- if file_path not in file_to_base_name_map: # If not already marked as None\r\n- file_to_base_name_map[file_path] = potential_base_names_per_file.get(file_path) # Assign determined name or None if somehow missed\r\n-\r\n- # Add files from 'extra' and 'ignored' to the map, marking them as None for base name\r\n- for category in ['extra', 'ignored']:\r\n- for file_info in self.classified_files.get(category, []):\r\n- file_path = file_info['source_path']\r\n- if file_path not in 
file_to_base_name_map: # Avoid overwriting if somehow already mapped\r\n- file_to_base_name_map[file_path] = None\r\n- log.debug(f\" File '{file_path.name}' (Category: {category}) -> Marked as None (No Base Name).\")\r\n-\r\n-\r\n- if not distinct_base_names:\r\n- # This case should be rare due to fallbacks, but handle it.\r\n- log.warning(\"No distinct base names could be determined. Using input name as fallback.\")\r\n- input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n- fallback_name = self._sanitize_filename(input_name or \"FallbackAsset\")\r\n- distinct_base_names = [fallback_name]\r\n- # Remap all files to this single fallback name\r\n- file_to_base_name_map = {f_path: fallback_name for f_path in file_to_base_name_map.keys()}\r\n-\r\n-\r\n- log.info(f\"Determined {len(distinct_base_names)} distinct asset base name(s): {distinct_base_names}\")\r\n- log.debug(f\"File-to-BaseName Map ({len(file_to_base_name_map)} entries): { {str(k): v for k, v in file_to_base_name_map.items()} }\") # Log string paths for readability\r\n-\r\n- return distinct_base_names, file_to_base_name_map\r\n-\r\n- def _determine_single_asset_metadata(self, asset_base_name: str, filtered_classified_files: Dict[str, List[Dict]]) -> Dict[str, str]:\r\n- \"\"\"\r\n- Determines the asset_category and archetype for a single, specific asset\r\n- based on its filtered list of classified files.\r\n-\r\n- Args:\r\n- asset_base_name: The determined base name for this specific asset.\r\n- filtered_classified_files: A dictionary containing only the classified\r\n- files (maps, models, etc.) 
belonging to this asset.\r\n-\r\n- Returns:\r\n- A dictionary containing {\"asset_category\": str, \"archetype\": str}.\r\n- \"\"\"\r\n- log.debug(f\"Determining category and archetype for asset: '{asset_base_name}'\")\r\n- determined_category = self.config.default_asset_category # Start with default\r\n- determined_archetype = \"Unknown\"\r\n-\r\n- # --- Determine Asset Category ---\r\n- if filtered_classified_files.get(\"models\"):\r\n- determined_category = \"Asset\"\r\n- log.debug(f\" Category set to 'Asset' for '{asset_base_name}' due to model file presence.\")\r\n- else:\r\n- # Check for Decal keywords only if not an Asset\r\n- decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n- found_decal = False\r\n- # Check map names first for decal keywords\r\n- candidate_files = [f['source_path'] for f in filtered_classified_files.get('maps', [])]\r\n- # Fallback to checking extra files if no maps found for this asset\r\n- if not candidate_files:\r\n- candidate_files = [f['source_path'] for f in filtered_classified_files.get('extra', [])]\r\n-\r\n- if decal_keywords:\r\n- for file_path in candidate_files:\r\n- # Check against the specific file's name within this asset's context\r\n- for keyword in decal_keywords:\r\n- if keyword.lower() in file_path.name.lower():\r\n- determined_category = \"Decal\"\r\n- found_decal = True; break\r\n- if found_decal: break\r\n- if found_decal: log.debug(f\" Category set to 'Decal' for '{asset_base_name}' due to keyword match.\")\r\n- # If not Asset or Decal, it remains the default (e.g., \"Texture\")\r\n-\r\n- log.debug(f\" Determined Category for '{asset_base_name}': {determined_category}\")\r\n-\r\n- # --- Determine Archetype (Usage) ---\r\n- archetype_rules = self.config.archetype_rules\r\n- # Use stems from maps and models belonging *only* to this asset\r\n- check_stems = [f['source_path'].stem.lower() for f in filtered_classified_files.get('maps', [])]\r\n- 
check_stems.extend([f['source_path'].stem.lower() for f in filtered_classified_files.get('models', [])])\r\n- # Also check the determined base name itself\r\n- check_stems.append(asset_base_name.lower())\r\n-\r\n- if check_stems:\r\n- best_match_archetype = \"Unknown\"\r\n- # Using simple \"first match wins\" logic as before\r\n- for rule in archetype_rules:\r\n- if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n- arch_name, rules_dict = rule\r\n- match_any = rules_dict.get(\"match_any\", [])\r\n- matched_any_keyword = False\r\n- if match_any:\r\n- for keyword in match_any:\r\n- kw_lower = keyword.lower()\r\n- for stem in check_stems:\r\n- if kw_lower in stem: # Simple substring check\r\n- matched_any_keyword = True\r\n- break # Found a match for this keyword\r\n- if matched_any_keyword: break # Found a match for this rule's keywords\r\n-\r\n- if matched_any_keyword:\r\n- best_match_archetype = arch_name\r\n- log.debug(f\" Archetype match '{arch_name}' for '{asset_base_name}' based on keywords: {match_any}\")\r\n- break # First rule match wins\r\n-\r\n- determined_archetype = best_match_archetype\r\n-\r\n- log.debug(f\" Determined Archetype for '{asset_base_name}': {determined_archetype}\")\r\n-\r\n- return {\"asset_category\": determined_category, \"archetype\": determined_archetype}\r\n-\r\n-\r\n- def _process_individual_maps(self, filtered_maps_list: List[Dict], current_asset_metadata: Dict, loaded_data_cache: dict) -> Tuple[Dict[str, Dict[str, Dict]], Dict[str, Dict], str, List[Dict]]:\r\n- \"\"\"\r\n- Processes, resizes, and saves classified map files for a specific asset\r\n- that are NOT used as inputs for merge rules. 
Uses helper functions.\r\n-\r\n- Args:\r\n- filtered_maps_list: List of map dictionaries belonging to the current asset.\r\n- current_asset_metadata: Metadata dictionary for the current asset.\r\n- loaded_data_cache: Cache dictionary for loaded/resized source data.\r\n-\r\n- Returns:\r\n- Tuple containing:\r\n- - processed_maps_details_asset: Dict mapping map_type to resolution details.\r\n- - image_stats_asset: Dict mapping map_type to calculated image statistics.\r\n- - aspect_ratio_change_string_asset: String indicating aspect ratio change.\r\n- - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority.\r\n- \"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n- log.info(f\"Processing identified map files for asset '{asset_name}'...\")\r\n-\r\n- # Initialize results specific to this asset\r\n- processed_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n- image_stats_asset: Dict[str, Dict] = {}\r\n- map_details_asset: Dict[str, Dict] = {} # Store details like source bit depth, gloss inversion\r\n- aspect_ratio_change_string_asset: str = \"N/A\"\r\n- ignored_rough_maps: List[Dict] = [] # Store ignored native rough maps\r\n-\r\n- # --- Settings retrieval ---\r\n- resolutions = self.config.image_resolutions\r\n- stats_res_key = self.config.calculate_stats_resolution\r\n- stats_target_dim = resolutions.get(stats_res_key)\r\n- if not stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. 
Stats skipped for '{asset_name}'.\")\r\n- gloss_keywords = self.config.source_glossiness_keywords\r\n- target_pattern = self.config.target_filename_pattern\r\n- base_name = asset_name # Use the asset name passed in\r\n-\r\n- # --- Pre-process Glossiness -> Roughness ---\r\n- preprocessed_data = {}\r\n- derived_from_gloss_flag = {}\r\n- gloss_map_info_for_rough, native_rough_map_info = None, None\r\n- # Use the filtered list for this asset\r\n- for map_info in filtered_maps_list:\r\n- if map_info['map_type'] == 'ROUGH':\r\n- is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n- if is_gloss: gloss_map_info_for_rough = map_info\r\n- else: native_rough_map_info = map_info\r\n-\r\n- rough_source_to_use = None\r\n- if gloss_map_info_for_rough:\r\n- rough_source_to_use = gloss_map_info_for_rough\r\n- derived_from_gloss_flag['ROUGH'] = True\r\n- if native_rough_map_info:\r\n- log.warning(f\"Asset '{asset_name}': Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n- # Instead of modifying lists, just add the ignored info to be returned\r\n- ignored_rough_maps.append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n- # We still need to ensure the native rough map isn't processed later in the main loop\r\n- elif native_rough_map_info:\r\n- rough_source_to_use = native_rough_map_info\r\n- derived_from_gloss_flag['ROUGH'] = False\r\n-\r\n- if derived_from_gloss_flag.get('ROUGH'):\r\n- # Ensure rough_source_to_use is not None before proceeding\r\n- if rough_source_to_use:\r\n- source_path = self.temp_dir / rough_source_to_use['source_path']\r\n- log.info(f\"Asset '{asset_name}': Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n- try:\r\n- img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n- if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n- original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n- if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n- if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n- elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n- else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n- # Store tuple: (inverted_float_data, original_dtype)\r\n- preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n- log.debug(f\"Asset '{asset_name}': Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n- except Exception as e: log.error(f\"Asset '{asset_name}': Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n- else:\r\n- log.error(f\"Asset '{asset_name}': Gloss map identified for ROUGH, but source info is missing.\")\r\n-\r\n-\r\n- # --- Main Processing Loop 
---\r\n- # Use the filtered list passed to the function\r\n- maps_to_process = list(filtered_maps_list)\r\n- for map_info in maps_to_process:\r\n- map_type = map_info['map_type']\r\n- source_path_rel = map_info['source_path']\r\n-\r\n- # Skip processing native rough map if gloss was prioritized and ignored\r\n- if map_type == 'ROUGH' and any(ignored['source_path'] == source_path_rel for ignored in ignored_rough_maps):\r\n- log.debug(f\"Asset '{asset_name}': Skipping processing of native rough map '{source_path_rel}' as gloss version was prioritized.\")\r\n- continue\r\n-\r\n- original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n- log.info(f\"-- Asset '{asset_name}': Processing Map Type: {map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n- img_processed, source_dtype = None, None\r\n- # Store details locally for this asset\r\n- current_map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n-\r\n- try:\r\n- # --- 1. 
Get/Load Source Data ---\r\n- if map_type in preprocessed_data:\r\n- log.debug(f\"Asset '{asset_name}': Using pre-processed data for {map_type}.\")\r\n- # Unpack tuple: (inverted_float_data, original_dtype)\r\n- img_processed, source_dtype = preprocessed_data[map_type]\r\n- else:\r\n- full_source_path = self.temp_dir / source_path_rel\r\n- read_flag = cv2.IMREAD_UNCHANGED if map_type.upper() == 'MASK' else (cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED)\r\n- log.debug(f\"Asset '{asset_name}': Loading source {source_path_rel.name} with flag: {'UNCHANGED' if read_flag == cv2.IMREAD_UNCHANGED else ('GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'DEFAULT')}\")\r\n- img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n- if img_loaded is None:\r\n- raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n-\r\n- if len(img_loaded.shape) == 3:\r\n- log.debug(f\"Asset '{asset_name}': Converting loaded image from BGR to RGB for {source_path_rel.name}.\")\r\n- img_processed = cv2.cvtColor(img_loaded, cv2.COLOR_BGR2RGB)\r\n- else:\r\n- img_processed = img_loaded.copy()\r\n-\r\n- source_dtype = img_loaded.dtype\r\n- log.debug(f\"Asset '{asset_name}': Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape} (After potential BGR->RGB conversion)\")\r\n-\r\n- current_map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n-\r\n- # --- 2. 
Handle Alpha Mask ---\r\n- if map_type == 'MASK' and img_processed is not None:\r\n- # [ Existing MASK handling logic remains largely the same, just add asset_name to logs ]\r\n- log.debug(f\"Asset '{asset_name}': Processing as MASK type.\")\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Initial shape: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n- shape = img_processed.shape\r\n- if len(shape) == 3 and shape[2] == 4:\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Extracting alpha channel (4-channel source).\")\r\n- img_processed = img_processed[:, :, 3]\r\n- elif len(shape) == 3 and shape[2] == 3:\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Converting BGR to Grayscale (3-channel source).\")\r\n- img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n- elif len(shape) == 2:\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Source is already grayscale (2-channel shape).\")\r\n- else:\r\n- log.warning(f\"Asset '{asset_name}': MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\")\r\n-\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Shape after channel extraction/conversion: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n-\r\n- if img_processed.dtype != np.uint8:\r\n- log.debug(f\"Asset '{asset_name}': Converting mask from {img_processed.dtype} to uint8.\")\r\n- if img_processed.dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- else: img_processed = img_processed.astype(np.uint8)\r\n- log.debug(f\"Asset '{asset_name}': MASK processing: Shape after dtype conversion: {img_processed.shape}, dtype: {img_processed.dtype}\")\r\n-\r\n-\r\n- if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n- orig_h, orig_w = img_processed.shape[:2]\r\n- # Use local dictionary for this asset's results\r\n- processed_maps_details_asset.setdefault(map_type, {})\r\n- max_original_dimension = max(orig_w, orig_h)\r\n-\r\n- # --- 3. 
Resize & Save Loop ---\r\n- for res_key, target_dim in resolutions.items():\r\n- if target_dim > max_original_dimension:\r\n- log.debug(f\"Asset '{asset_name}': Skipping {res_key} ({target_dim}px) for {map_type}: Target larger than original ({max_original_dimension}px).\")\r\n- continue\r\n- log.debug(f\"Asset '{asset_name}': Processing {map_type} for resolution: {res_key}...\")\r\n- if orig_w <= 0 or orig_h <= 0: log.warning(f\"Asset '{asset_name}': Invalid original dims for {map_type}, skipping resize {res_key}.\"); continue\r\n- target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n- interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n- try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n- except Exception as resize_err: log.error(f\"Asset '{asset_name}': Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n-\r\n- # --- 3a. Calculate Stats ---\r\n- if res_key == stats_res_key and stats_target_dim:\r\n- log.debug(f\"Asset '{asset_name}': Calculating stats for {map_type} using {res_key} image...\")\r\n- stats = _calculate_image_stats(img_resized)\r\n- # Store stats locally for this asset\r\n- if stats: image_stats_asset[map_type] = stats\r\n- else: log.warning(f\"Asset '{asset_name}': Stats calculation failed for {map_type} at {res_key}.\")\r\n-\r\n- # Calculate aspect change string (only once per asset)\r\n- lowest_res_key = min(resolutions, key=resolutions.get)\r\n- # Use local variable for check and assignment\r\n- if aspect_ratio_change_string_asset == \"N/A\" and res_key == lowest_res_key:\r\n- log.debug(f\"Asset '{asset_name}': Aspect ratio calculation condition met.\")\r\n- try:\r\n- aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n- aspect_ratio_change_string_asset = aspect_string # Store locally\r\n- log.debug(f\"Asset '{asset_name}': Stored aspect ratio 
change string using lowest res '{res_key}': '{aspect_string}'\")\r\n- except Exception as aspect_err:\r\n- log.error(f\"Asset '{asset_name}': Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", exc_info=True)\r\n- aspect_ratio_change_string_asset = \"Error\" # Indicate calculation failure locally\r\n-\r\n- # --- 3b. Determine Output Bit Depth & Format ---\r\n- # [ Existing logic for determining bit depth and format remains the same ]\r\n- bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n- current_dtype = img_resized.dtype\r\n- output_dtype_target, output_bit_depth = None, 8\r\n- if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n- elif bit_depth_rule == 'respect':\r\n- if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8\r\n- else: output_dtype_target, output_bit_depth = np.uint8, 8\r\n-\r\n- # --- 3c. Determine Output Format ---\r\n- # [ Existing logic for determining output format remains the same, add asset_name to logs ]\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format()\r\n- threshold = self.config.resolution_threshold_for_jpg\r\n- force_lossless = map_type in self.config.force_lossless_map_types\r\n-\r\n- if force_lossless:\r\n- log.debug(f\"Asset '{asset_name}': Format forced to lossless for map type '{map_type}'.\")\r\n- # ... 
(rest of force_lossless logic) ...\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else:\r\n- if output_format != \"png\":\r\n- log.warning(f\"Asset '{asset_name}': Primary 16-bit format '{output_format}' not PNG for forced lossless. Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # 8-bit lossless\r\n- output_format = fmt_8bit_config\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- log.warning(f\"Asset '{asset_name}': Configured 8-bit format '{output_format}' is not PNG. Forced lossless for '{map_type}' will use PNG instead.\")\r\n- output_format = \"png\"; output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n-\r\n- elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'; output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Asset '{asset_name}': Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- else:\r\n- # ... 
(rest of format determination logic, adding asset_name to logs) ...\r\n- if bit_depth_rule == 'force_8bit':\r\n- output_format = 'png'; output_ext = '.png'\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Format forced to PNG (8-bit) due to 'force_8bit' rule for {map_type} (threshold not met).\")\r\n- elif original_extension == '.jpg' and output_bit_depth == 8:\r\n- output_format = 'jpg'; output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Asset '{asset_name}': Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n- elif original_extension == '.tif':\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- log.debug(f\"Asset '{asset_name}': Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n- else:\r\n- output_format = fallback_fmt_16; output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n- else:\r\n- output_format = 'png'; 
output_ext = '.png'\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n- else:\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- output_format = fallback_fmt_16; output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- log.debug(f\"Asset '{asset_name}': Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n- else:\r\n- output_format = fmt_8bit_config; output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\":\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Asset '{asset_name}': Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n-\r\n-\r\n- img_to_save = img_resized.copy()\r\n- # --- Apply Dtype Conversion ---\r\n- # [ Existing dtype conversion logic remains the same ]\r\n- if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n- if img_to_save.dtype == np.uint16: 
img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n- if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257\r\n- elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n- if needs_float16 and img_to_save.dtype != np.float16:\r\n- if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n-\r\n-\r\n- # --- 3d. Construct Filename & Save ---\r\n- # Use base_name (which is the current asset's name)\r\n- filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n- output_path_temp = self.temp_dir / filename\r\n- log.debug(f\"Asset '{asset_name}': Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n- log.debug(f\"Asset '{asset_name}': Saving {map_type} ({res_key}): Final image shape: {img_to_save.shape}, dtype: {img_to_save.dtype}\")\r\n- saved_successfully, actual_format_saved = False, output_format\r\n- # --- Conditional RGB -> BGR Conversion before saving ---\r\n- img_save_final = img_to_save # Default to original\r\n- is_3_channel = len(img_to_save.shape) == 3 and img_to_save.shape[2] == 3\r\n- if is_3_channel and not output_format.startswith(\"exr\"):\r\n- log.debug(f\"Asset '{asset_name}': Converting RGB to BGR for saving {map_type} ({res_key}) as {output_format}\")\r\n- try:\r\n- img_save_final = cv2.cvtColor(img_to_save, cv2.COLOR_RGB2BGR)\r\n- except Exception as 
cvt_err:\r\n- log.error(f\"Asset '{asset_name}': Failed RGB->BGR conversion for {map_type} ({res_key}): {cvt_err}. Saving original.\")\r\n- img_save_final = img_to_save # Fallback to original if conversion fails\r\n-\r\n- try:\r\n- cv2.imwrite(str(output_path_temp), img_save_final, save_params)\r\n- saved_successfully = True\r\n- except Exception as save_err:\r\n- log.error(f\"Asset '{asset_name}': Save failed ({output_format}) for {map_type} {res_key}: {save_err}\")\r\n- # --- Try Fallback ---\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Asset '{asset_name}': Attempting fallback {fallback_fmt_16} for {map_type} {res_key}\")\r\n- # [ Existing fallback logic remains the same, add asset_name to logs ]\r\n- actual_format_saved = fallback_fmt_16; output_ext = f\".{fallback_fmt_16}\";\r\n- filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n- output_path_temp = self.temp_dir / filename\r\n- save_params_fallback = []\r\n- img_fallback = None; target_fallback_dtype = np.uint16\r\n- if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif fallback_fmt_16 == \"tif\": pass\r\n-\r\n- if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n- if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n- log.error(f\"Asset '{asset_name}': Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. 
Skipping resolution.\")\r\n- continue\r\n- img_scaled = img_to_save * 65535.0\r\n- img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save\r\n- else: log.error(f\"Asset '{asset_name}': Cannot convert {img_to_save.dtype} for fallback.\"); continue\r\n-\r\n- # --- Conditional RGB -> BGR Conversion for fallback ---\r\n- img_fallback_save_final = img_fallback # Default to original fallback image\r\n- is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3\r\n- # Use actual_format_saved for the check here\r\n- if is_3_channel_fallback and not actual_format_saved.startswith(\"exr\"):\r\n- log.debug(f\"Asset '{asset_name}': Converting RGB to BGR for fallback saving {map_type} ({res_key}) as {actual_format_saved}\")\r\n- try:\r\n- img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)\r\n- except Exception as cvt_err_fb:\r\n- log.error(f\"Asset '{asset_name}': Failed RGB->BGR conversion for fallback {map_type} ({res_key}): {cvt_err_fb}. Saving original fallback.\")\r\n- img_fallback_save_final = img_fallback # Fallback to original if conversion fails\r\n-\r\n- try:\r\n- cv2.imwrite(str(output_path_temp), img_fallback_save_final, save_params_fallback)\r\n- saved_successfully = True\r\n- log.info(f\" > Asset '{asset_name}': Saved {map_type} ({res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err:\r\n- log.error(f\"Asset '{asset_name}': Fallback save failed for {map_type} {res_key}: {fallback_err}\", exc_info=True)\r\n-\r\n-\r\n- # --- 3e. 
Store Result ---\r\n- if saved_successfully:\r\n- # Store in the local dictionary for this asset\r\n- processed_maps_details_asset[map_type][res_key] = {\r\n- \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n- \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n- \"format\": actual_format_saved\r\n- }\r\n- current_map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n-\r\n- except Exception as map_proc_err:\r\n- log.error(f\"Asset '{asset_name}': Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n- # Store error in the local dictionary for this asset\r\n- processed_maps_details_asset.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n-\r\n- # Store details locally for this asset\r\n- map_details_asset[map_type] = current_map_details\r\n-\r\n- # --- Return results for this specific asset ---\r\n- log.info(f\"Finished processing map files for asset '{asset_name}'.\")\r\n- # Note: Final metadata updates (maps_present, shader_features) are handled in the main process loop\r\n- return processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps\r\n-\r\n-\r\n- def _merge_maps_from_source(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict, loaded_data_cache: dict) -> Dict[str, Dict[str, Dict]]:\r\n- \"\"\"\r\n- Merges channels from different SOURCE maps for a specific asset based on rules\r\n- in configuration, using helper functions for loading and saving.\r\n-\r\n- Args:\r\n- \"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- asset_name = current_asset_metadata.get(\"asset_name\", \"UnknownAsset\")\r\n-\r\n- merge_rules = self.config.map_merge_rules\r\n- log.info(f\"Asset '{asset_name}': Applying {len(merge_rules)} map merging rule(s)...\")\r\n-\r\n- # 
Initialize results for this asset\r\n- merged_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict)\r\n-\r\n- for rule_index, rule in enumerate(merge_rules):\r\n- output_map_type = rule.get(\"output_map_type\")\r\n- inputs_mapping = rule.get(\"inputs\")\r\n- defaults = rule.get(\"defaults\", {})\r\n- rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n-\r\n- if not output_map_type or not inputs_mapping:\r\n- log.warning(f\"Asset '{asset_name}': Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs'. Rule: {rule}\")\r\n- continue\r\n-\r\n- log.info(f\"-- Asset '{asset_name}': Applying merge rule for '{output_map_type}' --\")\r\n-\r\n- # --- Determine required inputs and their common resolutions for *this asset* ---\r\n- required_input_types = set(inputs_mapping.values())\r\n- if not required_input_types:\r\n- log.warning(f\"Asset '{asset_name}': Skipping merge rule '{output_map_type}': No input map types defined.\")\r\n- continue\r\n-\r\n- possible_resolutions_per_input = []\r\n- for input_type in required_input_types:\r\n- # Use the processed map details passed for this asset\r\n- if input_type in processed_maps_details_asset:\r\n- res_keys = {res for res, details in processed_maps_details_asset[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n- if not res_keys:\r\n- log.warning(f\"Asset '{asset_name}': Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions.\")\r\n- possible_resolutions_per_input = []\r\n- break\r\n- possible_resolutions_per_input.append(res_keys)\r\n- else:\r\n- log.warning(f\"Asset '{asset_name}': Required input map type '{input_type}' for merge rule '{output_map_type}' was not processed.\")\r\n- possible_resolutions_per_input = []\r\n- break\r\n-\r\n- if not possible_resolutions_per_input:\r\n- log.warning(f\"Asset '{asset_name}': Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. 
Skipping rule.\")\r\n- continue\r\n-\r\n- common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n-\r\n- if not common_resolutions:\r\n- log.warning(f\"Asset '{asset_name}': No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n- continue\r\n- log.debug(f\"Asset '{asset_name}': Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n-\r\n- # --- Loop through common resolutions ---\r\n- res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n- if not res_order:\r\n- log.warning(f\"Asset '{asset_name}': Common resolutions {common_resolutions} do not match config. Skipping merge for '{output_map_type}'.\")\r\n- continue\r\n-\r\n- sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n- target_pattern = self.config.target_filename_pattern\r\n- base_name = asset_name # Use current asset's name\r\n-\r\n- for current_res_key in sorted_res_keys:\r\n- log.debug(f\"Asset '{asset_name}': Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n- try:\r\n- loaded_inputs = {}\r\n- input_bit_depths = set()\r\n- input_original_extensions = {}\r\n-\r\n- # --- Load required input maps for this asset and resolution ---\r\n- possible_to_load = True\r\n- target_channels = list(inputs_mapping.keys())\r\n-\r\n- for map_type in required_input_types:\r\n- # Use processed_maps_details_asset passed in\r\n- res_details = processed_maps_details_asset.get(map_type, {}).get(current_res_key)\r\n- if not res_details or 'path' not in res_details:\r\n- log.warning(f\"Asset '{asset_name}': Input map '{map_type}' missing details or path for resolution '{current_res_key}'. 
Cannot merge.\")\r\n- possible_to_load = False; break\r\n-\r\n- # Find original extension from the filtered classified data for this asset\r\n- original_ext = '.png' # Default\r\n- found_original = False\r\n- # Use filtered_classified_files passed in\r\n- for classified_map in filtered_classified_files.get(\"maps\", []):\r\n- if classified_map['map_type'].startswith(map_type):\r\n- original_ext = classified_map.get('original_extension', '.png')\r\n- found_original = True\r\n- break\r\n- if not found_original:\r\n- log.warning(f\"Asset '{asset_name}': Could not find original extension for merge input '{map_type}'. Defaulting to '.png'.\")\r\n-\r\n- input_original_extensions[map_type] = original_ext\r\n-\r\n- # Load the image\r\n- input_file_path = self.temp_dir / res_details['path']\r\n- read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n- log.debug(f\"Asset '{asset_name}': Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n- img = cv2.imread(str(input_file_path), read_flag)\r\n- if img is None:\r\n- raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n- loaded_inputs[map_type] = img\r\n- input_bit_depths.add(res_details.get('bit_depth', 8))\r\n-\r\n- if len(img.shape) == 3:\r\n- log.debug(f\"Asset '{asset_name}': DEBUG: Merge input '{input_file_path.name}' ({map_type}) loaded shape {img.shape}, dtype {img.dtype}.\")\r\n-\r\n- if not possible_to_load: continue\r\n-\r\n- # --- Determine dimensions and target_dim ---\r\n- first_map_type = next(iter(required_input_types))\r\n- h, w = loaded_inputs[first_map_type].shape[:2]\r\n- first_res_details = processed_maps_details_asset.get(first_map_type, {}).get(current_res_key)\r\n- target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n- 
num_target_channels = len(target_channels)\r\n-\r\n- # --- Determine Output Bit Depth ---\r\n- max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n- output_bit_depth = 8\r\n- if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n- output_bit_depth = 16\r\n- log.debug(f\"Asset '{asset_name}': Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n-\r\n- # --- Prepare and Merge Channels ---\r\n- # [ Existing channel preparation and merging logic remains the same ]\r\n- merged_channels_float32 = []\r\n- for target_channel in target_channels:\r\n- source_map_type = inputs_mapping.get(target_channel)\r\n- channel_data_float32 = None\r\n- if source_map_type and source_map_type in loaded_inputs:\r\n- img_input = loaded_inputs[source_map_type]\r\n- if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n- elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n- else: img_float = img_input.astype(np.float32)\r\n- num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n- if num_source_channels >= 3:\r\n- if target_channel == 'R': channel_data_float32 = img_float[:, :, 0]\r\n- elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n- elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2]\r\n- elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n- elif num_source_channels == 1 or len(img_float.shape) == 2:\r\n- channel_data_float32 = img_float.reshape(h, w)\r\n- if channel_data_float32 is None:\r\n- default_val = defaults.get(target_channel)\r\n- if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n- channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n- 
merged_channels_float32.append(channel_data_float32)\r\n-\r\n- if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n- merged_image_float32 = cv2.merge(merged_channels_float32)\r\n-\r\n- # --- Final Data Type Conversion ---\r\n- img_final_merged = None\r\n- if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n- else: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n-\r\n- # --- Determine Output Format ---\r\n- # [ Existing format determination logic remains the same, add asset_name to logs ]\r\n- output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n- primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n- fmt_8bit_config = self.config.get_8bit_output_format()\r\n- threshold = self.config.resolution_threshold_for_jpg\r\n- force_lossless = output_map_type in self.config.force_lossless_map_types\r\n-\r\n- if force_lossless:\r\n- log.debug(f\"Asset '{asset_name}': Format forced to lossless for merged map type '{output_map_type}'.\")\r\n- # ... (rest of force_lossless logic) ...\r\n- if output_bit_depth == 16:\r\n- output_format = primary_fmt_16\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- else:\r\n- if output_format != \"png\":\r\n- log.warning(f\"Asset '{asset_name}': Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. 
Using fallback '{fallback_fmt_16}'.\")\r\n- output_format = fallback_fmt_16\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else: # 8-bit lossless\r\n- output_format = fmt_8bit_config\r\n- output_ext = f\".{output_format}\"\r\n- if output_format == \"png\":\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- else:\r\n- log.warning(f\"Asset '{asset_name}': Configured 8-bit format '{output_format}' is not PNG. Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n- output_format = \"png\"; output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level]\r\n-\r\n- elif output_bit_depth == 8 and target_dim >= threshold:\r\n- output_format = 'jpg'; output_ext = '.jpg'\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- log.debug(f\"Asset '{asset_name}': Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n- else:\r\n- # ... 
(rest of hierarchy logic, add asset_name to logs) ...\r\n- involved_extensions = set(input_original_extensions.values())\r\n- log.debug(f\"Asset '{asset_name}': Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n- highest_format_str = 'jpg'\r\n- if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n- elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n- elif '.png' in involved_extensions: highest_format_str = 'png'\r\n-\r\n- final_output_format = highest_format_str\r\n- if highest_format_str == 'tif':\r\n- if output_bit_depth == 16:\r\n- final_output_format = primary_fmt_16\r\n- log.debug(f\"Asset '{asset_name}': Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n- else:\r\n- final_output_format = 'png'\r\n- log.debug(f\"Asset '{asset_name}': Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n- else:\r\n- log.debug(f\"Asset '{asset_name}': Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold not met)\")\r\n-\r\n- output_format = final_output_format\r\n- if output_format.startswith(\"exr\"):\r\n- output_ext, needs_float16 = \".exr\", True\r\n- save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n- elif output_format == \"png\":\r\n- output_ext = \".png\"\r\n- png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n- save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n- elif output_format == \"jpg\":\r\n- output_ext = \".jpg\"\r\n- jpg_quality = self.config.jpg_quality\r\n- save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n- else:\r\n- log.error(f\"Asset '{asset_name}': Unsupported final output format '{output_format}' for merged map '{output_map_type}'. 
Skipping save.\")\r\n- continue\r\n-\r\n-\r\n- # --- JPG 8-bit Check ---\r\n- if output_format == \"jpg\" and output_bit_depth == 16:\r\n- log.warning(f\"Asset '{asset_name}': Output format is JPG, but merge rule resulted in 16-bit target. Forcing merged output to 8-bit for JPG save.\")\r\n- img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n- output_bit_depth = 8\r\n-\r\n- # --- Save Merged Map ---\r\n- image_to_save = img_final_merged\r\n- if needs_float16 and image_to_save.dtype != np.float16:\r\n- # [ Existing float16 conversion logic ]\r\n- if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n- elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n- else: log.warning(f\"Asset '{asset_name}': Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n-\r\n- # Use base_name (current asset's name)\r\n- merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n- merged_output_path_temp = self.temp_dir / merged_filename\r\n- log.debug(f\"Asset '{asset_name}': Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n-\r\n- # --- Save with Fallback ---\r\n- # [ Existing save/fallback logic, add asset_name to logs ]\r\n- saved_successfully = False\r\n- actual_format_saved = output_format\r\n-\r\n- # --- Conditional RGB -> BGR Conversion before saving ---\r\n- img_save_final = image_to_save # Default to original\r\n- is_3_channel = len(image_to_save.shape) == 3 and image_to_save.shape[2] == 3\r\n- if is_3_channel and not output_format.startswith(\"exr\"):\r\n- log.debug(f\"Asset '{asset_name}': Converting RGB to BGR for saving merged {output_map_type} ({current_res_key}) as {output_format}\")\r\n- try:\r\n- img_save_final = 
cv2.cvtColor(image_to_save, cv2.COLOR_RGB2BGR)\r\n- except Exception as cvt_err:\r\n- log.error(f\"Asset '{asset_name}': Failed RGB->BGR conversion for merged {output_map_type} ({current_res_key}): {cvt_err}. Saving original.\")\r\n- img_save_final = image_to_save # Fallback to original if conversion fails\r\n-\r\n- try:\r\n- cv2.imwrite(str(merged_output_path_temp), img_save_final, save_params)\r\n- log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n- saved_successfully = True\r\n- except Exception as save_err:\r\n- log.error(f\"Asset '{asset_name}': Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n- if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n- log.warning(f\"Asset '{asset_name}': Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n- # ... [ Fallback save logic, add asset_name to logs ] ...\r\n- actual_format_saved = fallback_fmt_16\r\n- output_ext = f\".{fallback_fmt_16}\"\r\n- merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n- merged_output_path_temp = self.temp_dir / merged_filename\r\n- save_params_fallback = []\r\n- img_fallback = None\r\n- target_fallback_dtype = np.uint16\r\n-\r\n- if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n- elif fallback_fmt_16 == \"tif\": pass\r\n-\r\n- if image_to_save.dtype == np.float16:\r\n- if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(f\"Asset '{asset_name}': NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n- img_scaled = image_to_save * 65535.0\r\n- img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n- elif image_to_save.dtype == 
target_fallback_dtype: img_fallback = image_to_save\r\n- else: log.error(f\"Asset '{asset_name}': Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n-\r\n- # --- Conditional RGB -> BGR Conversion for fallback ---\r\n- img_fallback_save_final = img_fallback # Default to original fallback image\r\n- is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3\r\n- # Use actual_format_saved for the check here\r\n- if is_3_channel_fallback and not actual_format_saved.startswith(\"exr\"):\r\n- log.debug(f\"Asset '{asset_name}': Converting RGB to BGR for fallback saving merged {output_map_type} ({current_res_key}) as {actual_format_saved}\")\r\n- try:\r\n- img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)\r\n- except Exception as cvt_err_fb:\r\n- log.error(f\"Asset '{asset_name}': Failed RGB->BGR conversion for fallback merged {output_map_type} ({current_res_key}): {cvt_err_fb}. Saving original fallback.\")\r\n- img_fallback_save_final = img_fallback # Fallback to original if conversion fails\r\n-\r\n- try:\r\n- cv2.imwrite(str(merged_output_path_temp), img_fallback_save_final, save_params_fallback)\r\n- saved_successfully = True\r\n- log.info(f\" > Asset '{asset_name}': Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n- except Exception as fallback_err:\r\n- log.error(f\"Asset '{asset_name}': Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n-\r\n-\r\n- # --- Record details locally ---\r\n- if saved_successfully:\r\n- merged_maps_details_asset[output_map_type][current_res_key] = {\r\n- \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n- \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n- }\r\n- # Note: Adding to metadata[\"merged_maps\"] list happens in the main process loop\r\n-\r\n- except Exception as merge_res_err:\r\n- 
log.error(f\"Asset '{asset_name}': Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n- # Store error locally for this asset\r\n- merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n-\r\n- log.info(f\"Asset '{asset_name}': Finished applying map merging rules.\")\r\n- # Return the details for this asset\r\n- return merged_maps_details_asset\r\n-\r\n-\r\n- def _generate_metadata_file(self, current_asset_metadata: Dict, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], map_details_asset: Dict[str, Dict]) -> Path:\r\n- \"\"\"\r\n- Gathers metadata for a specific asset and writes it to a temporary JSON file.\r\n-\r\n- Args:\r\n- current_asset_metadata: Base metadata for this asset (name, category, archetype, etc.).\r\n- processed_maps_details_asset: Details of processed maps for this asset.\r\n- merged_maps_details_asset: Details of merged maps for this asset.\r\n- filtered_classified_files_asset: Classified files belonging only to this asset.\r\n- unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n- map_details_asset: Dictionary containing details like source bit depth, gloss inversion per map type.\r\n-\r\n-\r\n- Returns:\r\n- Path: The path to the generated temporary metadata file.\r\n- \"\"\"\r\n- if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n- asset_name = current_asset_metadata.get(\"asset_name\")\r\n- if not asset_name or asset_name == \"UnknownAssetName\":\r\n- log.warning(\"Asset name unknown during metadata generation, file may be incomplete or incorrectly named.\")\r\n- asset_name = \"UnknownAsset_Metadata\" # Fallback for filename\r\n-\r\n- log.info(f\"Generating metadata file for asset '{asset_name}'...\")\r\n- # Start with 
the base metadata passed in for this asset\r\n- final_metadata = current_asset_metadata.copy()\r\n-\r\n- # Populate map details from the specific asset's processing results\r\n- final_metadata[\"processed_map_resolutions\"] = {}\r\n- for map_type, res_dict in processed_maps_details_asset.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n- if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys)\r\n-\r\n- final_metadata[\"merged_map_resolutions\"] = {}\r\n- for map_type, res_dict in merged_maps_details_asset.items():\r\n- keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n- if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n-\r\n- # Determine maps present based on successful processing for this asset\r\n- final_metadata[\"maps_present\"] = sorted(list(processed_maps_details_asset.keys()))\r\n- final_metadata[\"merged_maps\"] = sorted(list(merged_maps_details_asset.keys()))\r\n-\r\n- # Determine shader features based on this asset's maps\r\n- features = set()\r\n- for map_type, details in map_details_asset.items(): # Use map_details_asset passed in\r\n- if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n- if details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n- res_details = processed_maps_details_asset.get(map_type, {})\r\n- if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n- final_metadata[\"shader_features\"] = sorted(list(features))\r\n-\r\n- # Determine source files in this asset's Extra folder\r\n- # Includes:\r\n- # - Files originally classified as 'Extra' or 'Unrecognised' belonging to this asset.\r\n- # - Files originally classified as 'Ignored' belonging to this asset.\r\n- # - All 'unmatched' files (belonging to no specific asset).\r\n- source_files_in_extra_set = set()\r\n- 
for category in ['extra', 'ignored']:\r\n- for file_info in filtered_classified_files_asset.get(category, []):\r\n- source_files_in_extra_set.add(str(file_info['source_path']))\r\n- # Add all unmatched files\r\n- for file_path in unmatched_files_paths:\r\n- source_files_in_extra_set.add(str(file_path))\r\n- final_metadata[\"source_files_in_extra\"] = sorted(list(source_files_in_extra_set))\r\n-\r\n- # Add image stats and map details specific to this asset\r\n- final_metadata[\"image_stats_1k\"] = current_asset_metadata.get(\"image_stats_1k\", {}) # Get from passed metadata\r\n- final_metadata[\"map_details\"] = map_details_asset # Use map_details_asset passed in\r\n- final_metadata[\"aspect_ratio_change_string\"] = current_asset_metadata.get(\"aspect_ratio_change_string\", \"N/A\") # Get from passed metadata\r\n-\r\n-\r\n- # Add processing info\r\n- final_metadata[\"_processing_info\"] = {\r\n- \"preset_used\": self.config.preset_name,\r\n- \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n- \"input_source\": str(self.input_path.name), # Add original input source\r\n- }\r\n-\r\n- # Sort lists just before writing\r\n- for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n- if key in final_metadata and isinstance(final_metadata[key], list): final_metadata[key].sort()\r\n-\r\n- # Use asset name in temporary filename to avoid conflicts\r\n- metadata_filename = f\"{asset_name}_{self.config.metadata_filename}\"\r\n- output_path = self.temp_dir / metadata_filename\r\n- log.debug(f\"Writing metadata for asset '{asset_name}' to temporary file: {output_path}\")\r\n- try:\r\n- with open(output_path, 'w', encoding='utf-8') as f:\r\n- json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n- log.info(f\"Metadata file '{metadata_filename}' generated successfully for asset '{asset_name}'.\")\r\n- return output_path # Return the path to the temporary file\r\n- except Exception as e:\r\n- 
raise AssetProcessingError(f\"Failed to write metadata file {output_path} for asset '{asset_name}': {e}\") from e\r\n-\r\n-\r\n- def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n- \"\"\"\r\n- Calculates the aspect ratio change string (e.g., \"EVEN\", \"X133\") based on original prototype logic.\r\n- Returns the string representation.\r\n- \"\"\"\r\n- if original_width <= 0 or original_height <= 0:\r\n- log.warning(\"Cannot calculate aspect ratio change with zero original dimensions.\")\r\n- return \"InvalidInput\"\r\n-\r\n- # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)\r\n- if resized_width <= 0 or resized_height <= 0:\r\n- log.warning(\"Cannot calculate aspect ratio change with zero resized dimensions.\")\r\n- return \"InvalidResize\"\r\n-\r\n- # Original logic from user feedback\r\n- width_change_percentage = ((resized_width - original_width) / original_width) * 100\r\n- height_change_percentage = ((resized_height - original_height) / original_height) * 100\r\n-\r\n- normalized_width_change = width_change_percentage / 100\r\n- normalized_height_change = height_change_percentage / 100\r\n-\r\n- normalized_width_change = min(max(normalized_width_change + 1, 0), 2)\r\n- normalized_height_change = min(max(normalized_height_change + 1, 0), 2)\r\n-\r\n- # Handle potential zero division if one dimension change is exactly -100% (normalized to 0)\r\n- # If both are 0, aspect ratio is maintained. 
If one is 0, the other dominates.\r\n- if normalized_width_change == 0 and normalized_height_change == 0:\r\n- closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1\r\n- elif normalized_width_change == 0:\r\n- closest_value_to_one = abs(normalized_height_change)\r\n- elif normalized_height_change == 0:\r\n- closest_value_to_one = abs(normalized_width_change)\r\n- else:\r\n- closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))\r\n-\r\n- # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0\r\n- epsilon = 1e-9\r\n- scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one\r\n-\r\n- scaled_normalized_width_change = scale_factor * normalized_width_change\r\n- scaled_normalized_height_change = scale_factor * normalized_height_change\r\n-\r\n- output_width = round(scaled_normalized_width_change, decimals)\r\n- output_height = round(scaled_normalized_height_change, decimals)\r\n-\r\n- # Convert to int if exactly 1.0 after rounding\r\n- if abs(output_width - 1.0) < epsilon: output_width = 1\r\n- if abs(output_height - 1.0) < epsilon: output_height = 1\r\n-\r\n- # Determine output string\r\n- if original_width == original_height or abs(output_width - output_height) < epsilon:\r\n- output = \"EVEN\"\r\n- elif output_width != 1 and output_height == 1:\r\n- output = f\"X{str(output_width).replace('.', '')}\"\r\n- elif output_height != 1 and output_width == 1:\r\n- output = f\"Y{str(output_height).replace('.', '')}\"\r\n- else:\r\n- # Both changed relative to each other\r\n- output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n-\r\n- log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n- return output\r\n-\r\n- def _sanitize_filename(self, name: str) -> str:\r\n- 
\"\"\"Removes or replaces characters invalid for filenames/directory names.\"\"\"\r\n- # ... (Implementation from Response #51) ...\r\n- if not isinstance(name, str): name = str(name)\r\n- name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n- name = re.sub(r'_+', '_', name)\r\n- name = name.strip('_')\r\n- if not name: name = \"invalid_name\"\r\n- return name\r\n-\r\n- def _organize_output_files(self, current_asset_name: str, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], temp_metadata_path: Path):\r\n- \"\"\"\r\n- Moves/copies processed files for a specific asset from the temp dir to the final output structure.\r\n-\r\n- Args:\r\n- current_asset_name: The sanitized name of the asset being organized.\r\n- processed_maps_details_asset: Details of processed maps for this asset.\r\n- merged_maps_details_asset: Details of merged maps for this asset.\r\n- filtered_classified_files_asset: Classified files dictionary filtered for this asset.\r\n- unmatched_files_paths: List of relative paths for files not matched to any base name.\r\n- temp_metadata_path: Path to the temporary metadata file for this asset.\r\n- \"\"\"\r\n- if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n- if not current_asset_name or current_asset_name == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing for organization.\")\r\n- supplier_name = self.config.supplier_name # Get supplier name from config\r\n- if not supplier_name: raise AssetProcessingError(\"Supplier name missing from config.\")\r\n-\r\n- supplier_sanitized = self._sanitize_filename(supplier_name)\r\n- asset_name_sanitized = self._sanitize_filename(current_asset_name) # Already sanitized, but ensure consistency\r\n- final_dir = self.output_base_path / 
supplier_sanitized / asset_name_sanitized\r\n- log.info(f\"Organizing output files for asset '{asset_name_sanitized}' into: {final_dir}\")\r\n-\r\n- try:\r\n- # Handle overwrite logic specifically for this asset's directory\r\n- if final_dir.exists() and self.overwrite:\r\n- log.warning(f\"Output directory exists for '{asset_name_sanitized}' and overwrite is True. Removing existing directory: {final_dir}\")\r\n- try:\r\n- shutil.rmtree(final_dir)\r\n- except Exception as rm_err:\r\n- raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} for asset '{asset_name_sanitized}' during overwrite: {rm_err}\") from rm_err\r\n- # Note: Skip check should prevent this if overwrite is False, but mkdir handles exist_ok=True\r\n-\r\n- final_dir.mkdir(parents=True, exist_ok=True)\r\n- except Exception as e:\r\n- if not isinstance(e, AssetProcessingError):\r\n- raise AssetProcessingError(f\"Failed to create final dir {final_dir} for asset '{asset_name_sanitized}': {e}\") from e\r\n- else:\r\n- raise\r\n-\r\n- # --- Helper for moving files ---\r\n- # Keep track of files successfully moved to avoid copying them later as 'unmatched'\r\n- moved_source_files = set()\r\n- def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n- if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc}.\"); return\r\n- source_abs = self.temp_dir / src_rel_path\r\n- # Use the original filename from the source path for the destination\r\n- dest_abs = dest_dir / src_rel_path.name\r\n- try:\r\n- if source_abs.exists():\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n- dest_dir.mkdir(parents=True, exist_ok=True)\r\n- shutil.move(str(source_abs), str(dest_abs))\r\n- moved_source_files.add(src_rel_path) # Track successfully moved source files\r\n- else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for 
{file_desc}: {source_abs}\")\r\n- except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n-\r\n- # --- Helper for copying files (for unmatched extras) ---\r\n- def _safe_copy(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n- if not src_rel_path: log.warning(f\"Asset '{asset_name_sanitized}': Missing src path for {file_desc} copy.\"); return\r\n- # Skip copying if this source file was already moved (e.g., it was an 'Extra' for this specific asset)\r\n- if src_rel_path in moved_source_files:\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Skipping copy of {file_desc} '{src_rel_path.name}' as it was already moved.\")\r\n- return\r\n- source_abs = self.temp_dir / src_rel_path\r\n- dest_abs = dest_dir / src_rel_path.name\r\n- try:\r\n- if source_abs.exists():\r\n- # Avoid copying if the exact destination file already exists (e.g., from a previous asset's copy)\r\n- if dest_abs.exists():\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Destination file already exists for {file_desc} copy: {dest_abs.name}. 
Skipping copy.\")\r\n- return\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Copying {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n- dest_dir.mkdir(parents=True, exist_ok=True)\r\n- shutil.copy2(str(source_abs), str(dest_abs)) # Use copy2 to preserve metadata\r\n- else: log.warning(f\"Asset '{asset_name_sanitized}': Source file missing for {file_desc} copy: {source_abs}\")\r\n- except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed copying {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n-\r\n-\r\n- # --- Move Processed/Merged Maps ---\r\n- for details_dict in [processed_maps_details_asset, merged_maps_details_asset]:\r\n- for map_type, res_dict in details_dict.items():\r\n- if 'error' in res_dict: continue\r\n- for res_key, details in res_dict.items():\r\n- if isinstance(details, dict) and 'path' in details:\r\n- _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n-\r\n- # --- Move Models specific to this asset ---\r\n- for model_info in filtered_classified_files_asset.get('models', []):\r\n- _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n-\r\n- # --- Move Metadata File ---\r\n- if temp_metadata_path and temp_metadata_path.exists():\r\n- final_metadata_path = final_dir / self.config.metadata_filename # Use standard name\r\n- try:\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Moving metadata file: {temp_metadata_path.name} -> {final_metadata_path.relative_to(self.output_base_path)}\")\r\n- shutil.move(str(temp_metadata_path), str(final_metadata_path))\r\n- # No need to add metadata path to moved_source_files as it's uniquely generated\r\n- except Exception as e:\r\n- log.error(f\"Asset '{asset_name_sanitized}': Failed moving metadata file '{temp_metadata_path.name}': {e}\", exc_info=True)\r\n- else:\r\n- log.warning(f\"Asset '{asset_name_sanitized}': Temporary metadata file path missing or file does not exist: 
{temp_metadata_path}\")\r\n-\r\n-\r\n- # --- Handle Extra/Ignored/Unmatched Files ---\r\n- extra_subdir_name = self.config.extra_files_subdir\r\n- extra_dir = final_dir / extra_subdir_name\r\n- if filtered_classified_files_asset.get('extra') or filtered_classified_files_asset.get('ignored') or unmatched_files_paths:\r\n- try:\r\n- extra_dir.mkdir(parents=True, exist_ok=True)\r\n-\r\n- # Move asset-specific Extra/Ignored files\r\n- files_to_move_extra = filtered_classified_files_asset.get('extra', []) + filtered_classified_files_asset.get('ignored', [])\r\n- if files_to_move_extra:\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Moving {len(files_to_move_extra)} asset-specific files to '{extra_subdir_name}/'...\")\r\n- for file_info in files_to_move_extra:\r\n- _safe_move(file_info.get('source_path'), extra_dir, f\"extra/ignored file ({file_info.get('reason', 'Unknown')})\")\r\n-\r\n- # Copy unmatched files\r\n- if unmatched_files_paths:\r\n- log.debug(f\"Asset '{asset_name_sanitized}': Copying {len(unmatched_files_paths)} unmatched files to '{extra_subdir_name}/'...\")\r\n- for file_path in unmatched_files_paths:\r\n- _safe_copy(file_path, extra_dir, \"unmatched file\")\r\n-\r\n- except Exception as e: log.error(f\"Asset '{asset_name_sanitized}': Failed creating/moving/copying to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n-\r\n- log.info(f\"Finished organizing output for asset '{asset_name_sanitized}'.\")\r\n-\r\n-\r\n- def _cleanup_workspace(self):\r\n- \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n- # ... 
(Implementation from Response #45) ...\r\n- if self.temp_dir and self.temp_dir.exists():\r\n- try:\r\n- log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n- shutil.rmtree(self.temp_dir)\r\n- self.temp_dir = None\r\n- log.debug(\"Temporary workspace cleaned up successfully.\")\r\n- except Exception as e:\r\n- log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n-\r\n- # --- Prediction Method ---\r\n- def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n- \"\"\"\r\n- Predicts the final output structure (supplier, asset name) and attempts\r\n- to predict output filenames for potential map files based on naming conventions.\r\n- Does not perform full processing or image loading.\r\n-\r\n- Returns:\r\n- tuple[str | None, str | None, dict[str, str] | None]:\r\n- (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n- where file_predictions_dict maps input filename -> predicted output filename.\r\n- Returns None if prediction fails critically.\r\n- \"\"\"\r\n- log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n- try:\r\n- # 1. Get Supplier Name\r\n- supplier_name = self.config.supplier_name\r\n- if not supplier_name:\r\n- log.warning(\"Supplier name not found in configuration during prediction.\")\r\n- return None\r\n-\r\n- # 2. 
List Input Filenames/Stems\r\n- candidate_stems = set() # Use set for unique stems\r\n- filenames = []\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- try:\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- # Get only filenames, ignore directories\r\n- filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n- except zipfile.BadZipFile:\r\n- log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n- return None\r\n- except Exception as zip_err:\r\n- log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n- return None # Cannot proceed if we can't list files\r\n- elif self.input_path.is_dir():\r\n- try:\r\n- for item in self.input_path.iterdir():\r\n- if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n- filenames.append(item.name)\r\n- # Note: Not walking subdirs for prediction to keep it fast\r\n- except Exception as dir_err:\r\n- log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n- return None\r\n-\r\n- if not filenames:\r\n- log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n- return None # Return None if no files found\r\n-\r\n- # 3. 
Lightweight Classification for Stems and Potential Maps\r\n- map_type_mapping = self.config.map_type_mapping\r\n- model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n- separator = self.config.source_naming_separator\r\n- processed_filenames = set() # Track full filenames processed\r\n- potential_map_files = {} # Store fname -> potential map_type\r\n-\r\n- for fname in filenames:\r\n- if fname in processed_filenames: continue\r\n-\r\n- fstem = Path(fname).stem\r\n- fstem_lower = fstem.lower()\r\n- name_parts = fstem_lower.split(separator)\r\n-\r\n- # Check map rules first\r\n- map_matched = False\r\n- for mapping_rule in map_type_mapping:\r\n- source_keywords, standard_map_type = mapping_rule\r\n- if standard_map_type not in self.config.standard_map_types: continue\r\n- for keyword in source_keywords:\r\n- kw_lower = keyword.lower().strip('*')\r\n- if kw_lower in name_parts:\r\n- is_exact_match = any(part == kw_lower for part in name_parts)\r\n- if is_exact_match:\r\n- candidate_stems.add(fstem) # Add unique stem\r\n- potential_map_files[fname] = standard_map_type # Store potential type\r\n- processed_filenames.add(fname)\r\n- map_matched = True\r\n- break # Found keyword match for this rule\r\n- if map_matched: break # Found a rule match for this file\r\n- if map_matched: continue # Move to next filename if identified as map\r\n-\r\n- # Check model patterns if not a map\r\n- for pattern in model_patterns:\r\n- if fnmatch(fname.lower(), pattern.lower()):\r\n- candidate_stems.add(fstem) # Still add stem for base name determination\r\n- processed_filenames.add(fname)\r\n- # Don't add models to potential_map_files\r\n- break # Found model match\r\n-\r\n- # Note: Files matching neither maps nor models are ignored for prediction details\r\n-\r\n- log.debug(f\"[PREDICTION] Potential map files identified: {potential_map_files}\") # DEBUG PREDICTION\r\n- candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n- 
log.debug(f\"[PREDICTION] Candidate stems identified: {candidate_stems_list}\") # DEBUG PREDICTION\r\n- if not candidate_stems_list:\r\n- log.warning(f\"Prediction: No relevant map/model stems found in {self.input_path.name}. Using input name as fallback.\")\r\n- # Fallback: Use the input path's name itself if no stems found\r\n- base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n- determined_base_name = base_name_fallback\r\n- else:\r\n- # 4. Replicate _determine_base_metadata logic for base name\r\n- determined_base_name = \"UnknownAssetName\"\r\n- separator = self.config.source_naming_separator\r\n- indices_dict = self.config.source_naming_indices\r\n- base_index_raw = indices_dict.get('base_name')\r\n- log.debug(f\"[PREDICTION] Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}'\") # DEBUG PREDICTION\r\n-\r\n- base_index = None\r\n- if base_index_raw is not None:\r\n- try:\r\n- base_index = int(base_index_raw) # Use explicit conversion like in main logic\r\n- except (ValueError, TypeError):\r\n- log.warning(f\"[PREDICTION] Could not convert base_name index '{base_index_raw}' to integer.\")\r\n-\r\n- if isinstance(base_index, int):\r\n- potential_base_names = set()\r\n- for stem in candidate_stems_list: # Iterate over the list\r\n- parts = stem.split(separator)\r\n- log.debug(f\"[PREDICTION] Processing stem: '{stem}', Parts: {parts}\") # DEBUG PREDICTION\r\n- if len(parts) > base_index:\r\n- extracted_name = parts[base_index]\r\n- potential_base_names.add(extracted_name)\r\n- log.debug(f\"[PREDICTION] Extracted potential base name: '{extracted_name}' using index {base_index}\") # DEBUG PREDICTION\r\n- else:\r\n- log.debug(f\"[PREDICTION] Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.\") # DEBUG PREDICTION\r\n- if len(potential_base_names) == 1:\r\n- determined_base_name = potential_base_names.pop()\r\n- log.debug(f\"[PREDICTION] 
Determined base name '{determined_base_name}' from structured parts (index {base_index}).\") # DEBUG PREDICTION\r\n- elif len(potential_base_names) > 1:\r\n- log.debug(f\"[PREDICTION] Multiple potential base names found from index {base_index}: {potential_base_names}. Falling back to common prefix.\") # DEBUG PREDICTION\r\n- determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n- # else: Use common prefix below\r\n-\r\n- if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n- log.debug(\"[PREDICTION] Falling back to common prefix for base name determination (structured parts failed or no index).\") # DEBUG PREDICTION\r\n- determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n- determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n-\r\n- # 5. Sanitize Names\r\n- final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n- log.debug(f\"[PREDICTION] Final determined base name for prediction: '{final_base_name}'\") # DEBUG PREDICTION\r\n- final_supplier_name = self._sanitize_filename(supplier_name)\r\n-\r\n- # 6. 
Predict Output Filenames\r\n- file_predictions = {}\r\n- target_pattern = self.config.target_filename_pattern\r\n- # Use highest resolution key as a placeholder for prediction\r\n- highest_res_key = \"Res?\" # Fallback\r\n- if self.config.image_resolutions:\r\n- highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n-\r\n- for input_fname, map_type in potential_map_files.items():\r\n- # Assume PNG for prediction, extension might change based on bit depth rules later\r\n- # but this gives a good idea of the renaming.\r\n- # A more complex prediction could check bit depth rules.\r\n- predicted_ext = \"png\" # Simple assumption for preview\r\n- try:\r\n- predicted_fname = target_pattern.format(\r\n- base_name=final_base_name,\r\n- map_type=map_type,\r\n- resolution=highest_res_key, # Use placeholder resolution\r\n- ext=predicted_ext\r\n- )\r\n- file_predictions[input_fname] = predicted_fname\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n- file_predictions[input_fname] = \"[Filename Format Error]\"\r\n-\r\n-\r\n- log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n- return final_supplier_name, final_base_name, file_predictions\r\n-\r\n- except Exception as e:\r\n- log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- return None\r\n-\r\n-\r\n- # --- New Detailed Prediction Method ---\r\n- def get_detailed_file_predictions(self) -> list[dict] | None:\r\n- \"\"\"\r\n- Performs extraction and classification to provide a detailed list of all\r\n- files found within the input and their predicted status/output name,\r\n- handling multiple potential assets within the input.\r\n-\r\n- Returns:\r\n- list[dict] | None: A list of dictionaries, each representing a file:\r\n- {'original_path': str,\r\n- 'predicted_asset_name': str | None,\r\n- 'predicted_output_name': str | None,\r\n- 'status': str,\r\n- 'details': str | None}\r\n- Returns None if a critical error occurs during setup/classification.\r\n- \"\"\"\r\n- log.info(f\"Getting detailed file predictions for input: {self.input_path.name}\")\r\n- results = []\r\n- all_files_in_workspace = [] # Keep track of all files found\r\n-\r\n- try:\r\n- # --- Perform necessary setup and classification ---\r\n- self._setup_workspace()\r\n- self._extract_input()\r\n- # Run classification - this populates self.classified_files\r\n- self._inventory_and_classify_files()\r\n-\r\n- # --- Determine distinct assets and file mapping ---\r\n- # This uses the results from _inventory_and_classify_files\r\n- distinct_base_names, file_to_base_name_map = self._determine_base_metadata()\r\n- log.debug(f\"Prediction: Determined base names: {distinct_base_names}\")\r\n- log.debug(f\"Prediction: File to base name map: { {str(k):v for k,v in file_to_base_name_map.items()} }\")\r\n-\r\n- # --- Apply Suffixes for Prediction 
Preview ---\r\n- # This logic is similar to the main process method but applied to the classified_files list\r\n- log.debug(\"Prediction: Applying map type suffixes for preview...\")\r\n- grouped_classified_maps = defaultdict(list)\r\n- for map_info in self.classified_files.get('maps', []):\r\n- # Group by the base map type\r\n- grouped_classified_maps[map_info['map_type']].append(map_info)\r\n-\r\n- # Create a new list for maps with updated types for prediction\r\n- maps_with_predicted_types = []\r\n- for base_map_type, maps_in_group in grouped_classified_maps.items():\r\n- respect_variants = base_map_type in self.config.respect_variant_map_types\r\n- # Sort maps within the group for consistent suffixing (using the same key as in _inventory_and_classify_files)\r\n- maps_in_group.sort(key=lambda c: (\r\n- c.get('preset_rule_index', 9999),\r\n- c.get('keyword_index_in_rule', 9999) if 'keyword_index_in_rule' in c else 9999, # Handle potential missing key\r\n- str(c['source_path'])\r\n- ))\r\n-\r\n- for i, map_info in enumerate(maps_in_group):\r\n- predicted_map_type = f\"{base_map_type}-{i + 1}\" if respect_variants else base_map_type\r\n- # Create a copy to avoid modifying the original classified_files list in place\r\n- map_info_copy = map_info.copy()\r\n- map_info_copy['predicted_map_type'] = predicted_map_type # Store the predicted type\r\n- maps_with_predicted_types.append(map_info_copy)\r\n-\r\n- # Replace the original maps list with the one containing predicted types for the next step\r\n- # Note: This is a temporary list for prediction generation, not modifying the instance's classified_files permanently\r\n- # self.classified_files[\"maps\"] = maps_with_predicted_types # Avoid modifying instance state\r\n-\r\n- # --- Prepare for filename prediction ---\r\n- target_pattern = self.config.target_filename_pattern\r\n- highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n- if self.config.image_resolutions:\r\n- highest_res_key = 
max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n-\r\n- # --- Process all classified files (including maps with predicted types) ---\r\n- all_classified_files_with_category = []\r\n- # Add maps with predicted types first\r\n- for map_info in maps_with_predicted_types:\r\n- map_info['category'] = 'maps' # Ensure category is set\r\n- all_classified_files_with_category.append(map_info)\r\n- if 'source_path' in map_info:\r\n- all_files_in_workspace.append(map_info['source_path'])\r\n-\r\n- # Add other categories (models, extra, ignored)\r\n- for category in ['models', 'extra', 'ignored']:\r\n- for file_info in self.classified_files.get(category, []):\r\n- file_info['category'] = category\r\n- all_classified_files_with_category.append(file_info)\r\n- if 'source_path' in file_info:\r\n- all_files_in_workspace.append(file_info['source_path'])\r\n-\r\n-\r\n- # --- Generate results for each file ---\r\n- processed_paths = set() # Track paths already added to results\r\n- for file_info in all_classified_files_with_category:\r\n- original_path = file_info.get(\"source_path\")\r\n- if not original_path or original_path in processed_paths:\r\n- continue # Skip if path missing or already processed\r\n-\r\n- original_path_str = str(original_path)\r\n- processed_paths.add(original_path) # Mark as processed\r\n-\r\n- # Determine predicted asset name and status\r\n- predicted_asset_name = file_to_base_name_map.get(original_path) # Can be None\r\n- category = file_info['category'] # maps, models, extra, ignored\r\n- reason = file_info.get('reason') # Specific reason for extra/ignored\r\n- status = \"Unknown\"\r\n- details = None\r\n- predicted_output_name = None # Usually original name, except for maps\r\n-\r\n- if category == \"maps\":\r\n- status = \"Mapped\"\r\n- # Use the predicted_map_type for the preview display\r\n- map_type_for_preview = file_info.get(\"predicted_map_type\", file_info.get(\"map_type\", \"UnknownType\"))\r\n- details = 
f\"[{map_type_for_preview}]\"\r\n- if file_info.get(\"is_16bit_source\"): details += \" (16-bit)\"\r\n- # Predict map output name using its determined asset name and predicted map type\r\n- if predicted_asset_name:\r\n- try:\r\n- predicted_ext = \"png\" # Assume PNG for prediction simplicity\r\n- predicted_output_name = target_pattern.format(\r\n- base_name=predicted_asset_name,\r\n- map_type=map_type_for_preview, # Use the predicted type here\r\n- resolution=highest_res_key,\r\n- ext=predicted_ext\r\n- )\r\n- except KeyError as fmt_err:\r\n- log.warning(f\"Prediction format error for map {original_path_str}: {fmt_err}\")\r\n- predicted_output_name = \"[Format Error]\"\r\n- details += f\" (Format Key Error: {fmt_err})\"\r\n- except Exception as pred_err:\r\n- log.warning(f\"Prediction error for map {original_path_str}: {pred_err}\")\r\n- predicted_output_name = \"[Prediction Error]\"\r\n- details += f\" (Error: {pred_err})\"\r\n- else:\r\n- # Should not happen for maps if _determine_base_metadata worked correctly\r\n- log.warning(f\"Map file '{original_path_str}' has no predicted asset name.\")\r\n- predicted_output_name = \"[No Asset Name]\"\r\n-\r\n- elif category == \"models\":\r\n- status = \"Model\"\r\n- details = \"[Model]\"\r\n- predicted_output_name = original_path.name # Models keep original name\r\n-\r\n- elif category == \"ignored\":\r\n- status = \"Ignored\"\r\n- details = f\"Ignored ({reason or 'Unknown reason'})\"\r\n- predicted_output_name = None # Ignored files have no output\r\n-\r\n- elif category == \"extra\":\r\n- if predicted_asset_name is None:\r\n- # This is an \"Unmatched Extra\" file (includes Unrecognised and explicit Extras without a base name)\r\n- status = \"Unmatched Extra\"\r\n- details = f\"[Unmatched Extra ({reason or 'N/A'})]\" # Include original reason if available\r\n- elif reason == 'Unrecognised':\r\n- # Unrecognised but belongs to a specific asset\r\n- status = \"Unrecognised\"\r\n- details = \"[Unrecognised]\"\r\n- else:\r\n- 
# Explicitly matched an 'extra' pattern and belongs to an asset\r\n- status = \"Extra\"\r\n- details = f\"Extra ({reason})\"\r\n- predicted_output_name = original_path.name # Extra files keep original name\r\n-\r\n- else:\r\n- log.warning(f\"Unknown category '{category}' encountered during prediction for {original_path_str}\")\r\n- status = \"Error\"\r\n- details = f\"[Unknown Category: {category}]\"\r\n- predicted_output_name = original_path.name\r\n-\r\n-\r\n- results.append({\r\n- \"original_path\": original_path_str,\r\n- \"predicted_asset_name\": predicted_asset_name, # May be None\r\n- \"predicted_output_name\": predicted_output_name,\r\n- \"status\": status,\r\n- \"details\": details\r\n- })\r\n-\r\n- # Add any files found during walk but missed by classification (should be rare)\r\n- # These are likely unmatched as well.\r\n- for file_path in all_files_in_workspace:\r\n- if file_path not in processed_paths:\r\n- log.warning(f\"File found in workspace but not classified: {file_path}. Adding as Unmatched Extra.\")\r\n- results.append({\r\n- \"original_path\": str(file_path),\r\n- \"predicted_asset_name\": None, # Explicitly None as it wasn't mapped\r\n- \"predicted_output_name\": file_path.name,\r\n- \"status\": \"Unmatched Extra\",\r\n- \"details\": \"[Missed Classification]\"\r\n- })\r\n-\r\n-\r\n- log.info(f\"Detailed prediction complete for input '{self.input_path.name}'. 
Found {len(results)} files.\")\r\n- # Sort results by original path for consistent display\r\n- results.sort(key=lambda x: x.get(\"original_path\", \"\"))\r\n- return results\r\n-\r\n- except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n- log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n- return None # Indicate critical failure\r\n- finally:\r\n- # Ensure cleanup always happens\r\n- self._cleanup_workspace()\r\n-\r\n-\r\n # --- End of AssetProcessor Class ---\n\\ No newline at end of file\n" }, { "date": 1745348583264, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -255,8 +255,29 @@\n raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n source_dtype = img_loaded.dtype\r\n log.debug(f\"Loaded source {full_source_path.name}, dtype: {source_dtype}, shape: {img_loaded.shape}\")\r\n \r\n+ # MASK Handling (Extract alpha or convert) - Do this BEFORE general color conversions\r\n+ if _get_base_map_type(map_type) == 'MASK':\r\n+ log.debug(f\"Processing as MASK type for {source_path_rel.name}.\")\r\n+ shape = img_loaded.shape # Use img_loaded\r\n+ if len(shape) == 3 and shape[2] == 4:\r\n+ log.debug(\"MASK processing: Extracting alpha channel (4-channel source).\")\r\n+ img_prepared = img_loaded[:, :, 3] # Extract alpha from img_loaded\r\n+ elif len(shape) == 3 and shape[2] == 3:\r\n+ log.debug(\"MASK processing: Converting BGR to Grayscale (3-channel source).\") # OpenCV loads as BGR\r\n+ img_prepared = cv2.cvtColor(img_loaded, cv2.COLOR_BGR2GRAY) # Convert BGR to Gray\r\n+ elif len(shape) == 2:\r\n+ log.debug(\"MASK processing: Source is already grayscale.\")\r\n+ img_prepared = img_loaded # Keep as is\r\n+ else:\r\n+ log.warning(f\"MASK processing: Unexpected source shape {shape}. 
Cannot reliably extract mask.\")\r\n+ img_prepared = None # Cannot process\r\n+ # MASK should ideally be uint8 for saving later, but keep float for now if inverted?\r\n+ # Let _save_image handle final conversion based on format rules.\r\n+ else:\r\n+ # For non-MASK types, start with the loaded image\r\n+ img_prepared = img_loaded\r\n # --- 2. Initial Preparation (BGR->RGB, Gloss Inversion, MASK handling) ---\r\n img_prepared = img_loaded # Start with loaded image\r\n \r\n # BGR -> RGB conversion (only for 3-channel images)\r\n" }, { "date": 1745348810757, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -1653,8 +1653,31 @@\n # Add other dtype checks if needed (e.g., float32 -> 16?)\r\n \r\n if not possible_to_load: continue\r\n \r\n+ # --- Calculate Stats for ROUGH source if used and at stats resolution ---\r\n+ stats_res_key = self.config.calculate_stats_resolution\r\n+ if current_res_key == stats_res_key:\r\n+ log.debug(f\"Asset '{asset_name}': Checking for ROUGH source stats for '{output_map_type}' at {stats_res_key}\")\r\n+ for target_channel, source_map_type in inputs_mapping.items():\r\n+ if source_map_type == 'ROUGH' and source_map_type in loaded_inputs_data:\r\n+ log.debug(f\"Asset '{asset_name}': Calculating stats for ROUGH source (mapped to channel '{target_channel}') for '{output_map_type}' at {stats_res_key}\")\r\n+ rough_image_data = loaded_inputs_data[source_map_type]\r\n+ rough_stats = _calculate_image_stats(rough_image_data)\r\n+ if rough_stats:\r\n+ # Ensure the nested dictionary structure exists\r\n+ if \"merged_map_channel_stats\" not in current_asset_metadata:\r\n+ current_asset_metadata[\"merged_map_channel_stats\"] = {}\r\n+ if output_map_type not in current_asset_metadata[\"merged_map_channel_stats\"]:\r\n+ current_asset_metadata[\"merged_map_channel_stats\"][output_map_type] = {}\r\n+ if target_channel not in current_asset_metadata[\"merged_map_channel_stats\"][output_map_type]:\r\n+ 
current_asset_metadata[\"merged_map_channel_stats\"][output_map_type][target_channel] = {}\r\n+\r\n+ current_asset_metadata[\"merged_map_channel_stats\"][output_map_type][target_channel][stats_res_key] = rough_stats\r\n+ log.debug(f\"Asset '{asset_name}': Stored ROUGH stats for '{output_map_type}' channel '{target_channel}' at {stats_res_key}: {rough_stats}\")\r\n+ else:\r\n+ log.warning(f\"Asset '{asset_name}': Failed to calculate ROUGH stats for '{output_map_type}' channel '{target_channel}' at {stats_res_key}.\")\r\n+\r\n # --- Determine dimensions ---\r\n # All loaded inputs should have the same dimensions for this resolution\r\n first_map_type = next(iter(loaded_inputs_data))\r\n h, w = loaded_inputs_data[first_map_type].shape[:2]\r\n@@ -1761,8 +1784,11 @@\n # Start with the base metadata passed in for this asset\r\n final_metadata = current_asset_metadata.copy()\r\n \r\n # Populate map details from the specific asset's processing results\r\n+ # Add merged map channel stats\r\n+ final_metadata[\"merged_map_channel_stats\"] = current_asset_metadata.get(\"merged_map_channel_stats\", {}) # Get from passed metadata\r\n+\r\n final_metadata[\"processed_map_resolutions\"] = {}\r\n for map_type, res_dict in processed_maps_details_asset.items():\r\n keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]\r\n if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys)\r\n" }, { "date": 1745506912170, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -788,13 +788,58 @@\n raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n \r\n log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n try:\r\n- if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n- log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n- with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n- 
zip_ref.extractall(self.temp_dir)\r\n- log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n+ if self.input_path.is_file():\r\n+ suffix = self.input_path.suffix.lower()\r\n+ if suffix == '.zip':\r\n+ log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n+ with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n+ zip_ref.extractall(self.temp_dir)\r\n+ log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n+ elif suffix == '.rar':\r\n+ log.debug(f\"Extracting RAR file: {self.input_path}\")\r\n+ # rarfile requires unrar to be installed and in the system's PATH\r\n+ # We assume this is handled by the user's environment setup.\r\n+ # Basic error handling for common rarfile exceptions.\r\n+ try:\r\n+ with rarfile.RarFile(self.input_path, 'r') as rar_ref:\r\n+ rar_ref.extractall(self.temp_dir)\r\n+ log.info(f\"RAR extracted to {self.temp_dir}\")\r\n+ except rarfile.BadRarFile:\r\n+ raise AssetProcessingError(f\"Input file is not a valid RAR archive: {self.input_path.name}\")\r\n+ except rarfile.NeedFirstVolume:\r\n+ raise AssetProcessingError(f\"RAR archive is part of a multi-volume set, but the first volume is missing: {self.input_path.name}\")\r\n+ except rarfile.PasswordRequired:\r\n+ # As per plan, we don't handle passwords at this stage\r\n+ raise AssetProcessingError(f\"RAR archive is password protected. 
Skipping: {self.input_path.name}\")\r\n+ except rarfile.NoRarEntry:\r\n+ raise AssetProcessingError(f\"RAR archive is empty or corrupted: {self.input_path.name}\")\r\n+ except Exception as rar_err:\r\n+ # Catch any other unexpected rarfile errors\r\n+ raise AssetProcessingError(f\"Failed to extract RAR archive {self.input_path.name}: {rar_err}\") from rar_err\r\n+\r\n+ elif suffix == '.7z':\r\n+ log.debug(f\"Extracting 7z file: {self.input_path}\")\r\n+ # py7zr handles extraction directly\r\n+ try:\r\n+ with py7zr.SevenZipFile(self.input_path, mode='r') as sz_ref:\r\n+ sz_ref.extractall(path=self.temp_dir)\r\n+ log.info(f\"7z extracted to {self.temp_dir}\")\r\n+ except py7zr.Bad7zFile:\r\n+ raise AssetProcessingError(f\"Input file is not a valid 7z archive: {self.input_path.name}\")\r\n+ except py7zr.PasswordRequired:\r\n+ # As per plan, we don't handle passwords at this stage\r\n+ raise AssetProcessingError(f\"7z archive is password protected. Skipping: {self.input_path.name}\")\r\n+ except Exception as sz_err:\r\n+ # Catch any other unexpected py7zr errors\r\n+ raise AssetProcessingError(f\"Failed to extract 7z archive {self.input_path.name}: {sz_err}\") from sz_err\r\n+\r\n+ else:\r\n+ # If it's a file but not zip, rar, or 7z, treat it as an error for now\r\n+ # Or could add logic to copy single files? 
Plan says zip or folder.\r\n+ raise AssetProcessingError(f\"Input file is not a supported archive type (.zip, .rar, .7z): {self.input_path.name}\")\r\n+\r\n elif self.input_path.is_dir():\r\n log.debug(f\"Copying directory contents: {self.input_path}\")\r\n for item in self.input_path.iterdir():\r\n destination = self.temp_dir / item.name\r\n@@ -810,12 +855,18 @@\n \r\n else:\r\n shutil.copy2(item, destination)\r\n log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n- except zipfile.BadZipFile:\r\n- raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n+ else:\r\n+ # This case should be caught by __init__ but included for robustness\r\n+ raise AssetProcessingError(f\"Input path must be a directory or a supported archive file (.zip, .rar, .7z): {self.input_path}\")\r\n+\r\n+ except AssetProcessingError:\r\n+ # Re-raise our custom exception directly\r\n+ raise\r\n except Exception as e:\r\n- raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n+ # Wrap any other unexpected exceptions\r\n+ raise AssetProcessingError(f\"An unexpected error occurred during input extraction for {self.input_path.name}: {e}\") from e\r\n \r\n def _inventory_and_classify_files(self):\r\n \"\"\"\r\n Scans workspace, classifies files according to preset rules, handling\r\n" }, { "date": 1745507312101, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -188,10 +188,11 @@\n if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n \r\n if not input_path.exists():\r\n raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n- if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n- raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n+ supported_suffixes = ['.zip', '.rar', '.7z']\r\n+ if not 
(input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() in supported_suffixes)):\r\n+ raise AssetProcessingError(f\"Input path must be a directory or a supported archive file (.zip, .rar, .7z): {input_path}\")\r\n \r\n self.input_path: Path = input_path\r\n self.config: Configuration = config\r\n self.output_base_path: Path = output_base_path\r\n" }, { "date": 1745507513502, "content": "Index: \n===================================================================\n--- \n+++ \n@@ -13,14 +13,26 @@\n from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n from typing import List, Dict, Tuple, Optional # Added for type hinting\r\n from collections import defaultdict # Added for grouping\r\n \r\n+# Attempt to import archive libraries\r\n+try:\r\n+ import rarfile\r\n+ import py7zr\r\n+except ImportError as e:\r\n+ print(f\"ERROR: Missing required archive libraries: {e}\")\r\n+ print(\"Please install them using:\")\r\n+ print(\"pip install rarfile py7zr\")\r\n+ # Do not exit here, allow the script to run but extraction will fail for these types\r\n+ rarfile = None # Set to None so checks can still be made\r\n+ py7zr = None # Set to None\r\n+\r\n # Attempt to import image processing libraries\r\n try:\r\n import cv2\r\n import numpy as np\r\n except ImportError:\r\n- print(\"ERROR: Missing required libraries. Please install opencv-python and numpy:\")\r\n+ print(\"ERROR: Missing required image processing libraries. 
Please install opencv-python and numpy:\")\r\n print(\"pip install opencv-python numpy\")\r\n exit(1) # Exit if essential libraries are missing\r\n \r\n # Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\n" } ], "date": 1745225918059, "name": "Commit-0", "content": "# asset_processor.py\r\n\r\nimport os\r\nimport shutil\r\nimport tempfile\r\nimport zipfile\r\nimport logging\r\nimport json\r\nimport re\r\nimport time\r\nfrom pathlib import Path\r\nfrom fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview*\r\n\r\n# Attempt to import image processing libraries\r\ntry:\r\n import cv2\r\n import numpy as np\r\nexcept ImportError:\r\n print(\"ERROR: Missing required libraries. Please install opencv-python and numpy:\")\r\n print(\"pip install opencv-python numpy\")\r\n exit(1) # Exit if essential libraries are missing\r\n\r\n# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types\r\ntry:\r\n import OpenEXR\r\n import Imath\r\n _HAS_OPENEXR = True\r\nexcept ImportError:\r\n _HAS_OPENEXR = False\r\n # Log this information - basic EXR might still work via OpenCV\r\n logging.debug(\"Optional 'OpenEXR' python package not found. 
EXR saving relies on OpenCV's built-in support.\")\r\n\r\n\r\n# Assuming Configuration class is in configuration.py\r\ntry:\r\n from configuration import Configuration, ConfigurationError\r\nexcept ImportError:\r\n print(\"ERROR: Cannot import Configuration class from configuration.py.\")\r\n print(\"Ensure configuration.py is in the same directory or Python path.\")\r\n exit(1)\r\n\r\n# Use logger defined in main.py (or configure one here if run standalone)\r\nlog = logging.getLogger(__name__)\r\n# Basic config if logger hasn't been set up elsewhere (e.g., during testing)\r\nif not log.hasHandlers():\r\n logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level\r\n\r\n\r\n# --- Custom Exception ---\r\nclass AssetProcessingError(Exception):\r\n \"\"\"Custom exception for errors during asset processing.\"\"\"\r\n pass\r\n\r\n# --- Helper Functions ---\r\ndef calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]:\r\n \"\"\"Calculates dimensions maintaining aspect ratio, fitting within target_max_dim.\"\"\"\r\n if orig_w <= 0 or orig_h <= 0: return (target_max_dim, target_max_dim) # Avoid division by zero\r\n\r\n ratio = orig_w / orig_h\r\n if ratio > 1: # Width is dominant\r\n target_w = target_max_dim\r\n target_h = max(1, round(target_w / ratio)) # Ensure height is at least 1\r\n else: # Height is dominant or square\r\n target_h = target_max_dim\r\n target_w = max(1, round(target_h * ratio)) # Ensure width is at least 1\r\n return int(target_w), int(target_h)\r\n\r\ndef _calculate_image_stats(image_data: np.ndarray) -> dict | None:\r\n \"\"\"\r\n Calculates min, max, mean for a given numpy image array.\r\n Handles grayscale and multi-channel images. 
Converts to float64 for calculation.\r\n \"\"\"\r\n if image_data is None:\r\n log.warning(\"Attempted to calculate stats on None image data.\")\r\n return None\r\n try:\r\n # Use float64 for calculations to avoid potential overflow/precision issues\r\n data_float = image_data.astype(np.float64)\r\n\r\n if len(data_float.shape) == 2: # Grayscale (H, W)\r\n min_val = float(np.min(data_float))\r\n max_val = float(np.max(data_float))\r\n mean_val = float(np.mean(data_float))\r\n stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n log.debug(f\"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}\")\r\n elif len(data_float.shape) == 3: # Color (H, W, C)\r\n channels = data_float.shape[2]\r\n min_val = [float(v) for v in np.min(data_float, axis=(0, 1))]\r\n max_val = [float(v) for v in np.max(data_float, axis=(0, 1))]\r\n mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))]\r\n # Assuming BGR(A?) order from OpenCV, report as list [B, G, R, (A)]\r\n stats = {\"min\": min_val, \"max\": max_val, \"mean\": mean_val}\r\n log.debug(f\"Calculated {channels}-Channel Stats: Min={min_val}, Max={max_val}, Mean={mean_val}\")\r\n else:\r\n log.warning(f\"Cannot calculate stats for image with unsupported shape {data_float.shape}\")\r\n return None\r\n return stats\r\n except Exception as e:\r\n log.error(f\"Error calculating image stats: {e}\", exc_info=True) # Log exception info\r\n return {\"error\": str(e)}\r\n\r\n\r\nfrom collections import defaultdict # Added for grouping\r\n\r\n# --- Helper function ---\r\ndef _get_base_map_type(target_map_string: str) -> str:\r\n \"\"\"Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').\"\"\"\r\n match = re.match(r\"([a-zA-Z]+)\", target_map_string)\r\n if match:\r\n return match.group(1).upper()\r\n return target_map_string.upper() # Fallback if no number suffix\r\n\r\n# --- Asset Processor Class ---\r\nclass AssetProcessor:\r\n \"\"\"\r\n Handles 
the processing pipeline for a single asset (ZIP or folder).\r\n \"\"\"\r\n # Define the list of known grayscale map types (adjust as needed)\r\n GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK']\r\n\r\n def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False):\r\n \"\"\"\r\n Initializes the processor for a given input asset.\r\n\r\n Args:\r\n input_path: Path to the input ZIP file or folder.\r\n config: The loaded Configuration object.\r\n output_base_path: The base directory where processed output will be saved.\r\n overwrite: If True, forces reprocessing even if output exists.\r\n \"\"\"\r\n if not isinstance(input_path, Path): input_path = Path(input_path)\r\n if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path)\r\n if not isinstance(config, Configuration): raise TypeError(\"config must be a Configuration object.\")\r\n\r\n if not input_path.exists():\r\n raise AssetProcessingError(f\"Input path does not exist: {input_path}\")\r\n if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() == '.zip')):\r\n raise AssetProcessingError(f\"Input path must be a directory or a .zip file: {input_path}\")\r\n\r\n self.input_path: Path = input_path\r\n self.config: Configuration = config\r\n self.output_base_path: Path = output_base_path\r\n self.overwrite: bool = overwrite # Store the overwrite flag\r\n\r\n self.temp_dir: Path | None = None # Path to the temporary working directory\r\n self.classified_files: dict[str, list[dict]] = {\r\n \"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []\r\n }\r\n self.processed_maps_details: dict[str, dict[str, dict]] = {}\r\n self.merged_maps_details: dict[str, dict[str, dict]] = {}\r\n self.metadata_file_path_temp: Path | None = None\r\n\r\n # Initialize metadata collected during processing\r\n self.metadata: dict = {\r\n \"asset_name\": \"Unknown\",\r\n \"supplier_name\": 
self.config.supplier_name,\r\n \"asset_category\": self.config._core_settings.get('DEFAULT_ASSET_CATEGORY', \"Texture\"),\r\n \"archetype\": \"Unknown\",\r\n \"maps_present\": [],\r\n \"merged_maps\": [],\r\n \"shader_features\": [],\r\n \"source_files_in_extra\": [],\r\n \"image_stats_1k\": {},\r\n \"map_details\": {},\r\n \"aspect_ratio_change_string\": \"N/A\" # Replaces output_scaling_factors\r\n # Processing info added in _generate_metadata_file\r\n }\r\n\r\n log.debug(f\"AssetProcessor initialized for: {self.input_path.name}\")\r\n\r\n def process(self) -> str:\r\n \"\"\"\r\n Executes the full processing pipeline for the asset.\r\n Returns:\r\n str: Status (\"processed\", \"skipped\").\r\n \"\"\"\r\n log.info(f\"Processing asset: {self.input_path.name}\")\r\n try:\r\n self._setup_workspace()\r\n self._extract_input()\r\n self._inventory_and_classify_files()\r\n self._determine_base_metadata()\r\n\r\n # --- Check if asset should be skipped ---\r\n # Ensure asset_name and supplier_name were determined before checking\r\n asset_name = self.metadata.get(\"asset_name\")\r\n supplier_name = self.metadata.get(\"supplier_name\")\r\n\r\n # Only check for skipping if overwrite is False AND we have valid names\r\n if not self.overwrite and asset_name and asset_name != \"UnknownAssetName\" and supplier_name:\r\n supplier_sanitized = self._sanitize_filename(supplier_name)\r\n asset_name_sanitized = self._sanitize_filename(asset_name)\r\n final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n metadata_file_path = final_dir / self.config.metadata_filename\r\n\r\n if final_dir.exists() and metadata_file_path.is_file():\r\n log.info(f\"Output directory and metadata found for '{asset_name_sanitized}' and overwrite is False. 
Skipping.\")\r\n # No need to call cleanup here, the finally block will handle it.\r\n return \"skipped\" # Return status\r\n elif self.overwrite:\r\n # Log only if asset name is known, otherwise it's redundant with the initial processing log\r\n known_asset_name = self.metadata.get('asset_name', self.input_path.name)\r\n # Avoid logging overwrite message if name is still unknown\r\n if known_asset_name != \"UnknownAssetName\" and known_asset_name != self.input_path.name:\r\n log.info(f\"Overwrite flag is set for '{known_asset_name}'. Processing will continue even if output exists.\")\r\n # --- End Skip Check ---\r\n\r\n # Continue with processing if not skipped\r\n self._process_maps()\r\n self._merge_maps()\r\n self._generate_metadata_file()\r\n self._organize_output_files()\r\n log.info(f\"Asset processing completed successfully for: {self.metadata.get('asset_name', self.input_path.name)}\")\r\n return \"processed\" # Return status\r\n except Exception as e:\r\n # Log error with traceback if it hasn't been logged already\r\n if not isinstance(e, (AssetProcessingError, ConfigurationError)): # Avoid double logging known types\r\n log.exception(f\"Asset processing failed unexpectedly for {self.input_path.name}: {e}\")\r\n # Ensure error is propagated\r\n if not isinstance(e, AssetProcessingError):\r\n raise AssetProcessingError(f\"Failed processing {self.input_path.name}: {e}\") from e\r\n else:\r\n raise # Re-raise AssetProcessingError or ConfigurationError\r\n finally:\r\n # Ensure cleanup always happens\r\n self._cleanup_workspace()\r\n\r\n def _setup_workspace(self):\r\n \"\"\"Creates a temporary directory for processing.\"\"\"\r\n try:\r\n self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))\r\n log.debug(f\"Created temporary workspace: {self.temp_dir}\")\r\n except Exception as e:\r\n raise AssetProcessingError(f\"Failed to create temporary workspace: {e}\") from e\r\n\r\n def _extract_input(self):\r\n \"\"\"Extracts ZIP or copies folder 
contents to the temporary workspace.\"\"\"\r\n if not self.temp_dir:\r\n raise AssetProcessingError(\"Temporary workspace not setup before extraction.\")\r\n\r\n log.info(f\"Preparing source files from {self.input_path.name}...\")\r\n try:\r\n if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n log.debug(f\"Extracting ZIP file: {self.input_path}\")\r\n with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n zip_ref.extractall(self.temp_dir)\r\n log.info(f\"ZIP extracted to {self.temp_dir}\")\r\n elif self.input_path.is_dir():\r\n log.debug(f\"Copying directory contents: {self.input_path}\")\r\n for item in self.input_path.iterdir():\r\n destination = self.temp_dir / item.name\r\n if item.is_dir():\r\n # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)\r\n try:\r\n shutil.copytree(item, destination, dirs_exist_ok=True)\r\n except TypeError: # Fallback for older Python\r\n if not destination.exists():\r\n shutil.copytree(item, destination)\r\n else:\r\n log.warning(f\"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).\")\r\n\r\n else:\r\n shutil.copy2(item, destination)\r\n log.info(f\"Directory contents copied to {self.temp_dir}\")\r\n except zipfile.BadZipFile:\r\n raise AssetProcessingError(f\"Input file is not a valid ZIP archive: {self.input_path.name}\")\r\n except Exception as e:\r\n raise AssetProcessingError(f\"Failed to extract/copy input {self.input_path.name}: {e}\") from e\r\n\r\n def _inventory_and_classify_files(self):\r\n \"\"\"\r\n Scans workspace, classifies files according to preset rules, handling\r\n 16-bit prioritization and multiple variants of the same base map type.\r\n \"\"\"\r\n if not self.temp_dir:\r\n raise AssetProcessingError(\"Temporary workspace not setup before inventory.\")\r\n\r\n log.info(\"Scanning and classifying files...\")\r\n log.debug(\"--- Starting File Inventory and Classification (v2) ---\")\r\n all_files_rel 
= []\r\n for root, _, files in os.walk(self.temp_dir):\r\n root_path = Path(root)\r\n for file in files:\r\n full_path = root_path / file\r\n relative_path = full_path.relative_to(self.temp_dir)\r\n all_files_rel.append(relative_path)\r\n\r\n log.debug(f\"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}\")\r\n\r\n # --- Initialization ---\r\n processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)\r\n potential_map_candidates = [] # List to store potential map file info\r\n # Reset classified files (important if this method is ever called multiple times)\r\n self.classified_files = {\"maps\": [], \"models\": [], \"extra\": [], \"ignored\": []}\r\n\r\n\r\n # --- Step 1: Identify Explicit 'Extra' Files ---\r\n log.debug(\"Step 1: Checking for files to move to 'Extra' (using regex)...\")\r\n compiled_extra_regex = getattr(self.config, 'compiled_extra_regex', [])\r\n log.debug(f\" Compiled 'Extra' regex patterns: {[r.pattern for r in compiled_extra_regex]}\")\r\n for file_rel_path in all_files_rel:\r\n if file_rel_path in processed_files: continue\r\n for compiled_regex in compiled_extra_regex:\r\n if compiled_regex.search(file_rel_path.name):\r\n log.debug(f\" REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.\")\r\n self.classified_files[\"extra\"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})\r\n processed_files.add(file_rel_path)\r\n log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n break # Stop checking extra patterns for this file\r\n\r\n # --- Step 2: Identify Model Files ---\r\n log.debug(\"Step 2: Identifying model files (using regex)...\")\r\n compiled_model_regex = getattr(self.config, 'compiled_model_regex', [])\r\n log.debug(f\" Compiled 'Model' regex patterns: {[r.pattern for r in compiled_model_regex]}\")\r\n for file_rel_path in all_files_rel:\r\n if file_rel_path in 
processed_files: continue\r\n for compiled_regex in compiled_model_regex:\r\n if compiled_regex.search(file_rel_path.name):\r\n log.debug(f\" REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.\")\r\n self.classified_files[\"models\"].append({'source_path': file_rel_path})\r\n processed_files.add(file_rel_path)\r\n log.debug(f\" Added '{file_rel_path}' to processed files.\")\r\n break # Stop checking model patterns for this file\r\n\r\n # --- Step 3: Gather Potential Map Candidates (Refactored) ---\r\n log.debug(\"Step 3: Gathering potential map candidates (iterating files first)...\")\r\n # Compiled map keyword regex now maps: base_type -> [(regex, keyword, rule_index), ...]\r\n compiled_map_keyword_regex_tuples = getattr(self.config, 'compiled_map_keyword_regex', {})\r\n\r\n for file_rel_path in all_files_rel:\r\n # Skip files already classified as Extra or Model\r\n if file_rel_path in processed_files:\r\n continue\r\n\r\n file_stem = file_rel_path.stem\r\n match_found = False\r\n\r\n # Iterate through base types and their associated regex tuples\r\n for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():\r\n if match_found: break # Stop checking types for this file once matched\r\n\r\n # Get the original keywords list for the current rule index\r\n # Assuming self.config.map_type_mapping holds the original list of dicts from JSON\r\n original_rule = None\r\n # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)\r\n if regex_tuples:\r\n current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple\r\n if hasattr(self.config, 'map_type_mapping') and current_rule_index < len(self.config.map_type_mapping):\r\n rule_candidate = self.config.map_type_mapping[current_rule_index]\r\n # Verify it's the correct rule by checking target_type\r\n if rule_candidate.get(\"target_type\") == base_map_type:\r\n original_rule = 
rule_candidate\r\n else:\r\n log.warning(f\"Rule index mismatch for {base_map_type} at index {current_rule_index}. Searching...\")\r\n # Fallback search if index doesn't match (shouldn't happen ideally)\r\n for idx, rule in enumerate(self.config.map_type_mapping):\r\n if rule.get(\"target_type\") == base_map_type:\r\n original_rule = rule\r\n log.warning(f\"Found rule for {base_map_type} at index {idx} instead.\")\r\n break\r\n\r\n original_keywords_list = []\r\n if original_rule and 'keywords' in original_rule:\r\n original_keywords_list = original_rule['keywords']\r\n else:\r\n log.warning(f\"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. Keyword indexing may fail.\")\r\n\r\n for kw_regex, original_keyword, rule_index in regex_tuples:\r\n if kw_regex.search(file_stem):\r\n log.debug(f\" Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'\")\r\n\r\n # Find the index of the matched keyword within its rule's list\r\n keyword_index_in_rule = -1 # Default if not found\r\n if original_keywords_list:\r\n try:\r\n # Use the original_keyword string directly\r\n keyword_index_in_rule = original_keywords_list.index(original_keyword)\r\n except ValueError:\r\n log.warning(f\"Keyword '{original_keyword}' not found in its original rule list? 
{original_keywords_list}\")\r\n else:\r\n log.warning(f\"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.\")\r\n\r\n # Add candidate only if not already added\r\n if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n potential_map_candidates.append({\r\n 'source_path': file_rel_path,\r\n 'matched_keyword': original_keyword,\r\n 'base_map_type': base_map_type,\r\n 'preset_rule_index': rule_index,\r\n 'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX\r\n 'is_16bit_source': False\r\n })\r\n else:\r\n log.warning(f\" '{file_rel_path}' was already added as a candidate? Skipping duplicate add.\")\r\n\r\n match_found = True\r\n break # Stop checking regex tuples for this base_type once matched\r\n\r\n log.debug(f\"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.\")\r\n\r\n # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---\r\n log.debug(\"Step 3.5: Checking for standalone 16-bit variants...\")\r\n compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n for file_rel_path in all_files_rel:\r\n # Skip if already processed or already identified as a candidate\r\n if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):\r\n continue\r\n\r\n for base_type, compiled_regex in compiled_bit_depth_regex.items():\r\n log.debug(f\" Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}\") # ADDED LOG\r\n match = compiled_regex.search(file_rel_path.name) # Store result\r\n if match:\r\n log.debug(f\" --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'\") # MODIFIED LOG\r\n potential_map_candidates.append({\r\n 'source_path': file_rel_path,\r\n 'matched_keyword': 'N/A (16bit variant)', # Placeholder keyword\r\n 'base_map_type': 
base_type,\r\n 'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority\r\n 'is_16bit_source': True # Mark as 16-bit immediately\r\n })\r\n log.debug(f\" Added candidate: {potential_map_candidates[-1]}\")\r\n # Don't add to processed_files yet, let Step 4 handle filtering\r\n break # Stop checking bit depth patterns for this file\r\n\r\n log.debug(f\"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}\")\r\n\r\n # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---\r\n log.debug(\"Step 4: Prioritizing 16-bit variants and filtering candidates...\")\r\n compiled_bit_depth_regex = getattr(self.config, 'compiled_bit_depth_regex_map', {})\r\n candidates_to_keep = []\r\n candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit\r\n\r\n # Mark 16-bit candidates\r\n for candidate in potential_map_candidates:\r\n base_type = candidate['base_map_type']\r\n # Check if the base type exists in the bit depth map AND the filename matches the regex\r\n if base_type in compiled_bit_depth_regex:\r\n if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):\r\n candidate['is_16bit_source'] = True\r\n log.debug(f\" Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.\")\r\n\r\n\r\n # Identify base types that have a 16-bit version present\r\n prioritized_16bit_bases = {\r\n candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']\r\n }\r\n log.debug(f\" Base map types with 16-bit variants found: {prioritized_16bit_bases}\")\r\n\r\n # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type\r\n for candidate in potential_map_candidates:\r\n if candidate['is_16bit_source']:\r\n candidates_to_keep.append(candidate)\r\n log.debug(f\" Keeping 16-bit candidate: {candidate['source_path']} ({candidate['base_map_type']})\")\r\n elif candidate['base_map_type'] not in 
prioritized_16bit_bases:\r\n candidates_to_keep.append(candidate)\r\n log.debug(f\" Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n else:\r\n # This is an 8-bit candidate whose 16-bit counterpart exists\r\n candidates_to_ignore.append(candidate)\r\n log.debug(f\" Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})\")\r\n\r\n # Add ignored 8-bit files to the main ignored list\r\n for ignored_candidate in candidates_to_ignore:\r\n self.classified_files[\"ignored\"].append({\r\n 'source_path': ignored_candidate['source_path'],\r\n 'reason': f'Superseded by 16bit variant for {ignored_candidate[\"base_map_type\"]}'\r\n })\r\n processed_files.add(ignored_candidate['source_path']) # Mark as processed\r\n\r\n log.debug(f\"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}\")\r\n\r\n # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---\r\n log.debug(\"Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...\")\r\n # from collections import defaultdict # Moved import to top of file\r\n grouped_by_base_type = defaultdict(list)\r\n for candidate in candidates_to_keep:\r\n grouped_by_base_type[candidate['base_map_type']].append(candidate)\r\n\r\n final_map_list = []\r\n for base_map_type, candidates in grouped_by_base_type.items():\r\n log.debug(f\" Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)\")\r\n\r\n # --- NEW SORTING LOGIC ---\r\n # Sort candidates based on:\r\n # 1. The index of the rule object in the preset's map_type_mapping list.\r\n # 2. The index of the matched keyword within that rule object's 'keywords' list.\r\n # 3. 
Alphabetical order of the source file path as a tie-breaker.\r\n candidates.sort(key=lambda c: (\r\n c.get('preset_rule_index', 9999), # Use get with fallback for safety\r\n c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety\r\n str(c['source_path'])\r\n ))\r\n # --- END NEW SORTING LOGIC ---\r\n\r\n # Removed diagnostic log\r\n\r\n # Assign suffixes and add to the final map list\r\n for i, final_candidate in enumerate(candidates): # Use the directly sorted list\r\n # Determine final map type based on the new rule\r\n if base_map_type in self.config.respect_variant_map_types: # Check the new config list\r\n # Always assign suffix for types in the list (if more than one or only one)\r\n final_map_type = f\"{base_map_type}-{i + 1}\"\r\n else:\r\n # Never assign suffix for types NOT in the list\r\n final_map_type = base_map_type\r\n\r\n final_map_list.append({\r\n \"map_type\": final_map_type,\r\n \"source_path\": final_candidate[\"source_path\"],\r\n \"source_keyword\": final_candidate[\"matched_keyword\"],\r\n \"is_16bit_source\": final_candidate[\"is_16bit_source\"],\r\n \"original_extension\": final_candidate[\"source_path\"].suffix.lower() # Store original extension\r\n })\r\n processed_files.add(final_candidate[\"source_path\"]) # Mark final map source as processed\r\n log.debug(f\" Final Map: Type='{final_map_type}', Source='{final_candidate['source_path']}', Keyword='{final_candidate['matched_keyword']}', 16bit={final_candidate['is_16bit_source']}\")\r\n\r\n self.classified_files[\"maps\"] = final_map_list\r\n\r\n # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---\r\n log.debug(\"Step 6: Classifying remaining files as 'Unrecognised'...\")\r\n remaining_count = 0\r\n for file_rel_path in all_files_rel:\r\n if file_rel_path not in processed_files:\r\n log.debug(f\" Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).\")\r\n self.classified_files[\"extra\"].append({'source_path': file_rel_path, 
'reason': 'Unrecognised'})\r\n remaining_count += 1\r\n # No need to add to processed_files here, it's the final step\r\n log.debug(f\" Marked {remaining_count} remaining files as 'Unrecognised'.\")\r\n\r\n # --- Final Summary ---\r\n # Update metadata list of files actually moved to extra (including Unrecognised and Ignored)\r\n self.metadata[\"source_files_in_extra\"] = sorted([\r\n str(f['source_path']) for f in self.classified_files['extra'] + self.classified_files['ignored']\r\n ])\r\n log.info(f\"File classification complete.\")\r\n log.debug(\"--- Final Classification Summary (v2) ---\")\r\n map_details_log = [f\"{m['map_type']}:{m['source_path']}\" for m in self.classified_files[\"maps\"]]\r\n model_details_log = [str(f['source_path']) for f in self.classified_files[\"models\"]]\r\n extra_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"extra\"]]\r\n ignored_details_log = [f\"{str(f['source_path'])} ({f['reason']})\" for f in self.classified_files[\"ignored\"]]\r\n log.debug(f\" Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}\")\r\n log.debug(f\" Model Files ({len(self.classified_files['models'])}): {model_details_log}\")\r\n log.debug(f\" Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}\")\r\n log.debug(f\" Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}\")\r\n log.debug(\"--- End File Inventory and Classification (v2) ---\")\r\n\r\n\r\n def _determine_base_metadata(self):\r\n \"\"\"Determines base_name, asset_category, and archetype.\"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n log.info(\"Determining base metadata...\")\r\n\r\n # --- Determine Asset Category ---\r\n self.metadata[\"asset_category\"] = self.config.default_asset_category # Start with default\r\n if self.classified_files[\"models\"]:\r\n self.metadata[\"asset_category\"] = \"Asset\"\r\n log.debug(\"Category set 
to 'Asset' due to model file presence.\")\r\n else:\r\n decal_keywords = self.config.asset_category_rules.get('decal_keywords', [])\r\n found_decal = False\r\n candidate_files = [f['source_path'] for f in self.classified_files['maps']] # Check map names first\r\n if not candidate_files: # Fallback to checking all files? Maybe too broad. Check Extra?\r\n candidate_files = [f['source_path'] for f in self.classified_files['extra']]\r\n\r\n if decal_keywords:\r\n for file_path in candidate_files:\r\n for keyword in decal_keywords:\r\n if keyword.lower() in file_path.name.lower():\r\n self.metadata[\"asset_category\"] = \"Decal\"\r\n found_decal = True; break\r\n if found_decal: break\r\n if found_decal: log.debug(\"Category set to 'Decal' due to keyword match.\")\r\n\r\n # --- Determine Base Name ---\r\n map_stems = [f['source_path'].stem for f in self.classified_files['maps']]\r\n model_stems = [f['source_path'].stem for f in self.classified_files['models']]\r\n candidate_stems = map_stems + model_stems\r\n\r\n determined_base_name = \"UnknownAssetName\"\r\n if candidate_stems:\r\n separator = self.config.source_naming_separator\r\n base_index = self.config.source_naming_indices.get('base_name')\r\n\r\n if isinstance(base_index, int):\r\n potential_base_names = set()\r\n for stem in candidate_stems:\r\n parts = stem.split(separator)\r\n if len(parts) > base_index:\r\n potential_base_names.add(parts[base_index])\r\n if len(potential_base_names) == 1:\r\n determined_base_name = potential_base_names.pop()\r\n log.debug(f\"Determined base name '{determined_base_name}' from structured parts (index {base_index}).\")\r\n elif len(potential_base_names) > 1 :\r\n log.warning(f\"Multiple potential base names found from parts index {base_index}: {potential_base_names}. 
Falling back to common prefix.\")\r\n # Fallback logic if structured parts method fails or yields multiple names\r\n determined_base_name = os.path.commonprefix(candidate_stems)\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') # Clean trailing separators etc.\r\n # else: len is 0, means no valid parts found, use common prefix below\r\n\r\n # If no index or structured parts failed, use common prefix of all relevant stems\r\n if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n determined_base_name = os.path.commonprefix(candidate_stems)\r\n log.debug(f\"Using common prefix: '{determined_base_name}'\")\r\n # Clean up common separators/underscores often left by commonprefix\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n\r\n # Final cleanup and fallback for base name\r\n determined_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n self.metadata[\"asset_name\"] = determined_base_name\r\n log.debug(f\"Final Determined Base Name: {self.metadata['asset_name']}\")\r\n\r\n # --- Determine Archetype (Usage) ---\r\n archetype_rules = self.config.archetype_rules\r\n determined_archetype = \"Unknown\"\r\n check_stems = [f['source_path'].stem.lower() for f in self.classified_files['maps']]\r\n check_stems.extend([f['source_path'].stem.lower() for f in self.classified_files['models']])\r\n # Also check the determined base name itself?\r\n check_stems.append(self.metadata[\"asset_name\"].lower())\r\n\r\n if check_stems:\r\n best_match_archetype = \"Unknown\"\r\n highest_match_count = 0 # Simple heuristic: prioritize rule with most keyword hits?\r\n\r\n for rule in archetype_rules:\r\n if len(rule) != 2 or not isinstance(rule[1], dict): continue\r\n arch_name, rules_dict = rule\r\n match_any = rules_dict.get(\"match_any\", [])\r\n # match_all = rules_dict.get(\"match_all\", []) # Add logic if needed\r\n\r\n 
current_match_count = 0\r\n matched_any_keyword = False\r\n if match_any:\r\n for keyword in match_any:\r\n kw_lower = keyword.lower()\r\n for stem in check_stems:\r\n # Using simple substring check again\r\n if kw_lower in stem:\r\n current_match_count += 1\r\n matched_any_keyword = True\r\n # Optionally break after first hit per keyword? Or count all occurrences? Count all for now.\r\n\r\n # Decide if this rule matches. For now: requires at least one 'match_any' hit.\r\n if matched_any_keyword:\r\n # Simple approach: first rule that matches wins.\r\n # Could be enhanced by prioritizing rules or counting hits.\r\n if best_match_archetype == \"Unknown\": # Take the first match\r\n best_match_archetype = arch_name\r\n log.debug(f\"Tentative archetype match '{arch_name}' based on keywords: {match_any}\")\r\n # Break here for \"first match wins\" logic\r\n break\r\n\r\n # --- Example: Prioritize by match count (more complex) ---\r\n # if current_match_count > highest_match_count:\r\n # highest_match_count = current_match_count\r\n # best_match_archetype = arch_name\r\n # log.debug(f\"New best archetype match '{arch_name}' with count {current_match_count}\")\r\n # ----------------------------------------------------------\r\n\r\n determined_archetype = best_match_archetype\r\n\r\n self.metadata[\"archetype\"] = determined_archetype\r\n log.debug(f\"Determined Archetype: {self.metadata['archetype']}\")\r\n log.info(\"Base metadata determination complete.\")\r\n\r\n\r\n def _process_maps(self):\r\n \"\"\"Loads, processes, resizes, and saves classified map files.\"\"\"\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n log.info(f\"Processing identified map files for '{self.metadata['asset_name']}'...\")\r\n processed_map_types = set()\r\n\r\n # --- Settings retrieval ---\r\n resolutions = self.config.image_resolutions\r\n stats_res_key = self.config.calculate_stats_resolution\r\n stats_target_dim = resolutions.get(stats_res_key)\r\n if not 
stats_target_dim: log.warning(f\"Stats resolution key '{stats_res_key}' not found. Stats skipped.\")\r\n gloss_keywords = self.config.source_glossiness_keywords\r\n target_pattern = self.config.target_filename_pattern\r\n base_name = self.metadata['asset_name']\r\n\r\n # --- Pre-process Glossiness -> Roughness ---\r\n preprocessed_data = {}\r\n derived_from_gloss_flag = {}\r\n gloss_map_info_for_rough, native_rough_map_info = None, None\r\n for map_info in self.classified_files['maps']:\r\n if map_info['map_type'] == 'ROUGH':\r\n is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords)\r\n if is_gloss: gloss_map_info_for_rough = map_info\r\n else: native_rough_map_info = map_info\r\n\r\n rough_source_to_use = None\r\n if gloss_map_info_for_rough:\r\n rough_source_to_use = gloss_map_info_for_rough\r\n derived_from_gloss_flag['ROUGH'] = True\r\n if native_rough_map_info:\r\n log.warning(f\"Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found. 
Prioritizing Gloss.\")\r\n if native_rough_map_info in self.classified_files['maps']: self.classified_files['maps'].remove(native_rough_map_info)\r\n self.classified_files['ignored'].append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'})\r\n elif native_rough_map_info:\r\n rough_source_to_use = native_rough_map_info\r\n derived_from_gloss_flag['ROUGH'] = False\r\n\r\n if derived_from_gloss_flag.get('ROUGH'):\r\n source_path = self.temp_dir / rough_source_to_use['source_path']\r\n log.info(f\"Pre-processing Gloss map '{source_path.name}' for Roughness inversion...\")\r\n try:\r\n img_gloss = cv2.imread(str(source_path), cv2.IMREAD_UNCHANGED)\r\n if img_gloss is None: raise ValueError(\"Failed to load gloss image\")\r\n original_gloss_dtype = img_gloss.dtype # Store original dtype\r\n if len(img_gloss.shape) == 3: img_gloss = cv2.cvtColor(img_gloss, cv2.COLOR_BGR2GRAY)\r\n if original_gloss_dtype == np.uint16: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 65535.0)\r\n elif original_gloss_dtype == np.uint8: img_inv_f = 1.0 - (img_gloss.astype(np.float32) / 255.0)\r\n else: img_inv_f = 1.0 - img_gloss.astype(np.float32) # Assuming float input if not uint8/16\r\n # Store tuple: (inverted_float_data, original_dtype)\r\n preprocessed_data['ROUGH'] = (np.clip(img_inv_f, 0.0, 1.0), original_gloss_dtype)\r\n log.debug(f\"Inverted gloss map stored as float32 for ROUGH, original dtype: {original_gloss_dtype}\")\r\n except Exception as e: log.error(f\"Failed to invert gloss map {source_path.name}: {e}\", exc_info=True); derived_from_gloss_flag.pop('ROUGH', None)\r\n\r\n # --- Main Processing Loop ---\r\n maps_to_process = list(self.classified_files['maps'])\r\n for map_info in maps_to_process:\r\n map_type = map_info['map_type']\r\n source_path_rel = map_info['source_path']\r\n original_extension = map_info.get('original_extension', '.png') # Get original ext, default if missing\r\n log.info(f\"-- Processing Map Type: 
{map_type} (Source: {source_path_rel.name}, Orig Ext: {original_extension}) --\")\r\n img_processed, source_dtype = None, None\r\n map_details = {\"derived_from_gloss\": derived_from_gloss_flag.get(map_type, False)}\r\n\r\n try:\r\n # --- 1. Get/Load Source Data ---\r\n if map_type in preprocessed_data:\r\n log.debug(f\"Using pre-processed data for {map_type}.\")\r\n # Unpack tuple: (inverted_float_data, original_dtype)\r\n img_processed, source_dtype = preprocessed_data[map_type]\r\n # No longer need to read the original file just for dtype\r\n else:\r\n full_source_path = self.temp_dir / source_path_rel\r\n # Determine the read flag based on map type\r\n read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n log.debug(f\"Loading source {source_path_rel.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n img_loaded = cv2.imread(str(full_source_path), read_flag)\r\n if img_loaded is None:\r\n raise AssetProcessingError(f\"Failed to load image file: {full_source_path.name} with flag {read_flag}\")\r\n img_processed, source_dtype = img_loaded.copy(), img_loaded.dtype\r\n log.debug(f\"Loaded source {source_path_rel.name}, dtype: {source_dtype}, shape: {img_processed.shape}\")\r\n map_details[\"source_bit_depth\"] = 16 if source_dtype == np.uint16 else 8\r\n\r\n # --- 2. 
Handle Alpha Mask ---\r\n if map_type == 'MASK' and img_processed is not None:\r\n log.debug(\"Processing as MASK type.\")\r\n shape = img_processed.shape\r\n if len(shape) == 3 and shape[2] == 4: img_processed = img_processed[:, :, 3]\r\n elif len(shape) == 3 and shape[2] == 3: img_processed = cv2.cvtColor(img_processed, cv2.COLOR_BGR2GRAY)\r\n if img_processed.dtype != np.uint8:\r\n log.debug(f\"Converting mask from {img_processed.dtype} to uint8.\")\r\n if source_dtype == np.uint16: img_processed = (img_processed.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n elif img_processed.dtype in [np.float16, np.float32]: img_processed = (np.clip(img_processed, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n else: img_processed = img_processed.astype(np.uint8)\r\n\r\n if img_processed is None: raise AssetProcessingError(\"Image data is None after initial processing.\")\r\n orig_h, orig_w = img_processed.shape[:2]\r\n self.processed_maps_details.setdefault(map_type, {})\r\n max_original_dimension = max(orig_w, orig_h) # Get max original dimension\r\n\r\n # --- 3. Resize & Save Loop ---\r\n for res_key, target_dim in resolutions.items():\r\n # --- Skip Upscaling ---\r\n if target_dim > max_original_dimension:\r\n log.debug(f\"Skipping {res_key} ({target_dim}px): Target dimension is larger than original max dimension ({max_original_dimension}px).\")\r\n continue\r\n log.debug(f\"Processing {map_type} for resolution: {res_key}...\")\r\n if orig_w <= 0 or orig_h <= 0: log.warning(f\"Invalid original dims, skipping resize {res_key}.\"); continue\r\n target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim)\r\n interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC\r\n try: img_resized = cv2.resize(img_processed, (target_w, target_h), interpolation=interpolation)\r\n except Exception as resize_err: log.error(f\"Resize failed for {map_type} to {res_key}: {resize_err}\", exc_info=True); continue\r\n\r\n # --- 3a. 
Calculate Stats ---\r\n if res_key == stats_res_key and stats_target_dim:\r\n log.debug(f\"Calculating stats for {map_type} using {res_key} image...\")\r\n stats = _calculate_image_stats(img_resized) # Use the already resized image\r\n if stats: self.metadata[\"image_stats_1k\"][map_type] = stats\r\n else: log.warning(f\"Stats calculation failed for {map_type} at {res_key}.\")\r\n # Calculate aspect change string on the lowest processed resolution, only once per asset\r\n lowest_res_key = min(resolutions, key=resolutions.get)\r\n if self.metadata[\"aspect_ratio_change_string\"] == \"N/A\" and res_key == lowest_res_key:\r\n try:\r\n aspect_string = self._normalize_aspect_ratio_change(orig_w, orig_h, target_w, target_h)\r\n self.metadata[\"aspect_ratio_change_string\"] = aspect_string\r\n log.debug(f\"Stored aspect ratio change string using lowest res '{res_key}': '{aspect_string}'\")\r\n except Exception as aspect_err:\r\n log.error(f\"Failed to calculate aspect ratio change string using lowest res '{res_key}': {aspect_err}\", exc_info=True)\r\n self.metadata[\"aspect_ratio_change_string\"] = \"Error\" # Indicate calculation failure\r\n\r\n # --- 3b. Determine Output Bit Depth & Format ---\r\n bit_depth_rule = self.config.get_bit_depth_rule(map_type)\r\n current_dtype = img_resized.dtype # Dtype after resize\r\n output_dtype_target, output_bit_depth = None, 8 # Defaults\r\n if bit_depth_rule == 'force_8bit': output_dtype_target, output_bit_depth = np.uint8, 8\r\n elif bit_depth_rule == 'respect':\r\n if source_dtype == np.uint16: output_dtype_target, output_bit_depth = np.uint16, 16\r\n elif current_dtype in [np.float16, np.float32]: output_dtype_target, output_bit_depth = np.uint16, 16 # Respect float source as 16bit uint for non-EXR\r\n else: output_dtype_target, output_bit_depth = np.uint8, 8 # Default respect to 8bit\r\n else: output_dtype_target, output_bit_depth = np.uint8, 8 # Fallback\r\n\r\n # --- 3c. 
Determine Output Format based on Input, Rules & Threshold ---\r\n output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n fmt_8bit_config = self.config.get_8bit_output_format() # Usually 'png'\r\n threshold = self.config.resolution_threshold_for_jpg # Get threshold value\r\n force_lossless = map_type in self.config.force_lossless_map_types\r\n\r\n if force_lossless:\r\n log.debug(f\"Format forced to lossless for map type '{map_type}'.\")\r\n if output_bit_depth == 16:\r\n output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n # Add EXR type flags if needed, e.g., HALF for 16-bit float\r\n save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n # Add compression later if desired, e.g. cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_ZIP\r\n else: # Assume png or other lossless 16-bit format\r\n # Use fallback if primary isn't suitable or fails? For now, assume primary is lossless.\r\n # If primary_fmt_16 is not 'png', maybe default to fallback_fmt_16?\r\n if output_format != \"png\":\r\n log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless. 
Using fallback '{fallback_fmt_16}'.\")\r\n output_format = fallback_fmt_16 # Ensure it's a known lossless like png/tif\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n # Add params for other lossless like TIF if supported\r\n else: # 8-bit lossless\r\n output_format = fmt_8bit_config # Usually 'png'\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n else:\r\n log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. Forced lossless for '{map_type}' will use PNG instead.\")\r\n output_format = \"png\"\r\n output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n\r\n # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n elif output_bit_depth == 8 and target_dim >= threshold:\r\n output_format = 'jpg'\r\n output_ext = '.jpg'\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} due to resolution threshold ({target_dim} >= {threshold}).\")\r\n # --- Else: Apply Input/Rule-Based Logic ---\r\n else:\r\n # Apply force_8bit rule (if not overridden by threshold)\r\n if bit_depth_rule == 'force_8bit':\r\n output_format = 'png' # Force to PNG as per clarification\r\n output_ext = '.png'\r\n # output_bit_depth is already 8, output_dtype_target is already uint8\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n log.debug(f\"Format forced to PNG (8-bit) due to 'force_8bit' rule for 
{map_type} (threshold not met).\")\r\n # Handle specific input extensions if not forced to 8bit PNG\r\n elif original_extension == '.jpg' and output_bit_depth == 8:\r\n output_format = 'jpg'\r\n output_ext = '.jpg'\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n log.debug(f\"Using JPG format (Quality: {jpg_quality}) for {map_type} at {res_key} based on JPG input (threshold not met).\")\r\n elif original_extension == '.tif':\r\n if output_bit_depth == 16:\r\n output_format = primary_fmt_16\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) # Add compression later\r\n log.debug(f\"Using EXR format for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n elif output_format == \"png\":\r\n output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (16-bit target, PNG config).\")\r\n else: # Fallback for 16-bit from TIF\r\n output_format = fallback_fmt_16\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n log.debug(f\"Using fallback format '{output_format}' for {map_type} at {res_key} based on TIF input (16-bit target).\")\r\n else: # output_bit_depth == 8 from TIF input (and below threshold)\r\n output_format = 'png'\r\n output_ext = '.png'\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n log.debug(f\"Using PNG format for {map_type} at {res_key} based on TIF input (8-bit target, threshold not met).\")\r\n # Handle other inputs (e.g., PNG) or 
fallbacks\r\n else:\r\n if output_bit_depth == 16:\r\n output_format = primary_fmt_16\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n elif output_format == \"png\":\r\n output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n else: # Fallback for 16-bit\r\n output_format = fallback_fmt_16\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n log.debug(f\"Using configured 16-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}).\")\r\n else: # 8-bit output (and below threshold)\r\n output_format = fmt_8bit_config # Use configured 8-bit format\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n elif output_format == \"jpg\":\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n log.debug(f\"Using configured 8-bit format '{output_format}' for {map_type} at {res_key} (Input: {original_extension}, threshold not met).\")\r\n\r\n img_to_save = img_resized.copy() # Work on copy for dtype conversion\r\n # --- Apply Dtype Conversion ---\r\n if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:\r\n if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)\r\n elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:\r\n if 
img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257\r\n elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n if needs_float16 and img_to_save.dtype != np.float16:\r\n if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)\r\n\r\n # --- 3d. Construct Filename & Save ---\r\n filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n output_path_temp = self.temp_dir / filename\r\n log.debug(f\"Saving: {output_path_temp.name} (Format: {output_format}, Dtype: {img_to_save.dtype})\")\r\n saved_successfully, actual_format_saved = False, output_format\r\n try: cv2.imwrite(str(output_path_temp), img_to_save, save_params); saved_successfully = True\r\n except Exception as save_err:\r\n log.error(f\"Save failed ({output_format}): {save_err}\")\r\n # --- Try Fallback ---\r\n if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt != output_format:\r\n log.warning(f\"Attempting fallback: {fallback_fmt}\")\r\n actual_format_saved = fallback_fmt; output_ext = f\".{fallback_fmt}\"; # Adjust format/ext\r\n filename = target_pattern.format(base_name=base_name, map_type=map_type, resolution=res_key, ext=output_ext.lstrip('.'))\r\n output_path_temp = self.temp_dir / filename\r\n save_params_fallback = [] # Reset params for fallback\r\n img_fallback = None; target_fallback_dtype = np.uint16\r\n if fallback_fmt == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, 6])\r\n elif fallback_fmt == \"tif\": pass # Default TIF params\r\n\r\n # Convert EXR source (float16) to uint16 for PNG/TIF fallback\r\n #if img_to_save.dtype == 
np.float16: img_fallback = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(target_fallback_dtype)\r\n if img_to_save.dtype == np.float16 or img_to_save.dtype == np.float32:\r\n # <<< START MODIFICATION HERE >>>\r\n # Check for NaN/Inf before conversion\r\n if np.any(np.isnan(img_to_save)) or np.any(np.isinf(img_to_save)):\r\n log.error(f\"Invalid float values (NaN/Inf) detected in {map_type} ({res_key}) before fallback save. Skipping resolution.\")\r\n continue # Skip fallback if data is bad\r\n\r\n # Clip *after* scaling for uint16 conversion robustness\r\n img_scaled = img_to_save * 65535.0\r\n img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n # <<< END MODIFICATION HERE >>>\r\n elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already correct type\r\n else: log.error(f\"Cannot convert {img_to_save.dtype} for fallback.\"); continue # Skip fallback\r\n\r\n try: cv2.imwrite(str(output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved {map_type} ({res_key}) using fallback {fallback_fmt}\")\r\n except Exception as fallback_err: log.error(f\"Fallback save failed: {fallback_err}\", exc_info=True)\r\n\r\n # --- 3e. 
Store Result ---\r\n if saved_successfully:\r\n self.processed_maps_details[map_type][res_key] = {\r\n \"path\": output_path_temp.relative_to(self.temp_dir), \"resolution\": res_key,\r\n \"width\": target_w, \"height\": target_h, \"bit_depth\": output_bit_depth,\r\n \"format\": actual_format_saved\r\n }\r\n map_details[\"output_format\"] = actual_format_saved # Update overall map detail\r\n\r\n except Exception as map_proc_err:\r\n log.error(f\"Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}\", exc_info=True)\r\n self.processed_maps_details.setdefault(map_type, {})['error'] = str(map_proc_err)\r\n\r\n self.metadata[\"map_details\"][map_type] = map_details # Store details for this map type\r\n\r\n # --- Final Metadata Updates ---\r\n processed_map_types = set(k for k, v in self.processed_maps_details.items() if 'error' not in v) # Only count successful\r\n self.metadata[\"maps_present\"] = sorted(list(processed_map_types))\r\n features = set()\r\n for map_type, details in self.metadata[\"map_details\"].items():\r\n if map_type in [\"SSS\", \"FUZZ\", \"MASK\"]: features.add(map_type)\r\n if details.get(\"derived_from_gloss\"): features.add(\"InvertedGloss\")\r\n res_details = self.processed_maps_details.get(map_type, {})\r\n if any(res_info.get(\"bit_depth\") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f\"16bit_{map_type}\")\r\n self.metadata[\"shader_features\"] = sorted(list(features))\r\n log.debug(f\"Determined shader features: {self.metadata['shader_features']}\")\r\n log.info(\"Finished processing all map files.\")\r\n\r\n\r\n #log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n\r\n def _merge_maps(self):\r\n \"\"\"Merges channels from different maps based on rules in configuration.\"\"\"\r\n # ... 
(initial checks and getting merge_rules) ...\r\n if not self.temp_dir:\r\n raise AssetProcessingError(\"Workspace not setup.\")\r\n\r\n\r\n # <<< FIX: Get merge rules from the configuration object >>>\r\n\r\n merge_rules = self.config.map_merge_rules\r\n\r\n # <<< END FIX >>>\r\n log.info(f\"Applying {len(merge_rules)} map merging rule(s)...\")\r\n\r\n for rule_index, rule in enumerate(merge_rules):\r\n # <<< FIX: Assign variables *before* using them >>>\r\n output_map_type = rule.get(\"output_map_type\")\r\n inputs_mapping = rule.get(\"inputs\")\r\n defaults = rule.get(\"defaults\", {})\r\n rule_bit_depth = rule.get(\"output_bit_depth\", \"respect_inputs\")\r\n\r\n # <<< FIX: Check if essential rule keys exist *after* assignment >>>\r\n if not output_map_type or not inputs_mapping:\r\n log.warning(f\"Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs' in rule definition. Rule data: {rule}\")\r\n continue # Skip to the next rule in merge_rules\r\n\r\n # Now it's safe to use output_map_type in the log statement\r\n log.info(f\"-- Applying merge rule for '{output_map_type}' --\")\r\n # <<< END FIX >>>\r\n\r\n self.merged_maps_details.setdefault(output_map_type, {})\r\n\r\n # --- Determine required inputs and their common resolutions ---\r\n required_input_types = set(inputs_mapping.values()) # e.g., {'NRM', 'ROUGH'}\r\n if not required_input_types:\r\n log.warning(f\"Skipping merge rule '{output_map_type}': No input map types defined in 'inputs'.\")\r\n continue\r\n\r\n possible_resolutions_per_input = []\r\n for input_type in required_input_types:\r\n if input_type in self.processed_maps_details:\r\n # Get resolution keys where processing didn't error\r\n res_keys = {res for res, details in self.processed_maps_details[input_type].items() if isinstance(details, dict) and 'error' not in details}\r\n if not res_keys:\r\n log.warning(f\"Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed 
resolutions.\")\r\n possible_resolutions_per_input = [] # Cannot proceed if any input is missing all resolutions\r\n break\r\n possible_resolutions_per_input.append(res_keys)\r\n else:\r\n log.warning(f\"Required input map type '{input_type}' for merge rule '{output_map_type}' was not processed.\")\r\n possible_resolutions_per_input = [] # Cannot proceed if any input type is missing\r\n break\r\n\r\n if not possible_resolutions_per_input:\r\n log.warning(f\"Cannot perform merge for '{output_map_type}' due to missing inputs or resolutions. Skipping rule.\")\r\n continue\r\n\r\n # Find the intersection of resolution keys across all required inputs\r\n common_resolutions = set.intersection(*possible_resolutions_per_input)\r\n\r\n if not common_resolutions:\r\n log.warning(f\"No common resolutions found among required inputs {required_input_types} for merge rule '{output_map_type}'. Skipping rule.\")\r\n continue\r\n log.debug(f\"Found common resolutions for '{output_map_type}': {common_resolutions}\")\r\n # --- End Common Resolution Logic ---\r\n\r\n\r\n # <<< LOOP THROUGH COMMON RESOLUTIONS >>>\r\n # Use the actual common_resolutions found\r\n res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions}\r\n if not res_order:\r\n log.warning(f\"Common resolutions {common_resolutions} do not match any target resolutions in config. 
Skipping merge for '{output_map_type}'.\")\r\n continue\r\n\r\n # Sort resolutions to process (optional, but nice for logs)\r\n sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True)\r\n\r\n # Get target pattern from config for filename formatting\r\n target_pattern = self.config.target_filename_pattern\r\n\r\n for current_res_key in sorted_res_keys:\r\n log.debug(f\"Merging '{output_map_type}' for resolution: {current_res_key}\")\r\n try:\r\n loaded_inputs = {}\r\n input_bit_depths = set()\r\n input_original_extensions = {} # Store original extensions for this resolution's inputs\r\n\r\n # --- Load required input maps *at this specific resolution* AND get original extensions ---\r\n possible_to_load = True\r\n base_name = self.metadata['asset_name']\r\n target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B']\r\n\r\n for map_type in required_input_types:\r\n res_details = self.processed_maps_details.get(map_type, {}).get(current_res_key)\r\n if not res_details or 'path' not in res_details:\r\n log.warning(f\"Input map '{map_type}' missing details or path for resolution '{current_res_key}'. 
Cannot merge for this resolution.\")\r\n possible_to_load = False; break\r\n\r\n # Find original extension from classified data\r\n original_ext = '.png' # Default\r\n found_original = False\r\n for classified_map in self.classified_files[\"maps\"]:\r\n # Match based on the base map type (e.g., NRM matches NRM-1)\r\n if classified_map['map_type'].startswith(map_type):\r\n # Check if the processed path matches (to handle multiple variants if needed, though less likely here)\r\n # This assumes processed_maps_details path is relative to temp_dir\r\n processed_path_str = str(res_details['path'])\r\n classified_path_str = str(classified_map['source_path']) # This is the original source path relative to temp_dir root\r\n # A more robust check might involve comparing filenames if paths differ due to processing steps\r\n # For now, rely on the base map type match and grab the first extension found\r\n original_ext = classified_map.get('original_extension', '.png')\r\n found_original = True\r\n break # Found the first match for this map_type\r\n if not found_original:\r\n log.warning(f\"Could not reliably find original extension for merge input '{map_type}'. 
Defaulting to '.png'.\")\r\n\r\n input_original_extensions[map_type] = original_ext\r\n\r\n # Load the image\r\n input_file_path = self.temp_dir / res_details['path']\r\n read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED\r\n log.debug(f\"Loading merge input {input_file_path.name} ({map_type}, OrigExt: {original_ext}) with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}\")\r\n img = cv2.imread(str(input_file_path), read_flag)\r\n if img is None:\r\n raise AssetProcessingError(f\"Failed to load merge input {input_file_path.name} with flag {read_flag}\")\r\n loaded_inputs[map_type] = img\r\n input_bit_depths.add(res_details.get('bit_depth', 8))\r\n\r\n if not possible_to_load: continue # Skip this resolution if inputs missing\r\n\r\n # --- Determine dimensions and target_dim for threshold check ---\r\n first_map_type = next(iter(required_input_types)) # Get one map type to read dimensions\r\n h, w = loaded_inputs[first_map_type].shape[:2]\r\n # Get target_dim from the details of the first loaded input for this resolution\r\n first_res_details = self.processed_maps_details.get(first_map_type, {}).get(current_res_key)\r\n target_dim = max(first_res_details.get('width', 0), first_res_details.get('height', 0)) if first_res_details else 0\r\n num_target_channels = len(target_channels)\r\n\r\n # <<< Re-calculate output_bit_depth based on THIS resolution's inputs >>>\r\n max_input_bpc = max(input_bit_depths) if input_bit_depths else 8\r\n output_bit_depth = 8\r\n if rule_bit_depth == 'force_16bit' or (rule_bit_depth == 'respect_inputs' and max_input_bpc == 16):\r\n output_bit_depth = 16\r\n log.debug(f\"Target bit depth for '{output_map_type}' ({current_res_key}): {output_bit_depth}-bit\")\r\n\r\n # Prepare channels (float32) (same logic as before)\r\n merged_channels_float32 = []\r\n # Use the defined target_channels list\r\n for target_channel in target_channels: # Iterate R, G, B (or 
specified) order\r\n source_map_type = inputs_mapping.get(target_channel)\r\n channel_data_float32 = None\r\n if source_map_type and source_map_type in loaded_inputs:\r\n # ... [Extract channel data as float32 as before] ...\r\n img_input = loaded_inputs[source_map_type]\r\n if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0\r\n elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0\r\n else: img_float = img_input.astype(np.float32)\r\n num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1\r\n if num_source_channels >= 3: # BGR Source\r\n if target_channel == 'R': channel_data_float32 = img_float[:, :, 2]\r\n elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1]\r\n elif target_channel == 'B': channel_data_float32 = img_float[:, :, 0]\r\n elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3]\r\n elif num_source_channels == 1 or len(img_float.shape) == 2: # Grayscale source\r\n channel_data_float32 = img_float.reshape(h, w)\r\n if channel_data_float32 is None: # Use default if needed\r\n default_val = defaults.get(target_channel)\r\n if default_val is None: raise AssetProcessingError(f\"Missing input/default Ch '{target_channel}' rule '{output_map_type}'.\")\r\n channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32)\r\n merged_channels_float32.append(channel_data_float32)\r\n\r\n\r\n # Merge channels (same as before)\r\n if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: raise AssetProcessingError(\"Channel count mismatch.\")\r\n merged_image_float32 = cv2.merge(merged_channels_float32)\r\n\r\n # Final Data Type Conversion (based on recalculated output_bit_depth)\r\n img_final_merged = None\r\n if output_bit_depth == 16: img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 65535.0).astype(np.uint16)\r\n else: img_final_merged = (np.clip(merged_image_float32, 
0.0, 1.0) * 255.0).astype(np.uint8)\r\n\r\n # --- Determine Output Format based on Threshold, Input Hierarchy & Rules ---\r\n output_format, output_ext, save_params, needs_float16 = \"\", \"\", [], False\r\n primary_fmt_16, fallback_fmt_16 = self.config.get_16bit_output_formats()\r\n fmt_8bit_config = self.config.get_8bit_output_format()\r\n threshold = self.config.resolution_threshold_for_jpg\r\n force_lossless = output_map_type in self.config.force_lossless_map_types\r\n\r\n if force_lossless:\r\n log.debug(f\"Format forced to lossless for merged map type '{output_map_type}'.\")\r\n if output_bit_depth == 16:\r\n output_format = primary_fmt_16 # e.g., 'exr' or 'png'\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n else: # Assume png or other lossless 16-bit format\r\n if output_format != \"png\":\r\n log.warning(f\"Primary 16-bit format '{output_format}' not PNG for forced lossless merged map. Using fallback '{fallback_fmt_16}'.\")\r\n output_format = fallback_fmt_16\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n else: # 8-bit lossless\r\n output_format = fmt_8bit_config # Usually 'png'\r\n output_ext = f\".{output_format}\"\r\n if output_format == \"png\":\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n else:\r\n log.warning(f\"Configured 8-bit format '{output_format}' is not PNG. 
Forced lossless for merged '{output_map_type}' will use PNG instead.\")\r\n output_format = \"png\"\r\n output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_level] # Reset params\r\n\r\n # --- Check JPG Threshold Override (only if not force_lossless) ---\r\n elif output_bit_depth == 8 and target_dim >= threshold:\r\n output_format = 'jpg'\r\n output_ext = '.jpg'\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n log.debug(f\"Using JPG format for merged '{output_map_type}' ({current_res_key}) due to resolution threshold ({target_dim} >= {threshold}).\")\r\n # --- Else: Apply Hierarchy/Rule-Based Logic ---\r\n else:\r\n involved_extensions = set(input_original_extensions.values())\r\n log.debug(f\"Original extensions involved in merge for '{output_map_type}' ({current_res_key}): {involved_extensions}\")\r\n # Hierarchy: EXR > TIF > PNG > JPG\r\n highest_format_str = 'jpg' # Start lowest\r\n if '.exr' in involved_extensions: highest_format_str = 'exr'\r\n elif '.tif' in involved_extensions: highest_format_str = 'tif'\r\n elif '.png' in involved_extensions: highest_format_str = 'png'\r\n\r\n # Determine final output format based on hierarchy and target bit depth\r\n final_output_format = highest_format_str\r\n\r\n if highest_format_str == 'tif':\r\n if output_bit_depth == 16:\r\n final_output_format = primary_fmt_16 # Use configured 16-bit pref (EXR/PNG)\r\n log.debug(f\"Highest input TIF, target 16-bit -> Final format: {final_output_format} (threshold not met)\")\r\n else: # 8-bit target\r\n final_output_format = 'png' # Force TIF input to PNG for 8-bit merge output\r\n log.debug(\"Highest input TIF, target 8-bit -> Final format: PNG (threshold not met)\")\r\n else:\r\n log.debug(f\"Highest input format '{highest_format_str}' determines final format: {final_output_format} (threshold not met)\")\r\n\r\n # Set 
format/params based on the determined final_output_format\r\n output_format = final_output_format\r\n if output_format.startswith(\"exr\"):\r\n output_ext, needs_float16 = \".exr\", True\r\n save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])\r\n elif output_format == \"png\":\r\n output_ext = \".png\"\r\n png_level = self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)\r\n save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_level])\r\n elif output_format == \"jpg\": # Should only happen if highest input was JPG and below threshold\r\n output_ext = \".jpg\"\r\n jpg_quality = self.config.jpg_quality\r\n save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])\r\n else:\r\n log.error(f\"Unsupported final output format '{output_format}' determined for merged map '{output_map_type}'. Skipping save.\")\r\n continue\r\n\r\n # --- JPG 8-bit Check (applies regardless of threshold logic) ---\r\n if output_format == \"jpg\" and output_bit_depth == 16:\r\n log.warning(f\"Output format is JPG, but merge rule resulted in 16-bit target. 
Forcing merged output to 8-bit for JPG save.\")\r\n img_final_merged = (np.clip(merged_image_float32, 0.0, 1.0) * 255.0).astype(np.uint8)\r\n output_bit_depth = 8 # Correct the recorded bit depth\r\n\r\n # --- Save Merged Map ---\r\n image_to_save = img_final_merged # Already converted to target bit depth (or forced to 8bit for JPG)\r\n\r\n # Apply float16 conversion if needed for EXR\r\n if needs_float16 and image_to_save.dtype != np.float16:\r\n if image_to_save.dtype == np.uint16: image_to_save = (image_to_save.astype(np.float32) / 65535.0).astype(np.float16)\r\n elif image_to_save.dtype == np.uint8: image_to_save = (image_to_save.astype(np.float32) / 255.0).astype(np.float16)\r\n else: log.warning(f\"Cannot convert merge dtype {image_to_save.dtype} to float16 for EXR save\"); continue\r\n\r\n merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n merged_output_path_temp = self.temp_dir / merged_filename\r\n log.debug(f\"Saving merged map: {merged_output_path_temp.name} (Format: {output_format}, Dtype: {image_to_save.dtype})\")\r\n\r\n # --- Add save logic with fallback here ---\r\n saved_successfully = False\r\n actual_format_saved = output_format\r\n try:\r\n cv2.imwrite(str(merged_output_path_temp), image_to_save, save_params)\r\n log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}, {output_bit_depth}-bit) as {output_format}\")\r\n saved_successfully = True\r\n except Exception as save_err:\r\n log.error(f\"Save failed for merged '{output_map_type}' ({current_res_key}) as {output_format}: {save_err}\")\r\n # Try Fallback for merged map (similar to _process_maps fallback)\r\n if output_bit_depth == 16 and output_format.startswith(\"exr\") and fallback_fmt_16 != output_format:\r\n log.warning(f\"Attempting fallback format for merged map: {fallback_fmt_16}\")\r\n # ... 
[ Implement fallback save logic here, converting image_to_save if needed ] ...\r\n actual_format_saved = fallback_fmt_16\r\n output_ext = f\".{fallback_fmt_16}\"\r\n merged_filename = target_pattern.format(base_name=base_name, map_type=output_map_type, resolution=current_res_key, ext=output_ext.lstrip('.'))\r\n merged_output_path_temp = self.temp_dir / merged_filename\r\n save_params_fallback = []\r\n img_fallback = None\r\n target_fallback_dtype = np.uint16\r\n\r\n if fallback_fmt_16 == \"png\": save_params_fallback.extend([cv2.IMWRITE_PNG_COMPRESSION, self.config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)])\r\n elif fallback_fmt_16 == \"tif\": pass # Default TIF params\r\n\r\n if image_to_save.dtype == np.float16:\r\n if np.any(np.isnan(image_to_save)) or np.any(np.isinf(image_to_save)): log.error(\"NaN/Inf in merged float16 data, cannot fallback.\"); continue\r\n img_scaled = image_to_save * 65535.0\r\n img_fallback = np.clip(img_scaled, 0, 65535).astype(target_fallback_dtype)\r\n elif image_to_save.dtype == target_fallback_dtype: img_fallback = image_to_save\r\n else: log.error(f\"Cannot convert merged dtype {image_to_save.dtype} for fallback.\"); continue\r\n\r\n try: cv2.imwrite(str(merged_output_path_temp), img_fallback, save_params_fallback); saved_successfully = True; log.info(f\" > Saved Merged Map '{output_map_type}' ({current_res_key}) using fallback {fallback_fmt_16}\")\r\n except Exception as fallback_err: log.error(f\"Fallback save failed for merged map: {fallback_err}\", exc_info=True)\r\n # --- End Fallback Logic ---\r\n\r\n # Record details if save successful\r\n if saved_successfully:\r\n self.merged_maps_details[output_map_type][current_res_key] = {\r\n \"path\": merged_output_path_temp.relative_to(self.temp_dir), \"resolution\": current_res_key,\r\n \"width\": w, \"height\": h, \"bit_depth\": output_bit_depth, \"format\": actual_format_saved\r\n }\r\n if output_map_type not in self.metadata[\"merged_maps\"]: 
self.metadata[\"merged_maps\"].append(output_map_type)\r\n\r\n except Exception as merge_res_err:\r\n log.error(f\"Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}\", exc_info=True)\r\n self.merged_maps_details.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err)\r\n\r\n log.info(\"Finished applying map merging rules.\")\r\n\r\n\r\n def _generate_metadata_file(self):\r\n \"\"\"Gathers all collected metadata and writes it to a JSON file.\"\"\"\r\n # ... (Implementation from Response #49) ...\r\n if not self.temp_dir: raise AssetProcessingError(\"Workspace not setup.\")\r\n if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\":\r\n log.warning(\"Asset name unknown, metadata may be incomplete.\")\r\n\r\n log.info(f\"Generating metadata file for '{self.metadata['asset_name']}'...\")\r\n final_metadata = self.metadata.copy()\r\n\r\n final_metadata[\"processed_map_resolutions\"] = {}\r\n for map_type, res_dict in self.processed_maps_details.items():\r\n keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n if keys: final_metadata[\"processed_map_resolutions\"][map_type] = sorted(keys) # Basic sort\r\n\r\n final_metadata[\"merged_map_resolutions\"] = {}\r\n for map_type, res_dict in self.merged_maps_details.items():\r\n keys = [res for res, d in res_dict.items() if isinstance(d,dict) and 'error' not in d]\r\n if keys: final_metadata[\"merged_map_resolutions\"][map_type] = sorted(keys)\r\n\r\n # Add processing info\r\n final_metadata[\"_processing_info\"] = {\r\n \"preset_used\": self.config.preset_name,\r\n \"timestamp_utc\": time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\r\n # Optionally add core config details used, carefully\r\n }\r\n\r\n # Sort lists\r\n for key in [\"maps_present\", \"merged_maps\", \"shader_features\", \"source_files_in_extra\"]:\r\n if key in final_metadata and isinstance(final_metadata[key], 
list): final_metadata[key].sort()\r\n\r\n metadata_filename = self.config.metadata_filename\r\n output_path = self.temp_dir / metadata_filename\r\n log.debug(f\"Writing metadata to: {output_path}\")\r\n try:\r\n with open(output_path, 'w', encoding='utf-8') as f:\r\n json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)\r\n log.info(f\"Metadata file '{metadata_filename}' generated successfully.\")\r\n self.metadata_file_path_temp = output_path # Store path for moving\r\n except Exception as e:\r\n raise AssetProcessingError(f\"Failed to write metadata file {output_path}: {e}\") from e\r\n\r\n\r\n def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2):\r\n \"\"\"\r\n Calculates the aspect ratio change string (e.g., \"EVEN\", \"X133\") based on original prototype logic.\r\n Returns the string representation.\r\n \"\"\"\r\n if original_width <= 0 or original_height <= 0:\r\n log.warning(\"Cannot calculate aspect ratio change with zero original dimensions.\")\r\n return \"InvalidInput\"\r\n\r\n # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks)\r\n if resized_width <= 0 or resized_height <= 0:\r\n log.warning(\"Cannot calculate aspect ratio change with zero resized dimensions.\")\r\n return \"InvalidResize\"\r\n\r\n # Original logic from user feedback\r\n width_change_percentage = ((resized_width - original_width) / original_width) * 100\r\n height_change_percentage = ((resized_height - original_height) / original_height) * 100\r\n\r\n normalized_width_change = width_change_percentage / 100\r\n normalized_height_change = height_change_percentage / 100\r\n\r\n normalized_width_change = min(max(normalized_width_change + 1, 0), 2)\r\n normalized_height_change = min(max(normalized_height_change + 1, 0), 2)\r\n\r\n # Handle potential zero division if one dimension change is exactly -100% (normalized to 0)\r\n # If both are 0, aspect ratio is 
maintained. If one is 0, the other dominates.\r\n if normalized_width_change == 0 and normalized_height_change == 0:\r\n closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1\r\n elif normalized_width_change == 0:\r\n closest_value_to_one = abs(normalized_height_change)\r\n elif normalized_height_change == 0:\r\n closest_value_to_one = abs(normalized_width_change)\r\n else:\r\n closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))\r\n\r\n # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0\r\n epsilon = 1e-9\r\n scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one\r\n\r\n scaled_normalized_width_change = scale_factor * normalized_width_change\r\n scaled_normalized_height_change = scale_factor * normalized_height_change\r\n\r\n output_width = round(scaled_normalized_width_change, decimals)\r\n output_height = round(scaled_normalized_height_change, decimals)\r\n\r\n # Convert to int if exactly 1.0 after rounding\r\n if abs(output_width - 1.0) < epsilon: output_width = 1\r\n if abs(output_height - 1.0) < epsilon: output_height = 1\r\n\r\n # Determine output string\r\n if original_width == original_height or abs(output_width - output_height) < epsilon:\r\n output = \"EVEN\"\r\n elif output_width != 1 and output_height == 1:\r\n output = f\"X{str(output_width).replace('.', '')}\"\r\n elif output_height != 1 and output_width == 1:\r\n output = f\"Y{str(output_height).replace('.', '')}\"\r\n else:\r\n # Both changed relative to each other\r\n output = f\"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}\"\r\n\r\n log.debug(f\"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'\")\r\n return output\r\n\r\n def _sanitize_filename(self, name: str) -> str:\r\n \"\"\"Removes or replaces 
characters invalid for filenames/directory names.\"\"\"\r\n # ... (Implementation from Response #51) ...\r\n if not isinstance(name, str): name = str(name)\r\n name = re.sub(r'[^\\w.\\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot\r\n name = re.sub(r'_+', '_', name)\r\n name = name.strip('_')\r\n if not name: name = \"invalid_name\"\r\n return name\r\n\r\n def _organize_output_files(self):\r\n \"\"\"Moves processed files from temp dir to the final output structure.\"\"\"\r\n # ... (Implementation from Response #51) ...\r\n if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError(\"Temp workspace missing.\")\r\n if not self.metadata.get(\"asset_name\") or self.metadata[\"asset_name\"] == \"UnknownAssetName\": raise AssetProcessingError(\"Asset name missing.\")\r\n if not self.metadata.get(\"supplier_name\"): raise AssetProcessingError(\"Supplier name missing.\")\r\n\r\n supplier_sanitized = self._sanitize_filename(self.metadata[\"supplier_name\"])\r\n asset_name_sanitized = self._sanitize_filename(self.metadata[\"asset_name\"])\r\n final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized\r\n log.info(f\"Organizing output files into: {final_dir}\")\r\n try:\r\n # Check if overwriting is allowed before potentially deleting existing dir\r\n if final_dir.exists() and self.overwrite:\r\n log.warning(f\"Output directory exists and overwrite is True: {final_dir}. Removing existing directory.\")\r\n try:\r\n shutil.rmtree(final_dir)\r\n except Exception as rm_err:\r\n raise AssetProcessingError(f\"Failed to remove existing output directory {final_dir} during overwrite: {rm_err}\") from rm_err\r\n elif final_dir.exists() and not self.overwrite:\r\n # This case should ideally be caught by the skip logic earlier,\r\n # but adding a warning here as a safeguard.\r\n log.warning(f\"Output directory exists: {final_dir}. 
Overwriting (unexpected - should have been skipped).\")\r\n\r\n final_dir.mkdir(parents=True, exist_ok=True) # Create after potential removal\r\n except Exception as e:\r\n # Catch potential errors during mkdir if rmtree failed partially?\r\n if not isinstance(e, AssetProcessingError): # Avoid wrapping already specific error\r\n raise AssetProcessingError(f\"Failed to create final dir {final_dir}: {e}\") from e\r\n else:\r\n raise # Re-raise the AssetProcessingError from rmtree\r\n\r\n def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):\r\n if not src_rel_path: log.warning(f\"Missing src path for {file_desc}.\"); return\r\n source_abs = self.temp_dir / src_rel_path\r\n dest_abs = dest_dir / src_rel_path.name\r\n try:\r\n if source_abs.exists():\r\n log.debug(f\"Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/\")\r\n dest_dir.mkdir(parents=True, exist_ok=True) # Ensure sub-dir exists (for Extra/)\r\n shutil.move(str(source_abs), str(dest_abs))\r\n else: log.warning(f\"Source file missing for {file_desc}: {source_abs}\")\r\n except Exception as e: log.error(f\"Failed moving {file_desc} '{source_abs.name}': {e}\", exc_info=True)\r\n\r\n # Move maps, merged maps, models, metadata\r\n for details_dict in [self.processed_maps_details, self.merged_maps_details]:\r\n for map_type, res_dict in details_dict.items():\r\n if 'error' in res_dict: continue\r\n for res_key, details in res_dict.items():\r\n if isinstance(details, dict) and 'path' in details: _safe_move(details['path'], final_dir, f\"{map_type} ({res_key})\")\r\n for model_info in self.classified_files.get('models', []): _safe_move(model_info.get('source_path'), final_dir, \"model file\")\r\n if self.metadata_file_path_temp: _safe_move(self.metadata_file_path_temp.relative_to(self.temp_dir), final_dir, \"metadata file\")\r\n\r\n # Move extra/ignored files\r\n extra_subdir_name = self.config.extra_files_subdir\r\n extra_dir = final_dir / 
extra_subdir_name\r\n files_to_move_extra = self.classified_files.get('extra', []) + self.classified_files.get('ignored', [])\r\n if files_to_move_extra:\r\n log.debug(f\"Moving {len(files_to_move_extra)} files to '{extra_subdir_name}/'...\")\r\n try:\r\n extra_dir.mkdir(exist_ok=True)\r\n for file_info in files_to_move_extra: _safe_move(file_info.get('source_path'), extra_dir, f\"extra file ({file_info.get('reason', 'Unknown')})\")\r\n except Exception as e: log.error(f\"Failed creating/moving to Extra dir {extra_dir}: {e}\", exc_info=True)\r\n\r\n log.info(f\"Finished organizing output for '{asset_name_sanitized}'.\")\r\n\r\n\r\n def _cleanup_workspace(self):\r\n \"\"\"Removes the temporary workspace directory if it exists.\"\"\"\r\n # ... (Implementation from Response #45) ...\r\n if self.temp_dir and self.temp_dir.exists():\r\n try:\r\n log.debug(f\"Cleaning up temporary workspace: {self.temp_dir}\")\r\n shutil.rmtree(self.temp_dir)\r\n self.temp_dir = None\r\n log.debug(\"Temporary workspace cleaned up successfully.\")\r\n except Exception as e:\r\n log.error(f\"Failed to remove temporary workspace {self.temp_dir}: {e}\", exc_info=True)\r\n\r\n # --- Prediction Method ---\r\n def predict_output_structure(self) -> tuple[str | None, str | None, dict[str, str] | None] | None:\r\n \"\"\"\r\n Predicts the final output structure (supplier, asset name) and attempts\r\n to predict output filenames for potential map files based on naming conventions.\r\n Does not perform full processing or image loading.\r\n\r\n Returns:\r\n tuple[str | None, str | None, dict[str, str] | None]:\r\n (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict)\r\n where file_predictions_dict maps input filename -> predicted output filename.\r\n Returns None if prediction fails critically.\r\n \"\"\"\r\n log.debug(f\"Predicting output structure and filenames for: {self.input_path.name}\")\r\n try:\r\n # 1. 
Get Supplier Name\r\n supplier_name = self.config.supplier_name\r\n if not supplier_name:\r\n log.warning(\"Supplier name not found in configuration during prediction.\")\r\n return None\r\n\r\n # 2. List Input Filenames/Stems\r\n candidate_stems = set() # Use set for unique stems\r\n filenames = []\r\n if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip':\r\n try:\r\n with zipfile.ZipFile(self.input_path, 'r') as zip_ref:\r\n # Get only filenames, ignore directories\r\n filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')]\r\n except zipfile.BadZipFile:\r\n log.error(f\"Bad ZIP file during prediction: {self.input_path.name}\")\r\n return None\r\n except Exception as zip_err:\r\n log.error(f\"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}\")\r\n return None # Cannot proceed if we can't list files\r\n elif self.input_path.is_dir():\r\n try:\r\n for item in self.input_path.iterdir():\r\n if item.is_file(): # Only consider files directly in the folder for prediction simplicity\r\n filenames.append(item.name)\r\n # Note: Not walking subdirs for prediction to keep it fast\r\n except Exception as dir_err:\r\n log.error(f\"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}\")\r\n return None\r\n\r\n if not filenames:\r\n log.warning(f\"No files found in input for prediction: {self.input_path.name}\")\r\n return None # Return None if no files found\r\n\r\n # 3. 
Lightweight Classification for Stems and Potential Maps\r\n map_type_mapping = self.config.map_type_mapping\r\n model_patterns = self.config.asset_category_rules.get('model_patterns', [])\r\n separator = self.config.source_naming_separator\r\n processed_filenames = set() # Track full filenames processed\r\n potential_map_files = {} # Store fname -> potential map_type\r\n\r\n for fname in filenames:\r\n if fname in processed_filenames: continue\r\n\r\n fstem = Path(fname).stem\r\n fstem_lower = fstem.lower()\r\n name_parts = fstem_lower.split(separator)\r\n\r\n # Check map rules first\r\n map_matched = False\r\n for mapping_rule in map_type_mapping:\r\n source_keywords, standard_map_type = mapping_rule\r\n if standard_map_type not in self.config.standard_map_types: continue\r\n for keyword in source_keywords:\r\n kw_lower = keyword.lower().strip('*')\r\n if kw_lower in name_parts:\r\n is_exact_match = any(part == kw_lower for part in name_parts)\r\n if is_exact_match:\r\n candidate_stems.add(fstem) # Add unique stem\r\n potential_map_files[fname] = standard_map_type # Store potential type\r\n processed_filenames.add(fname)\r\n map_matched = True\r\n break # Found keyword match for this rule\r\n if map_matched: break # Found a rule match for this file\r\n if map_matched: continue # Move to next filename if identified as map\r\n\r\n # Check model patterns if not a map\r\n for pattern in model_patterns:\r\n if fnmatch(fname.lower(), pattern.lower()):\r\n candidate_stems.add(fstem) # Still add stem for base name determination\r\n processed_filenames.add(fname)\r\n # Don't add models to potential_map_files\r\n break # Found model match\r\n\r\n # Note: Files matching neither maps nor models are ignored for prediction details\r\n\r\n candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix\r\n if not candidate_stems_list:\r\n log.warning(f\"Prediction: No relevant map/model stems found in {self.input_path.name}. 
Using input name as fallback.\")\r\n # Fallback: Use the input path's name itself if no stems found\r\n base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name\r\n determined_base_name = base_name_fallback\r\n else:\r\n # 4. Replicate _determine_base_metadata logic for base name\r\n determined_base_name = \"UnknownAssetName\"\r\n base_index = self.config.source_naming_indices.get('base_name')\r\n\r\n if isinstance(base_index, int):\r\n potential_base_names = set()\r\n for stem in candidate_stems_list: # Iterate over the list\r\n parts = stem.split(separator)\r\n if len(parts) > base_index:\r\n potential_base_names.add(parts[base_index])\r\n if len(potential_base_names) == 1:\r\n determined_base_name = potential_base_names.pop()\r\n elif len(potential_base_names) > 1:\r\n log.debug(f\"Prediction: Multiple potential base names from index {base_index}, using common prefix.\")\r\n determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n # else: Use common prefix below\r\n\r\n if determined_base_name == \"UnknownAssetName\" or not determined_base_name:\r\n determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here\r\n determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _')\r\n\r\n # 5. Sanitize Names\r\n final_base_name = self._sanitize_filename(determined_base_name or \"UnknownAssetName\")\r\n final_supplier_name = self._sanitize_filename(supplier_name)\r\n\r\n # 6. 
Predict Output Filenames\r\n file_predictions = {}\r\n target_pattern = self.config.target_filename_pattern\r\n # Use highest resolution key as a placeholder for prediction\r\n highest_res_key = \"Res?\" # Fallback\r\n if self.config.image_resolutions:\r\n highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n\r\n for input_fname, map_type in potential_map_files.items():\r\n # Assume PNG for prediction, extension might change based on bit depth rules later\r\n # but this gives a good idea of the renaming.\r\n # A more complex prediction could check bit depth rules.\r\n predicted_ext = \"png\" # Simple assumption for preview\r\n try:\r\n predicted_fname = target_pattern.format(\r\n base_name=final_base_name,\r\n map_type=map_type,\r\n resolution=highest_res_key, # Use placeholder resolution\r\n ext=predicted_ext\r\n )\r\n file_predictions[input_fname] = predicted_fname\r\n except KeyError as fmt_err:\r\n log.warning(f\"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). 
Skipping file prediction.\")\r\n file_predictions[input_fname] = \"[Filename Format Error]\"\r\n\r\n\r\n log.debug(f\"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}\")\r\n return final_supplier_name, final_base_name, file_predictions\r\n\r\n except Exception as e:\r\n log.error(f\"Error during output structure prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n return None\r\n\r\n\r\n # --- New Detailed Prediction Method ---\r\n def get_detailed_file_predictions(self) -> list[dict] | None:\r\n \"\"\"\r\n Performs extraction and classification to provide a detailed list of all\r\n files found within the asset and their predicted status/output name.\r\n Does not perform image processing or file moving.\r\n\r\n Returns:\r\n list[dict] | None: A list of dictionaries, each representing a file:\r\n {'original_path': str, 'predicted_name': str | None, 'status': str, 'details': str | None}\r\n Returns None if a critical error occurs during setup/classification.\r\n \"\"\"\r\n log.info(f\"Getting detailed file predictions for: {self.input_path.name}\")\r\n results = []\r\n asset_base_name = \"UnknownAssetName\" # Fallback\r\n\r\n try:\r\n # --- Perform necessary setup and classification ---\r\n self._setup_workspace()\r\n self._extract_input()\r\n self._inventory_and_classify_files()\r\n self._determine_base_metadata() # Needed for base name prediction\r\n asset_base_name = self.metadata.get(\"asset_name\", asset_base_name)\r\n\r\n # --- Prepare for filename prediction ---\r\n target_pattern = self.config.target_filename_pattern\r\n highest_res_key = \"Res?\" # Placeholder resolution for prediction\r\n if self.config.image_resolutions:\r\n highest_res_key = max(self.config.image_resolutions, key=self.config.image_resolutions.get)\r\n\r\n # --- Process classified files ---\r\n # Maps\r\n for map_info in self.classified_files.get(\"maps\", []):\r\n original_path_str = 
str(map_info.get(\"source_path\", \"UnknownPath\"))\r\n map_type = map_info.get(\"map_type\", \"UnknownType\")\r\n # Predicted name for maps should just be the base asset name\r\n predicted_name_display = asset_base_name\r\n # Concise details\r\n details = f\"[{map_type}]\"\r\n if map_info.get(\"is_16bit_source\"):\r\n details += \" (16-bit)\"\r\n\r\n # Still try to format the full name internally for error checking, but don't display it\r\n try:\r\n predicted_ext = \"png\" # Assumption for format check\r\n _ = target_pattern.format(\r\n base_name=asset_base_name,\r\n map_type=map_type,\r\n resolution=highest_res_key,\r\n ext=predicted_ext\r\n )\r\n except KeyError as fmt_err:\r\n log.warning(f\"Prediction format error for {original_path_str}: {fmt_err}\")\r\n predicted_name_display = \"[Format Error]\" # Show error in name field\r\n details += f\" (Format Key Error: {fmt_err})\"\r\n except Exception as pred_err:\r\n log.warning(f\"Prediction error for {original_path_str}: {pred_err}\")\r\n predicted_name_display = \"[Prediction Error]\" # Show error in name field\r\n details += f\" (Error: {pred_err})\"\r\n\r\n results.append({\r\n \"original_path\": original_path_str,\r\n \"predicted_name\": predicted_name_display, # Use the base name or error\r\n \"status\": \"Mapped\",\r\n \"details\": details # Use concise details\r\n })\r\n\r\n # Models\r\n for model_info in self.classified_files.get(\"models\", []):\r\n original_path_str = str(model_info.get(\"source_path\", \"UnknownPath\"))\r\n results.append({\r\n \"original_path\": original_path_str,\r\n \"predicted_name\": Path(original_path_str).name, # Models usually keep original name\r\n \"status\": \"Model\",\r\n \"details\": \"[Model]\" # Concise detail\r\n })\r\n\r\n # Extra\r\n for extra_info in self.classified_files.get(\"extra\", []):\r\n original_path_str = str(extra_info.get(\"source_path\", \"UnknownPath\"))\r\n reason = extra_info.get('reason', 'Unknown reason')\r\n # Determine status and details based on 
the reason\r\n if reason == 'Unrecognised': # Corrected string check\r\n status = \"Unrecognised\"\r\n details = \"[Unrecognised]\"\r\n else:\r\n status = \"Extra\"\r\n details = f\"Extra ({reason})\" # Show the pattern match reason\r\n\r\n results.append({\r\n \"original_path\": original_path_str,\r\n \"predicted_name\": Path(original_path_str).name, # Extra/Unrecognised files keep original name\r\n \"status\": status,\r\n \"details\": details\r\n })\r\n\r\n # Ignored\r\n for ignored_info in self.classified_files.get(\"ignored\", []):\r\n original_path_str = str(ignored_info.get(\"source_path\", \"UnknownPath\"))\r\n reason = ignored_info.get('reason', 'Unknown reason')\r\n results.append({\r\n \"original_path\": original_path_str,\r\n \"predicted_name\": None, # Ignored files have no output name\r\n \"status\": \"Ignored\",\r\n \"details\": f\"Ignored ({reason})\" # Keep reason for ignored files\r\n })\r\n\r\n log.info(f\"Detailed prediction complete for {self.input_path.name}. Found {len(results)} files.\")\r\n return results\r\n\r\n except (AssetProcessingError, ConfigurationError, Exception) as e:\r\n # Log critical errors during the prediction process\r\n log.error(f\"Critical error during detailed prediction for {self.input_path.name}: {e}\", exc_info=True)\r\n # Optionally add a single error entry to results?\r\n # results.append({\"original_path\": self.input_path.name, \"predicted_name\": None, \"status\": \"Error\", \"details\": str(e)})\r\n # return results # Or return None to indicate failure\r\n return None # Indicate critical failure\r\n finally:\r\n # Ensure cleanup always happens\r\n self._cleanup_workspace()\r\n\r\n\r\n# --- End of AssetProcessor Class ---" } ] }