Commented Code Cleanup

This commit is contained in:
Rusfort 2025-05-01 16:09:02 +02:00
parent a5be50b587
commit 51ff45bd5c
16 changed files with 714 additions and 577 deletions

View File

@ -0,0 +1,3 @@
def invoke(self, context, event):
    """Show the operator's properties dialog instead of executing immediately."""
    # Example: Open a dialog to select materials if not already selected
    window_manager = context.window_manager
    return window_manager.invoke_props_dialog(self)

View File

@ -0,0 +1,6 @@
# --- REMOVED Slots for Old Hierarchy and Rule Editor ---
# @Slot(QModelIndex)
# def _on_hierarchy_item_clicked(self, index: QModelIndex): ...
#
# @Slot(object)
# def _on_rule_updated(self, rule_object): ...

View File

@ -0,0 +1,6 @@
# Slot for prediction results (Updated for new format and coloring) - REMOVED
# @Slot(list)
# def on_prediction_results_ready(self, results: list):
# """Populates the preview table model with detailed prediction results."""
# # This is no longer needed as _on_rule_hierarchy_ready handles data loading for the new model.
# pass

View File

@ -0,0 +1 @@
# REMOVED Placeholder SourceRule creation

View File

@ -0,0 +1,6 @@
# --- REMOVED connections causing thread/handler cleanup ---
# self.prediction_handler.prediction_finished.connect(self.prediction_thread.quit)
# self.prediction_handler.prediction_finished.connect(self.prediction_handler.deleteLater)
# self.prediction_thread.finished.connect(self.prediction_thread.deleteLater)
# self.prediction_thread.finished.connect(self._reset_prediction_thread_references)
# --- END REMOVED ---

View File

@ -0,0 +1,7 @@
# Legacy wiring for the "Disable Detailed Preview" checkbox, preserved after its
# removal from the bottom controls (the toggle moved to the View menu).
# Assumes this ran inside a QMainWindow setup method where
# `bottom_controls_layout` was a local layout object -- TODO confirm against the
# original gui/main_window.py.
self.disable_preview_checkbox = QCheckBox("Disable Detailed Preview") # REMOVED - Moved to View Menu
self.disable_preview_checkbox.setToolTip("If checked, shows only the list of input assets instead of detailed file predictions.")
self.disable_preview_checkbox.setChecked(False) # Default is detailed preview enabled
self.disable_preview_checkbox.toggled.connect(self.update_preview) # Update preview when toggled
bottom_controls_layout.addWidget(self.disable_preview_checkbox)
bottom_controls_layout.addSpacing(20) # Add some space # REMOVED - No longer needed after checkbox removal

View File

@ -0,0 +1,3 @@
# REMOVED Old Preview Model Mode Setting and Table Configuration ---
# The Unified View does not have a simple/detailed mode toggle.
# The Prediction Handler is triggered regardless of view settings.

View File

@ -0,0 +1,20 @@
# --- REMOVED Old Processing Thread Setup ---
# if ProcessingHandler and self.processing_thread is None:
# self.processing_thread = QThread(self)
# self.processing_handler = ProcessingHandler()
# self.processing_handler.moveToThread(self.processing_thread)
# self.processing_handler.progress_updated.connect(self.update_progress_bar)
# self.processing_handler.file_status_updated.connect(self.update_file_status)
# self.processing_handler.processing_finished.connect(self.on_processing_finished)
# self.processing_handler.status_message.connect(self.show_status_message)
# self.processing_handler.processing_finished.connect(self.processing_thread.quit)
# self.processing_handler.processing_finished.connect(self.processing_handler.deleteLater)
# self.processing_thread.finished.connect(self.processing_thread.deleteLater)
# self.processing_thread.finished.connect(self._reset_processing_thread_references)
# log.debug("Processing thread and handler set up.")
# elif not ProcessingHandler:
# log.error("ProcessingHandler not available. Cannot set up processing thread.")
# if hasattr(self, 'start_button'):
# self.start_button.setEnabled(False)
# self.start_button.setToolTip("Error: Backend processing components failed to load.")
# --- END REMOVED ---

View File

@ -0,0 +1,8 @@
# --- REMOVED Old Processing Thread Reset ---
# @Slot()
# def _reset_processing_thread_references(self):
# # This might still be needed if processing is meant to be single-shot
# log.debug("Resetting processing thread and handler references.")
# self.processing_thread = None
# self.processing_handler = None
# --- END REMOVED ---

View File

@ -0,0 +1,70 @@
import logging
import subprocess
from pathlib import Path

log = logging.getLogger(__name__)  # Assume logger is configured elsewhere


def run_blender_script(blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str):
    """
    Executes a Python script within Blender in the background.

    Args:
        blender_exe_path: Path to the Blender executable.
        blend_file_path: Path to the .blend file to open.
        python_script_path: Path to the Python script to execute within Blender.
        asset_root_dir: Path to the processed asset library root directory (passed to the script).

    Returns:
        True if the script executed successfully (return code 0), False otherwise.
    """
    script_name = Path(python_script_path).name
    log.info(f"Attempting to run Blender script: {script_name} on {Path(blend_file_path).name}")

    # subprocess wants plain strings; normalize every path to an absolute string.
    blender_exe_path, blend_file_path, python_script_path, asset_root_dir = (
        str(Path(p).resolve())
        for p in (blender_exe_path, blend_file_path, python_script_path, asset_root_dir)
    )

    # -b runs Blender headless; --python executes the script; everything after
    # "--" is handed to the script via sys.argv.
    # NOTE(review): "-S" appears *after* "--", so it is forwarded to the Python
    # script rather than interpreted by Blender -- confirm whether a
    # save-on-exit flag was actually intended here.
    command = [
        blender_exe_path,
        "-b",  # Run in background
        blend_file_path,
        "--python", python_script_path,
        "--",  # Pass subsequent arguments to the script
        asset_root_dir,
        "-S"  # Save the blend file after script execution
    ]
    log.debug(f"Executing Blender command: {' '.join(command)}")

    try:
        # capture_output/text collect decoded stdout+stderr; check=False means
        # non-zero exit codes are reported through the return value, not raised.
        result = subprocess.run(command, capture_output=True, text=True, check=False)
        log.info(f"Blender script '{script_name}' finished with exit code: {result.returncode}")
        if result.stdout:
            log.debug(f"Blender stdout:\n{result.stdout.strip()}")
        if result.stderr:
            # stderr is only a warning on success, since scripts may print
            # warnings there; on failure it is logged as an error.
            if result.returncode == 0:
                log.warning(f"Blender stderr (Return Code 0):\n{result.stderr.strip()}")
            else:
                log.error(f"Blender stderr:\n{result.stderr.strip()}")
        return result.returncode == 0
    except FileNotFoundError:
        log.error(f"Blender executable not found at: {blender_exe_path}")
        return False
    except Exception as e:
        log.exception(f"An unexpected error occurred while running Blender script '{script_name}': {e}")
        return False

View File

@ -0,0 +1,346 @@
# Deprecated/Old-Code/main_py_cli_main_entry_line_329.py
import argparse
import sys
import time
import os
import logging
from pathlib import Path
import shutil

# Assuming these imports are needed based on the original context
try:
    import config as core_config_module
    # Import functions from previously created files
    # Note: These imports assume the files are in the same directory or Python path
    from main_py_cli_run_processing_line_258 import run_processing
    from main_py_cli_blender_script_runner_line_365 import run_blender_script
    # setup_arg_parser and setup_logging are defined in the main script,
    # so they might not be directly importable here without refactoring.
    # This function is modified to accept them as arguments.
except ImportError as e:
    print(f"Warning: Could not import necessary modules/functions: {e}")
    core_config_module = None
    run_processing = None
    run_blender_script = None

log = logging.getLogger(__name__)  # Assume logger is configured elsewhere


# Note: setup_arg_parser and setup_logging were originally defined in main.py
# This function now accepts them as arguments.
def main(setup_arg_parser_func, setup_logging_func):
    """Parses arguments, sets up logging, runs processing, and reports summary.

    Args:
        setup_arg_parser_func: Zero-argument callable returning a configured
            argparse.ArgumentParser (must define at least input_paths, preset,
            output_dir, workers, overwrite, verbose, nodegroup_blend,
            materials_blend).
        setup_logging_func: Callable taking the boolean verbose flag and
            configuring logging accordingly.

    Never returns normally: terminates via sys.exit() with 0 when all assets
    were processed/skipped and 1 on validation or processing failures.
    """
    parser = setup_arg_parser_func()
    args = parser.parse_args()
    # Setup logging based on verbosity argument *before* logging status messages
    setup_logging_func(args.verbose)
    start_time = time.time()
    log.info("Asset Processor Script Started (CLI Mode)")

    # --- Validate Input Paths ---
    valid_inputs = []
    for p_str in args.input_paths:
        p = Path(p_str)
        if p.exists():
            suffix = p.suffix.lower()
            # Original check included .rar, .7z - keeping for historical accuracy
            if p.is_dir() or (p.is_file() and suffix in ['.zip', '.rar', '.7z']):
                valid_inputs.append(p_str)  # Store the original string path
            else:
                log.warning(f"Input is not a directory or a supported archive type (.zip, .rar, .7z), skipping: {p_str}")
        else:
            log.warning(f"Input path not found, skipping: {p_str}")
    if not valid_inputs:
        log.error("No valid input paths found. Exiting.")
        sys.exit(1)  # Exit with error code

    # --- Determine Output Directory ---
    output_dir_str = args.output_dir  # Get value from args (might be None)
    if not output_dir_str:
        log.debug("Output directory not specified via -o, reading default from config.py.")
        try:
            if core_config_module is None:
                raise RuntimeError("core_config_module not imported.")
            output_dir_str = getattr(core_config_module, 'OUTPUT_BASE_DIR', None)
            if not output_dir_str:
                log.error("Output directory not specified with -o and OUTPUT_BASE_DIR not found or empty in config.py. Exiting.")
                sys.exit(1)
            log.info(f"Using default output directory from config.py: {output_dir_str}")
        except Exception as e:
            log.error(f"Could not read OUTPUT_BASE_DIR from config.py: {e}")
            sys.exit(1)

    # --- Resolve Output Path (Handles Relative Paths Explicitly) ---
    output_path_obj: Path
    if os.path.isabs(output_dir_str):
        output_path_obj = Path(output_dir_str)
        log.info(f"Using absolute output directory: {output_path_obj}")
    else:
        # Path() interprets relative paths against CWD by default
        output_path_obj = Path(output_dir_str)
        log.info(f"Using relative output directory '{output_dir_str}'. Resolved against CWD to: {output_path_obj.resolve()}")

    # --- Validate and Setup Output Directory ---
    try:
        # Resolve to ensure we have an absolute path for consistency and creation
        resolved_output_dir = output_path_obj.resolve()
        log.info(f"Ensuring output directory exists: {resolved_output_dir}")
        resolved_output_dir.mkdir(parents=True, exist_ok=True)
        # Use the resolved absolute path string for the processor
        output_dir_for_processor = str(resolved_output_dir)
    except Exception as e:
        # FIX: report the pre-resolution path here. `resolved_output_dir` is
        # unbound when .resolve() itself raised, and referencing it previously
        # masked the real error with a NameError inside this handler.
        log.error(f"Cannot create or access output directory '{output_path_obj}': {e}", exc_info=True)
        sys.exit(1)

    # --- Check Preset Existence (Basic Check) ---
    # Assuming __file__ might not be reliable here, using relative path logic
    try:
        # Try relative to CWD first
        preset_dir = Path("Presets")
        if not preset_dir.is_dir():
            # Try relative to script location if possible? Less reliable.
            # Go up two levels from Deprecated/Old-Code
            preset_dir = Path(__file__).parent.parent / "Presets"
        preset_file = preset_dir / f"{args.preset}.json"
        if not preset_file.is_file():
            log.error(f"Preset file not found: {preset_file}")
            log.error(f"Ensure a file named '{args.preset}.json' exists in the directory: {preset_dir.resolve()}")
            sys.exit(1)
    except NameError:  # __file__ might not be defined
        log.error("Could not determine preset directory path.")
        sys.exit(1)

    # --- Execute Processing via the new function ---
    if run_processing is None:
        log.error("run_processing function not available. Cannot execute processing.")
        sys.exit(1)
    processing_results = run_processing(
        valid_inputs=valid_inputs,
        preset_name=args.preset,
        output_dir_for_processor=output_dir_for_processor,
        overwrite=args.overwrite,
        num_workers=args.workers,
        verbose=args.verbose  # Pass the verbose flag
    )

    # --- Report Summary ---
    duration = time.time() - start_time
    successful_processed_count = processing_results["processed"]
    skipped_count = processing_results["skipped"]
    failed_count = processing_results["failed"]
    results_list = processing_results["results_list"]
    log.info("=" * 40)
    log.info("Processing Summary")
    log.info(f"  Duration: {duration:.2f} seconds")
    log.info(f"  Assets Attempted: {len(valid_inputs)}")
    log.info(f"  Successfully Processed: {successful_processed_count}")
    log.info(f"  Skipped (Already Existed): {skipped_count}")
    log.info(f"  Failed: {failed_count}")
    if processing_results.get("pool_error"):
        log.error(f"  Process Pool Error: {processing_results['pool_error']}")
        # Ensure failed count reflects pool error if it happened
        if failed_count == 0 and successful_processed_count == 0 and skipped_count == 0:
            failed_count = len(valid_inputs)  # Assume all failed if pool died early
    exit_code = 0
    if failed_count > 0:
        log.warning("Failures occurred:")
        # Iterate through results to show specific errors for failed items
        for input_path, status, err_msg in results_list:
            if status == "failed":
                log.warning(f"  - {Path(input_path).name}: {err_msg}")
        exit_code = 1  # Exit with error code if failures occurred
    else:
        # Consider skipped assets as a form of success for the overall run exit code
        if successful_processed_count > 0 or skipped_count > 0:
            log.info("All assets processed or skipped successfully.")
            exit_code = 0  # Exit code 0 indicates success (including skips)
        else:
            # This case might happen if all inputs were invalid initially
            log.warning("No assets were processed, skipped, or failed (check input validation logs).")
            exit_code = 0  # Still exit 0 as the script itself didn't crash

    # --- Blender Script Execution (Optional) ---
    run_nodegroups = False  # Flags were defined but never set to True in original code
    run_materials = False
    nodegroup_blend_path = None
    materials_blend_path = None
    blender_exe = None

    # 1. Find Blender Executable
    try:
        if core_config_module is None:
            raise RuntimeError("core_config_module not imported.")
        blender_exe_config = getattr(core_config_module, 'BLENDER_EXECUTABLE_PATH', None)
        if blender_exe_config:
            # Check if the path in config exists
            if Path(blender_exe_config).is_file():
                blender_exe = str(Path(blender_exe_config).resolve())
                log.info(f"Using Blender executable from config: {blender_exe}")
            else:
                # Try finding it in PATH if config path is invalid
                log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying to find 'blender' in PATH.")
                blender_exe = shutil.which("blender")
                if blender_exe:
                    log.info(f"Found Blender executable in PATH: {blender_exe}")
                else:
                    log.warning("Could not find 'blender' in system PATH.")
        else:
            # Try finding it in PATH if not set in config
            log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying to find 'blender' in PATH.")
            blender_exe = shutil.which("blender")
            if blender_exe:
                log.info(f"Found Blender executable in PATH: {blender_exe}")
            else:
                log.warning("Could not find 'blender' in system PATH.")
        if not blender_exe:
            log.warning("Blender executable not found or configured. Skipping Blender script execution.")
    except Exception as e:
        log.error(f"Error checking Blender executable path: {e}")
        blender_exe = None  # Ensure it's None on error

    # 2. Determine Blend File Paths if Blender Exe is available
    if blender_exe:
        # Nodegroup Blend Path
        nodegroup_blend_arg = args.nodegroup_blend
        if nodegroup_blend_arg:
            p = Path(nodegroup_blend_arg)
            if p.is_file() and p.suffix.lower() == '.blend':
                nodegroup_blend_path = str(p.resolve())
                log.info(f"Using nodegroup blend file from argument: {nodegroup_blend_path}")
            else:
                log.warning(f"Invalid nodegroup blend file path from argument: '{nodegroup_blend_arg}'. Ignoring.")
        else:
            if core_config_module is None:
                log.warning("core_config_module not available to check default nodegroup path.")
            else:
                default_ng_path_str = getattr(core_config_module, 'DEFAULT_NODEGROUP_BLEND_PATH', None)
                if default_ng_path_str:
                    p = Path(default_ng_path_str)
                    if p.is_file() and p.suffix.lower() == '.blend':
                        nodegroup_blend_path = str(p.resolve())
                        log.info(f"Using default nodegroup blend file from config: {nodegroup_blend_path}")
                    else:
                        log.warning(f"Invalid default nodegroup blend file path in config: '{default_ng_path_str}'. Ignoring.")
        # Materials Blend Path
        materials_blend_arg = args.materials_blend
        if materials_blend_arg:
            p = Path(materials_blend_arg)
            if p.is_file() and p.suffix.lower() == '.blend':
                materials_blend_path = str(p.resolve())
                log.info(f"Using materials blend file from argument: {materials_blend_path}")
            else:
                log.warning(f"Invalid materials blend file path from argument: '{materials_blend_arg}'. Ignoring.")
        else:
            if core_config_module is None:
                log.warning("core_config_module not available to check default materials path.")
            else:
                default_mat_path_str = getattr(core_config_module, 'DEFAULT_MATERIALS_BLEND_PATH', None)
                if default_mat_path_str:
                    p = Path(default_mat_path_str)
                    if p.is_file() and p.suffix.lower() == '.blend':
                        materials_blend_path = str(p.resolve())
                        log.info(f"Using default materials blend file from config: {materials_blend_path}")
                    else:
                        log.warning(f"Invalid default materials blend file path in config: '{default_mat_path_str}'. Ignoring.")

    # 3. Execute Scripts if Paths are Valid
    if blender_exe:
        # Determine script directory relative to this file's assumed location
        try:
            # Go up two levels from Deprecated/Old-Code
            script_dir = Path(__file__).parent.parent / "blenderscripts"
        except NameError:
            script_dir = Path("blenderscripts")  # Fallback if __file__ is not defined
        nodegroup_script_path = script_dir / "create_nodegroups.py"
        materials_script_path = script_dir / "create_materials.py"
        asset_output_root = output_dir_for_processor  # Use the resolved output dir
        if run_blender_script is None:
            log.error("run_blender_script function not available. Cannot execute Blender scripts.")
        else:
            # Check if nodegroup execution should run (based on original commented code, it wasn't explicitly triggered)
            # if run_nodegroups: # This flag was never set to True
            if nodegroup_blend_path:  # Check if path exists instead
                if nodegroup_script_path.is_file():
                    log.info("-" * 40)
                    log.info("Starting Blender Node Group Script Execution...")
                    success_ng = run_blender_script(
                        blender_exe_path=blender_exe,
                        blend_file_path=nodegroup_blend_path,
                        python_script_path=str(nodegroup_script_path),
                        asset_root_dir=asset_output_root
                    )
                    if not success_ng:
                        log.error("Blender node group script execution failed.")
                        # Optionally change exit code if Blender script fails?
                        # exit_code = 1
                    log.info("Finished Blender Node Group Script Execution.")
                    log.info("-" * 40)
                else:
                    log.error(f"Node group script not found: {nodegroup_script_path}")
            # Check if material execution should run (based on original commented code, it wasn't explicitly triggered)
            # if run_materials: # This flag was never set to True
            if materials_blend_path:  # Check if path exists instead
                if materials_script_path.is_file():
                    log.info("-" * 40)
                    log.info("Starting Blender Material Script Execution...")
                    success_mat = run_blender_script(
                        blender_exe_path=blender_exe,
                        blend_file_path=materials_blend_path,
                        python_script_path=str(materials_script_path),
                        asset_root_dir=asset_output_root
                    )
                    if not success_mat:
                        log.error("Blender material script execution failed.")
                        # Optionally change exit code if Blender script fails?
                        # exit_code = 1
                    log.info("Finished Blender Material Script Execution.")
                    log.info("-" * 40)
                else:
                    log.error(f"Material script not found: {materials_script_path}")

    # --- Final Exit ---
    log.info("Asset Processor Script Finished.")
    sys.exit(exit_code)
# Example of how this might be called if run standalone (requires providing the setup functions)
# if __name__ == "__main__":
# # Define dummy or actual setup functions here if needed for testing
# def dummy_setup_arg_parser():
# # Minimal parser for testing
# parser = argparse.ArgumentParser()
# parser.add_argument("input_paths", nargs='*', default=[])
# parser.add_argument("-p", "--preset")
# parser.add_argument("-o", "--output-dir")
# parser.add_argument("-w", "--workers", type=int, default=1)
# parser.add_argument("-v", "--verbose", action="store_true")
# parser.add_argument("--overwrite", action="store_true")
# parser.add_argument("--nodegroup-blend")
# parser.add_argument("--materials-blend")
# return parser
#
# def dummy_setup_logging(verbose):
# logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO,
# format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s')
#
# # Configure basic logging for the example run
# logging.basicConfig(level=logging.INFO)
#
# # Need to get setup_arg_parser and setup_logging from the original main.py somehow
# # This example won't run directly without them.
# # from main import setup_arg_parser, setup_logging # This would cause circular import if run directly
#
# # main(setup_arg_parser, setup_logging) # Call with the actual functions if available

View File

@ -0,0 +1,123 @@
import logging
from pathlib import Path
from concurrent.futures import ProcessPoolExecutor, as_completed
from typing import List, Dict, Tuple, Optional

# Assuming this import is needed based on the original context
try:
    # Import the wrapper function from the file created in the previous step
    # Note: This assumes the file is in the same directory or Python path
    from main_py_cli_worker_wrapper_line_254 import process_single_asset_wrapper
except ImportError:
    print("Warning: Could not import process_single_asset_wrapper. Ensure main_py_cli_worker_wrapper_line_254.py exists.")
    # Define a dummy function if import fails
    # This fallback keeps the module importable; every task it "handles" is
    # reported as failed so the caller can see the misconfiguration.
    def process_single_asset_wrapper(*args, **kwargs) -> Tuple[str, str, Optional[str]]:
        input_path = args[0] if args else "unknown_path"
        return (input_path, "failed", "Dummy function: process_single_asset_wrapper not imported")

log = logging.getLogger(__name__) # Assume logger is configured elsewhere

def run_processing(
    valid_inputs: List[str],
    preset_name: str,
    output_dir_for_processor: str,
    overwrite: bool,
    num_workers: int,
    verbose: bool # Add verbose parameter here
) -> Dict:
    """
    Executes the core asset processing logic using a process pool.

    Args:
        valid_inputs: List of validated input file/directory paths (strings).
        preset_name: Name of the preset to use.
        output_dir_for_processor: Absolute path string for the output base directory.
        overwrite: Boolean flag to force reprocessing.
        num_workers: Maximum number of worker processes (clamped to >= 1).
        verbose: Boolean flag for verbose logging (forwarded to each worker).

    Returns:
        A dictionary containing processing results:
        {
            "processed": int,
            "skipped": int,
            "failed": int,
            "results_list": List[Tuple[str, str, Optional[str]]] # (input_path, status, error_msg)
        }
        On a pool-level failure the dict additionally carries a "pool_error"
        key with the stringified exception, and every input that never
        produced a result is counted as failed.
    """
    log.info(f"Processing {len(valid_inputs)} asset(s) using preset '{preset_name}' with up to {num_workers} worker(s)...")
    results_list = []
    successful_processed_count = 0
    skipped_count = 0
    failed_count = 0
    # Ensure at least one worker
    num_workers = max(1, num_workers)
    # Using ProcessPoolExecutor is generally good if AssetProcessor tasks are CPU-bound.
    # If tasks are mostly I/O bound, ThreadPoolExecutor might be sufficient.
    # Important: Ensure Configuration and AssetProcessor are "pickleable".
    try:
        with ProcessPoolExecutor(max_workers=num_workers) as executor:
            # Create futures
            futures = {}
            log.debug(f"Submitting {len(valid_inputs)} tasks...")
            # Removed the 1-second delay for potentially faster submission in non-CLI use
            for i, input_path in enumerate(valid_inputs):
                log.debug(f"Submitting task {i+1}/{len(valid_inputs)} for: {Path(input_path).name}")
                # NOTE(review): only five arguments are submitted here; the real
                # wrapper also declares a `rules` parameter -- confirm it has a
                # default value, otherwise every task fails with a TypeError.
                future = executor.submit(
                    process_single_asset_wrapper, # Use the imported wrapper
                    input_path,
                    preset_name,
                    output_dir_for_processor,
                    overwrite,
                    verbose # Pass the verbose flag
                )
                futures[future] = input_path # Store future -> input_path mapping
            # Process completed futures
            # as_completed yields in completion order, so `i` is just a
            # progress counter, not the submission index.
            for i, future in enumerate(as_completed(futures), 1):
                input_path = futures[future]
                asset_name = Path(input_path).name
                log.info(f"--- [{i}/{len(valid_inputs)}] Worker finished attempt for: {asset_name} ---")
                try:
                    # Get result tuple: (input_path_str, status_string, error_message_or_None)
                    result_tuple = future.result()
                    results_list.append(result_tuple)
                    input_path_res, status, err_msg = result_tuple
                    # Increment counters based on status
                    if status == "processed":
                        successful_processed_count += 1
                    elif status == "skipped":
                        skipped_count += 1
                    elif status == "failed":
                        failed_count += 1
                    else: # Should not happen, but log as warning/failure
                        log.warning(f"Unknown status '{status}' received for {asset_name}. Counting as failed.")
                        failed_count += 1
                except Exception as e:
                    # Catch errors if the future itself fails (e.g., worker process crashed hard)
                    log.exception(f"Critical worker failure for {asset_name}: {e}")
                    results_list.append((input_path, "failed", f"Worker process crashed: {e}"))
                    failed_count += 1 # Count crashes as failures
    except Exception as pool_exc:
        log.exception(f"An error occurred with the process pool: {pool_exc}")
        # Re-raise or handle as appropriate for the calling context (monitor.py)
        # For now, log and return current counts
        return {
            "processed": successful_processed_count,
            "skipped": skipped_count,
            "failed": failed_count + (len(valid_inputs) - len(results_list)), # Count unprocessed as failed
            "results_list": results_list,
            "pool_error": str(pool_exc) # Add pool error info
        }
    return {
        "processed": successful_processed_count,
        "skipped": skipped_count,
        "failed": failed_count,
        "results_list": results_list
    }

View File

@ -0,0 +1,100 @@
import os
import logging
from pathlib import Path
from typing import Tuple, Optional, List, Dict # Added List, Dict

# Assuming these imports are needed based on the original context
try:
    from configuration import Configuration, ConfigurationError
    from asset_processor import AssetProcessor, AssetProcessingError # Assuming this was the old processor
    from rule_structure import SourceRule # Assuming this might be needed
except ImportError:
    # Handle missing imports if this file is run standalone
    print("Warning: Could not import necessary classes (Configuration, AssetProcessor, etc.).")
    Configuration = None
    AssetProcessor = None
    ConfigurationError = Exception
    AssetProcessingError = Exception
    SourceRule = None # Define as None if not found


def process_single_asset_wrapper(input_path_str: str, preset_name: str, output_dir_str: str, overwrite: bool, verbose: bool, rules=None) -> Tuple[str, str, Optional[str]]:
    """
    Wrapper function for processing a single input path (which might contain multiple assets)
    in a separate process. Handles instantiation of Configuration and AssetProcessor,
    passes the overwrite flag, catches errors, and interprets the multi-asset status dictionary.
    Ensures logging is configured for the worker process.

    Args:
        input_path_str: Original input path (file or directory) to process.
        preset_name: Name of the configuration preset to load.
        output_dir_str: Base output directory for processed assets.
        overwrite: Force reprocessing even if outputs already exist.
        verbose: Enable DEBUG-level logging in this worker.
        rules: Optional rules object forwarded to AssetProcessor.process().
            FIX: now defaults to None -- callers (e.g. run_processing) submit
            only the first five arguments, and a required `rules` parameter
            made every such call fail with a TypeError.

    Returns:
        Tuple[str, str, Optional[str]]:
            - input_path_str: The original input path processed.
            - overall_status_string: A single status string summarizing the outcome
              ("processed", "skipped", "failed").
            - error_message_or_None: An error message if failures occurred, potentially
              listing failed assets.
    """
    # Explicitly configure logging for this worker process
    worker_log = logging.getLogger(f"Worker_{os.getpid()}") # Log with worker PID
    if not logging.root.handlers:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s')
    worker_log.setLevel(logging.DEBUG if verbose else logging.INFO)
    if verbose:
        logging.root.setLevel(logging.DEBUG)
    input_path_obj = Path(input_path_str)
    input_name = input_path_obj.name
    try:
        worker_log.info(f"Starting processing attempt for input: {input_name}")
        # Ensure Configuration is available before using
        if Configuration is None:
            raise RuntimeError("Configuration class not imported.")
        config = Configuration(preset_name)
        output_base_path = Path(output_dir_str)
        # Ensure AssetProcessor is available before using
        if AssetProcessor is None:
            raise RuntimeError("AssetProcessor class not imported.")
        processor = AssetProcessor(input_path_obj, config, output_base_path, overwrite=overwrite)
        # processor.process() now returns a Dict[str, List[str]]
        # NOTE(review): assumes AssetProcessor.process accepts rules=None when
        # no rules are supplied -- confirm against asset_processor.py.
        status_dict = processor.process(rules=rules)
        # --- Interpret the status dictionary ---
        processed_assets = status_dict.get("processed", [])
        skipped_assets = status_dict.get("skipped", [])
        failed_assets = status_dict.get("failed", [])
        overall_status_string = "failed" # Default
        error_message = None
        if failed_assets:
            # Any failure makes the whole input "failed", even if some assets succeeded.
            overall_status_string = "failed"
            error_message = f"Failed assets within {input_name}: {', '.join(failed_assets)}"
            worker_log.error(error_message) # Log the failure details
        elif processed_assets:
            overall_status_string = "processed"
            # Check for partial success (mix of processed/skipped and failed should be caught above)
            if skipped_assets:
                worker_log.info(f"Input '{input_name}' processed with some assets skipped. Processed: {processed_assets}, Skipped: {skipped_assets}")
            else:
                worker_log.info(f"Input '{input_name}' processed successfully. Assets: {processed_assets}")
        elif skipped_assets:
            overall_status_string = "skipped"
            worker_log.info(f"Input '{input_name}' skipped (all contained assets already exist). Assets: {skipped_assets}")
        else:
            # Should not happen if input contained files, but handle as failure.
            worker_log.warning(f"Input '{input_name}' resulted in no processed, skipped, or failed assets. Reporting as failed.")
            overall_status_string = "failed"
            error_message = f"No assets processed, skipped, or failed within {input_name}."
        return (input_path_str, overall_status_string, error_message)
    except (ConfigurationError, AssetProcessingError) as e:
        # Catch errors during processor setup or the process() call itself if it raises before returning dict
        worker_log.error(f"Processing failed for input '{input_name}': {type(e).__name__}: {e}")
        return (input_path_str, "failed", f"{type(e).__name__}: {e}")
    except Exception as e:
        # Catch any other unexpected errors
        worker_log.exception(f"Unexpected worker failure processing input '{input_name}': {e}")
        return (input_path_str, "failed", f"Unexpected Worker Error: {e}")

View File

@ -323,10 +323,7 @@ class MATERIAL_OT_merge_materials(Operator):
return {'FINISHED'} return {'FINISHED'}
# Optional: Add invoke method if needed for more complex setup before execute # Optional: Add invoke method if needed for more complex setup before execute
# def invoke(self, context, event): # Commented-out code moved to Deprecated/Old-Code/blender_addon_material_merger_operator_py_invoke_method_line_326.py
# # Example: Open a dialog to select materials if not already selected
# return context.window_manager.invoke_props_dialog(self)
def register(): def register():
bpy.utils.register_class(MATERIAL_OT_merge_materials) bpy.utils.register_class(MATERIAL_OT_merge_materials)

View File

@ -707,15 +707,7 @@ class MainWindow(QMainWindow):
# --- Processing Action Methods --- # --- Processing Action Methods ---
def start_processing(self): def start_processing(self):
# REMOVED Check for old processing handler state # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_processing_state_checks_line_710.py
# if self.processing_handler and self.processing_handler.is_running:
# log.warning("Start clicked, but processing is already running.")
# self.statusBar().showMessage("Processing is already in progress.", 3000)
# return
# REMOVED Check for old processing handler import
# if ProcessingHandler is None:
# self.statusBar().showMessage("Error: Processing components not loaded.", 5000)
# return
if not hasattr(self, 'current_asset_paths') or not self.current_asset_paths: if not hasattr(self, 'current_asset_paths') or not self.current_asset_paths:
self.statusBar().showMessage("No assets added to process.", 3000) self.statusBar().showMessage("No assets added to process.", 3000)
return return
@ -811,35 +803,13 @@ class MainWindow(QMainWindow):
self.start_button.setText("Processing...") self.start_button.setText("Processing...")
self.cancel_button.setEnabled(True) # Enable cancel self.cancel_button.setEnabled(True) # Enable cancel
# --- Old direct processing call REMOVED --- # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_direct_processing_call_line_814.py
# self.set_controls_enabled(False)
# self.cancel_button.setEnabled(True)
# self.start_button.setText("Processing...")
# self.progress_bar.setValue(0)
# self.progress_bar.setFormat("%p%")
# self.setup_threads() # Ensure threads are ready (might be redundant if setup elsewhere)
# if self.processing_thread and self.processing_handler:
# # ... (old thread starting logic removed) ...
# else:
# log.error("Failed to start processing: Thread or handler not initialized.")
# self.statusBar().showMessage("Error: Failed to initialize processing thread.", 5000)
# self.set_controls_enabled(True)
# self.cancel_button.setEnabled(False)
# self.start_button.setText("Start Processing")
def cancel_processing(self): def cancel_processing(self):
# TODO: Implement cancellation by signaling the App/main thread to stop the QThreadPool tasks # TODO: Implement cancellation by signaling the App/main thread to stop the QThreadPool tasks
log.warning("Cancel button clicked, but cancellation logic needs reimplementation.") log.warning("Cancel button clicked, but cancellation logic needs reimplementation.")
self.statusBar().showMessage("Cancellation not yet implemented.", 3000) self.statusBar().showMessage("Cancellation not yet implemented.", 3000)
# if self.processing_handler and self.processing_handler.is_running: # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_cancel_processing_logic_line_834.py
# log.info("Cancel button clicked. Requesting cancellation.")
# self.statusBar().showMessage("Requesting cancellation...", 3000)
# self.processing_handler.request_cancel() # OLD HANDLER
# self.cancel_button.setEnabled(False)
# self.start_button.setText("Cancelling...")
# else:
# log.warning("Cancel clicked, but no processing is running.")
# self.statusBar().showMessage("Nothing to cancel.", 3000)
def clear_queue(self): def clear_queue(self):
"""Clears the current asset queue and the preview table.""" """Clears the current asset queue and the preview table."""
@ -891,9 +861,7 @@ class MainWindow(QMainWindow):
# Note: Cancellation is not immediate even if it existed. The thread would stop when it next checks the flag. # Note: Cancellation is not immediate even if it existed. The thread would stop when it next checks the flag.
# We proceed with updating the UI immediately. # We proceed with updating the UI immediately.
# --- REMOVED Old Preview Model Mode Setting and Table Configuration --- # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_preview_model_mode_setting_line_864.py
# The Unified View does not have a simple/detailed mode toggle.
# The Prediction Handler is triggered regardless of view settings.
log.debug(f"[{time.time():.4f}] ### LOG: Checking if prediction handler is running") log.debug(f"[{time.time():.4f}] ### LOG: Checking if prediction handler is running")
# --- Trigger Prediction Handler --- # --- Trigger Prediction Handler ---
@ -949,7 +917,7 @@ class MainWindow(QMainWindow):
# Clearing is handled by model's set_data now, no need to clear table view directly # Clearing is handled by model's set_data now, no need to clear table view directly
if self.prediction_thread and self.prediction_handler: if self.prediction_thread and self.prediction_handler:
# REMOVED Placeholder SourceRule creation # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_placeholder_sourcerule_creation_line_922.py
# Start the prediction thread # Start the prediction thread
# The thread should already be running or started once. Don't restart it here. # The thread should already be running or started once. Don't restart it here.
@ -975,26 +943,7 @@ class MainWindow(QMainWindow):
# --- Threading and Processing Control --- # --- Threading and Processing Control ---
def setup_threads(self): def setup_threads(self):
# --- REMOVED Old Processing Thread Setup --- # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_processing_thread_setup_line_978.py
# if ProcessingHandler and self.processing_thread is None:
# self.processing_thread = QThread(self)
# self.processing_handler = ProcessingHandler()
# self.processing_handler.moveToThread(self.processing_thread)
# self.processing_handler.progress_updated.connect(self.update_progress_bar)
# self.processing_handler.file_status_updated.connect(self.update_file_status)
# self.processing_handler.processing_finished.connect(self.on_processing_finished)
# self.processing_handler.status_message.connect(self.show_status_message)
# self.processing_handler.processing_finished.connect(self.processing_thread.quit)
# self.processing_handler.processing_finished.connect(self.processing_handler.deleteLater)
# self.processing_thread.finished.connect(self.processing_thread.deleteLater)
# self.processing_thread.finished.connect(self._reset_processing_thread_references)
# log.debug("Processing thread and handler set up.")
# elif not ProcessingHandler:
# log.error("ProcessingHandler not available. Cannot set up processing thread.")
# if hasattr(self, 'start_button'):
# self.start_button.setEnabled(False)
# self.start_button.setToolTip("Error: Backend processing components failed to load.")
# --- END REMOVED ---
# Setup Prediction Thread (Keep this) # Setup Prediction Thread (Keep this)
if PredictionHandler and self.prediction_thread is None: if PredictionHandler and self.prediction_thread is None:
@ -1009,24 +958,12 @@ class MainWindow(QMainWindow):
# Assume PredictionHandler.prediction_finished signal is changed to Signal(str) -> input_path # Assume PredictionHandler.prediction_finished signal is changed to Signal(str) -> input_path
self.prediction_handler.prediction_finished.connect(self.on_prediction_finished) # Connect finish signal (now with input_path) self.prediction_handler.prediction_finished.connect(self.on_prediction_finished) # Connect finish signal (now with input_path)
self.prediction_handler.status_message.connect(self.show_status_message) self.prediction_handler.status_message.connect(self.show_status_message)
# --- REMOVED connections causing thread/handler cleanup --- # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_prediction_thread_cleanup_connections_line_1012.py
# self.prediction_handler.prediction_finished.connect(self.prediction_thread.quit)
# self.prediction_handler.prediction_finished.connect(self.prediction_handler.deleteLater)
# self.prediction_thread.finished.connect(self.prediction_thread.deleteLater)
# self.prediction_thread.finished.connect(self._reset_prediction_thread_references)
# --- END REMOVED ---
log.debug("Prediction thread and handler set up to be persistent.") log.debug("Prediction thread and handler set up to be persistent.")
elif not PredictionHandler: elif not PredictionHandler:
log.error("PredictionHandler not available. Cannot set up prediction thread.") log.error("PredictionHandler not available. Cannot set up prediction thread.")
# --- REMOVED Old Processing Thread Reset --- # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_reset_processing_thread_references_slot_line_1022.py
# @Slot()
# def _reset_processing_thread_references(self):
# # This might still be needed if processing is meant to be single-shot
# log.debug("Resetting processing thread and handler references.")
# self.processing_thread = None
# self.processing_handler = None
# --- END REMOVED ---
@Slot() @Slot()
def _reset_prediction_thread_references(self): def _reset_prediction_thread_references(self):
@ -1045,12 +982,7 @@ class MainWindow(QMainWindow):
self.progress_bar.setValue(0) self.progress_bar.setValue(0)
self.progress_bar.setFormat("0/0") self.progress_bar.setFormat("0/0")
# Slot for prediction results (Updated for new format and coloring) - REMOVED # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_on_prediction_results_ready_slot_line_987.py
# @Slot(list)
# def on_prediction_results_ready(self, results: list):
# """Populates the preview table model with detailed prediction results."""
# # This is no longer needed as _on_rule_hierarchy_ready handles data loading for the new model.
# pass
# Slot signature assumes prediction_finished signal is updated to emit input_path: Signal(str) # Slot signature assumes prediction_finished signal is updated to emit input_path: Signal(str)
# Slot signature assumes prediction_finished signal is updated to emit input_path: Signal(str) # Slot signature assumes prediction_finished signal is updated to emit input_path: Signal(str)
@ -1617,11 +1549,7 @@ class MainWindow(QMainWindow):
event.accept() # Accept close event event.accept() # Accept close event
# --- REMOVED Slots for Old Hierarchy and Rule Editor --- # --- REMOVED Slots for Old Hierarchy and Rule Editor ---
# @Slot(QModelIndex) # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_hierarchy_and_rule_editor_slots_line_1553.py
# def _on_hierarchy_item_clicked(self, index: QModelIndex): ...
# @Slot(object)
# def _on_rule_updated(self, rule_object): ...
# Slot signature assumes rule_hierarchy_ready signal is updated to emit input_path: Signal(str, list) # Slot signature assumes rule_hierarchy_ready signal is updated to emit input_path: Signal(str, list)
# Slot signature matches rule_hierarchy_ready = Signal(list) # Slot signature matches rule_hierarchy_ready = Signal(list)

495
main.py
View File

@ -251,506 +251,19 @@ class ProcessingTask(QRunnable):
# --- CLI Worker Function (COMMENTED OUT - Replaced by GUI Flow) --- # --- CLI Worker Function (COMMENTED OUT - Replaced by GUI Flow) ---
# def process_single_asset_wrapper(input_path_str: str, preset_name: str, output_dir_str: str, overwrite: bool, verbose: bool, rules) -> Tuple[str, str, Optional[str]]: # Commented-out code moved to Deprecated/Old-Code/main_py_cli_worker_wrapper_line_254.py
# """
# Wrapper function for processing a single input path (which might contain multiple assets)
# in a separate process. Handles instantiation of Configuration and AssetProcessor,
# passes the overwrite flag, catches errors, and interprets the multi-asset status dictionary.
#
# Ensures logging is configured for the worker process.
#
# Returns:
# Tuple[str, str, Optional[str]]:
# - input_path_str: The original input path processed.
# - overall_status_string: A single status string summarizing the outcome
# ("processed", "skipped", "failed", "partial_success").
# - error_message_or_None: An error message if failures occurred, potentially
# listing failed assets.
# """
# # Explicitly configure logging for this worker process
# worker_log = logging.getLogger(f"Worker_{os.getpid()}") # Log with worker PID
# if not logging.root.handlers:
# logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s')
# worker_log.setLevel(logging.DEBUG if verbose else logging.INFO)
# if verbose:
# logging.root.setLevel(logging.DEBUG)
#
# input_path_obj = Path(input_path_str)
# input_name = input_path_obj.name
#
# try:
# worker_log.info(f"Starting processing attempt for input: {input_name}")
# config = Configuration(preset_name)
# output_base_path = Path(output_dir_str)
#
# processor = AssetProcessor(input_path_obj, config, output_base_path, overwrite=overwrite)
# # processor.process() now returns a Dict[str, List[str]]
# status_dict = processor.process(rules=rules)
#
# # --- Interpret the status dictionary ---
# processed_assets = status_dict.get("processed", [])
# skipped_assets = status_dict.get("skipped", [])
# failed_assets = status_dict.get("failed", [])
#
# overall_status_string = "failed" # Default
# error_message = None
#
# if failed_assets:
# overall_status_string = "failed"
# error_message = f"Failed assets within {input_name}: {', '.join(failed_assets)}"
# worker_log.error(error_message) # Log the failure details
# elif processed_assets:
# overall_status_string = "processed"
# # Check for partial success (mix of processed/skipped and failed should be caught above)
# if skipped_assets:
# worker_log.info(f"Input '{input_name}' processed with some assets skipped. Processed: {processed_assets}, Skipped: {skipped_assets}")
# else:
# worker_log.info(f"Input '{input_name}' processed successfully. Assets: {processed_assets}")
# elif skipped_assets:
# overall_status_string = "skipped"
# worker_log.info(f"Input '{input_name}' skipped (all contained assets already exist). Assets: {skipped_assets}")
# else:
# # Should not happen if input contained files, but handle as failure.
# worker_log.warning(f"Input '{input_name}' resulted in no processed, skipped, or failed assets. Reporting as failed.")
# overall_status_string = "failed"
# error_message = f"No assets processed, skipped, or failed within {input_name}."
#
#
# return (input_path_str, overall_status_string, error_message)
#
# except (ConfigurationError, AssetProcessingError) as e:
# # Catch errors during processor setup or the process() call itself if it raises before returning dict
# worker_log.error(f"Processing failed for input '{input_name}': {type(e).__name__}: {e}")
# return (input_path_str, "failed", f"{type(e).__name__}: {e}")
# except Exception as e:
# # Catch any other unexpected errors
# worker_log.exception(f"Unexpected worker failure processing input '{input_name}': {e}")
# return (input_path_str, "failed", f"Unexpected Worker Error: {e}")
# --- Core CLI Processing Function (COMMENTED OUT - Replaced by GUI Flow) --- # --- Core CLI Processing Function (COMMENTED OUT - Replaced by GUI Flow) ---
# def run_processing( # Commented-out code moved to Deprecated/Old-Code/main_py_cli_run_processing_line_258.py
# valid_inputs: List[str],
# preset_name: str,
# output_dir_for_processor: str,
# overwrite: bool,
# num_workers: int,
# verbose: bool # Add verbose parameter here
# ) -> Dict:
# """
# Executes the core asset processing logic using a process pool.
#
# Args:
# valid_inputs: List of validated input file/directory paths (strings).
# preset_name: Name of the preset to use.
# output_dir_for_processor: Absolute path string for the output base directory.
# overwrite: Boolean flag to force reprocessing.
# num_workers: Maximum number of worker processes.
# verbose: Boolean flag for verbose logging.
#
# Returns:
# A dictionary containing processing results:
# {
# "processed": int,
# "skipped": int,
# "failed": int,
# "results_list": List[Tuple[str, str, Optional[str]]] # (input_path, status, error_msg)
# }
# """
# log.info(f"Processing {len(valid_inputs)} asset(s) using preset '{preset_name}' with up to {num_workers} worker(s)...")
# results_list = []
# successful_processed_count = 0
# skipped_count = 0
# failed_count = 0
#
# # Ensure at least one worker
# num_workers = max(1, num_workers)
#
# # Using ProcessPoolExecutor is generally good if AssetProcessor tasks are CPU-bound.
# # If tasks are mostly I/O bound, ThreadPoolExecutor might be sufficient.
# # Important: Ensure Configuration and AssetProcessor are "pickleable".
# try:
# with ProcessPoolExecutor(max_workers=num_workers) as executor:
# # Create futures
# futures = {}
# log.debug(f"Submitting {len(valid_inputs)} tasks...")
# # Removed the 1-second delay for potentially faster submission in non-CLI use
# for i, input_path in enumerate(valid_inputs):
# log.debug(f"Submitting task {i+1}/{len(valid_inputs)} for: {Path(input_path).name}")
# future = executor.submit(
# process_single_asset_wrapper,
# input_path,
# preset_name,
# output_dir_for_processor,
# overwrite,
# verbose # Pass the verbose flag
# )
# futures[future] = input_path # Store future -> input_path mapping
#
# # Process completed futures
# for i, future in enumerate(as_completed(futures), 1):
# input_path = futures[future]
# asset_name = Path(input_path).name
# log.info(f"--- [{i}/{len(valid_inputs)}] Worker finished attempt for: {asset_name} ---")
# try:
# # Get result tuple: (input_path_str, status_string, error_message_or_None)
# result_tuple = future.result()
# results_list.append(result_tuple)
# input_path_res, status, err_msg = result_tuple
#
# # Increment counters based on status
# if status == "processed":
# successful_processed_count += 1
# elif status == "skipped":
# skipped_count += 1
# elif status == "failed":
# failed_count += 1
# else: # Should not happen, but log as warning/failure
# log.warning(f"Unknown status '{status}' received for {asset_name}. Counting as failed.")
# failed_count += 1
#
# except Exception as e:
# # Catch errors if the future itself fails (e.g., worker process crashed hard)
# log.exception(f"Critical worker failure for {asset_name}: {e}")
# results_list.append((input_path, "failed", f"Worker process crashed: {e}"))
# failed_count += 1 # Count crashes as failures
#
# except Exception as pool_exc:
# log.exception(f"An error occurred with the process pool: {pool_exc}")
# # Re-raise or handle as appropriate for the calling context (monitor.py)
# # For now, log and return current counts
# return {
# "processed": successful_processed_count,
# "skipped": skipped_count,
# "failed": failed_count + (len(valid_inputs) - len(results_list)), # Count unprocessed as failed
# "results_list": results_list,
# "pool_error": str(pool_exc) # Add pool error info
# }
#
# return {
# "processed": successful_processed_count,
# "skipped": skipped_count,
# "failed": failed_count,
# "results_list": results_list
# }
# --- Blender Script Execution Helper (COMMENTED OUT - Part of CLI Flow) --- # --- Blender Script Execution Helper (COMMENTED OUT - Part of CLI Flow) ---
# def run_blender_script(blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str): # Commented-out code moved to Deprecated/Old-Code/main_py_cli_blender_script_runner_line_365.py
# """
# Executes a Python script within Blender in the background.
#
# Args:
# blender_exe_path: Path to the Blender executable.
# blend_file_path: Path to the .blend file to open.
# python_script_path: Path to the Python script to execute within Blender.
# asset_root_dir: Path to the processed asset library root directory (passed to the script).
#
# Returns:
# True if the script executed successfully (return code 0), False otherwise.
# """
# log.info(f"Attempting to run Blender script: {Path(python_script_path).name} on {Path(blend_file_path).name}")
#
# # Ensure paths are absolute strings for subprocess
# blender_exe_path = str(Path(blender_exe_path).resolve())
# blend_file_path = str(Path(blend_file_path).resolve())
# python_script_path = str(Path(python_script_path).resolve())
# asset_root_dir = str(Path(asset_root_dir).resolve())
#
# # Construct the command arguments
# # -b: Run in background (no UI)
# # -S: Save the file after running the script
# # --python: Execute the specified Python script
# # --: Separator, arguments after this are passed to the Python script's sys.argv
# command = [
# blender_exe_path,
# "-b", # Run in background
# blend_file_path,
# "--python", python_script_path,
# "--", # Pass subsequent arguments to the script
# asset_root_dir,
# "-S" # Save the blend file after script execution
# ]
#
# log.debug(f"Executing Blender command: {' '.join(command)}") # Log the command for debugging
#
# try:
# # Execute the command
# # capture_output=True captures stdout and stderr
# # text=True decodes stdout/stderr as text
# # check=False prevents raising CalledProcessError on non-zero exit codes
# result = subprocess.run(command, capture_output=True, text=True, check=False)
#
# # Log results
# log.info(f"Blender script '{Path(python_script_path).name}' finished with exit code: {result.returncode}")
# if result.stdout:
# log.debug(f"Blender stdout:\n{result.stdout.strip()}")
# if result.stderr:
# # Log stderr as warning or error depending on return code
# if result.returncode != 0:
# log.error(f"Blender stderr:\n{result.stderr.strip()}")
# else:
# log.warning(f"Blender stderr (Return Code 0):\n{result.stderr.strip()}") # Log stderr even on success as scripts might print warnings
#
# return result.returncode == 0
#
# except FileNotFoundError:
# log.error(f"Blender executable not found at: {blender_exe_path}")
# return False
# except Exception as e:
# log.exception(f"An unexpected error occurred while running Blender script '{Path(python_script_path).name}': {e}")
# return False
# --- Main CLI Execution (COMMENTED OUT - Replaced by GUI App Flow) --- # --- Main CLI Execution (COMMENTED OUT - Replaced by GUI App Flow) ---
# def main(): # Commented-out code moved to Deprecated/Old-Code/main_py_cli_main_entry_line_329.py
# """Parses arguments, sets up logging, runs processing, and reports summary."""
# parser = setup_arg_parser()
# args = parser.parse_args()
#
# # Setup logging based on verbosity argument *before* logging status messages
# setup_logging(args.verbose)
#
# start_time = time.time()
# log.info("Asset Processor Script Started (CLI Mode)")
#
# # --- Validate Input Paths ---
# valid_inputs = []
# for p_str in args.input_paths:
# p = Path(p_str)
# if p.exists():
# suffix = p.suffix.lower()
# if p.is_dir() or (p.is_file() and suffix in ['.zip', '.rar', '.7z']):
# valid_inputs.append(p_str) # Store the original string path
# else:
# log.warning(f"Input is not a directory or a supported archive type (.zip, .rar, .7z), skipping: {p_str}")
# else:
# log.warning(f"Input path not found, skipping: {p_str}")
#
# if not valid_inputs:
# log.error("No valid input paths found. Exiting.")
# sys.exit(1) # Exit with error code
#
# # --- Determine Output Directory ---
# output_dir_str = args.output_dir # Get value from args (might be None)
# if not output_dir_str:
# log.debug("Output directory not specified via -o, reading default from config.py.")
# try:
# output_dir_str = getattr(core_config_module, 'OUTPUT_BASE_DIR', None)
# if not output_dir_str:
# log.error("Output directory not specified with -o and OUTPUT_BASE_DIR not found or empty in config.py. Exiting.")
# sys.exit(1)
# log.info(f"Using default output directory from config.py: {output_dir_str}")
# except Exception as e:
# log.error(f"Could not read OUTPUT_BASE_DIR from config.py: {e}")
# sys.exit(1)
#
# # --- Resolve Output Path (Handles Relative Paths Explicitly) ---
# output_path_obj: Path
# if os.path.isabs(output_dir_str):
# output_path_obj = Path(output_dir_str)
# log.info(f"Using absolute output directory: {output_path_obj}")
# else:
# # Path() interprets relative paths against CWD by default
# output_path_obj = Path(output_dir_str)
# log.info(f"Using relative output directory '{output_dir_str}'. Resolved against CWD to: {output_path_obj.resolve()}")
#
# # --- Validate and Setup Output Directory ---
# try:
# # Resolve to ensure we have an absolute path for consistency and creation
# resolved_output_dir = output_path_obj.resolve()
# log.info(f"Ensuring output directory exists: {resolved_output_dir}")
# resolved_output_dir.mkdir(parents=True, exist_ok=True)
# # Use the resolved absolute path string for the processor
# output_dir_for_processor = str(resolved_output_dir)
# except Exception as e:
# log.error(f"Cannot create or access output directory '{resolved_output_dir}': {e}", exc_info=True)
# sys.exit(1)
#
# # --- Check Preset Existence (Basic Check) ---
# preset_dir = Path(__file__).parent / "presets"
# preset_file = preset_dir / f"{args.preset}.json"
# if not preset_file.is_file():
# log.error(f"Preset file not found: {preset_file}")
# log.error(f"Ensure a file named '{args.preset}.json' exists in the directory: {preset_dir.resolve()}")
# sys.exit(1)
#
# # --- Execute Processing via the new function ---
# processing_results = run_processing(
# valid_inputs=valid_inputs,
# preset_name=args.preset,
# output_dir_for_processor=output_dir_for_processor,
# overwrite=args.overwrite,
# num_workers=args.workers,
# verbose=args.verbose # Pass the verbose flag
# )
#
# # --- Report Summary ---
# duration = time.time() - start_time
# successful_processed_count = processing_results["processed"]
# skipped_count = processing_results["skipped"]
# failed_count = processing_results["failed"]
# results_list = processing_results["results_list"]
#
# log.info("=" * 40)
# log.info("Processing Summary")
# log.info(f" Duration: {duration:.2f} seconds")
# log.info(f" Assets Attempted: {len(valid_inputs)}")
# log.info(f" Successfully Processed: {successful_processed_count}")
# log.info(f" Skipped (Already Existed): {skipped_count}")
# log.info(f" Failed: {failed_count}")
#
# if processing_results.get("pool_error"):
# log.error(f" Process Pool Error: {processing_results['pool_error']}")
# # Ensure failed count reflects pool error if it happened
# if failed_count == 0 and successful_processed_count == 0 and skipped_count == 0:
# failed_count = len(valid_inputs) # Assume all failed if pool died early
#
# exit_code = 0
# if failed_count > 0:
# log.warning("Failures occurred:")
# # Iterate through results to show specific errors for failed items
# for input_path, status, err_msg in results_list:
# if status == "failed":
# log.warning(f" - {Path(input_path).name}: {err_msg}")
# exit_code = 1 # Exit with error code if failures occurred
# else:
# # Consider skipped assets as a form of success for the overall run exit code
# if successful_processed_count > 0 or skipped_count > 0:
# log.info("All assets processed or skipped successfully.")
# exit_code = 0 # Exit code 0 indicates success (including skips)
# else:
# # This case might happen if all inputs were invalid initially
# log.warning("No assets were processed, skipped, or failed (check input validation logs).")
# exit_code = 0 # Still exit 0 as the script itself didn't crash
#
# # --- Blender Script Execution (Optional) ---
# run_nodegroups = False
# run_materials = False
# nodegroup_blend_path = None
# materials_blend_path = None
# blender_exe = None
#
# # 1. Find Blender Executable
# try:
# blender_exe_config = getattr(core_config_module, 'BLENDER_EXECUTABLE_PATH', None)
# if blender_exe_config:
# # Check if the path in config exists
# if Path(blender_exe_config).is_file():
# blender_exe = str(Path(blender_exe_config).resolve())
# log.info(f"Using Blender executable from config: {blender_exe}")
# else:
# # Try finding it in PATH if config path is invalid
# log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying to find 'blender' in PATH.")
# blender_exe = shutil.which("blender")
# if blender_exe:
# log.info(f"Found Blender executable in PATH: {blender_exe}")
# else:
# log.warning("Could not find 'blender' in system PATH.")
# else:
# # Try finding it in PATH if not set in config
# log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying to find 'blender' in PATH.")
# blender_exe = shutil.which("blender")
# if blender_exe:
# log.info(f"Found Blender executable in PATH: {blender_exe}")
# else:
# log.warning("Could not find 'blender' in system PATH.")
#
# if not blender_exe:
# log.warning("Blender executable not found or configured. Skipping Blender script execution.")
#
# except Exception as e:
# log.error(f"Error checking Blender executable path: {e}")
# blender_exe = None # Ensure it's None on error
#
# # 2. Determine Blend File Paths if Blender Exe is available
# if blender_exe:
# # Nodegroup Blend Path
# nodegroup_blend_arg = args.nodegroup_blend
# if nodegroup_blend_arg:
# p = Path(nodegroup_blend_arg)
# if p.is_file() and p.suffix.lower() == '.blend':
# nodegroup_blend_path = str(p.resolve())
# log.info(f"Using nodegroup blend file from argument: {nodegroup_blend_path}")
# else:
# log.warning(f"Invalid nodegroup blend file path from argument: '{nodegroup_blend_arg}'. Ignoring.")
# else:
# default_ng_path_str = getattr(core_config_module, 'DEFAULT_NODEGROUP_BLEND_PATH', None)
# if default_ng_path_str:
# p = Path(default_ng_path_str)
# if p.is_file() and p.suffix.lower() == '.blend':
# nodegroup_blend_path = str(p.resolve())
# log.info(f"Using default nodegroup blend file from config: {nodegroup_blend_path}")
# else:
# log.warning(f"Invalid default nodegroup blend file path in config: '{default_ng_path_str}'. Ignoring.")
#
# # Materials Blend Path
# materials_blend_arg = args.materials_blend
# if materials_blend_arg:
# p = Path(materials_blend_arg)
# if p.is_file() and p.suffix.lower() == '.blend':
# materials_blend_path = str(p.resolve())
# log.info(f"Using materials blend file from argument: {materials_blend_path}")
# else:
# log.warning(f"Invalid materials blend file path from argument: '{materials_blend_arg}'. Ignoring.")
# else:
# default_mat_path_str = getattr(core_config_module, 'DEFAULT_MATERIALS_BLEND_PATH', None)
# if default_mat_path_str:
# p = Path(default_mat_path_str)
# if p.is_file() and p.suffix.lower() == '.blend':
# materials_blend_path = str(p.resolve())
# log.info(f"Using default materials blend file from config: {materials_blend_path}")
# else:
# log.warning(f"Invalid default materials blend file path in config: '{default_mat_path_str}'. Ignoring.")
#
# # 3. Execute Scripts if Paths are Valid
# if blender_exe:
# script_dir = Path(__file__).parent / "blenderscripts"
# nodegroup_script_path = script_dir / "create_nodegroups.py"
# materials_script_path = script_dir / "create_materials.py"
# asset_output_root = output_dir_for_processor # Use the resolved output dir
#
# if nodegroup_blend_path:
# if nodegroup_script_path.is_file():
# log.info("-" * 40)
# log.info("Starting Blender Node Group Script Execution...")
# success_ng = run_blender_script(
# blender_exe_path=blender_exe,
# blend_file_path=nodegroup_blend_path,
# python_script_path=str(nodegroup_script_path),
# asset_root_dir=asset_output_root
# )
# if not success_ng:
# log.error("Blender node group script execution failed.")
# # Optionally change exit code if Blender script fails?
# # exit_code = 1
# log.info("Finished Blender Node Group Script Execution.")
# log.info("-" * 40)
# else:
# log.error(f"Node group script not found: {nodegroup_script_path}")
#
# if materials_blend_path:
# if materials_script_path.is_file():
# log.info("-" * 40)
# log.info("Starting Blender Material Script Execution...")
# success_mat = run_blender_script(
# blender_exe_path=blender_exe,
# blend_file_path=materials_blend_path,
# python_script_path=str(materials_script_path),
# asset_root_dir=asset_output_root
# )
# if not success_mat:
# log.error("Blender material script execution failed.")
# # Optionally change exit code if Blender script fails?
# # exit_code = 1
# log.info("Finished Blender Material Script Execution.")
# log.info("-" * 40)
# else:
# log.error(f"Material script not found: {materials_script_path}")
#
# # --- Final Exit ---
# log.info("Asset Processor Script Finished.")
# sys.exit(exit_code)
# --- Main Application Class (Integrates GUI and Engine) --- # --- Main Application Class (Integrates GUI and Engine) ---