# gui/processing_handler.py
|
|
import logging
|
|
from pathlib import Path
|
|
from concurrent.futures import ProcessPoolExecutor, as_completed
|
|
import time # For potential delays if needed
|
|
|
|
import subprocess # <<< ADDED IMPORT
|
|
import shutil # <<< ADDED IMPORT
|
|
from typing import Optional # <<< ADDED IMPORT
|
|
from rule_structure import SourceRule # Import SourceRule
|
|
|
|
# --- PySide6 Imports ---
# Inherit from QObject to support signals/slots for thread communication.
# Slot is required by the @Slot decorator on ProcessingHandler.run_processing.
from PySide6.QtCore import QObject, Signal, Slot
|
|
|
|
# --- Backend Imports ---
|
|
# Need to import the worker function and potentially config/processor if needed directly
|
|
# Adjust path to ensure modules can be found relative to this file's location
|
|
import sys

# Make the project root (one level above gui/) importable so the backend
# modules (main, configuration, asset_processor) resolve when this file is
# run from inside the gui/ package.
script_dir = Path(__file__).parent
project_root = script_dir.parent
if str(project_root) not in sys.path:
    sys.path.insert(0, str(project_root))

try:
    # Import the worker function from main.py (executed in pool processes).
    from main import process_single_asset_wrapper
    # Import exceptions/config loader for type hinting and specific handling.
    from configuration import ConfigurationError, load_base_config
    from asset_processor import AssetProcessingError
    BACKEND_AVAILABLE = True
except ImportError as e:
    print(f"ERROR (ProcessingHandler): Failed to import backend modules/worker: {e}")
    # Define placeholders if imports fail, so the GUI doesn't crash immediately.
    # BACKEND_AVAILABLE is checked before any processing is attempted.
    process_single_asset_wrapper = None
    ConfigurationError = Exception
    load_base_config = None  # Placeholder
    AssetProcessingError = Exception
    BACKEND_AVAILABLE = False

log = logging.getLogger(__name__)
# Basic config if logger hasn't been set up elsewhere (e.g. standalone runs).
if not log.hasHandlers():
    logging.basicConfig(level=logging.INFO, format='%(levelname)s (Handler): %(message)s')
|
|
|
|
|
|
class ProcessingHandler(QObject):
    """
    Handles the execution of the asset processing pipeline in a way that
    can be run in a separate thread and communicate progress via signals.

    Assets are fanned out to a ``ProcessPoolExecutor``; per-file and overall
    progress is reported back to the GUI thread via Qt signals. After the
    batch completes (and was not cancelled), optional Blender post-processing
    scripts may be run as subprocesses.
    """

    # --- Signals ---
    # Emitted for overall progress bar update.
    progress_updated = Signal(int, int)  # current_count, total_count
    # Emitted for updating status of individual files in the list.
    file_status_updated = Signal(str, str, str)  # input_path_str, status ("processing", "processed", "skipped", "failed"), message
    # Emitted when the entire batch processing is finished.
    processing_finished = Signal(int, int, int)  # processed_count, skipped_count, failed_count
    # Emitted for general status messages to the status bar.
    status_message = Signal(str, int)  # message, timeout_ms

    def __init__(self, parent=None):
        super().__init__(parent)
        self._executor = None          # Active ProcessPoolExecutor while a run is in flight
        self._futures = {}             # future -> input_path mapping for the current run
        self._is_running = False
        self._cancel_requested = False

    @property
    def is_running(self):
        """True while a processing batch is in flight."""
        return self._is_running

    @Slot(str, list, str, str, bool, int,
          bool, str, str, bool, SourceRule)  # Explicitly define types for the slot
    def run_processing(self, input_source_identifier: str, original_input_paths: list[str], preset_name: str, output_dir_str: str, overwrite: bool, num_workers: int,
                       run_blender: bool, nodegroup_blend_path: str, materials_blend_path: str, verbose: bool, rules: SourceRule):
        """
        Starts the asset processing task and optionally runs Blender scripts afterwards.

        This method should be called after the handler has been moved to a
        separate thread; it blocks until the whole batch finishes.

        Args:
            input_source_identifier: Identifier of the input source (unused here,
                kept for slot-signature compatibility with the caller).
            original_input_paths: Input asset paths; one worker task is
                submitted per path.
            preset_name: Name of the processing preset forwarded to the worker.
            output_dir_str: Output directory for processed assets and the
                Blender scripts' asset root.
            overwrite: Whether existing outputs may be overwritten.
            num_workers: Max worker processes for the pool.
            run_blender: If True (and not cancelled), run the Blender
                post-processing scripts after the batch.
            nodegroup_blend_path: .blend file for the nodegroup script.
            materials_blend_path: .blend file for the materials script.
            verbose: Verbosity flag forwarded to the worker.
            rules: SourceRule tree forwarded to every worker task.
        """
        if self._is_running:
            log.warning("Processing is already running.")
            self.status_message.emit("Processing already in progress.", 3000)
            return

        if not BACKEND_AVAILABLE or not process_single_asset_wrapper:
            log.error("Backend modules or worker function not available. Cannot start processing.")
            self.status_message.emit("Error: Backend components missing. Cannot process.", 5000)
            # Report the whole batch as failed so the GUI can settle.
            self.processing_finished.emit(0, 0, len(original_input_paths))
            return

        self._is_running = True
        self._cancel_requested = False
        self._futures = {}  # Reset futures for this run
        total_files = len(original_input_paths)
        processed_count = 0
        skipped_count = 0
        failed_count = 0
        completed_count = 0

        log.info(f"Starting processing run: {total_files} assets, Preset='{preset_name}', Workers={num_workers}, Overwrite={overwrite}")
        self.status_message.emit(f"Starting processing for {total_files} items...", 0)  # Persistent message

        try:
            # 'with' guarantees the pool is shut down even on exceptions.
            with ProcessPoolExecutor(max_workers=num_workers) as executor:
                self._executor = executor  # Stored so request_cancel() can reach it

                # --- Submit one task per input path ---
                for input_path in original_input_paths:
                    if self._cancel_requested:
                        break  # Check before submitting more
                    log.debug(f"Submitting task for: {input_path}")
                    self._log_rules_debug(input_path, rules)
                    future = executor.submit(process_single_asset_wrapper, input_path, preset_name, output_dir_str, overwrite, verbose=verbose, rules=rules)
                    self._futures[future] = input_path  # Map future back to input path
                    self.file_status_updated.emit(input_path, "processing", "")

                if self._cancel_requested:
                    log.info("Processing cancelled during task submission.")
                    # Tasks never submitted count as failed/cancelled up front;
                    # submitted ones are accounted for in the loop below.
                    failed_count = total_files - len(self._futures)

                # --- Collect results as they complete ---
                for future in as_completed(self._futures):
                    completed_count += 1
                    input_path = self._futures[future]
                    asset_name = Path(input_path).name
                    status = "failed"  # Pessimistic default
                    error_message = "Unknown error"

                    if self._cancel_requested:
                        # Don't call future.result() after a cancel request:
                        # it may raise CancelledError. Count as failed.
                        status = "failed"
                        error_message = "Cancelled"
                        failed_count += 1
                    else:
                        try:
                            # Worker returns (input_path_str, status_string, error_message_or_None).
                            _, status, error_message = future.result()
                            error_message = error_message or ""  # Ensure it's a string

                            if status == "processed":
                                processed_count += 1
                            elif status == "skipped":
                                skipped_count += 1
                            elif status == "failed":
                                failed_count += 1
                            else:
                                log.warning(f"Unknown status '{status}' received for {asset_name}. Counting as failed.")
                                failed_count += 1
                                error_message = f"Unknown status: {status}"
                        except Exception as e:
                            # The future itself failed (e.g. worker process crashed hard).
                            log.exception(f"Critical worker failure for {asset_name}: {e}")
                            failed_count += 1
                            status = "failed"
                            error_message = f"Worker process crashed: {e}"

                    self.progress_updated.emit(completed_count, total_files)
                    self.file_status_updated.emit(input_path, status, error_message)

                    if self._cancel_requested:
                        log.info("Cancellation detected after processing a result.")
                        # Everything not yet completed counts as failed/cancelled.
                        failed_count += total_files - completed_count
                        break

        except Exception as pool_exc:
            log.exception(f"An error occurred with the process pool: {pool_exc}")
            self.status_message.emit(f"Error during processing: {pool_exc}", 5000)
            # Mark everything not already processed/skipped as failed.
            failed_count = total_files - processed_count - skipped_count

        finally:
            # --- Blender Script Execution (optional, skipped on cancel) ---
            if run_blender and not self._cancel_requested:
                self._execute_blender_scripts(output_dir_str, nodegroup_blend_path, materials_blend_path)
            elif self._cancel_requested:
                log.info("Processing was cancelled. Skipping Blender script execution.")

            final_message = f"Finished. Processed: {processed_count}, Skipped: {skipped_count}, Failed: {failed_count}"
            log.info(final_message)
            self.status_message.emit(final_message, 5000)  # Show final summary
            self.processing_finished.emit(processed_count, skipped_count, failed_count)
            self._is_running = False
            self._executor = None
            self._futures = {}  # Clear futures

    @staticmethod
    def _log_rules_debug(input_path: str, rules: SourceRule) -> None:
        """Debug-log the FileRule overrides about to be sent to a worker."""
        log.debug(f"ProcessingHandler: Inspecting rules for input '{input_path}' before submitting to worker:")
        if rules:
            for asset_rule in rules.assets:
                log.debug(f"  Asset: {asset_rule.asset_name}")
                for file_rule in asset_rule.files:
                    log.debug(f"    File: {Path(file_rule.file_path).name}, ItemType: {file_rule.item_type}, Override: {file_rule.item_type_override}, StandardMap: {getattr(file_rule, 'standard_map_type', 'N/A')}")
        else:
            log.debug("  Rules object is None.")

    def _execute_blender_scripts(self, output_dir_str: str, nodegroup_blend_path: str, materials_blend_path: str) -> None:
        """
        Run the optional Blender post-processing scripts via subprocess.

        Only called when run_blender was requested and the batch was not
        cancelled. Each script failure is logged and surfaced on the status
        bar but does not abort the other script.
        """
        log.info("Asset processing complete. Checking for Blender script execution.")
        self.status_message.emit("Asset processing complete. Starting Blender scripts...", 0)
        blender_exe = self._find_blender_executable()
        if not blender_exe:
            log.warning("Blender executable not found. Skipping Blender script execution.")
            self.status_message.emit("Warning: Blender executable not found. Skipping scripts.", 5000)
            return

        script_dir = Path(__file__).parent.parent / "blenderscripts"  # Go up one level from gui/
        nodegroup_script_path = script_dir / "create_nodegroups.py"
        materials_script_path = script_dir / "create_materials.py"
        asset_output_root = output_dir_str  # Same output dir as the processed assets

        # --- Nodegroup script ---
        if nodegroup_blend_path and Path(nodegroup_blend_path).is_file():
            if nodegroup_script_path.is_file():
                log.info("-" * 20 + " Running Nodegroup Script " + "-" * 20)
                self.status_message.emit(f"Running Blender nodegroup script on {Path(nodegroup_blend_path).name}...", 0)
                success_ng = self._run_blender_script_subprocess(
                    blender_exe_path=blender_exe,
                    blend_file_path=nodegroup_blend_path,
                    python_script_path=str(nodegroup_script_path),
                    asset_root_dir=asset_output_root
                )
                if not success_ng:
                    log.error("Blender node group script execution failed.")
                    self.status_message.emit("Blender nodegroup script failed.", 5000)
                else:
                    log.info("Blender nodegroup script finished successfully.")
                    self.status_message.emit("Blender nodegroup script finished.", 3000)
            else:
                log.error(f"Node group script not found: {nodegroup_script_path}")
                self.status_message.emit("Error: Nodegroup script not found.", 5000)
        elif nodegroup_blend_path:  # Path provided but invalid
            log.warning(f"Nodegroup blend path provided but invalid: {nodegroup_blend_path}")
            self.status_message.emit("Warning: Invalid Nodegroup .blend path.", 5000)

        # --- Materials script ---
        if materials_blend_path and Path(materials_blend_path).is_file():
            if materials_script_path.is_file():
                log.info("-" * 20 + " Running Materials Script " + "-" * 20)
                self.status_message.emit(f"Running Blender materials script on {Path(materials_blend_path).name}...", 0)
                # The nodegroup blend path is forwarded as an extra script argument.
                success_mat = self._run_blender_script_subprocess(
                    blender_exe_path=blender_exe,
                    blend_file_path=materials_blend_path,
                    python_script_path=str(materials_script_path),
                    asset_root_dir=asset_output_root,
                    nodegroup_blend_file_path_arg=nodegroup_blend_path
                )
                if not success_mat:
                    log.error("Blender material script execution failed.")
                    self.status_message.emit("Blender material script failed.", 5000)
                else:
                    log.info("Blender material script finished successfully.")
                    self.status_message.emit("Blender material script finished.", 3000)
            else:
                log.error(f"Material script not found: {materials_script_path}")
                self.status_message.emit("Error: Material script not found.", 5000)
        elif materials_blend_path:  # Path provided but invalid
            log.warning(f"Materials blend path provided but invalid: {materials_blend_path}")
            self.status_message.emit("Warning: Invalid Materials .blend path.", 5000)

    def request_cancel(self):
        """
        Requests cancellation of the ongoing processing task.

        Note: true cancellation of running ProcessPoolExecutor tasks is
        complex. This primarily prevents processing further results and
        attempts to cancel pending/unstarted tasks.
        """
        if not self._is_running:
            log.warning("Cancel requested but no processing is running.")
            return

        if self._cancel_requested:
            log.warning("Cancellation already requested.")
            return

        log.info("Cancellation requested.")
        self.status_message.emit("Cancellation requested...", 3000)
        self._cancel_requested = True

        # Attempt to shut down the executor - this won't forcefully stop
        # running tasks. (`cancel_futures=True` is Python 3.9+; kept
        # compatible with older versions by cancelling manually below.)
        if self._executor:
            log.debug("Requesting executor shutdown...")
            self._executor.shutdown(wait=False)
            # Snapshot the dict: the worker thread may still be inserting
            # futures while we iterate (avoids "dict changed size" RuntimeError).
            for future in list(self._futures):
                if not future.running() and not future.done():
                    future.cancel()
            log.debug("Executor shutdown requested.")

    def _find_blender_executable(self) -> Optional[str]:
        """
        Finds the Blender executable path from config or system PATH.

        Returns:
            Resolved path string, or None if no executable could be located.
        """
        try:
            # Prefer an explicit path from the base configuration.
            if load_base_config:
                base_config = load_base_config()
                blender_exe_config = base_config.get('BLENDER_EXECUTABLE_PATH', None)
            else:
                blender_exe_config = None
                log.warning("load_base_config not available. Cannot read BLENDER_EXECUTABLE_PATH from config.")

            if blender_exe_config:
                p = Path(blender_exe_config)
                if p.is_file():
                    log.info(f"Using Blender executable from config: {p}")
                    return str(p.resolve())
                else:
                    log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying PATH.")
            else:
                log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying PATH.")

            # Fall back to whatever 'blender' resolves to on the system PATH.
            blender_exe = shutil.which("blender")
            if blender_exe:
                log.info(f"Found Blender executable in PATH: {blender_exe}")
                return blender_exe
            else:
                log.warning("Could not find 'blender' in system PATH.")
                return None
        except ConfigurationError as e:
            log.error(f"Error reading base configuration for Blender executable path: {e}")
            return None
        except Exception as e:
            log.error(f"Error checking Blender executable path: {e}")
            return None

    def _run_blender_script_subprocess(self, blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str, nodegroup_blend_file_path_arg: Optional[str] = None) -> bool:
        """
        Internal helper to run a single Blender script via subprocess.

        Args:
            blender_exe_path: Path to the Blender executable.
            blend_file_path: .blend file to open (in background mode).
            python_script_path: Script executed via Blender's --python flag.
            asset_root_dir: First script argument after the '--' separator.
            nodegroup_blend_file_path_arg: Optional extra script argument
                (used by the materials script to find the nodegroup .blend).

        Returns:
            True if Blender exited with return code 0, False otherwise.
        """
        command_base = [
            blender_exe_path,
            "--factory-startup",
            "-b",
            blend_file_path,
            "--log", "*",  # Enable full Blender logging for diagnostics
            "--python", python_script_path,
            "--",  # Everything after this is passed to the script's argv
            asset_root_dir,
        ]
        # Add nodegroup blend file path if provided (for create_materials script).
        if nodegroup_blend_file_path_arg:
            command = command_base + [nodegroup_blend_file_path_arg]
        else:
            command = command_base

        log.debug(f"Executing Blender command: {' '.join(map(str, command))}")
        try:
            # Ensure all parts of the command are strings for subprocess.
            str_command = [str(part) for part in command]
            result = subprocess.run(str_command, capture_output=True, text=True, check=False, encoding='utf-8')
            log.info(f"Blender script '{Path(python_script_path).name}' finished with exit code: {result.returncode}")
            if result.stdout:
                log.debug(f"Blender stdout:\n{result.stdout.strip()}")
            if result.stderr:
                # stderr with RC=0 is usually just Blender noise; demote to warning.
                if result.returncode != 0:
                    log.error(f"Blender stderr:\n{result.stderr.strip()}")
                else:
                    log.warning(f"Blender stderr (RC=0):\n{result.stderr.strip()}")
            return result.returncode == 0
        except FileNotFoundError:
            log.error(f"Blender executable not found at: {blender_exe_path}")
            return False
        except Exception as e:
            log.exception(f"Error running Blender script '{Path(python_script_path).name}': {e}")
            return False