Initial commit
This commit is contained in:
BIN
gui/__pycache__/main_window.cpython-310.pyc
Normal file
BIN
gui/__pycache__/main_window.cpython-310.pyc
Normal file
Binary file not shown.
BIN
gui/__pycache__/main_window.cpython-313.pyc
Normal file
BIN
gui/__pycache__/main_window.cpython-313.pyc
Normal file
Binary file not shown.
BIN
gui/__pycache__/prediction_handler.cpython-310.pyc
Normal file
BIN
gui/__pycache__/prediction_handler.cpython-310.pyc
Normal file
Binary file not shown.
BIN
gui/__pycache__/prediction_handler.cpython-313.pyc
Normal file
BIN
gui/__pycache__/prediction_handler.cpython-313.pyc
Normal file
Binary file not shown.
BIN
gui/__pycache__/preset_editor_dialog.cpython-310.pyc
Normal file
BIN
gui/__pycache__/preset_editor_dialog.cpython-310.pyc
Normal file
Binary file not shown.
BIN
gui/__pycache__/preset_editor_dialog.cpython-313.pyc
Normal file
BIN
gui/__pycache__/preset_editor_dialog.cpython-313.pyc
Normal file
Binary file not shown.
BIN
gui/__pycache__/preview_table_model.cpython-313.pyc
Normal file
BIN
gui/__pycache__/preview_table_model.cpython-313.pyc
Normal file
Binary file not shown.
BIN
gui/__pycache__/processing_handler.cpython-310.pyc
Normal file
BIN
gui/__pycache__/processing_handler.cpython-310.pyc
Normal file
Binary file not shown.
BIN
gui/__pycache__/processing_handler.cpython-313.pyc
Normal file
BIN
gui/__pycache__/processing_handler.cpython-313.pyc
Normal file
Binary file not shown.
1638
gui/main_window.py
Normal file
1638
gui/main_window.py
Normal file
File diff suppressed because it is too large
Load Diff
232
gui/prediction_handler.py
Normal file
232
gui/prediction_handler.py
Normal file
@@ -0,0 +1,232 @@
|
||||
# gui/prediction_handler.py
|
||||
import logging
|
||||
from pathlib import Path
|
||||
import time # For potential delays if needed
|
||||
import os # For cpu_count
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed # For parallel prediction
|
||||
|
||||
# --- PySide6 Imports ---
|
||||
from PySide6.QtCore import QObject, Signal, QThread # Import QThread
|
||||
|
||||
# --- Backend Imports ---
|
||||
# Adjust path to ensure modules can be found relative to this file's location
|
||||
import sys
|
||||
script_dir = Path(__file__).parent
|
||||
project_root = script_dir.parent
|
||||
if str(project_root) not in sys.path:
|
||||
sys.path.insert(0, str(project_root))
|
||||
|
||||
try:
|
||||
from configuration import Configuration, ConfigurationError
|
||||
from asset_processor import AssetProcessor, AssetProcessingError
|
||||
BACKEND_AVAILABLE = True
|
||||
except ImportError as e:
|
||||
print(f"ERROR (PredictionHandler): Failed to import backend modules: {e}")
|
||||
# Define placeholders if imports fail
|
||||
Configuration = None
|
||||
AssetProcessor = None
|
||||
ConfigurationError = Exception
|
||||
AssetProcessingError = Exception
|
||||
BACKEND_AVAILABLE = False
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
# Basic config if logger hasn't been set up elsewhere
|
||||
if not log.hasHandlers():
|
||||
logging.basicConfig(level=logging.INFO, format='%(levelname)s (PredictHandler): %(message)s')
|
||||
|
||||
|
||||
class PredictionHandler(QObject):
    """
    Runs filename predictions for a batch of input assets off the GUI thread.

    ``run_prediction`` is intended to execute inside a worker QThread; it fans
    per-asset work out to a ThreadPoolExecutor and reports progress and results
    back to the GUI exclusively via Qt signals.
    """
    # --- Signals ---
    # Emits a list of dictionaries, each representing a file row for the table.
    # Dict format: {'original_path': str, 'predicted_asset_name': str | None,
    #               'predicted_output_name': str | None, 'status': str,
    #               'details': str | None, 'source_asset': str}
    prediction_results_ready = Signal(list)
    # Emitted when all predictions for a batch are done (success or failure).
    prediction_finished = Signal()
    # Emitted for transient status updates: (message, timeout_ms; 0 = sticky).
    status_message = Signal(str, int)

    def __init__(self, parent=None):
        """Initialise the handler in the idle (not running) state."""
        super().__init__(parent)
        self._is_running = False
        # No explicit cancel needed for prediction for now; it should be fast per-item.

    @property
    def is_running(self):
        """True while a prediction batch is in progress."""
        return self._is_running

    def _predict_single_asset(self, input_path_str: str, config: Configuration) -> list[dict]:
        """
        Predict output files for a single asset. Runs inside the ThreadPoolExecutor.

        Returns a list of per-file prediction dictionaries for the asset. All
        failures are converted into a single asset-level 'Error' entry instead
        of raising, so one bad asset cannot abort the whole batch.
        """
        input_path = Path(input_path_str)
        source_asset_name = input_path.name  # Identifies this asset in the results
        asset_results: list[dict] = []

        def _error_entry(details: str) -> dict:
            # Uniform row shape for asset-level failures (same keys as success rows).
            return {
                'original_path': source_asset_name,  # Use asset name as placeholder
                'predicted_asset_name': None,
                'predicted_output_name': None,
                'status': 'Error',
                'details': details,
                'source_asset': source_asset_name,
            }

        try:
            # AssetProcessor manages its own temp dirs, so a fresh instance per
            # worker thread is safe. The output path is a dummy: prediction
            # never writes output.
            processor = AssetProcessor(input_path, config, Path("."))  # Dummy output path

            detailed_predictions = processor.get_detailed_file_predictions()

            if detailed_predictions is None:
                # None signals a critical failure for the whole asset.
                log.error(f"Detailed prediction failed critically for {input_path_str}. Adding asset-level error.")
                asset_results.append(_error_entry('Critical prediction failure (check logs)'))
            else:
                log.debug(f"Received {len(detailed_predictions)} detailed predictions for {input_path_str}.")
                for prediction_dict in detailed_predictions:
                    # Normalise: guarantee every expected key exists, even if None,
                    # and tag each row with its source asset.
                    asset_results.append({
                        'original_path': prediction_dict.get('original_path', '[Missing Path]'),
                        'predicted_asset_name': prediction_dict.get('predicted_asset_name'),
                        'predicted_output_name': prediction_dict.get('predicted_output_name'),
                        'status': prediction_dict.get('status', 'Error'),
                        'details': prediction_dict.get('details', '[Missing Details]'),
                        'source_asset': source_asset_name,
                    })

        except AssetProcessingError as e:
            # Errors during processor instantiation or prediction setup.
            log.error(f"Asset processing error during prediction setup for {input_path_str}: {e}")
            asset_results.append(_error_entry(f'Asset Error: {e}'))
        except Exception as e:
            # Anything unexpected: log with traceback, degrade to an error row.
            log.exception(f"Unexpected error during prediction for {input_path_str}: {e}")
            asset_results.append(_error_entry(f'Unexpected Error: {e}'))

        return asset_results

    def run_prediction(self, input_paths: list[str], preset_name: str):
        """
        Run the prediction logic for ``input_paths`` with preset ``preset_name``.

        Intended to run in a separate QThread. Emits ``status_message`` for
        progress, ``prediction_results_ready`` with the combined per-file
        result list, and ``prediction_finished`` when the batch ends.
        """
        if self._is_running:
            log.warning("Prediction is already running.")
            return
        if not BACKEND_AVAILABLE:
            log.error("Backend modules not available. Cannot run prediction.")
            self.status_message.emit("Error: Backend components missing.", 5000)
            self.prediction_finished.emit()
            return
        if not preset_name:
            log.warning("No preset selected for prediction.")
            self.status_message.emit("No preset selected.", 3000)
            self.prediction_finished.emit()
            return

        self._is_running = True
        thread_id = QThread.currentThread()  # For correlating log lines per thread
        log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered PredictionHandler.run_prediction. Starting run for {len(input_paths)} items, Preset='{preset_name}'")
        self.status_message.emit(f"Updating preview for {len(input_paths)} items...", 0)

        # Load the configuration once; it is shared read-only by all workers.
        try:
            config = Configuration(preset_name)
        except ConfigurationError as e:
            log.error(f"Failed to load configuration for preset '{preset_name}': {e}")
            self.status_message.emit(f"Error loading preset '{preset_name}': {e}", 5000)
            self.prediction_finished.emit()
            self._is_running = False
            return
        except Exception as e:
            log.exception(f"Unexpected error loading configuration for preset '{preset_name}': {e}")
            self.status_message.emit(f"Unexpected error loading preset '{preset_name}'.", 5000)
            self.prediction_finished.emit()
            # BUGFIX: this branch previously returned without clearing the
            # running flag, leaving the handler permanently stuck "running".
            self._is_running = False
            return

        all_file_results = []  # Accumulated rows across all assets
        # Use half the cores, minimum 1, capped at 8: prediction is light work.
        max_workers = min(max(1, (os.cpu_count() or 1) // 2), 8)
        log.info(f"Using ThreadPoolExecutor with max_workers={max_workers} for prediction.")

        try:
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                # One task per input path; results are collected as they finish.
                futures = [executor.submit(self._predict_single_asset, input_path_str, config)
                           for input_path_str in input_paths]

                for future in as_completed(futures):
                    try:
                        asset_result_list = future.result()  # List of dicts for one asset
                        if asset_result_list:
                            all_file_results.extend(asset_result_list)
                    except Exception as exc:
                        # Raised inside a worker and not converted to an error
                        # row by _predict_single_asset; the failing path is not
                        # recoverable here, so report a generic error row.
                        log.error(f'Prediction task generated an exception: {exc}', exc_info=True)
                        all_file_results.append({
                            'original_path': '[Unknown Asset - Executor Error]',
                            'predicted_asset_name': None,
                            'predicted_output_name': None,
                            'status': 'Error',
                            'details': f'Executor Error: {exc}',
                            'source_asset': '[Unknown]'
                        })
        except Exception as pool_exc:
            # Pool construction / teardown failure: surface one system-level row.
            log.exception(f"An error occurred with the prediction ThreadPoolExecutor: {pool_exc}")
            self.status_message.emit(f"Error during prediction setup: {pool_exc}", 5000)
            all_file_results.append({
                'original_path': '[Prediction Pool Error]',
                'predicted_asset_name': None,
                'predicted_output_name': None,
                'status': 'Error',
                'details': f'Pool Error: {pool_exc}',
                'source_asset': '[System]'
            })

        # Emit the combined list of detailed file results at the end.
        log.info(f"[{time.time():.4f}][T:{thread_id}] Parallel prediction run finished. Preparing to emit {len(all_file_results)} file results.")
        log.debug(f"[{time.time():.4f}][T:{thread_id}] Type of all_file_results before emit: {type(all_file_results)}")
        try:
            log.debug(f"[{time.time():.4f}][T:{thread_id}] Content of all_file_results (first 5) before emit: {all_file_results[:5]}")
        except Exception as e:
            log.error(f"[{time.time():.4f}][T:{thread_id}] Error logging all_file_results content: {e}")
        log.info(f"[{time.time():.4f}][T:{thread_id}] Emitting prediction_results_ready signal...")
        self.prediction_results_ready.emit(all_file_results)
        log.info(f"[{time.time():.4f}][T:{thread_id}] Emitted prediction_results_ready signal.")
        self.status_message.emit("Preview update complete.", 3000)
        self.prediction_finished.emit()
        self._is_running = False
        log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting PredictionHandler.run_prediction.")
469
gui/preview_table_model.py
Normal file
469
gui/preview_table_model.py
Normal file
# gui/preview_table_model.py

import logging
import time  # timestamps embedded in log messages

from PySide6.QtCore import (
    QAbstractTableModel,
    QModelIndex,
    QSortFilterProxyModel,
    Qt,
    QThread,
)
from PySide6.QtGui import QColor

log = logging.getLogger(__name__)

# Alternating row backgrounds, one shade per consecutive asset group.
COLOR_ASSET_GROUP_1 = QColor("#292929")  # Dark grey 1
COLOR_ASSET_GROUP_2 = QColor("#343434")  # Dark grey 2

# Text colors per status are defined on PreviewTableModel.STATUS_COLORS.
class PreviewTableModel(QAbstractTableModel):
    """
    Custom table model for the GUI preview table.

    Holds detailed file prediction results (structured into one display row
    per file, grouped by source asset) or, in simple mode, just the list of
    unique source asset paths.
    """
    # Text colors per status; None means "use the default palette color".
    STATUS_COLORS = {
        "Mapped": QColor("#9dd9db"),
        "Ignored": QColor("#c1753d"),
        "Extra": QColor("#cfdca4"),
        "Unrecognised": QColor("#92371f"),
        "Model": QColor("#a4b8dc"),
        "Unmatched Extra": QColor("#777777"),
        "Error": QColor(Qt.GlobalColor.red),
        "[No Status]": None  # Use default color for no status
    }

    # Column indices (Detailed Mode)
    COL_STATUS = 0
    COL_PREDICTED_ASSET = 1
    COL_ORIGINAL_PATH = 2
    COL_PREDICTED_OUTPUT = 3  # Kept for internal data access, but hidden in view
    COL_DETAILS = 4
    COL_ADDITIONAL_FILES = 5  # Column for ignored/extra files

    # Internal data roles consumed by the sort/filter proxy.
    ROLE_RAW_STATUS = Qt.ItemDataRole.UserRole + 1
    ROLE_SOURCE_ASSET = Qt.ItemDataRole.UserRole + 2

    # Column index for Simple Mode.
    COL_SIMPLE_PATH = 0

    def __init__(self, data=None, parent=None):
        """
        Create the model, optionally seeding it with a list of per-file
        prediction dictionaries (see set_data for the expected shape).
        """
        super().__init__(parent)
        log.debug("PreviewTableModel initialized.")
        # Flat input rows, e.g.:
        # {'original_path': ..., 'predicted_asset_name': ..., 'predicted_output_name': ...,
        #  'status': ..., 'details': ..., 'source_asset': ...}
        self._data = []               # Original flat data, kept for reference only
        self._table_rows = []         # Structured rows actually displayed (detailed mode)
        self._simple_data = []        # Unique source asset paths (simple mode)
        self._simple_mode = False     # Toggles between detailed and simple view
        self._headers_detailed = ["Status", "Predicted Asset", "Original Path", "Predicted Output", "Details", "Additional Files"]
        self._sorted_unique_assets = []  # Sorted unique asset names, drives group coloring
        self._headers_simple = ["Input Path"]
        self.set_data(data or [])  # Builds _table_rows and _simple_data

    def set_simple_mode(self, enabled: bool):
        """Toggle the model between detailed and simple view modes."""
        thread_id = QThread.currentThread()
        log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered PreviewTableModel.set_simple_mode(enabled={enabled}). Current mode: {self._simple_mode}")
        if self._simple_mode != enabled:
            log.info(f"[{time.time():.4f}][T:{thread_id}] Calling beginResetModel()...")
            self.beginResetModel()
            log.info(f"[{time.time():.4f}][T:{thread_id}] Returned from beginResetModel(). Setting mode.")
            self._simple_mode = enabled
            log.info(f"[{time.time():.4f}][T:{thread_id}] Mode changed to: {self._simple_mode}. Calling endResetModel()...")
            self.endResetModel()
            log.info(f"[{time.time():.4f}][T:{thread_id}] Returned from endResetModel().")
        else:
            log.info(f"[{time.time():.4f}][T:{thread_id}] PreviewTableModel mode is already as requested. No change.")
        log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting PreviewTableModel.set_simple_mode.")

    def rowCount(self, parent=QModelIndex()):
        """Return the number of rows for the current view mode."""
        if parent.isValid():
            return 0  # Flat model: no children
        return len(self._simple_data) if self._simple_mode else len(self._table_rows)

    def columnCount(self, parent=QModelIndex()):
        """Return the number of columns for the current view mode."""
        if parent.isValid():
            return 0  # Flat model: no children
        return len(self._headers_simple) if self._simple_mode else len(self._headers_detailed)

    @staticmethod
    def _simplified_status_text(raw_status, details):
        """
        Return the user-facing text for the Status column, collapsing verbose
        raw statuses into short labels where a pattern in `details` allows it.
        """
        if raw_status == "Unmatched Extra":
            if details and details.startswith("[Unmatched Extra (Regex match:"):
                try:
                    pattern = details.split("match: '")[1].split("'")[0]
                    return f"[Extra={pattern}]"
                except IndexError:
                    return "Extra"  # Fallback if parsing fails
            return "Extra"
        if raw_status == "Ignored" and details and "Superseed by 16bit variant for" in details:
            try:
                filename = details.split("Superseed by 16bit variant for ")[1]
                # BUGFIX: the parsed filename was previously discarded and the
                # label always read "(unknown)"; show the actual file instead.
                return f"Superseeded by 16bit ({filename})"
            except IndexError:
                return raw_status  # Fallback if parsing fails
        return raw_status  # No simplification applies

    def data(self, index: QModelIndex, role: int = Qt.ItemDataRole.DisplayRole):
        """Return the data for a given index and role in the current mode."""
        if not index.isValid():
            return None

        row = index.row()
        col = index.column()

        # --- Simple Mode: one column of source asset paths ---
        if self._simple_mode:
            if row >= len(self._simple_data):
                return None  # Bounds check
            source_asset_path = self._simple_data[row]
            if role == Qt.ItemDataRole.DisplayRole:
                if col == self.COL_SIMPLE_PATH:
                    return source_asset_path
            elif role == Qt.ItemDataRole.ToolTipRole:
                if col == self.COL_SIMPLE_PATH:
                    return f"Input Asset: {source_asset_path}"
            return None

        # --- Detailed Mode ---
        if row >= len(self._table_rows):
            return None  # Bounds check
        row_data = self._table_rows[row]
        main_file = row_data.get('main_file')  # None on additional-only rows

        # Custom internal roles consumed by the sort/filter proxy.
        if role == self.ROLE_RAW_STATUS:
            return main_file.get('status', '[No Status]') if main_file else '[Additional]'
        if role == self.ROLE_SOURCE_ASSET:
            return row_data.get('source_asset', 'N/A')

        if role == Qt.ItemDataRole.DisplayRole:
            if col == self.COL_STATUS:
                if not main_file:
                    return ""  # Empty for additional-only rows
                return self._simplified_status_text(
                    main_file.get('status', '[No Status]'),
                    main_file.get('details', ''))
            if col == self.COL_PREDICTED_ASSET:
                return main_file.get('predicted_asset_name', 'N/A') if main_file else ""
            if col == self.COL_ORIGINAL_PATH:
                return main_file.get('original_path', '[Missing Path]') if main_file else ""
            if col == self.COL_PREDICTED_OUTPUT:
                return main_file.get('predicted_output_name', '') if main_file else ""
            if col == self.COL_DETAILS:
                return main_file.get('details', '') if main_file else ""
            if col == self.COL_ADDITIONAL_FILES:
                return row_data.get('additional_file_path', '')
            return None  # Should not happen with defined columns

        if role == Qt.ItemDataRole.ToolTipRole:
            if col == self.COL_ORIGINAL_PATH:
                if main_file:
                    source_asset = row_data.get('source_asset', 'N/A')
                    original_path = main_file.get('original_path', '[Missing Path]')
                    return f"Source Asset: {source_asset}\nFull Path: {original_path}"
                return ""  # No tooltip for empty cells
            if col == self.COL_STATUS:
                return main_file.get('details', main_file.get('status', '[No Status]')) if main_file else ""
            if col == self.COL_PREDICTED_ASSET:
                if main_file:
                    predicted_asset_name = main_file.get('predicted_asset_name', 'None')
                    return f"Predicted Asset Name: {predicted_asset_name}"
                return ""
            if col == self.COL_PREDICTED_OUTPUT:
                if main_file:
                    predicted_output_name = main_file.get('predicted_output_name', 'None')
                    return f"Predicted Output Name: {predicted_output_name}"
                return ""
            if col == self.COL_DETAILS:
                return main_file.get('details', '') if main_file else ""
            if col == self.COL_ADDITIONAL_FILES:
                additional_file = row_data.get('additional_file_details')
                if additional_file:
                    status = additional_file.get('status', '[No Status]')
                    details = additional_file.get('details', '')
                    return f"Status: {status}\nDetails: {details}"
                return ""  # No tooltip if no additional file in this cell
            return None

        if role == Qt.ItemDataRole.ForegroundRole:
            # Color text by the status of whichever file the column refers to.
            status = None
            if col in (self.COL_STATUS, self.COL_PREDICTED_ASSET, self.COL_ORIGINAL_PATH,
                       self.COL_PREDICTED_OUTPUT, self.COL_DETAILS):
                if main_file:
                    status = main_file.get('status', '[No Status]')
            elif col == self.COL_ADDITIONAL_FILES:
                additional_file = row_data.get('additional_file_details')
                if additional_file:
                    status = additional_file.get('status', '[No Status]')
            # .get returns None for unknown statuses -> default text color.
            return self.STATUS_COLORS.get(status)

        if role == Qt.ItemDataRole.BackgroundRole:
            # Alternate background shade per asset group.
            source_asset = row_data.get('source_asset')
            if source_asset and source_asset in self._sorted_unique_assets:
                try:
                    asset_index = self._sorted_unique_assets.index(source_asset)
                    return COLOR_ASSET_GROUP_1 if asset_index % 2 == 0 else COLOR_ASSET_GROUP_2
                except ValueError:
                    # Should not happen (membership checked above); be defensive.
                    log.warning(f"Asset '{source_asset}' not found in _sorted_unique_assets.")
                    return None
            return None  # Default background for rows without a source asset

        if role == Qt.ItemDataRole.TextAlignmentRole:
            # NOTE: the original contained a second, unreachable copy of this
            # handler; the duplicate has been removed.
            if col == self.COL_ORIGINAL_PATH:
                return int(Qt.AlignmentFlag.AlignLeft | Qt.AlignmentFlag.AlignVCenter)
            if col == self.COL_ADDITIONAL_FILES:
                return int(Qt.AlignmentFlag.AlignRight | Qt.AlignmentFlag.AlignVCenter)
            return None  # Default alignment for other columns

        return None

    def headerData(self, section: int, orientation: Qt.Orientation, role: int = Qt.ItemDataRole.DisplayRole):
        """Return the horizontal header label for the current view mode."""
        if role == Qt.ItemDataRole.DisplayRole and orientation == Qt.Orientation.Horizontal:
            headers = self._headers_simple if self._simple_mode else self._headers_detailed
            if 0 <= section < len(headers):
                return headers[section]
        return None

    def set_data(self, data: list):
        """
        Replace the model's contents.

        `data` is a flat list of per-file prediction dicts. Files are grouped
        by 'source_asset'; within each group, main files (status Mapped /
        Model / Error) and additional files (everything else) are paired up
        side by side, one display row per pair.
        """
        thread_id = QThread.currentThread()
        log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered PreviewTableModel.set_data. Received {len(data)} items.")
        log.info(f"[{time.time():.4f}][T:{thread_id}] Calling beginResetModel()...")
        self.beginResetModel()
        log.info(f"[{time.time():.4f}][T:{thread_id}] Returned from beginResetModel(). Processing data...")
        self._data = data or []   # Keep original flat data for reference
        self._table_rows = []     # Clear previous structured rows

        # Group files by source asset.
        grouped_data = {}
        unique_sources = set()
        if data and isinstance(data[0], dict):  # Ensure data is in detailed format
            for file_details in data:
                source_asset = file_details.get('source_asset')
                if source_asset:
                    if source_asset not in grouped_data:
                        grouped_data[source_asset] = {'main_files': [], 'additional_files': []}
                        unique_sources.add(source_asset)

                    status = file_details.get('status')
                    # Main files drive the left-hand columns; everything else
                    # (Ignored, Extra, Unrecognised, Unmatched Extra) goes to
                    # the Additional Files column.
                    if status in ["Mapped", "Model", "Error"]:
                        grouped_data[source_asset]['main_files'].append(file_details)
                    else:
                        grouped_data[source_asset]['additional_files'].append(file_details)

            # Deterministic ordering within each group.
            for asset_data in grouped_data.values():
                asset_data['main_files'].sort(key=lambda x: x.get('original_path', ''))
                asset_data['additional_files'].sort(key=lambda x: x.get('original_path', ''))

        # Build the display rows, assets in alphabetical order. Each row pairs
        # the i-th main file with the i-th additional file (either may be absent).
        sorted_assets = sorted(unique_sources)
        for asset_name in sorted_assets:
            asset_data = grouped_data[asset_name]
            main_files = asset_data['main_files']
            additional_files = asset_data['additional_files']
            max_rows = max(len(main_files), len(additional_files))

            for i in range(max_rows):
                main_file = main_files[i] if i < len(main_files) else None
                additional_file = additional_files[i] if i < len(additional_files) else None

                self._table_rows.append({
                    'source_asset': asset_name,
                    'main_file': main_file,  # Full dict for easy access
                    'additional_file_path': additional_file.get('original_path', '') if additional_file else '',
                    'additional_file_details': additional_file,  # Full dict for tooltip
                    'is_main_row': main_file is not None
                })

        # Sorted unique assets drive both simple mode and group coloring.
        # (Reuse the list sorted above instead of sorting a second time.)
        self._sorted_unique_assets = sorted_assets
        self._simple_data = self._sorted_unique_assets

        log.info(f"[{time.time():.4f}][T:{thread_id}] Structured data built: {len(self._table_rows)} rows.")
        log.info(f"[{time.time():.4f}][T:{thread_id}] Simple data extracted: {len(self._simple_data)} unique sources.")
        log.info(f"[{time.time():.4f}][T:{thread_id}] Calling endResetModel()...")
        self.endResetModel()
        log.info(f"[{time.time():.4f}][T:{thread_id}] Returned from endResetModel().")
        log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting PreviewTableModel.set_data.")

    def clear_data(self):
        """Clear the model's data (both detailed and simple views)."""
        thread_id = QThread.currentThread()
        log.info(f"[{time.time():.4f}][T:{thread_id}] PreviewTableModel.clear_data called.")
        self.set_data([])
|
||||
|
||||
class PreviewSortFilterProxyModel(QSortFilterProxyModel):
    """
    Proxy model providing multi-level sorting for the preview table.

    Detailed mode orders rows by:
      1. Source Asset (ascending; empty/'N/A' assets first)
      2. Row type (main-file rows before additional-only rows)
      3. File path within the row type (ascending; empty paths first)

    Simple mode orders rows alphabetically by the single path column.
    """

    # Intended status priority for sorting (lower sorts first; Mapped and
    # Model share a priority).
    # NOTE(review): this table is not currently consulted by lessThan(),
    # which orders by asset / row type / path instead — confirm whether
    # status-priority sorting is still wanted.
    STATUS_PRIORITY = {
        "Error": 0,
        "Mapped": 1,
        "Model": 1,
        "Ignored": 2,
        "Extra": 3,
        "Unrecognised": 3,  # Treat as Extra
        "Unmatched Extra": 3,  # Treat as Extra
        "[No Status]": 99  # Lowest priority
    }

    def __init__(self, parent=None):
        super().__init__(parent)
        log.debug("PreviewSortFilterProxyModel initialized.")
        # Sort on the raw status role and run an initial sort pass; the
        # effective ordering is determined entirely by lessThan() below.
        self.setSortRole(PreviewTableModel.ROLE_RAW_STATUS)
        self.sort(PreviewTableModel.COL_STATUS, Qt.SortOrder.AscendingOrder)

    def lessThan(self, left: QModelIndex, right: QModelIndex):
        """Compare two source-model rows according to the multi-level order."""
        src = self.sourceModel()
        if not src:
            # No source model attached yet — defer to the default comparison.
            return super().lessThan(left, right)

        # --- Simple mode: a flat list of paths, empty values first ---
        if isinstance(src, PreviewTableModel) and src._simple_mode:
            lhs = src.data(left.siblingAtColumn(src.COL_SIMPLE_PATH), Qt.ItemDataRole.DisplayRole)
            rhs = src.data(right.siblingAtColumn(src.COL_SIMPLE_PATH), Qt.ItemDataRole.DisplayRole)
            if not lhs:
                return True
            if not rhs:
                return False
            return lhs < rhs

        # --- Detailed mode: compare via the full row dictionaries ---
        lhs_row = src._table_rows[left.row()]
        rhs_row = src._table_rows[right.row()]

        # Level 1: source asset, alphabetical; empty/'N/A' assets sort first.
        lhs_asset = lhs_row.get('source_asset', 'N/A')
        rhs_asset = rhs_row.get('source_asset', 'N/A')
        if lhs_asset != rhs_asset:
            if not lhs_asset or lhs_asset == 'N/A':
                return True
            if not rhs_asset or rhs_asset == 'N/A':
                return False
            return lhs_asset < rhs_asset

        # Level 2: main-file rows ahead of additional-only rows.
        lhs_main = lhs_row.get('is_main_row', False)
        rhs_main = rhs_row.get('is_main_row', False)
        if lhs_main != rhs_main:
            return lhs_main > rhs_main  # True sorts before False

        # Level 3: alphabetical path comparison within the shared row type;
        # empty paths sort first in both cases.
        if lhs_main:
            lhs_path = lhs_row.get('main_file', {}).get('original_path', '')
            rhs_path = rhs_row.get('main_file', {}).get('original_path', '')
        else:
            lhs_path = lhs_row.get('additional_file_path', '')
            rhs_path = rhs_row.get('additional_file_path', '')
        if not lhs_path:
            return True
        if not rhs_path:
            return False
        return lhs_path < rhs_path

    def sort(self, column: int, order: Qt.SortOrder = Qt.SortOrder.AscendingOrder):
        """Trigger a sort pass; the requested column/order are ignored.

        The multi-level ordering lives entirely in lessThan(), so every sort
        request is normalized to the status column, ascending — calling this
        is still required to make the proxy re-run its sorting machinery.
        """
        log.debug(f"ProxyModel.sort called with column {column}, order {order}. Triggering lessThan.")
        super().sort(PreviewTableModel.COL_STATUS, Qt.SortOrder.AscendingOrder)
345
gui/processing_handler.py
Normal file
345
gui/processing_handler.py
Normal file
@@ -0,0 +1,345 @@
|
||||
# gui/processing_handler.py
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
import time # For potential delays if needed
|
||||
|
||||
import subprocess # <<< ADDED IMPORT
|
||||
import shutil # <<< ADDED IMPORT
|
||||
from typing import Optional # <<< ADDED IMPORT
|
||||
|
||||
# --- PySide6 Imports ---
|
||||
# Inherit from QObject to support signals/slots for thread communication
|
||||
from PySide6.QtCore import QObject, Signal
|
||||
|
||||
# --- Backend Imports ---
|
||||
# Need to import the worker function and potentially config/processor if needed directly
|
||||
# Adjust path to ensure modules can be found relative to this file's location
|
||||
import sys
|
||||
script_dir = Path(__file__).parent
|
||||
project_root = script_dir.parent
|
||||
if str(project_root) not in sys.path:
|
||||
sys.path.insert(0, str(project_root))
|
||||
|
||||
try:
|
||||
# Import the worker function from main.py
|
||||
from main import process_single_asset_wrapper
|
||||
# Import exceptions if needed for type hinting or specific handling
|
||||
from configuration import ConfigurationError
|
||||
from asset_processor import AssetProcessingError
|
||||
import config as core_config # <<< ADDED IMPORT
|
||||
BACKEND_AVAILABLE = True
|
||||
except ImportError as e:
|
||||
print(f"ERROR (ProcessingHandler): Failed to import backend modules/worker: {e}")
|
||||
# Define placeholders if imports fail, so the GUI doesn't crash immediately
|
||||
process_single_asset_wrapper = None
|
||||
ConfigurationError = Exception
|
||||
AssetProcessingError = Exception
|
||||
BACKEND_AVAILABLE = False
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
# Basic config if logger hasn't been set up elsewhere
|
||||
if not log.hasHandlers():
|
||||
logging.basicConfig(level=logging.INFO, format='%(levelname)s (Handler): %(message)s')
|
||||
|
||||
|
||||
class ProcessingHandler(QObject):
    """
    Handles the execution of the asset processing pipeline in a way that
    can be run in a separate thread and communicate progress via signals.

    A run fans assets out to a ProcessPoolExecutor, relays per-file and
    overall progress through Qt signals, optionally runs Blender
    post-processing scripts, and supports best-effort cancellation.
    """
    # --- Signals ---
    # Emitted for overall progress bar updates.
    progress_updated = Signal(int, int)  # current_count, total_count
    # Emitted to update the status of an individual file in the list.
    file_status_updated = Signal(str, str, str)  # input_path_str, status ("processing", "processed", "skipped", "failed"), message
    # Emitted once when the entire batch processing is finished.
    processing_finished = Signal(int, int, int)  # processed_count, skipped_count, failed_count
    # Emitted for general status messages to the status bar.
    status_message = Signal(str, int)  # message, timeout_ms

    def __init__(self, parent=None):
        super().__init__(parent)
        self._executor = None        # Active ProcessPoolExecutor while a run is in progress
        self._futures = {}           # future -> input_path mapping for the current run
        self._is_running = False     # Guards against overlapping runs
        self._cancel_requested = False

    @property
    def is_running(self):
        """True while a processing run is in progress."""
        return self._is_running

    def run_processing(self, input_paths: list[str], preset_name: str, output_dir_str: str, overwrite: bool, num_workers: int,
                       run_blender: bool, nodegroup_blend_path: str, materials_blend_path: str, verbose: bool):
        """
        Start the asset processing task and optionally run Blender scripts afterwards.

        This method should be called when the handler is moved to a separate
        thread; all feedback is delivered via this object's signals.

        Args:
            input_paths: Asset paths to process.
            preset_name: Name of the preset passed to the worker.
            output_dir_str: Output root directory (also used by Blender scripts).
            overwrite: Whether existing outputs may be overwritten.
            num_workers: Worker-process count for the pool.
            run_blender: If True, run the Blender scripts after processing.
            nodegroup_blend_path: .blend file for the nodegroup script ('' to skip).
            materials_blend_path: .blend file for the materials script ('' to skip).
            verbose: Forwarded to the worker for verbose logging.
        """
        if self._is_running:
            log.warning("Processing is already running.")
            self.status_message.emit("Processing already in progress.", 3000)
            return

        if not BACKEND_AVAILABLE or not process_single_asset_wrapper:
            log.error("Backend modules or worker function not available. Cannot start processing.")
            self.status_message.emit("Error: Backend components missing. Cannot process.", 5000)
            # Without a backend every input counts as failed.
            self.processing_finished.emit(0, 0, len(input_paths))
            return

        self._is_running = True
        self._cancel_requested = False
        self._futures = {}  # Reset futures
        total_files = len(input_paths)
        processed_count = 0
        skipped_count = 0
        failed_count = 0
        completed_count = 0

        log.info(f"Starting processing run: {total_files} assets, Preset='{preset_name}', Workers={num_workers}, Overwrite={overwrite}")
        self.status_message.emit(f"Starting processing for {total_files} items...", 0)  # Persistent message

        try:
            # 'with' guarantees pool cleanup even on error/cancel.
            with ProcessPoolExecutor(max_workers=num_workers) as executor:
                self._executor = executor  # Store for cancellation

                # --- Submit tasks ---
                for input_path in input_paths:
                    if self._cancel_requested:
                        break  # Stop submitting once cancellation is requested
                    log.debug(f"Submitting task for: {input_path}")
                    future = executor.submit(process_single_asset_wrapper, input_path, preset_name, output_dir_str, overwrite, verbose=verbose)
                    self._futures[future] = input_path  # Map future back to input path
                    self.file_status_updated.emit(input_path, "processing", "")

                if self._cancel_requested:
                    log.info("Processing cancelled during task submission.")
                    # Tasks never submitted count as failed/cancelled.
                    failed_count = total_files - len(self._futures)

                # --- Collect results as they complete ---
                for future in as_completed(self._futures):
                    completed_count += 1
                    input_path = self._futures[future]
                    asset_name = Path(input_path).name
                    status = "failed"  # Default status
                    error_message = "Unknown error"

                    if self._cancel_requested:
                        # Don't call future.result() when cancelled — it may
                        # raise CancelledError; just record the cancellation.
                        status = "failed"
                        error_message = "Cancelled"
                        failed_count += 1
                    else:
                        try:
                            # Worker returns (input_path_str, status_string, error_message_or_None).
                            result_tuple = future.result()
                            _, status, error_message = result_tuple
                            error_message = error_message or ""  # Ensure it's a string

                            if status == "processed":
                                processed_count += 1
                            elif status == "skipped":
                                skipped_count += 1
                            elif status == "failed":
                                failed_count += 1
                            else:
                                log.warning(f"Unknown status '{status}' received for {asset_name}. Counting as failed.")
                                failed_count += 1
                                error_message = f"Unknown status: {status}"
                        except Exception as e:
                            # The worker process itself crashed (hard failure).
                            log.exception(f"Critical worker failure for {asset_name}: {e}")
                            failed_count += 1
                            status = "failed"
                            error_message = f"Worker process crashed: {e}"

                    self.progress_updated.emit(completed_count, total_files)
                    self.file_status_updated.emit(input_path, status, error_message)

                    if self._cancel_requested:
                        log.info("Cancellation detected after processing a result.")
                        # BUGFIX: only submitted futures can still be pending.
                        # Using total_files here double-counted tasks that were
                        # never submitted (already added to failed_count above),
                        # letting failed_count exceed total_files.
                        remaining_futures = len(self._futures) - completed_count
                        failed_count += remaining_futures
                        break  # Exit the as_completed loop

        except Exception as pool_exc:
            log.exception(f"An error occurred with the process pool: {pool_exc}")
            self.status_message.emit(f"Error during processing: {pool_exc}", 5000)
            # Mark everything not yet accounted for as failed.
            failed_count = total_files - processed_count - skipped_count

        finally:
            # --- Blender Script Execution (Optional) ---
            if run_blender and not self._cancel_requested:
                self._run_blender_phase(output_dir_str, nodegroup_blend_path, materials_blend_path)
            elif self._cancel_requested:
                log.info("Processing was cancelled. Skipping Blender script execution.")

            final_message = f"Finished. Processed: {processed_count}, Skipped: {skipped_count}, Failed: {failed_count}"
            log.info(final_message)
            self.status_message.emit(final_message, 5000)  # Show final summary
            self.processing_finished.emit(processed_count, skipped_count, failed_count)
            self._is_running = False
            self._executor = None
            self._futures = {}  # Clear futures

    def _run_blender_phase(self, output_dir_str: str, nodegroup_blend_path: str, materials_blend_path: str):
        """Run the optional Blender post-processing scripts.

        Locates the Blender executable, then runs the nodegroup script and
        the materials script (each only when its .blend path is provided and
        valid). All outcomes are reported via log + status_message.
        """
        log.info("Asset processing complete. Checking for Blender script execution.")
        self.status_message.emit("Asset processing complete. Starting Blender scripts...", 0)
        blender_exe = self._find_blender_executable()
        if not blender_exe:
            log.warning("Blender executable not found. Skipping Blender script execution.")
            self.status_message.emit("Warning: Blender executable not found. Skipping scripts.", 5000)
            return

        script_dir = Path(__file__).parent.parent / "blenderscripts"  # Go up one level from gui/
        nodegroup_script_path = script_dir / "create_nodegroups.py"
        materials_script_path = script_dir / "create_materials.py"
        asset_output_root = output_dir_str  # Blender scripts operate on the same output dir

        # --- Nodegroup script ---
        if nodegroup_blend_path and Path(nodegroup_blend_path).is_file():
            if nodegroup_script_path.is_file():
                log.info("-" * 20 + " Running Nodegroup Script " + "-" * 20)
                self.status_message.emit(f"Running Blender nodegroup script on {Path(nodegroup_blend_path).name}...", 0)
                success_ng = self._run_blender_script_subprocess(
                    blender_exe_path=blender_exe,
                    blend_file_path=nodegroup_blend_path,
                    python_script_path=str(nodegroup_script_path),
                    asset_root_dir=asset_output_root
                )
                if not success_ng:
                    log.error("Blender node group script execution failed.")
                    self.status_message.emit("Blender nodegroup script failed.", 5000)
                else:
                    log.info("Blender nodegroup script finished successfully.")
                    self.status_message.emit("Blender nodegroup script finished.", 3000)
            else:
                log.error(f"Node group script not found: {nodegroup_script_path}")
                self.status_message.emit(f"Error: Nodegroup script not found.", 5000)
        elif nodegroup_blend_path:  # Path was provided but is not a file
            log.warning(f"Nodegroup blend path provided but invalid: {nodegroup_blend_path}")
            self.status_message.emit(f"Warning: Invalid Nodegroup .blend path.", 5000)

        # --- Materials script (runs regardless of the nodegroup outcome) ---
        if materials_blend_path and Path(materials_blend_path).is_file():
            if materials_script_path.is_file():
                log.info("-" * 20 + " Running Materials Script " + "-" * 20)
                self.status_message.emit(f"Running Blender materials script on {Path(materials_blend_path).name}...", 0)
                # The nodegroup blend path is forwarded as an extra argument
                # for the materials script.
                success_mat = self._run_blender_script_subprocess(
                    blender_exe_path=blender_exe,
                    blend_file_path=materials_blend_path,
                    python_script_path=str(materials_script_path),
                    asset_root_dir=asset_output_root,
                    nodegroup_blend_file_path_arg=nodegroup_blend_path
                )
                if not success_mat:
                    log.error("Blender material script execution failed.")
                    self.status_message.emit("Blender material script failed.", 5000)
                else:
                    log.info("Blender material script finished successfully.")
                    self.status_message.emit("Blender material script finished.", 3000)
            else:
                log.error(f"Material script not found: {materials_script_path}")
                self.status_message.emit(f"Error: Material script not found.", 5000)
        elif materials_blend_path:  # Path was provided but is not a file
            log.warning(f"Materials blend path provided but invalid: {materials_blend_path}")
            self.status_message.emit(f"Warning: Invalid Materials .blend path.", 5000)

    def request_cancel(self):
        """Requests cancellation of the ongoing processing task.

        Best-effort: unstarted tasks are cancelled and no further results are
        processed, but tasks already running in worker processes cannot be
        forcefully stopped.
        """
        if not self._is_running:
            log.warning("Cancel requested but no processing is running.")
            return

        if self._cancel_requested:
            log.warning("Cancellation already requested.")
            return

        log.info("Cancellation requested.")
        self.status_message.emit("Cancellation requested...", 3000)
        self._cancel_requested = True

        if self._executor:
            log.debug("Requesting executor shutdown...")
            # cancel_futures (Python 3.9+) cancels all pending, unstarted
            # tasks in one call; this file already requires 3.9+ (list[str]
            # annotations), so the former manual per-future cancel loop is
            # unnecessary.
            self._executor.shutdown(wait=False, cancel_futures=True)
            log.debug("Executor shutdown requested.")

    def _find_blender_executable(self) -> Optional[str]:
        """Finds the Blender executable path from config or system PATH.

        Returns:
            Resolved path string, or None if no executable could be found.
        """
        try:
            # Prefer an explicit path from the core config, if set and valid.
            blender_exe_config = getattr(core_config, 'BLENDER_EXECUTABLE_PATH', None)
            if blender_exe_config:
                p = Path(blender_exe_config)
                if p.is_file():
                    log.info(f"Using Blender executable from config: {p}")
                    return str(p.resolve())
                else:
                    log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying PATH.")
            else:
                log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying PATH.")

            # Fall back to whatever 'blender' resolves to on the system PATH.
            blender_exe = shutil.which("blender")
            if blender_exe:
                log.info(f"Found Blender executable in PATH: {blender_exe}")
                return blender_exe
            else:
                log.warning("Could not find 'blender' in system PATH.")
                return None
        except Exception as e:
            log.error(f"Error checking Blender executable path: {e}")
            return None

    def _run_blender_script_subprocess(self, blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str, nodegroup_blend_file_path_arg: Optional[str] = None) -> bool:
        """Internal helper to run a single Blender script via subprocess.

        Args:
            blender_exe_path: Path to the Blender executable.
            blend_file_path: .blend file to open in background mode.
            python_script_path: Python script Blender should execute.
            asset_root_dir: Passed to the script after the '--' separator.
            nodegroup_blend_file_path_arg: Optional extra argument (used by
                the materials script to locate the nodegroup .blend).

        Returns:
            True if Blender exited with return code 0, False otherwise.
        """
        command_base = [
            blender_exe_path,
            "--factory-startup",
            "-b",
            blend_file_path,
            "--log", "*",  # Enable full Blender logging
            "--python", python_script_path,
            "--",  # Everything after this is passed to the Python script
            asset_root_dir,
        ]
        # Add nodegroup blend file path if provided (for create_materials script)
        if nodegroup_blend_file_path_arg:
            command = command_base + [nodegroup_blend_file_path_arg]
        else:
            command = command_base
        log.debug(f"Executing Blender command: {' '.join(map(str, command))}")  # Ensure all parts are strings for join
        try:
            # subprocess requires every argv element to be a string.
            str_command = [str(part) for part in command]
            result = subprocess.run(str_command, capture_output=True, text=True, check=False, encoding='utf-8')
            log.info(f"Blender script '{Path(python_script_path).name}' finished with exit code: {result.returncode}")
            if result.stdout:
                log.debug(f"Blender stdout:\n{result.stdout.strip()}")
            if result.stderr:
                # stderr with a zero return code is usually just warnings.
                if result.returncode != 0:
                    log.error(f"Blender stderr:\n{result.stderr.strip()}")
                else:
                    log.warning(f"Blender stderr (RC=0):\n{result.stderr.strip()}")
            return result.returncode == 0
        except FileNotFoundError:
            log.error(f"Blender executable not found at: {blender_exe_path}")
            return False
        except Exception as e:
            log.exception(f"Error running Blender script '{Path(python_script_path).name}': {e}")
            return False
Reference in New Issue
Block a user