Rusfort 6971b8189f Data Flow Overhaul
Known regressions in current commit:
- No "extra" files
- GLOSS map does not look correct
- "override" flag is not respected
2025-05-01 09:13:20 +02:00

1173 lines
56 KiB
Python

# main.py
import argparse
import sys
import time
import os
import logging
from pathlib import Path
from concurrent.futures import ProcessPoolExecutor, as_completed
import platform # To potentially adjust worker count defaults
import subprocess
import shutil
import tempfile # Added for temporary workspace
import zipfile # Added for zip extraction
from typing import List, Dict, Tuple, Optional
# --- Qt Imports for Application Structure ---
from PySide6.QtCore import QObject, Slot, QThreadPool, QRunnable, Signal # Added for App structure and threading
from PySide6.QtCore import Qt # Added for ConnectionType
from PySide6.QtWidgets import QApplication # Added for App structure
# --- Backend Imports ---
try:
    from configuration import Configuration, ConfigurationError
    # NOTE: the old AssetProcessor/asset_processor module was removed and
    # replaced by ProcessingEngine.
    from processing_engine import ProcessingEngine
    from rule_structure import SourceRule  # Imported for type hinting
    import config as core_config_module
    from gui.main_window import MainWindow  # Import MainWindow
except ImportError as e:
    # Give the user an actionable message instead of a bare traceback.
    # The file list below must match the modules actually imported above
    # (the previous message still referenced the removed asset_processor.py).
    script_dir = Path(__file__).parent.resolve()
    print(f"ERROR: Failed to import necessary classes: {e}")
    print(f"Ensure 'configuration.py', 'processing_engine.py', 'rule_structure.py' and 'config.py' exist in the directory:")
    print(f" {script_dir}")
    print("Or that the directory is included in your PYTHONPATH.")
    sys.exit(1)
# --- Setup Logging ---
# Keep setup_logging as is, it's called by main() or potentially monitor.py
def setup_logging(verbose: bool) -> None:
    """Configure the root logger for console output.

    Args:
        verbose: When True, log at DEBUG level; otherwise INFO.
    """
    chosen_level = logging.DEBUG if verbose else logging.INFO
    # Strip any handlers left over from a previous call so that re-running
    # setup in the same session does not duplicate every message.
    root = logging.getLogger()
    for stale_handler in list(root.handlers):
        root.removeHandler(stale_handler)
    logging.basicConfig(
        level=chosen_level,
        format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        handlers=[
            logging.StreamHandler(sys.stdout)  # Console output
            # Optional: add a FileHandler here for persistent logs.
        ],
    )
    # Announce the effective level through this module's own logger.
    logging.getLogger(__name__).info(
        f"Logging level set to: {logging.getLevelName(chosen_level)}"
    )
    # Suppress overly verbose third-party loggers here if needed, e.g.:
    # logging.getLogger('cv2').setLevel(logging.WARNING)
# Module-level logger used by the functions and classes below; reconfigured
# by setup_logging() when it is called.
log = logging.getLogger(__name__)
# --- Argument Parser Setup ---
# Keep setup_arg_parser as is, it's only used when running main.py directly
def setup_arg_parser():
    """Build and return the command-line argument parser for CLI mode."""
    # Default the worker pool to half the detected cores, but never below one.
    default_workers = 1
    try:
        detected_cores = os.cpu_count()
        if detected_cores:
            default_workers = max(1, detected_cores // 2)
    except NotImplementedError:
        log.warning("Could not detect CPU count, defaulting workers to 1.")

    parser = argparse.ArgumentParser(
        description="Process asset files (ZIPs or folders) into a standardized library format using presets.",
        # Show each option's default value in --help output.
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Positional inputs are optional here so the GUI can be launched with no args.
    parser.add_argument(
        "input_paths", metavar="INPUT_PATH", type=str, nargs='*', default=[],
        help="Path(s) to the input ZIP file(s) or folder(s) containing assets (Required for CLI mode).",
    )
    parser.add_argument(
        "-p", "--preset", type=str, required=False, default=None,
        help="Name of the configuration preset (Required for CLI mode).",
    )
    parser.add_argument(
        "-o", "--output-dir", type=str, required=False, default=None,
        help="Override the default base output directory defined in config.py.",
    )
    parser.add_argument(
        "-w", "--workers", type=int, default=default_workers,
        help="Maximum number of assets to process concurrently in parallel processes.",
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="Enable detailed DEBUG level logging for troubleshooting.",
    )
    parser.add_argument(
        "--overwrite", action="store_true",
        help="Force reprocessing and overwrite existing output asset folders if they exist.",
    )
    parser.add_argument(
        "--nodegroup-blend", type=str, default=None,
        help="Path to the .blend file for creating/updating node groups. Overrides config.py default.",
    )
    parser.add_argument(
        "--materials-blend", type=str, default=None,
        help="Path to the .blend file for creating/updating materials. Overrides config.py default.",
    )
    parser.add_argument(
        "--gui", action="store_true",
        help="Force launch in GUI mode, ignoring other arguments.",
    )
    # Potential future flags:
    # parser.add_argument("--log-file", type=str, default=None, help="Path to save log output to a file.")
    return parser
# --- Worker Runnable for Thread Pool ---
class TaskSignals(QObject):
    """QObject subclass holding the signals emitted by ProcessingTask.

    ProcessingTask is a QRunnable, so its signals live on this separate
    QObject instance (see ProcessingTask.__init__).
    """
    # Emitted once per task: (rule_input_path, status, result-or-error payload).
    finished = Signal(str, str, object)
    # A separate error signal was considered but folded into `finished`
    # via the status/payload arguments.
class ProcessingTask(QRunnable):
    """Wraps a call to processing_engine.process for execution in a thread pool.

    Each task prepares an isolated temporary workspace from the rule's input
    (directory copy or zip extraction), invokes the engine against it, and
    always emits ``signals.finished`` and removes the workspace afterwards.
    """
    def __init__(self, engine: ProcessingEngine, rule: SourceRule, workspace_path: Path, output_base_path: Path):
        """Store the engine, rule, and paths needed by run().

        Args:
            engine: The shared ProcessingEngine instance to invoke.
            rule: The SourceRule describing the input to process.
            workspace_path: Workspace path stored on the task.
                NOTE(review): run() builds its own temp workspace and never
                reads this attribute — confirm whether it is still needed.
            output_base_path: Base directory passed to the engine as the
                output root.
        """
        super().__init__()
        self.engine = engine
        self.rule = rule
        self.workspace_path = workspace_path
        self.output_base_path = output_base_path
        # Signals must live on a QObject, so they are held on a companion
        # TaskSignals instance rather than on this QRunnable.
        self.signals = TaskSignals()

    @Slot()  # Decorator required for QRunnable's run method
    def run(self) -> None:
        """Prepares input files and executes the engine's process method.

        Emits ``signals.finished(input_path, status, result_or_error)`` exactly
        once from the ``finally`` block, for success and failure alike, then
        deletes the temporary workspace.
        """
        log.info(f"Worker Thread: Starting processing for rule: {self.rule.input_path}")
        log.debug(f"DEBUG: Rule passed to ProcessingTask.run: {self.rule}")
        status = "failed"  # Default; replaced by "processed" or a failure status below
        result_or_error = None
        # Initialized outside the try so the finally block can always check it.
        temp_workspace_dir = None
        try:
            # --- 1. Prepare Input Workspace ---
            original_input_path = Path(self.rule.input_path)
            prepared_workspace_path = None
            if not original_input_path.exists():
                raise FileNotFoundError(f"Original input path does not exist: {original_input_path}")
            # Every task gets its own fresh temp directory to process into.
            temp_workspace_dir = tempfile.mkdtemp(prefix="asset_proc_")
            prepared_workspace_path = Path(temp_workspace_dir)
            log.info(f"Created temporary workspace: {prepared_workspace_path}")
            # Materialize the input into the workspace: copy a directory,
            # or extract a .zip archive.
            if original_input_path.is_dir():
                log.info(f"Input is a directory, copying contents to workspace: {original_input_path}")
                # dirs_exist_ok allows copying into the already-created temp dir.
                shutil.copytree(original_input_path, prepared_workspace_path, dirs_exist_ok=True)
            elif original_input_path.is_file() and original_input_path.suffix.lower() == '.zip':
                log.info(f"Input is a zip file, extracting to workspace: {original_input_path}")
                with zipfile.ZipFile(original_input_path, 'r') as zip_ref:
                    zip_ref.extractall(prepared_workspace_path)
            else:
                # Anything that is neither a directory nor a .zip is rejected.
                raise ValueError(f"Unsupported input type: {original_input_path}. Must be a directory or .zip file.")
            # --- DEBUG: List files in prepared workspace ---
            try:
                log.debug(f"Listing contents of prepared workspace: {prepared_workspace_path}")
                for item in prepared_workspace_path.rglob('*'):  # Recursively list all items
                    log.debug(f" Found item: {item.relative_to(prepared_workspace_path)}")
            except Exception as list_err:
                # Listing is diagnostics only; never let it abort processing.
                log.error(f"Error listing prepared workspace contents: {list_err}")
            # --- END DEBUG ---
            # --- 2. Execute Processing Engine ---
            log.info(f"Calling ProcessingEngine.process with rule for input: {self.rule.input_path}, prepared workspace: {prepared_workspace_path}, output: {self.output_base_path}")
            log.debug(f" Rule Details: {self.rule}")
            # The rule is positional; the temp workspace (not self.workspace_path)
            # and the output base are passed by keyword.
            result_or_error = self.engine.process(
                self.rule,
                workspace_path=prepared_workspace_path,
                output_base_path=self.output_base_path
            )
            status = "processed"  # No exception raised means success
            log.info(f"Worker Thread: Finished processing for rule: {self.rule.input_path}, Status: {status}")
        except (FileNotFoundError, ValueError, zipfile.BadZipFile, OSError) as prep_error:
            # NOTE(review): the try spans both preparation and engine.process,
            # so an OSError raised by the engine would also land here and be
            # reported as "failed_preparation" — confirm this is intended.
            log.exception(f"Worker Thread: Error preparing workspace for rule {self.rule.input_path}: {prep_error}")
            status = "failed_preparation"
            result_or_error = str(prep_error)
        except Exception as proc_error:
            log.exception(f"Worker Thread: Error during engine processing for rule {self.rule.input_path}: {proc_error}")
            status = "failed_processing"
            result_or_error = str(proc_error)
        finally:
            # Emit the finished signal on every path, success or failure,
            # so the listener can always account for this task.
            try:
                self.signals.finished.emit(str(self.rule.input_path), status, result_or_error)
                log.debug(f"Worker Thread: Emitted finished signal for {self.rule.input_path} with status {status}")
            except Exception as sig_err:
                log.error(f"Worker Thread: Error emitting finished signal for {self.rule.input_path}: {sig_err}")
            # --- 3. Cleanup Workspace ---
            # Remove the temp directory even when processing failed; a cleanup
            # failure is logged but never raised.
            if temp_workspace_dir and Path(temp_workspace_dir).exists():
                try:
                    log.info(f"Cleaning up temporary workspace: {temp_workspace_dir}")
                    shutil.rmtree(temp_workspace_dir)
                except OSError as cleanup_error:
                    log.error(f"Worker Thread: Failed to cleanup temporary workspace {temp_workspace_dir}: {cleanup_error}")
# --- CLI Worker Function (COMMENTED OUT - Replaced by GUI Flow) ---
# def process_single_asset_wrapper(input_path_str: str, preset_name: str, output_dir_str: str, overwrite: bool, verbose: bool, rules) -> Tuple[str, str, Optional[str]]:
# """
# Wrapper function for processing a single input path (which might contain multiple assets)
# in a separate process. Handles instantiation of Configuration and AssetProcessor,
# passes the overwrite flag, catches errors, and interprets the multi-asset status dictionary.
#
# Ensures logging is configured for the worker process.
#
# Returns:
# Tuple[str, str, Optional[str]]:
# - input_path_str: The original input path processed.
# - overall_status_string: A single status string summarizing the outcome
# ("processed", "skipped", "failed", "partial_success").
# - error_message_or_None: An error message if failures occurred, potentially
# listing failed assets.
# """
# # Explicitly configure logging for this worker process
# worker_log = logging.getLogger(f"Worker_{os.getpid()}") # Log with worker PID
# if not logging.root.handlers:
# logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s')
# worker_log.setLevel(logging.DEBUG if verbose else logging.INFO)
# if verbose:
# logging.root.setLevel(logging.DEBUG)
#
# input_path_obj = Path(input_path_str)
# input_name = input_path_obj.name
#
# try:
# worker_log.info(f"Starting processing attempt for input: {input_name}")
# config = Configuration(preset_name)
# output_base_path = Path(output_dir_str)
#
# processor = AssetProcessor(input_path_obj, config, output_base_path, overwrite=overwrite)
# # processor.process() now returns a Dict[str, List[str]]
# status_dict = processor.process(rules=rules)
#
# # --- Interpret the status dictionary ---
# processed_assets = status_dict.get("processed", [])
# skipped_assets = status_dict.get("skipped", [])
# failed_assets = status_dict.get("failed", [])
#
# overall_status_string = "failed" # Default
# error_message = None
#
# if failed_assets:
# overall_status_string = "failed"
# error_message = f"Failed assets within {input_name}: {', '.join(failed_assets)}"
# worker_log.error(error_message) # Log the failure details
# elif processed_assets:
# overall_status_string = "processed"
# # Check for partial success (mix of processed/skipped and failed should be caught above)
# if skipped_assets:
# worker_log.info(f"Input '{input_name}' processed with some assets skipped. Processed: {processed_assets}, Skipped: {skipped_assets}")
# else:
# worker_log.info(f"Input '{input_name}' processed successfully. Assets: {processed_assets}")
# elif skipped_assets:
# overall_status_string = "skipped"
# worker_log.info(f"Input '{input_name}' skipped (all contained assets already exist). Assets: {skipped_assets}")
# else:
# # Should not happen if input contained files, but handle as failure.
# worker_log.warning(f"Input '{input_name}' resulted in no processed, skipped, or failed assets. Reporting as failed.")
# overall_status_string = "failed"
# error_message = f"No assets processed, skipped, or failed within {input_name}."
#
#
# return (input_path_str, overall_status_string, error_message)
#
# except (ConfigurationError, AssetProcessingError) as e:
# # Catch errors during processor setup or the process() call itself if it raises before returning dict
# worker_log.error(f"Processing failed for input '{input_name}': {type(e).__name__}: {e}")
# return (input_path_str, "failed", f"{type(e).__name__}: {e}")
# except Exception as e:
# # Catch any other unexpected errors
# worker_log.exception(f"Unexpected worker failure processing input '{input_name}': {e}")
# return (input_path_str, "failed", f"Unexpected Worker Error: {e}")
# --- Core CLI Processing Function (COMMENTED OUT - Replaced by GUI Flow) ---
# def run_processing(
# valid_inputs: List[str],
# preset_name: str,
# output_dir_for_processor: str,
# overwrite: bool,
# num_workers: int,
# verbose: bool # Add verbose parameter here
# ) -> Dict:
# """
# Executes the core asset processing logic using a process pool.
#
# Args:
# valid_inputs: List of validated input file/directory paths (strings).
# preset_name: Name of the preset to use.
# output_dir_for_processor: Absolute path string for the output base directory.
# overwrite: Boolean flag to force reprocessing.
# num_workers: Maximum number of worker processes.
# verbose: Boolean flag for verbose logging.
#
# Returns:
# A dictionary containing processing results:
# {
# "processed": int,
# "skipped": int,
# "failed": int,
# "results_list": List[Tuple[str, str, Optional[str]]] # (input_path, status, error_msg)
# }
# """
# log.info(f"Processing {len(valid_inputs)} asset(s) using preset '{preset_name}' with up to {num_workers} worker(s)...")
# results_list = []
# successful_processed_count = 0
# skipped_count = 0
# failed_count = 0
#
# # Ensure at least one worker
# num_workers = max(1, num_workers)
#
# # Using ProcessPoolExecutor is generally good if AssetProcessor tasks are CPU-bound.
# # If tasks are mostly I/O bound, ThreadPoolExecutor might be sufficient.
# # Important: Ensure Configuration and AssetProcessor are "pickleable".
# try:
# with ProcessPoolExecutor(max_workers=num_workers) as executor:
# # Create futures
# futures = {}
# log.debug(f"Submitting {len(valid_inputs)} tasks...")
# # Removed the 1-second delay for potentially faster submission in non-CLI use
# for i, input_path in enumerate(valid_inputs):
# log.debug(f"Submitting task {i+1}/{len(valid_inputs)} for: {Path(input_path).name}")
# future = executor.submit(
# process_single_asset_wrapper,
# input_path,
# preset_name,
# output_dir_for_processor,
# overwrite,
# verbose # Pass the verbose flag
# )
# futures[future] = input_path # Store future -> input_path mapping
#
# # Process completed futures
# for i, future in enumerate(as_completed(futures), 1):
# input_path = futures[future]
# asset_name = Path(input_path).name
# log.info(f"--- [{i}/{len(valid_inputs)}] Worker finished attempt for: {asset_name} ---")
# try:
# # Get result tuple: (input_path_str, status_string, error_message_or_None)
# result_tuple = future.result()
# results_list.append(result_tuple)
# input_path_res, status, err_msg = result_tuple
#
# # Increment counters based on status
# if status == "processed":
# successful_processed_count += 1
# elif status == "skipped":
# skipped_count += 1
# elif status == "failed":
# failed_count += 1
# else: # Should not happen, but log as warning/failure
# log.warning(f"Unknown status '{status}' received for {asset_name}. Counting as failed.")
# failed_count += 1
#
# except Exception as e:
# # Catch errors if the future itself fails (e.g., worker process crashed hard)
# log.exception(f"Critical worker failure for {asset_name}: {e}")
# results_list.append((input_path, "failed", f"Worker process crashed: {e}"))
# failed_count += 1 # Count crashes as failures
#
# except Exception as pool_exc:
# log.exception(f"An error occurred with the process pool: {pool_exc}")
# # Re-raise or handle as appropriate for the calling context (monitor.py)
# # For now, log and return current counts
# return {
# "processed": successful_processed_count,
# "skipped": skipped_count,
# "failed": failed_count + (len(valid_inputs) - len(results_list)), # Count unprocessed as failed
# "results_list": results_list,
# "pool_error": str(pool_exc) # Add pool error info
# }
#
# return {
# "processed": successful_processed_count,
# "skipped": skipped_count,
# "failed": failed_count,
# "results_list": results_list
# }
# --- Blender Script Execution Helper (COMMENTED OUT - Part of CLI Flow) ---
# def run_blender_script(blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str):
# """
# Executes a Python script within Blender in the background.
#
# Args:
# blender_exe_path: Path to the Blender executable.
# blend_file_path: Path to the .blend file to open.
# python_script_path: Path to the Python script to execute within Blender.
# asset_root_dir: Path to the processed asset library root directory (passed to the script).
#
# Returns:
# True if the script executed successfully (return code 0), False otherwise.
# """
# log.info(f"Attempting to run Blender script: {Path(python_script_path).name} on {Path(blend_file_path).name}")
#
# # Ensure paths are absolute strings for subprocess
# blender_exe_path = str(Path(blender_exe_path).resolve())
# blend_file_path = str(Path(blend_file_path).resolve())
# python_script_path = str(Path(python_script_path).resolve())
# asset_root_dir = str(Path(asset_root_dir).resolve())
#
# # Construct the command arguments
# # -b: Run in background (no UI)
# # -S: Save the file after running the script
# # --python: Execute the specified Python script
# # --: Separator, arguments after this are passed to the Python script's sys.argv
# command = [
# blender_exe_path,
# "-b", # Run in background
# blend_file_path,
# "--python", python_script_path,
# "--", # Pass subsequent arguments to the script
# asset_root_dir,
# "-S" # Save the blend file after script execution
# ]
#
# log.debug(f"Executing Blender command: {' '.join(command)}") # Log the command for debugging
#
# try:
# # Execute the command
# # capture_output=True captures stdout and stderr
# # text=True decodes stdout/stderr as text
# # check=False prevents raising CalledProcessError on non-zero exit codes
# result = subprocess.run(command, capture_output=True, text=True, check=False)
#
# # Log results
# log.info(f"Blender script '{Path(python_script_path).name}' finished with exit code: {result.returncode}")
# if result.stdout:
# log.debug(f"Blender stdout:\n{result.stdout.strip()}")
# if result.stderr:
# # Log stderr as warning or error depending on return code
# if result.returncode != 0:
# log.error(f"Blender stderr:\n{result.stderr.strip()}")
# else:
# log.warning(f"Blender stderr (Return Code 0):\n{result.stderr.strip()}") # Log stderr even on success as scripts might print warnings
#
# return result.returncode == 0
#
# except FileNotFoundError:
# log.error(f"Blender executable not found at: {blender_exe_path}")
# return False
# except Exception as e:
# log.exception(f"An unexpected error occurred while running Blender script '{Path(python_script_path).name}': {e}")
# return False
# --- Main CLI Execution (COMMENTED OUT - Replaced by GUI App Flow) ---
# def main():
# """Parses arguments, sets up logging, runs processing, and reports summary."""
# parser = setup_arg_parser()
# args = parser.parse_args()
#
# # Setup logging based on verbosity argument *before* logging status messages
# setup_logging(args.verbose)
#
# start_time = time.time()
# log.info("Asset Processor Script Started (CLI Mode)")
#
# # --- Validate Input Paths ---
# valid_inputs = []
# for p_str in args.input_paths:
# p = Path(p_str)
# if p.exists():
# suffix = p.suffix.lower()
# if p.is_dir() or (p.is_file() and suffix in ['.zip', '.rar', '.7z']):
# valid_inputs.append(p_str) # Store the original string path
# else:
# log.warning(f"Input is not a directory or a supported archive type (.zip, .rar, .7z), skipping: {p_str}")
# else:
# log.warning(f"Input path not found, skipping: {p_str}")
#
# if not valid_inputs:
# log.error("No valid input paths found. Exiting.")
# sys.exit(1) # Exit with error code
#
# # --- Determine Output Directory ---
# output_dir_str = args.output_dir # Get value from args (might be None)
# if not output_dir_str:
# log.debug("Output directory not specified via -o, reading default from config.py.")
# try:
# output_dir_str = getattr(core_config_module, 'OUTPUT_BASE_DIR', None)
# if not output_dir_str:
# log.error("Output directory not specified with -o and OUTPUT_BASE_DIR not found or empty in config.py. Exiting.")
# sys.exit(1)
# log.info(f"Using default output directory from config.py: {output_dir_str}")
# except Exception as e:
# log.error(f"Could not read OUTPUT_BASE_DIR from config.py: {e}")
# sys.exit(1)
#
# # --- Resolve Output Path (Handles Relative Paths Explicitly) ---
# output_path_obj: Path
# if os.path.isabs(output_dir_str):
# output_path_obj = Path(output_dir_str)
# log.info(f"Using absolute output directory: {output_path_obj}")
# else:
# # Path() interprets relative paths against CWD by default
# output_path_obj = Path(output_dir_str)
# log.info(f"Using relative output directory '{output_dir_str}'. Resolved against CWD to: {output_path_obj.resolve()}")
#
# # --- Validate and Setup Output Directory ---
# try:
# # Resolve to ensure we have an absolute path for consistency and creation
# resolved_output_dir = output_path_obj.resolve()
# log.info(f"Ensuring output directory exists: {resolved_output_dir}")
# resolved_output_dir.mkdir(parents=True, exist_ok=True)
# # Use the resolved absolute path string for the processor
# output_dir_for_processor = str(resolved_output_dir)
# except Exception as e:
# log.error(f"Cannot create or access output directory '{resolved_output_dir}': {e}", exc_info=True)
# sys.exit(1)
#
# # --- Check Preset Existence (Basic Check) ---
# preset_dir = Path(__file__).parent / "presets"
# preset_file = preset_dir / f"{args.preset}.json"
# if not preset_file.is_file():
# log.error(f"Preset file not found: {preset_file}")
# log.error(f"Ensure a file named '{args.preset}.json' exists in the directory: {preset_dir.resolve()}")
# sys.exit(1)
#
# # --- Execute Processing via the new function ---
# processing_results = run_processing(
# valid_inputs=valid_inputs,
# preset_name=args.preset,
# output_dir_for_processor=output_dir_for_processor,
# overwrite=args.overwrite,
# num_workers=args.workers,
# verbose=args.verbose # Pass the verbose flag
# )
#
# # --- Report Summary ---
# duration = time.time() - start_time
# successful_processed_count = processing_results["processed"]
# skipped_count = processing_results["skipped"]
# failed_count = processing_results["failed"]
# results_list = processing_results["results_list"]
#
# log.info("=" * 40)
# log.info("Processing Summary")
# log.info(f" Duration: {duration:.2f} seconds")
# log.info(f" Assets Attempted: {len(valid_inputs)}")
# log.info(f" Successfully Processed: {successful_processed_count}")
# log.info(f" Skipped (Already Existed): {skipped_count}")
# log.info(f" Failed: {failed_count}")
#
# if processing_results.get("pool_error"):
# log.error(f" Process Pool Error: {processing_results['pool_error']}")
# # Ensure failed count reflects pool error if it happened
# if failed_count == 0 and successful_processed_count == 0 and skipped_count == 0:
# failed_count = len(valid_inputs) # Assume all failed if pool died early
#
# exit_code = 0
# if failed_count > 0:
# log.warning("Failures occurred:")
# # Iterate through results to show specific errors for failed items
# for input_path, status, err_msg in results_list:
# if status == "failed":
# log.warning(f" - {Path(input_path).name}: {err_msg}")
# exit_code = 1 # Exit with error code if failures occurred
# else:
# # Consider skipped assets as a form of success for the overall run exit code
# if successful_processed_count > 0 or skipped_count > 0:
# log.info("All assets processed or skipped successfully.")
# exit_code = 0 # Exit code 0 indicates success (including skips)
# else:
# # This case might happen if all inputs were invalid initially
# log.warning("No assets were processed, skipped, or failed (check input validation logs).")
# exit_code = 0 # Still exit 0 as the script itself didn't crash
#
# # --- Blender Script Execution (Optional) ---
# run_nodegroups = False
# run_materials = False
# nodegroup_blend_path = None
# materials_blend_path = None
# blender_exe = None
#
# # 1. Find Blender Executable
# try:
# blender_exe_config = getattr(core_config_module, 'BLENDER_EXECUTABLE_PATH', None)
# if blender_exe_config:
# # Check if the path in config exists
# if Path(blender_exe_config).is_file():
# blender_exe = str(Path(blender_exe_config).resolve())
# log.info(f"Using Blender executable from config: {blender_exe}")
# else:
# # Try finding it in PATH if config path is invalid
# log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying to find 'blender' in PATH.")
# blender_exe = shutil.which("blender")
# if blender_exe:
# log.info(f"Found Blender executable in PATH: {blender_exe}")
# else:
# log.warning("Could not find 'blender' in system PATH.")
# else:
# # Try finding it in PATH if not set in config
# log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying to find 'blender' in PATH.")
# blender_exe = shutil.which("blender")
# if blender_exe:
# log.info(f"Found Blender executable in PATH: {blender_exe}")
# else:
# log.warning("Could not find 'blender' in system PATH.")
#
# if not blender_exe:
# log.warning("Blender executable not found or configured. Skipping Blender script execution.")
#
# except Exception as e:
# log.error(f"Error checking Blender executable path: {e}")
# blender_exe = None # Ensure it's None on error
#
# # 2. Determine Blend File Paths if Blender Exe is available
# if blender_exe:
# # Nodegroup Blend Path
# nodegroup_blend_arg = args.nodegroup_blend
# if nodegroup_blend_arg:
# p = Path(nodegroup_blend_arg)
# if p.is_file() and p.suffix.lower() == '.blend':
# nodegroup_blend_path = str(p.resolve())
# log.info(f"Using nodegroup blend file from argument: {nodegroup_blend_path}")
# else:
# log.warning(f"Invalid nodegroup blend file path from argument: '{nodegroup_blend_arg}'. Ignoring.")
# else:
# default_ng_path_str = getattr(core_config_module, 'DEFAULT_NODEGROUP_BLEND_PATH', None)
# if default_ng_path_str:
# p = Path(default_ng_path_str)
# if p.is_file() and p.suffix.lower() == '.blend':
# nodegroup_blend_path = str(p.resolve())
# log.info(f"Using default nodegroup blend file from config: {nodegroup_blend_path}")
# else:
# log.warning(f"Invalid default nodegroup blend file path in config: '{default_ng_path_str}'. Ignoring.")
#
# # Materials Blend Path
# materials_blend_arg = args.materials_blend
# if materials_blend_arg:
# p = Path(materials_blend_arg)
# if p.is_file() and p.suffix.lower() == '.blend':
# materials_blend_path = str(p.resolve())
# log.info(f"Using materials blend file from argument: {materials_blend_path}")
# else:
# log.warning(f"Invalid materials blend file path from argument: '{materials_blend_arg}'. Ignoring.")
# else:
# default_mat_path_str = getattr(core_config_module, 'DEFAULT_MATERIALS_BLEND_PATH', None)
# if default_mat_path_str:
# p = Path(default_mat_path_str)
# if p.is_file() and p.suffix.lower() == '.blend':
# materials_blend_path = str(p.resolve())
# log.info(f"Using default materials blend file from config: {materials_blend_path}")
# else:
# log.warning(f"Invalid default materials blend file path in config: '{default_mat_path_str}'. Ignoring.")
#
# # 3. Execute Scripts if Paths are Valid
# if blender_exe:
# script_dir = Path(__file__).parent / "blenderscripts"
# nodegroup_script_path = script_dir / "create_nodegroups.py"
# materials_script_path = script_dir / "create_materials.py"
# asset_output_root = output_dir_for_processor # Use the resolved output dir
#
# if nodegroup_blend_path:
# if nodegroup_script_path.is_file():
# log.info("-" * 40)
# log.info("Starting Blender Node Group Script Execution...")
# success_ng = run_blender_script(
# blender_exe_path=blender_exe,
# blend_file_path=nodegroup_blend_path,
# python_script_path=str(nodegroup_script_path),
# asset_root_dir=asset_output_root
# )
# if not success_ng:
# log.error("Blender node group script execution failed.")
# # Optionally change exit code if Blender script fails?
# # exit_code = 1
# log.info("Finished Blender Node Group Script Execution.")
# log.info("-" * 40)
# else:
# log.error(f"Node group script not found: {nodegroup_script_path}")
#
# if materials_blend_path:
# if materials_script_path.is_file():
# log.info("-" * 40)
# log.info("Starting Blender Material Script Execution...")
# success_mat = run_blender_script(
# blender_exe_path=blender_exe,
# blend_file_path=materials_blend_path,
# python_script_path=str(materials_script_path),
# asset_root_dir=asset_output_root
# )
# if not success_mat:
# log.error("Blender material script execution failed.")
# # Optionally change exit code if Blender script fails?
# # exit_code = 1
# log.info("Finished Blender Material Script Execution.")
# log.info("-" * 40)
# else:
# log.error(f"Material script not found: {materials_script_path}")
#
# # --- Final Exit ---
# log.info("Asset Processor Script Finished.")
# sys.exit(exit_code)
# --- Main Application Class (Integrates GUI and Engine) ---
class App(QObject):
# Signal emitted when all queued processing tasks are complete
all_tasks_finished = Signal(int, int, int) # processed_count, skipped_count, failed_count (Placeholder counts for now)
def __init__(self):
    """Initialize the application backend: config, engine, GUI, and thread pool.

    The three _init/_load helpers are called in dependency order — the
    engine requires the configuration, and the GUI requires the engine;
    each helper calls sys.exit(1) on fatal failure.
    """
    super().__init__()
    self.config_obj = None           # Set by _load_config()
    self.processing_engine = None    # Set by _init_engine()
    self.main_window = None          # Set by _init_gui()
    self.thread_pool = QThreadPool()
    self._active_tasks_count = 0     # Number of ProcessingTasks still running
    # Running tally of task outcomes for the current batch.
    self._task_results = {"processed": 0, "skipped": 0, "failed": 0}
    log.info(f"Maximum threads for pool: {self.thread_pool.maxThreadCount()}")
    self._load_config()
    self._init_engine()
    self._init_gui()
def _load_config(self):
    """Load an initial Configuration from the first available preset.

    The preset actually used for processing comes from the GUI later; the
    engine just needs a valid configuration object to start with. Exits the
    process with code 1 if no preset can be loaded.
    """
    try:
        preset_dir = Path(__file__).parent / "Presets"
        default_preset_name = None
        if preset_dir.is_dir():
            # Candidate presets: *.json files, skipping underscore-prefixed ones.
            candidates = sorted(
                entry.stem
                for entry in preset_dir.glob("*.json")
                if entry.is_file() and not entry.name.startswith('_')
            )
            if candidates:
                default_preset_name = candidates[0]
                log.info(f"Using first available preset as default for initial config: '{default_preset_name}'")
        if not default_preset_name:
            # Without any preset we cannot build a Configuration at all.
            log.error("No presets found in the 'Presets' directory. Cannot initialize default configuration.")
            raise ConfigurationError("No presets found to load default configuration.")
        self.config_obj = Configuration(preset_name=default_preset_name)
        log.info(f"Base configuration loaded using default preset '{default_preset_name}'.")
    except ConfigurationError as e:
        log.error(f"Fatal: Failed to load base configuration using default preset: {e}")
        # In a real app, show this error to the user before exiting.
        sys.exit(1)
    except Exception as e:
        log.exception(f"Fatal: Unexpected error loading configuration: {e}")
        sys.exit(1)
def _init_engine(self):
"""Initializes the ProcessingEngine."""
if self.config_obj:
try:
self.processing_engine = ProcessingEngine(self.config_obj)
log.info("ProcessingEngine initialized.")
except Exception as e:
log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}")
# Show error and exit
sys.exit(1)
else:
log.error("Fatal: Cannot initialize ProcessingEngine without configuration.")
sys.exit(1)
def _init_gui(self):
"""Initializes the MainWindow and connects signals."""
if self.processing_engine:
self.main_window = MainWindow() # MainWindow now part of the App
# Connect the signal from the GUI to the App's slot using QueuedConnection
connection_success = self.main_window.processing_requested.connect(self.on_processing_requested, Qt.ConnectionType.QueuedConnection)
log.info(f"DEBUG: Connection result for processing_requested (Queued): {connection_success}") # <-- Modified LOG
if not connection_success:
log.error("*********************************************************")
log.error("FATAL: Failed to connect MainWindow.processing_requested signal to App.on_processing_requested slot!")
log.error("*********************************************************")
# Connect the App's completion signal to the MainWindow's slot
self.all_tasks_finished.connect(self.main_window.on_processing_finished)
log.info("MainWindow initialized and signals connected.")
else:
log.error("Fatal: Cannot initialize MainWindow without ProcessingEngine.")
sys.exit(1)
@Slot(list) # Slot to receive List[SourceRule]
def on_processing_requested(self, source_rules: list):
# log.info("*********************************************************") # REMOVED
log.debug("DEBUG: App.on_processing_requested slot entered.") # DEBUG Verify Entry (Keep this one)
# log.info("*********************************************************") # REMOVED
"""Handles the processing request from the GUI."""
# --- Restore original logic ---
log.info(f"Received processing request for {len(source_rules)} rule sets.")
log.info(f"DEBUG: Rules received by on_processing_requested: {source_rules}") # DEBUG LOG
log.info(f"VERIFY: App.on_processing_requested received {len(source_rules)} rules.") # DEBUG Verify
for i, rule in enumerate(source_rules):
log.debug(f" VERIFY Rule {i}: Input='{rule.input_path}', Assets={len(rule.assets)}") # DEBUG Verify
if not self.processing_engine:
log.error("Processing engine not available. Cannot process request.")
# Update GUI status bar maybe?
self.main_window.statusBar().showMessage("Error: Processing Engine not ready.", 5000)
return
if not source_rules:
log.warning("Processing requested with an empty rule list.")
self.main_window.statusBar().showMessage("No rules to process.", 3000)
return
# Reset task counter and results for this batch
self._active_tasks_count = len(source_rules)
self._task_results = {"processed": 0, "skipped": 0, "failed": 0}
log.debug(f"Initialized active task count to: {self._active_tasks_count}")
# Update GUI progress bar/status
self.main_window.progress_bar.setMaximum(len(source_rules))
self.main_window.progress_bar.setValue(0)
self.main_window.progress_bar.setFormat(f"0/{len(source_rules)} tasks")
# --- Get paths needed for ProcessingTask ---
try:
output_base_path_str = self.main_window.output_path_edit.text().strip()
if not output_base_path_str:
log.error("Cannot queue tasks: Output directory path is empty in the GUI.")
self.main_window.statusBar().showMessage("Error: Output directory cannot be empty.", 5000)
return
output_base_path = Path(output_base_path_str)
# Basic validation - check if it's likely a valid path structure (doesn't guarantee existence/writability here)
if not output_base_path.is_absolute():
# Or attempt to resolve relative to workspace? For now, require absolute from GUI.
log.warning(f"Output path '{output_base_path}' is not absolute. Processing might fail if relative path is not handled correctly by engine.")
# Consider resolving: output_base_path = Path.cwd() / output_base_path # If relative paths are allowed
# Define workspace path (assuming main.py is in the project root)
workspace_path = Path(__file__).parent.resolve()
log.debug(f"Using Workspace Path: {workspace_path}")
log.debug(f"Using Output Base Path: {output_base_path}")
except Exception as e:
log.exception(f"Error getting/validating paths for processing task: {e}")
self.main_window.statusBar().showMessage(f"Error preparing paths: {e}", 5000)
return
# --- End Get paths ---
# Queue tasks in the thread pool
log.debug("DEBUG: Entering task queuing loop.") # <-- Keep this log
for i, rule in enumerate(source_rules): # Added enumerate for index logging
if isinstance(rule, SourceRule):
log.debug(f"DEBUG: Preparing to queue task {i+1}/{len(source_rules)} for rule: {rule.input_path}") # <-- Keep this log
# Pass the required paths to the ProcessingTask constructor
task = ProcessingTask(
engine=self.processing_engine,
rule=rule,
workspace_path=workspace_path,
output_base_path=output_base_path
)
# Connect the task's finished signal to the App's slot
task.signals.finished.connect(self._on_task_finished)
log.debug(f"DEBUG: Calling thread_pool.start() for task {i+1}") # <-- Keep this log
self.thread_pool.start(task)
log.debug(f"DEBUG: Returned from thread_pool.start() for task {i+1}") # <-- Keep this log
else:
log.warning(f"Skipping invalid item (index {i}) in rule list: {type(rule)}") # Added index
log.info(f"Queued {len(source_rules)} processing tasks (finished loop).") # Added context
# --- End Restore original logic ---
# GUI status already updated in MainWindow when signal was emitted
# --- Slot to handle completion of individual tasks ---
@Slot(str, str, object)
def _on_task_finished(self, rule_input_path, status, result_or_error):
"""Handles the 'finished' signal from a ProcessingTask."""
log.info(f"Task finished signal received for {rule_input_path}. Status: {status}")
self._active_tasks_count -= 1
log.debug(f"Active tasks remaining: {self._active_tasks_count}")
# Update overall results (basic counts for now)
if status == "processed":
self._task_results["processed"] += 1
elif status == "skipped": # Assuming engine might return 'skipped' status eventually
self._task_results["skipped"] += 1
else: # Count all other statuses (failed_preparation, failed_processing) as failed
self._task_results["failed"] += 1
# Update progress bar
total_tasks = self.main_window.progress_bar.maximum()
completed_tasks = total_tasks - self._active_tasks_count
self.main_window.update_progress_bar(completed_tasks, total_tasks) # Use MainWindow's method
# Update status for the specific file in the GUI (if needed)
# self.main_window.update_file_status(rule_input_path, status, str(result_or_error) if result_or_error else "")
if self._active_tasks_count == 0:
log.info("All processing tasks finished.")
# Emit the signal with the final counts
self.all_tasks_finished.emit(
self._task_results["processed"],
self._task_results["skipped"],
self._task_results["failed"]
)
elif self._active_tasks_count < 0:
log.error("Error: Active task count went below zero!") # Should not happen
def run(self):
"""Shows the main window."""
if self.main_window:
self.main_window.show()
log.info("Application started. Showing main window.")
else:
log.error("Cannot run application, MainWindow not initialized.")
# --- Main CLI Execution Function (Adapted from old main()) ---
def run_cli(args: argparse.Namespace) -> None:
    """Run the asset processor in CLI mode using pre-parsed arguments.

    Validates inputs and the preset, resolves the output directory (falling
    back to ``OUTPUT_BASE_DIR`` in config.py), loads the Configuration and
    ProcessingEngine, then attempts to process each input sequentially and
    prints a summary. Always terminates via sys.exit (0 on success, 1 on
    any validation failure or processing error).

    NOTE(review): the per-input processing body is placeholder scaffolding —
    SourceRule construction and the engine call are not implemented yet, so
    every run currently stops at the NotImplementedError below.

    Args:
        args: Namespace from setup_arg_parser(); must carry input_paths and
            preset for CLI mode (enforced here), plus output_dir/overwrite.
    """
    # parser = setup_arg_parser() # No longer needed
    # args = parser.parse_args() # Args are passed in
    # --- Validate required CLI arguments ---
    if not args.input_paths:
        log.error("CLI Error: Input path(s) are required for CLI mode.")
        sys.exit(1)
    if not args.preset:
        log.error("CLI Error: Preset (-p/--preset) is required for CLI mode.")
        sys.exit(1)
    # --- End Validation ---
    # Logging setup is already done outside this function in the __main__ block
    start_time = time.time()
    log.info("Asset Processor Script Started (CLI Mode)")
    # --- Validate Input Paths ---
    # Accept existing directories and .zip archives; everything else is
    # skipped with a warning rather than aborting the whole run.
    valid_inputs = []
    for p_str in args.input_paths:
        p = Path(p_str)
        if p.exists():
            suffix = p.suffix.lower()
            # TODO: Add support for other archive types if needed (.rar, .7z)
            if p.is_dir() or (p.is_file() and suffix == '.zip'):
                valid_inputs.append(p_str) # Store the original string path
            else:
                log.warning(f"Input is not a directory or a supported archive type (.zip), skipping: {p_str}")
        else:
            log.warning(f"Input path not found, skipping: {p_str}")
    if not valid_inputs:
        log.error("No valid input paths found. Exiting.")
        sys.exit(1) # Exit with error code
    # --- Determine Output Directory ---
    output_dir_str = args.output_dir # Get value from args (might be None)
    if not output_dir_str:
        # No -o given: fall back to the module-level default in config.py.
        log.debug("Output directory not specified via -o, reading default from config.py.")
        try:
            output_dir_str = getattr(core_config_module, 'OUTPUT_BASE_DIR', None)
            if not output_dir_str:
                log.error("Output directory not specified with -o and OUTPUT_BASE_DIR not found or empty in config.py. Exiting.")
                sys.exit(1)
            log.info(f"Using default output directory from config.py: {output_dir_str}")
        except Exception as e:
            log.error(f"Could not read OUTPUT_BASE_DIR from config.py: {e}")
            sys.exit(1)
    # --- Resolve Output Path ---
    output_path_obj = Path(output_dir_str).resolve() # Resolve to absolute path
    # --- Validate and Setup Output Directory ---
    try:
        log.info(f"Ensuring output directory exists: {output_path_obj}")
        output_path_obj.mkdir(parents=True, exist_ok=True)
        output_dir_for_processor = str(output_path_obj)
    except Exception as e:
        log.error(f"Cannot create or access output directory '{output_path_obj}': {e}", exc_info=True)
        sys.exit(1)
    # --- Load Configuration ---
    try:
        config = Configuration(args.preset) # Pass preset name from args
        log.info(f"Configuration loaded for preset: {args.preset}")
    except ConfigurationError as e:
        log.error(f"Error loading configuration for preset '{args.preset}': {e}")
        sys.exit(1)
    except Exception as e:
        log.exception(f"Unexpected error loading configuration: {e}")
        sys.exit(1)
    # --- Initialize Processing Engine ---
    try:
        engine = ProcessingEngine(config)
        log.info("ProcessingEngine initialized for CLI mode.")
    except Exception as e:
        log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}")
        sys.exit(1)
    # --- Execute Processing (Simplified Sequential for now) ---
    # TODO: Re-implement parallel processing using concurrent.futures if needed.
    # TODO: CLI mode needs a way to generate SourceRule objects.
    # For now, we'll pass a simplified structure or assume engine handles it.
    # This part likely needs significant adaptation based on ProcessingEngine.process requirements.
    log.warning("CLI processing currently uses simplified sequential execution.")
    log.warning("SourceRule generation for CLI mode is basic and may need refinement.")
    processed_count = 0
    skipped_count = 0 # Placeholder
    failed_count = 0
    results_list = [] # Placeholder: (input_path, status, message) tuples
    for input_path_str in valid_inputs:
        log.info(f"--- Processing Input: {Path(input_path_str).name} ---")
        try:
            # --- Basic SourceRule Creation (Needs Review/Adaptation) ---
            # This is a placeholder. The engine likely needs more detailed file info.
            # We might need to extract file list here like the GUI does.
            input_path_obj = Path(input_path_str)
            # Example: Create a rule assuming the input is a single asset
            # This won't handle multi-asset archives correctly without more logic.
            asset_name = input_path_obj.stem # Basic assumption
            # File list extraction would be needed here for proper FileRule creation
            # file_list = _extract_file_list(input_path_str) # Need to define/import this helper
            # file_rules = [FileRule(file_path=f) for f in file_list] if file_list else []
            # asset_rule = AssetRule(asset_name=asset_name, files=file_rules)
            # rule = SourceRule(input_path=input_path_str, assets=[asset_rule], supplier_identifier=config.supplier_name)
            # --- End Placeholder ---
            # --- TEMPORARY: Call engine process with just config and path ---
            # This assumes engine.process can handle this or needs adaptation.
            # If engine.process strictly requires a SourceRule, this will fail.
            # result = engine.process(config=config, input_path=input_path_obj, overwrite=args.overwrite)
            # --- END TEMPORARY ---
            # --- Attempt with Placeholder SourceRule (More likely signature) ---
            # This still requires file list extraction and rule creation logic
            log.error("CLI Processing Logic Incomplete: SourceRule creation and engine call need implementation.")
            # Example (requires file list extraction and rule building):
            # rule = build_basic_source_rule(input_path_str, config) # Hypothetical function
            # if rule:
            #     engine.process(rule) # Assuming process takes one rule
            #     processed_count += 1 # Basic success tracking
            # else:
            #     log.warning(f"Could not create basic rule for {input_path_str}, skipping.")
            #     failed_count += 1
            # --- End Placeholder ---
            raise NotImplementedError("CLI processing logic for SourceRule creation and engine call is not fully implemented.")
        except NotImplementedError as e:
            # Deliberate bail-out: no point iterating further inputs while the
            # processing body is unimplemented.
            log.error(f"Stopping CLI run due to incomplete implementation: {e}")
            failed_count += 1
            break # Stop processing further items
        except Exception as e:
            log.exception(f"Error processing input '{Path(input_path_str).name}': {e}")
            failed_count += 1
            results_list.append((input_path_str, "failed", str(e))) # Placeholder result
    # --- Report Summary ---
    duration = time.time() - start_time
    log.info("=" * 40)
    log.info("CLI Processing Summary")
    log.info(f"  Duration: {duration:.2f} seconds")
    log.info(f"  Inputs Attempted: {len(valid_inputs)}")
    log.info(f"  Successfully Processed: {processed_count}")
    log.info(f"  Skipped: {skipped_count}")
    log.info(f"  Failed: {failed_count}")
    exit_code = 0
    if failed_count > 0:
        log.warning("Failures occurred.")
        # Log specific errors if results_list was populated
        for input_path, status, err_msg in results_list:
            if status == "failed":
                log.warning(f"  - {Path(input_path).name}: {err_msg}")
        exit_code = 1 # Exit with error code if failures occurred
    # --- Blender Script Execution (Optional - Copied from old main()) ---
    # This section might need review based on current config/engine
    run_blender = False # Placeholder, add logic if needed
    if run_blender:
        # ... (Blender execution logic from old main() would go here) ...
        log.warning("Blender script execution from CLI not yet re-implemented.")
        pass
    # --- Final Exit ---
    log.info("Asset Processor Script Finished (CLI Mode).")
    sys.exit(exit_code)
if __name__ == "__main__":
    # Parse the full command line up front; logging verbosity depends on it.
    parser = setup_arg_parser()
    args = parser.parse_args()
    setup_logging(args.verbose)
    # Presence of either CLI-only argument switches us into CLI mode;
    # run_cli itself verifies that the full required set is present.
    if args.input_paths or args.preset:
        log.info("CLI arguments detected (input_paths or preset), attempting CLI mode.")
        run_cli(args)
    else:
        log.info("No required CLI arguments detected, starting GUI mode.")
        # --- Run the GUI Application ---
        try:
            qt_app = QApplication(sys.argv)
            qt_app.setStyle('Fusion')
            # palette = qt_app.palette() ... set colors ... qt_app.setPalette(palette)
            app_instance = App()
            app_instance.run()
            # Block on the Qt event loop; its return value is the exit code.
            sys.exit(qt_app.exec())
        except Exception as gui_exc:
            log.exception(f"An error occurred during GUI startup or execution: {gui_exc}")
            sys.exit(1)