
import argparse
import sys
import time
import os
import logging
from pathlib import Path
from concurrent.futures import ProcessPoolExecutor, as_completed
import subprocess
import shutil
import tempfile
import zipfile
from typing import List, Dict, Tuple, Optional
# --- Utility Imports ---
from utils.hash_utils import calculate_sha256
from utils.path_utils import get_next_incrementing_value
# --- Qt Imports for Application Structure ---
from PySide6.QtCore import QObject, Slot, QThreadPool, QRunnable, Signal
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QApplication
# --- Backend Imports ---
# Add the script's directory to sys.path for direct execution
# (sys and os are already imported above).
sys.path.append(os.path.dirname(__file__))
try:
    from configuration import Configuration, ConfigurationError
    from processing_engine import ProcessingEngine
    from rule_structure import SourceRule
    from gui.main_window import MainWindow
    from utils.workspace_utils import prepare_processing_workspace
except ImportError as e:
    script_dir = Path(__file__).parent.resolve()
    print(f"ERROR: Failed to import necessary classes: {e}")
    print("Ensure configuration.py, processing_engine.py, rule_structure.py, and the gui/utils packages exist in the directory:")
    print(f"  {script_dir}")
    print("Or that the directory is included in your PYTHONPATH.")
    sys.exit(1)
# --- Setup Logging ---
# Keep setup_logging as is; it's called by main() or potentially monitor.py.
def setup_logging(verbose: bool):
    """Configures logging for the application."""
    log_level = logging.DEBUG if verbose else logging.INFO
    log_format = '%(asctime)s [%(levelname)-8s] %(name)s: %(message)s'
    date_format = '%Y-%m-%d %H:%M:%S'
    # Remove existing handlers to avoid duplication if re-run in the same session.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(
        level=log_level,
        format=log_format,
        datefmt=date_format,
        handlers=[logging.StreamHandler(sys.stdout)]
    )
    log = logging.getLogger(__name__)
    log.info(f"Logging level set to: {logging.getLevelName(log_level)}")

log = logging.getLogger(__name__)
# --- Argument Parser Setup ---
# Keep setup_arg_parser as is; it's only used when running main.py directly.
def setup_arg_parser():
    """Sets up and returns the command-line argument parser."""
    default_workers = 1
    try:
        # Use half the cores, but at least 1. The ideal cap depends on the task
        # nature; max(1, os.cpu_count() // 2) is a reasonable default.
        cores = os.cpu_count()
        if cores:
            default_workers = max(1, cores // 2)
    except NotImplementedError:
        log.warning("Could not detect CPU count, defaulting workers to 1.")
    parser = argparse.ArgumentParser(
        description="Process asset files (ZIPs or folders) into a standardized library format using presets.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "input_paths",
        metavar="INPUT_PATH",
        type=str,
        nargs='*',
        default=[],
        help="Path(s) to the input ZIP file(s) or folder(s) containing assets (required for CLI mode)."
    )
    parser.add_argument(
        "-p", "--preset",
        type=str,
        required=False,
        default=None,
        help="Name of the configuration preset (required for CLI mode)."
    )
    parser.add_argument(
        "-o", "--output-dir",
        type=str,
        required=False,
        default=None,
        help="Override the default base output directory defined in config.py."
    )
    parser.add_argument(
        "-w", "--workers",
        type=int,
        default=default_workers,
        help="Maximum number of assets to process concurrently in parallel processes."
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Enable detailed DEBUG level logging for troubleshooting."
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Force reprocessing and overwrite existing output asset folders if they exist."
    )
    parser.add_argument(
        "--nodegroup-blend",
        type=str,
        default=None,
        help="Path to the .blend file for creating/updating node groups. Overrides config.py default."
    )
    parser.add_argument(
        "--materials-blend",
        type=str,
        default=None,
        help="Path to the .blend file for creating/updating materials. Overrides config.py default."
    )
    parser.add_argument(
        "--gui",
        action="store_true",
        help="Force launch in GUI mode, ignoring other arguments."
    )
    return parser

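# Example CLI invocations (illustrative; preset names and paths are placeholders):
#     python main.py ./downloads/textures.zip -p MyPreset -w 4 --overwrite
#     python main.py --gui
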
# --- Worker Runnable for Thread Pool ---
class TaskSignals(QObject):
    finished = Signal(str, str, object)  # rule_input_path, status, result/error

class ProcessingTask(QRunnable):
    """Wraps a call to processing_engine.process for execution in a thread pool."""

    def __init__(self, engine: ProcessingEngine, rule: SourceRule, workspace_path: Path, output_base_path: Path):
        super().__init__()
        self.engine = engine
        self.rule = rule
        self.workspace_path = workspace_path
        self.output_base_path = output_base_path
        self.signals = TaskSignals()

    @Slot()  # Not strictly required for QRunnable.run, but harmless
    def run(self):
        """Prepares input files and executes the engine's process method."""
        log.info(f"Worker Thread: Starting processing for rule: {self.rule.input_path}")
        log.debug(f"Rule passed to ProcessingTask.run: {self.rule}")
        status = "failed"
        result_or_error = None
        prepared_workspace_path = None  # Initialized outside try so the finally block can clean up
        try:
            # --- 1. Prepare Input Workspace using Utility Function ---
            # The utility function creates the temp dir, prepares it, and returns its path.
            # It raises exceptions on failure (FileNotFoundError, ValueError, zipfile.BadZipFile, OSError).
            prepared_workspace_path = prepare_processing_workspace(self.rule.input_path)
            log.info(f"Workspace prepared successfully at: {prepared_workspace_path}")
            # --- DEBUG: List files in prepared workspace ---
            try:
                log.debug(f"Listing contents of prepared workspace: {prepared_workspace_path}")
                for item in prepared_workspace_path.rglob('*'):
                    log.debug(f"  Found item: {item.relative_to(prepared_workspace_path)}")
            except Exception as list_err:
                log.error(f"Error listing prepared workspace contents: {list_err}")
            # --- END DEBUG ---
            # --- 2. Execute Processing Engine ---
            log.info(f"Calling ProcessingEngine.process with rule for input: {self.rule.input_path}, prepared workspace: {prepared_workspace_path}, output: {self.output_base_path}")
            log.debug(f"  Rule Details: {self.rule}")
            # --- Calculate SHA5 (first five hex chars of the SHA256) and Incrementing Value ---
            config = self.engine.config_obj
            archive_path = self.rule.input_path
            output_dir = self.output_base_path  # Already a Path object from App.on_processing_requested
            sha5_value = None
            try:
                archive_path_obj = Path(archive_path)
                if archive_path_obj.is_file():
                    log.debug(f"Calculating SHA256 for file: {archive_path_obj}")
                    full_sha = calculate_sha256(archive_path_obj)
                    if full_sha:
                        sha5_value = full_sha[:5]
                        log.info(f"Calculated SHA5 for {archive_path}: {sha5_value}")
                    else:
                        log.warning(f"SHA256 calculation returned None for {archive_path}")
                elif archive_path_obj.is_dir():
                    log.debug(f"Input path {archive_path} is a directory, skipping SHA5 calculation.")
                else:
                    log.warning(f"Input path {archive_path} is not a valid file or directory for SHA5 calculation.")
            except FileNotFoundError:
                log.error(f"SHA5 calculation failed: File not found at {archive_path}")
            except Exception as e:
                log.exception(f"Error calculating SHA5 for {archive_path}: {e}")
            next_increment_str = None
            try:
                # output_dir should already be a Path object
                pattern = getattr(config, 'output_directory_pattern', None)
                if pattern:
                    log.debug(f"Calculating next incrementing value for dir: {output_dir} using pattern: {pattern}")
                    next_increment_str = get_next_incrementing_value(output_dir, pattern)
                    log.info(f"Calculated next incrementing value for {output_dir}: {next_increment_str}")
                else:
                    log.warning(f"Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration for preset {config.preset_name}")
            except Exception as e:
                log.exception(f"Error calculating next incrementing value for {output_dir}: {e}")
            # --- End Calculation ---
            log.info(f"Calling engine.process with sha5='{sha5_value}', incrementing_value='{next_increment_str}'")
            result_or_error = self.engine.process(
                self.rule,
                workspace_path=prepared_workspace_path,
                output_base_path=self.output_base_path,
                incrementing_value=next_increment_str,
                sha5_value=sha5_value
            )
            status = "processed"  # Assume success if no exception
            log.info(f"Worker Thread: Finished processing for rule: {self.rule.input_path}, Status: {status}")
            # Signal emission happens in the finally block
        except (FileNotFoundError, ValueError, zipfile.BadZipFile, OSError) as prep_error:
            log.exception(f"Worker Thread: Error preparing workspace for rule {self.rule.input_path}: {prep_error}")
            status = "failed_preparation"
            result_or_error = str(prep_error)
        except Exception as proc_error:
            log.exception(f"Worker Thread: Error during engine processing for rule {self.rule.input_path}: {proc_error}")
            status = "failed_processing"
            result_or_error = str(proc_error)
        finally:
            # --- Emit finished signal regardless of success or failure ---
            try:
                self.signals.finished.emit(str(self.rule.input_path), status, result_or_error)
                log.debug(f"Worker Thread: Emitted finished signal for {self.rule.input_path} with status {status}")
            except Exception as sig_err:
                log.error(f"Worker Thread: Error emitting finished signal for {self.rule.input_path}: {sig_err}")
            # --- 3. Cleanup Workspace ---
            # Use the path returned by the utility function for cleanup.
            if prepared_workspace_path and prepared_workspace_path.exists():
                try:
                    log.info(f"Cleaning up temporary workspace: {prepared_workspace_path}")
                    shutil.rmtree(prepared_workspace_path)
                except OSError as cleanup_error:
                    log.error(f"Worker Thread: Failed to cleanup temporary workspace {prepared_workspace_path}: {cleanup_error}")

# --- Main Application Class (Integrates GUI and Engine) ---
class App(QObject):
    # Signal emitted when all queued processing tasks are complete
    all_tasks_finished = Signal(int, int, int)  # processed_count, skipped_count, failed_count

    def __init__(self):
        super().__init__()
        self.config_obj = None
        self.processing_engine = None
        self.main_window = None
        self.thread_pool = QThreadPool()
        self._active_tasks_count = 0
        self._task_results = {"processed": 0, "skipped": 0, "failed": 0}
        log.info(f"Maximum threads for pool: {self.thread_pool.maxThreadCount()}")
        self._load_config()
        self._init_engine()
        self._init_gui()

    def _load_config(self):
        """Loads the base configuration using a default preset."""
        # The actual preset name comes from the GUI request later, but the engine
        # needs an initial valid configuration object.
        try:
            # Find the first available preset to use as a default
            preset_dir = Path(__file__).parent / "Presets"
            default_preset_name = None
            if preset_dir.is_dir():
                presets = sorted([f.stem for f in preset_dir.glob("*.json") if f.is_file() and not f.name.startswith('_')])
                if presets:
                    default_preset_name = presets[0]
                    log.info(f"Using first available preset as default for initial config: '{default_preset_name}'")
            if not default_preset_name:
                log.error("No presets found in the 'Presets' directory. Cannot initialize default configuration.")
                raise ConfigurationError("No presets found to load default configuration.")
            self.config_obj = Configuration(preset_name=default_preset_name)
            log.info(f"Base configuration loaded using default preset '{default_preset_name}'.")
        except ConfigurationError as e:
            log.error(f"Fatal: Failed to load base configuration using default preset: {e}")
            # In a real app, show this error to the user before exiting
            sys.exit(1)
        except Exception as e:
            log.exception(f"Fatal: Unexpected error loading configuration: {e}")
            sys.exit(1)

    def _init_engine(self):
        """Initializes the ProcessingEngine."""
        if self.config_obj:
            try:
                self.processing_engine = ProcessingEngine(self.config_obj)
                log.info("ProcessingEngine initialized.")
            except Exception as e:
                log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}")
                # Show error and exit
                sys.exit(1)
        else:
            log.error("Fatal: Cannot initialize ProcessingEngine without configuration.")
            sys.exit(1)

    def _init_gui(self):
        """Initializes the MainWindow and connects signals."""
        if self.processing_engine:
            self.main_window = MainWindow()  # MainWindow is now part of the App
            # Connect the MainWindow signal (triggered by the panel) to the App's
            # slot using a queued connection.
            connection_success = self.main_window.start_backend_processing.connect(
                self.on_processing_requested, Qt.ConnectionType.QueuedConnection
            )
            log.debug(f"Connection result for start_backend_processing (Queued): {connection_success}")
            if not connection_success:
                log.error("FATAL: Failed to connect MainWindow.start_backend_processing signal to App.on_processing_requested slot!")
            # Connect the App's completion signal to the MainWindow's slot
            self.all_tasks_finished.connect(self.main_window.on_processing_finished)
            log.info("MainWindow initialized and signals connected.")
        else:
            log.error("Fatal: Cannot initialize MainWindow without ProcessingEngine.")
            sys.exit(1)

    @Slot(list, dict)  # Receives a List[SourceRule] and a processing_settings dict
    def on_processing_requested(self, source_rules: list, processing_settings: dict):
        """Handles the processing request from the GUI."""
        log.debug("App.on_processing_requested slot entered.")
        log.info(f"Received processing request for {len(source_rules)} rule sets.")
        log.debug(f"Rules received: {source_rules}")
        for i, rule in enumerate(source_rules):
            log.debug(f"  Rule {i}: Input='{rule.input_path}', Assets={len(rule.assets)}")
        if not self.processing_engine:
            log.error("Processing engine not available. Cannot process request.")
            self.main_window.statusBar().showMessage("Error: Processing Engine not ready.", 5000)
            return
        if not source_rules:
            log.warning("Processing requested with an empty rule list.")
            self.main_window.statusBar().showMessage("No rules to process.", 3000)
            return
        # Reset task counter and results for this batch
        self._active_tasks_count = len(source_rules)
        self._task_results = {"processed": 0, "skipped": 0, "failed": 0}
        log.debug(f"Initialized active task count to: {self._active_tasks_count}")
        # Update GUI progress bar/status via MainPanelWidget
        self.main_window.main_panel_widget.progress_bar.setMaximum(len(source_rules))
        self.main_window.main_panel_widget.progress_bar.setValue(0)
        self.main_window.main_panel_widget.progress_bar.setFormat(f"0/{len(source_rules)} tasks")
        # --- Get paths needed for ProcessingTask ---
        try:
            # Access output path via MainPanelWidget
            output_base_path_str = self.main_window.main_panel_widget.output_path_edit.text().strip()
            if not output_base_path_str:
                log.error("Cannot queue tasks: Output directory path is empty in the GUI.")
                self.main_window.statusBar().showMessage("Error: Output directory cannot be empty.", 5000)
                return
            output_base_path = Path(output_base_path_str)
            # Basic validation: checks path structure only; this does not guarantee
            # existence or writability.
            if not output_base_path.is_absolute():
                # Relative paths are accepted for now, but the GUI should provide
                # absolute paths; the engine may not resolve relative ones.
                log.warning(f"Output path '{output_base_path}' is not absolute. Processing might fail if the relative path is not handled correctly by the engine.")
                # Consider resolving: output_base_path = Path.cwd() / output_base_path
            # Define workspace path (assuming main.py is in the project root)
            workspace_path = Path(__file__).parent.resolve()
            log.debug(f"Using Workspace Path: {workspace_path}")
            log.debug(f"Using Output Base Path: {output_base_path}")
        except Exception as e:
            log.exception(f"Error getting/validating paths for processing task: {e}")
            self.main_window.statusBar().showMessage(f"Error preparing paths: {e}", 5000)
            return
        # --- End Get paths ---
        # Set max threads based on GUI setting
        worker_count = processing_settings.get('workers', 1)
        self.thread_pool.setMaxThreadCount(worker_count)
        log.info(f"Set thread pool max workers to: {worker_count}")
        # Queue tasks in the thread pool
        log.debug("Entering task queuing loop.")
        for i, rule in enumerate(source_rules):
            if isinstance(rule, SourceRule):
                log.debug(f"Task {i+1}: Rule Input='{rule.input_path}', Supplier ID='{getattr(rule, 'supplier_identifier', 'Not Set')}', Preset='{getattr(rule, 'preset_name', 'Not Set')}'")
                log.debug(f"Preparing to queue task {i+1}/{len(source_rules)} for rule: {rule.input_path}")
                # --- Create a new Configuration and Engine instance for this specific task ---
                task_engine = None
                try:
                    # Get preset name from the rule, fall back to the app's default if missing
                    preset_name_for_task = getattr(rule, 'preset_name', None)
                    if not preset_name_for_task:
                        log.warning(f"Task {i+1} (Rule: {rule.input_path}): SourceRule missing preset_name. Falling back to default preset '{self.config_obj.preset_name}'.")
                        preset_name_for_task = self.config_obj.preset_name
                    task_config = Configuration(preset_name=preset_name_for_task)
                    task_engine = ProcessingEngine(task_config)
                    log.debug(f"Task {i+1}: Created new ProcessingEngine instance with preset '{preset_name_for_task}'.")
                except ConfigurationError as config_err:
                    log.error(f"Task {i+1} (Rule: {rule.input_path}): Failed to load configuration for preset '{preset_name_for_task}': {config_err}. Skipping task.")
                    self._active_tasks_count -= 1  # Decrement count as this task won't run
                    self._task_results["failed"] += 1
                    self.main_window.update_file_status(str(rule.input_path), "failed", f"Config Error: {config_err}")
                    continue  # Skip to the next rule
                except Exception as engine_err:
                    log.exception(f"Task {i+1} (Rule: {rule.input_path}): Failed to initialize ProcessingEngine for preset '{preset_name_for_task}': {engine_err}. Skipping task.")
                    self._active_tasks_count -= 1
                    self._task_results["failed"] += 1
                    self.main_window.update_file_status(str(rule.input_path), "failed", f"Engine Init Error: {engine_err}")
                    continue
                if task_engine is None:  # Safety check; should not happen if exceptions are caught
                    log.error(f"Task {i+1} (Rule: {rule.input_path}): Engine is None after initialization attempt. Skipping task.")
                    self._active_tasks_count -= 1
                    self._task_results["failed"] += 1
                    self.main_window.update_file_status(str(rule.input_path), "failed", "Engine initialization failed (unknown reason).")
                    continue
                # --- End Engine Instantiation ---
                task = ProcessingTask(
                    engine=task_engine,
                    rule=rule,
                    workspace_path=workspace_path,
                    output_base_path=output_base_path
                )
                task.signals.finished.connect(self._on_task_finished)
                log.debug(f"Calling thread_pool.start() for task {i+1}")
                self.thread_pool.start(task)
                log.debug(f"Returned from thread_pool.start() for task {i+1}")
            else:
                log.warning(f"Skipping invalid item (index {i}) in rule list: {type(rule)}")
        log.info(f"Queued {len(source_rules)} processing tasks (finished loop).")
        # GUI status already updated in MainWindow when the signal was emitted

    # --- Slot to handle completion of individual tasks ---
    @Slot(str, str, object)
    def _on_task_finished(self, rule_input_path, status, result_or_error):
        """Handles the 'finished' signal from a ProcessingTask."""
        log.info(f"Task finished signal received for {rule_input_path}. Status: {status}")
        self._active_tasks_count -= 1
        log.debug(f"Active tasks remaining: {self._active_tasks_count}")
        # Update overall results (basic counts for now)
        if status == "processed":
            self._task_results["processed"] += 1
        elif status == "skipped":  # The engine might return a 'skipped' status eventually
            self._task_results["skipped"] += 1
        else:  # Count all other statuses (failed_preparation, failed_processing) as failed
            self._task_results["failed"] += 1
        # Update progress bar via MainPanelWidget
        total_tasks = self.main_window.main_panel_widget.progress_bar.maximum()
        completed_tasks = total_tasks - self._active_tasks_count
        self.main_window.main_panel_widget.update_progress_bar(completed_tasks, total_tasks)
        # Update status for the specific file in the GUI (if needed)
        if self._active_tasks_count == 0:
            log.info("All processing tasks finished.")
            # Emit the signal with the final counts
            self.all_tasks_finished.emit(
                self._task_results["processed"],
                self._task_results["skipped"],
                self._task_results["failed"]
            )
        elif self._active_tasks_count < 0:
            log.error("Error: Active task count went below zero!")  # Should not happen

    def run(self):
        """Shows the main window."""
        if self.main_window:
            self.main_window.show()
            log.info("Application started. Showing main window.")
        else:
            log.error("Cannot run application, MainWindow not initialized.")

if __name__ == "__main__":
    parser = setup_arg_parser()
    args = parser.parse_args()
    setup_logging(args.verbose)
    # Determine mode: --gui forces GUI mode; otherwise the presence of either
    # input_paths or a preset selects CLI mode.
    if not args.gui and (args.input_paths or args.preset):
        # run_cli validates that *both* input_paths and preset are actually present.
        log.info("CLI arguments detected (input_paths or preset), attempting CLI mode.")
        run_cli(args)
    else:
        log.info("No required CLI arguments detected, starting GUI mode.")
        # --- Run the GUI Application ---
        try:
            qt_app = QApplication(sys.argv)
            app_instance = App()
            app_instance.run()
            sys.exit(qt_app.exec())
        except Exception as gui_exc:
            log.exception(f"An error occurred during GUI startup or execution: {gui_exc}")
            sys.exit(1)