# main.py
|
|
|
|
import argparse
|
|
import sys
|
|
import time
|
|
import os
|
|
import logging
|
|
from pathlib import Path
|
|
from concurrent.futures import ProcessPoolExecutor, as_completed
|
|
import platform # To potentially adjust worker count defaults
|
|
import subprocess
|
|
import shutil
|
|
import tempfile # Added for temporary workspace
|
|
import zipfile # Added for zip extraction
|
|
from typing import List, Dict, Tuple, Optional
|
|
|
|
# --- Qt Imports for Application Structure ---
|
|
from PySide6.QtCore import QObject, Slot, QThreadPool, QRunnable, Signal # Added for App structure and threading
|
|
from PySide6.QtCore import Qt # Added for ConnectionType
|
|
from PySide6.QtWidgets import QApplication # Added for App structure
|
|
|
|
# --- Backend Imports ---
|
|
try:
|
|
from configuration import Configuration, ConfigurationError
|
|
# from asset_processor import AssetProcessor, AssetProcessingError # REMOVED OLD PROCESSOR
|
|
from processing_engine import ProcessingEngine # <<< ADDED NEW ENGINE IMPORT
|
|
from rule_structure import SourceRule # Import SourceRule for type hinting
|
|
from gui.main_window import MainWindow # Import MainWindow
|
|
except ImportError as e:
|
|
# Provide a more helpful error message if imports fail
|
|
script_dir = Path(__file__).parent.resolve()
|
|
print(f"ERROR: Failed to import necessary classes: {e}")
|
|
print(f"Ensure 'configuration.py' and 'asset_processor.py' exist in the directory:")
|
|
print(f" {script_dir}")
|
|
print("Or that the directory is included in your PYTHONPATH.")
|
|
sys.exit(1)
|
|
|
|
# --- Setup Logging ---
|
|
# Keep setup_logging as is, it's called by main() or potentially monitor.py
|
|
def setup_logging(verbose: bool):
    """Configure root logging for the application.

    Installs a single stdout stream handler on the root logger, first
    removing any handlers left over from a previous call so that re-running
    in the same interpreter session does not duplicate output.

    Args:
        verbose: When True, log at DEBUG level; otherwise INFO.
    """
    level = logging.DEBUG if verbose else logging.INFO

    # Drop pre-existing root handlers to avoid duplicated log lines on re-run.
    root = logging.root
    for existing in list(root.handlers):
        root.removeHandler(existing)

    logging.basicConfig(
        level=level,
        format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        handlers=[
            logging.StreamHandler(sys.stdout)  # Log to console
            # Optional: add a FileHandler for persistent logs, e.g.
            # logging.FileHandler("asset_processor.log", mode='a', encoding='utf-8')
        ],
    )

    # Logger named for this module; announce the effective level.
    logging.getLogger(__name__).info(
        f"Logging level set to: {logging.getLevelName(level)}"
    )
    # Silence overly chatty third-party loggers here if needed, e.g.:
    # logging.getLogger('cv2').setLevel(logging.WARNING)
|
|
|
|
# Use module-level logger after configuration
# (named after this module; its handlers/level are installed by setup_logging()).
log = logging.getLogger(__name__)
|
|
|
|
|
|
# --- Argument Parser Setup ---
|
|
# Keep setup_arg_parser as is, it's only used when running main.py directly
|
|
def setup_arg_parser():
    """Build and return the command-line argument parser.

    Returns:
        argparse.ArgumentParser: Parser covering input paths, preset,
        output-directory override, worker count, verbosity, overwrite
        behaviour, Blender .blend overrides and GUI-mode forcing.
    """
    # Default worker count: half the detected cores, never fewer than one.
    default_workers = 1
    try:
        cpu_total = os.cpu_count()
        if cpu_total:
            default_workers = max(1, cpu_total // 2)
        # No hard cap is applied (e.g. min(default_workers, 8)); the user can
        # always override via -w/--workers.
    except NotImplementedError:
        log.warning("Could not detect CPU count, defaulting workers to 1.")

    parser = argparse.ArgumentParser(
        description="Process asset files (ZIPs or folders) into a standardized library format using presets.",
        # Show each option's default value in --help output.
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Positional: zero or more inputs so GUI mode can run with none;
    # run_cli() enforces that CLI mode actually received at least one.
    parser.add_argument(
        "input_paths",
        metavar="INPUT_PATH",
        type=str,
        nargs='*',
        default=[],
        help="Path(s) to the input ZIP file(s) or folder(s) containing assets (Required for CLI mode).",
    )
    parser.add_argument(
        "-p", "--preset",
        type=str,
        required=False,
        default=None,
        help="Name of the configuration preset (Required for CLI mode).",
    )
    parser.add_argument(
        "-o", "--output-dir",
        type=str,
        required=False,
        default=None,  # Falls back to core config when omitted.
        help="Override the default base output directory defined in config.py.",
    )
    parser.add_argument(
        "-w", "--workers",
        type=int,
        default=default_workers,
        help="Maximum number of assets to process concurrently in parallel processes.",
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Enable detailed DEBUG level logging for troubleshooting.",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Force reprocessing and overwrite existing output asset folders if they exist.",
    )
    parser.add_argument(
        "--nodegroup-blend",
        type=str,
        default=None,
        help="Path to the .blend file for creating/updating node groups. Overrides config.py default.",
    )
    parser.add_argument(
        "--materials-blend",
        type=str,
        default=None,
        help="Path to the .blend file for creating/updating materials. Overrides config.py default.",
    )
    parser.add_argument(
        "--gui",
        action="store_true",
        help="Force launch in GUI mode, ignoring other arguments.",
    )
    # Potential future flags:
    # parser.add_argument("--log-file", type=str, default=None, help="Path to save log output to a file.")
    return parser
|
|
|
|
|
|
# --- Worker Runnable for Thread Pool ---
|
|
class TaskSignals(QObject): # Create a QObject subclass for signals
    # Qt signals must be declared on a QObject subclass; QRunnable is not one,
    # so ProcessingTask composes an instance of this class instead.
    finished = Signal(str, str, object) # rule_input_path, status, result/error
    # error = Signal(str, str) # Can combine into finished signal
|
|
|
|
class ProcessingTask(QRunnable):
    """Wraps a call to processing_engine.process for execution in a thread pool.

    Each task builds an isolated temporary workspace (a copy of the input
    folder or an extraction of the input zip), invokes the engine against
    that workspace, and always reports completion — success or failure —
    via the ``finished`` signal on ``self.signals``.
    """

    def __init__(self, engine: ProcessingEngine, rule: SourceRule, workspace_path: Path, output_base_path: Path): # Added paths
        # engine: the ProcessingEngine this task will call (App creates a
        #   fresh per-task instance in on_processing_requested).
        # rule: SourceRule describing the input and how to process it.
        # workspace_path: project workspace path; stored but not read by
        #   run(), which builds its own temporary workspace instead.
        # output_base_path: base directory the engine writes results under.
        super().__init__()
        self.engine = engine
        self.rule = rule
        self.workspace_path = workspace_path # Store path
        self.output_base_path = output_base_path # Store path
        self.signals = TaskSignals() # Instantiate signals object

    @Slot() # Decorator required for QRunnable's run method
    def run(self) -> None:
        """Prepares input files and executes the engine's process method."""
        log.info(f"Worker Thread: Starting processing for rule: {self.rule.input_path}")
        log.debug(f"DEBUG: Rule passed to ProcessingTask.run: {self.rule}") # DEBUG LOG
        # Pessimistic default: only flipped to "processed" after the engine
        # call returns without raising.
        status = "failed" # Default status
        result_or_error = None
        temp_workspace_dir = None # Initialize outside try so finally can see it

        try:
            # --- 1. Prepare Input Workspace ---
            original_input_path = Path(self.rule.input_path)
            prepared_workspace_path = None

            if not original_input_path.exists():
                raise FileNotFoundError(f"Original input path does not exist: {original_input_path}")

            # Create a temporary directory for processing
            temp_workspace_dir = tempfile.mkdtemp(prefix="asset_proc_")
            prepared_workspace_path = Path(temp_workspace_dir)
            log.info(f"Created temporary workspace: {prepared_workspace_path}")

            # Check if input is directory or zip file
            if original_input_path.is_dir():
                log.info(f"Input is a directory, copying contents to workspace: {original_input_path}")
                # Copy directory contents into the temp workspace
                shutil.copytree(original_input_path, prepared_workspace_path, dirs_exist_ok=True)
            elif original_input_path.is_file() and original_input_path.suffix.lower() == '.zip':
                log.info(f"Input is a zip file, extracting to workspace: {original_input_path}")
                # NOTE(review): extractall() does not guard against zip-slip
                # (archive entries escaping the workspace via '..'); acceptable
                # only if archives are trusted — confirm.
                with zipfile.ZipFile(original_input_path, 'r') as zip_ref:
                    zip_ref.extractall(prepared_workspace_path)
            else:
                # Handle unsupported input types if necessary
                raise ValueError(f"Unsupported input type: {original_input_path}. Must be a directory or .zip file.")

            # --- DEBUG: List files in prepared workspace ---
            try:
                log.debug(f"Listing contents of prepared workspace: {prepared_workspace_path}")
                for item in prepared_workspace_path.rglob('*'): # Recursively list all items
                    log.debug(f"  Found item: {item.relative_to(prepared_workspace_path)}")
            except Exception as list_err:
                # Listing is diagnostic only; never fail the task over it.
                log.error(f"Error listing prepared workspace contents: {list_err}")
            # --- END DEBUG ---
            # --- 2. Execute Processing Engine ---
            log.info(f"Calling ProcessingEngine.process with rule for input: {self.rule.input_path}, prepared workspace: {prepared_workspace_path}, output: {self.output_base_path}")
            log.debug(f"  Rule Details: {self.rule}") # Optional detailed log

            # Pass rule positionally, prepared workspace, and output base path
            result_or_error = self.engine.process(
                self.rule, # Pass rule as first positional argument
                workspace_path=prepared_workspace_path, # Use the prepared temp workspace
                output_base_path=self.output_base_path
            )
            status = "processed" # Assume success if no exception
            log.info(f"Worker Thread: Finished processing for rule: {self.rule.input_path}, Status: {status}")
            # Signal emission moved to finally block

        except (FileNotFoundError, ValueError, zipfile.BadZipFile, OSError) as prep_error:
            # Workspace-preparation failures are reported distinctly from
            # engine failures so the GUI can show where things went wrong.
            log.exception(f"Worker Thread: Error preparing workspace for rule {self.rule.input_path}: {prep_error}")
            status = "failed_preparation"
            result_or_error = str(prep_error)
            # Signal emission moved to finally block
        except Exception as proc_error:
            log.exception(f"Worker Thread: Error during engine processing for rule {self.rule.input_path}: {proc_error}")
            status = "failed_processing"
            result_or_error = str(proc_error)
            # Signal emission moved to finally block
        finally:
            # --- Emit finished signal regardless of success or failure ---
            # Emitted BEFORE workspace cleanup so listeners are notified even
            # if rmtree below fails; App._on_task_finished relies on exactly
            # one emission per task to keep its active-task count correct.
            try:
                self.signals.finished.emit(str(self.rule.input_path), status, result_or_error)
                log.debug(f"Worker Thread: Emitted finished signal for {self.rule.input_path} with status {status}")
            except Exception as sig_err:
                log.error(f"Worker Thread: Error emitting finished signal for {self.rule.input_path}: {sig_err}")

            # --- 3. Cleanup Workspace ---
            if temp_workspace_dir and Path(temp_workspace_dir).exists():
                try:
                    log.info(f"Cleaning up temporary workspace: {temp_workspace_dir}")
                    shutil.rmtree(temp_workspace_dir)
                except OSError as cleanup_error:
                    # Cleanup failure is logged but does not change the
                    # already-emitted task status.
                    log.error(f"Worker Thread: Failed to cleanup temporary workspace {temp_workspace_dir}: {cleanup_error}")
|
|
|
|
|
|
# --- CLI Worker Function (COMMENTED OUT - Replaced by GUI Flow) ---
|
|
# Commented-out code moved to Deprecated/Old-Code/main_py_cli_worker_wrapper_line_254.py
|
|
|
|
|
|
# --- Core CLI Processing Function (COMMENTED OUT - Replaced by GUI Flow) ---
|
|
# Commented-out code moved to Deprecated/Old-Code/main_py_cli_run_processing_line_258.py
|
|
|
|
|
|
# --- Blender Script Execution Helper (COMMENTED OUT - Part of CLI Flow) ---
|
|
# Commented-out code moved to Deprecated/Old-Code/main_py_cli_blender_script_runner_line_365.py
|
|
|
|
|
|
# --- Main CLI Execution (COMMENTED OUT - Replaced by GUI App Flow) ---
|
|
# Commented-out code moved to Deprecated/Old-Code/main_py_cli_main_entry_line_329.py
|
|
|
|
|
|
# --- Main Application Class (Integrates GUI and Engine) ---
|
|
class App(QObject):
    """Application controller wiring the MainWindow GUI to the ProcessingEngine.

    Owns the base configuration, a QThreadPool, and the bookkeeping for a
    batch of ProcessingTask runnables (active count + processed/skipped/
    failed tallies). Construction loads config, builds the engine, then the
    GUI; any fatal failure exits the process.
    """
    # Signal emitted when all queued processing tasks are complete
    all_tasks_finished = Signal(int, int, int) # processed_count, skipped_count, failed_count (Placeholder counts for now)

    def __init__(self):
        super().__init__()
        self.config_obj = None
        self.processing_engine = None
        self.main_window = None
        self.thread_pool = QThreadPool()
        self._active_tasks_count = 0 # Track running tasks
        self._task_results = {"processed": 0, "skipped": 0, "failed": 0} # Store results
        log.info(f"Maximum threads for pool: {self.thread_pool.maxThreadCount()}")

        # Order matters: engine needs config, GUI wiring needs engine.
        self._load_config()
        self._init_engine()
        self._init_gui()

    def _load_config(self):
        """Loads the base configuration using a default preset."""
        # The actual preset name comes from the GUI request later, but the engine
        # needs an initial valid configuration object.
        try:
            # Find the first available preset to use as a default
            # (alphabetically first .json in Presets/, skipping '_'-prefixed files).
            preset_dir = Path(__file__).parent / "Presets"
            default_preset_name = None
            if preset_dir.is_dir():
                presets = sorted([f.stem for f in preset_dir.glob("*.json") if f.is_file() and not f.name.startswith('_')])
                if presets:
                    default_preset_name = presets[0]
                    log.info(f"Using first available preset as default for initial config: '{default_preset_name}'")

            if not default_preset_name:
                # Fallback or raise error if no presets found
                log.error("No presets found in the 'Presets' directory. Cannot initialize default configuration.")
                # Option 1: Raise an error
                raise ConfigurationError("No presets found to load default configuration.")
                # Option 2: Try initializing with None (if Configuration handles it, unlikely based on error)
                # self.config_obj = Configuration(preset_name=None)

            self.config_obj = Configuration(preset_name=default_preset_name) # Pass the default preset name
            log.info(f"Base configuration loaded using default preset '{default_preset_name}'.")
        except ConfigurationError as e:
            log.error(f"Fatal: Failed to load base configuration using default preset: {e}")
            # In a real app, show this error to the user before exiting
            sys.exit(1)
        except Exception as e:
            log.exception(f"Fatal: Unexpected error loading configuration: {e}")
            sys.exit(1)

    def _init_engine(self):
        """Initializes the ProcessingEngine."""
        if self.config_obj:
            try:
                self.processing_engine = ProcessingEngine(self.config_obj)
                log.info("ProcessingEngine initialized.")
            except Exception as e:
                log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}")
                # Show error and exit
                sys.exit(1)
        else:
            log.error("Fatal: Cannot initialize ProcessingEngine without configuration.")
            sys.exit(1)

    def _init_gui(self):
        """Initializes the MainWindow and connects signals."""
        if self.processing_engine:
            self.main_window = MainWindow() # MainWindow now part of the App
            # Connect the signal from the GUI to the App's slot using QueuedConnection
            # (queued so the request is dispatched via the event loop rather
            # than invoked synchronously from the emitting context).
            connection_success = self.main_window.processing_requested.connect(self.on_processing_requested, Qt.ConnectionType.QueuedConnection)
            log.info(f"DEBUG: Connection result for processing_requested (Queued): {connection_success}") # <-- Modified LOG
            if not connection_success:
                log.error("*********************************************************")
                log.error("FATAL: Failed to connect MainWindow.processing_requested signal to App.on_processing_requested slot!")
                log.error("*********************************************************")
            # Connect the App's completion signal to the MainWindow's slot
            self.all_tasks_finished.connect(self.main_window.on_processing_finished)
            log.info("MainWindow initialized and signals connected.")
        else:
            log.error("Fatal: Cannot initialize MainWindow without ProcessingEngine.")
            sys.exit(1)

    @Slot(list) # Slot to receive List[SourceRule]
    def on_processing_requested(self, source_rules: list):
        # log.info("*********************************************************") # REMOVED
        log.debug("DEBUG: App.on_processing_requested slot entered.") # DEBUG Verify Entry (Keep this one)
        # log.info("*********************************************************") # REMOVED
        # NOTE(review): the string below is not a real docstring — it appears
        # after executable statements, so it is evaluated as a bare expression.
        """Handles the processing request from the GUI."""
        # --- Restore original logic ---
        log.info(f"Received processing request for {len(source_rules)} rule sets.")
        log.info(f"DEBUG: Rules received by on_processing_requested: {source_rules}") # DEBUG LOG
        log.info(f"VERIFY: App.on_processing_requested received {len(source_rules)} rules.") # DEBUG Verify
        for i, rule in enumerate(source_rules):
            log.debug(f"  VERIFY Rule {i}: Input='{rule.input_path}', Assets={len(rule.assets)}") # DEBUG Verify
        if not self.processing_engine:
            log.error("Processing engine not available. Cannot process request.")
            # Update GUI status bar maybe?
            self.main_window.statusBar().showMessage("Error: Processing Engine not ready.", 5000)
            return
        if not source_rules:
            log.warning("Processing requested with an empty rule list.")
            self.main_window.statusBar().showMessage("No rules to process.", 3000)
            return

        # Reset task counter and results for this batch.
        # The count is pre-set to the full batch size and decremented both
        # here (for rules that fail to queue) and in _on_task_finished.
        self._active_tasks_count = len(source_rules)
        self._task_results = {"processed": 0, "skipped": 0, "failed": 0}
        log.debug(f"Initialized active task count to: {self._active_tasks_count}")

        # Update GUI progress bar/status
        self.main_window.progress_bar.setMaximum(len(source_rules))
        self.main_window.progress_bar.setValue(0)
        self.main_window.progress_bar.setFormat(f"0/{len(source_rules)} tasks")

        # --- Get paths needed for ProcessingTask ---
        try:
            output_base_path_str = self.main_window.output_path_edit.text().strip()
            if not output_base_path_str:
                log.error("Cannot queue tasks: Output directory path is empty in the GUI.")
                self.main_window.statusBar().showMessage("Error: Output directory cannot be empty.", 5000)
                return
            output_base_path = Path(output_base_path_str)
            # Basic validation - check if it's likely a valid path structure (doesn't guarantee existence/writability here)
            if not output_base_path.is_absolute():
                # Or attempt to resolve relative to workspace? For now, require absolute from GUI.
                log.warning(f"Output path '{output_base_path}' is not absolute. Processing might fail if relative path is not handled correctly by engine.")
                # Consider resolving: output_base_path = Path.cwd() / output_base_path # If relative paths are allowed

            # Define workspace path (assuming main.py is in the project root)
            workspace_path = Path(__file__).parent.resolve()
            log.debug(f"Using Workspace Path: {workspace_path}")
            log.debug(f"Using Output Base Path: {output_base_path}")

        except Exception as e:
            log.exception(f"Error getting/validating paths for processing task: {e}")
            self.main_window.statusBar().showMessage(f"Error preparing paths: {e}", 5000)
            return
        # --- End Get paths ---

        # Queue tasks in the thread pool
        log.debug("DEBUG: Entering task queuing loop.") # <-- Keep this log
        for i, rule in enumerate(source_rules): # Added enumerate for index logging
            if isinstance(rule, SourceRule):
                log.info(f"DEBUG Task {i+1}: Rule Input='{rule.input_path}', Supplier ID='{getattr(rule, 'supplier_identifier', 'Not Set')}', Preset='{getattr(rule, 'preset_name', 'Not Set')}'") # <-- ADDED LOGGING (Corrected Indentation)
                log.debug(f"DEBUG: Preparing to queue task {i+1}/{len(source_rules)} for rule: {rule.input_path}") # <-- Keep this log

                # --- Create a new Configuration and Engine instance for this specific task ---
                # Each task gets its own engine so a rule-specific preset can
                # be applied without mutating the shared default engine.
                task_engine = None
                try:
                    # Get preset name from the rule, fallback to app's default if missing
                    preset_name_for_task = getattr(rule, 'preset_name', None)
                    if not preset_name_for_task:
                        log.warning(f"Task {i+1} (Rule: {rule.input_path}): SourceRule missing preset_name. Falling back to default preset '{self.config_obj.preset_name}'.")
                        preset_name_for_task = self.config_obj.preset_name # Use the initially loaded default

                    # Load the specific configuration for this task's preset
                    task_config = Configuration(preset_name=preset_name_for_task)
                    task_engine = ProcessingEngine(task_config)
                    log.debug(f"Task {i+1}: Created new ProcessingEngine instance with preset '{preset_name_for_task}'.")

                except ConfigurationError as config_err:
                    log.error(f"Task {i+1} (Rule: {rule.input_path}): Failed to load configuration for preset '{preset_name_for_task}': {config_err}. Skipping task.")
                    self._active_tasks_count -= 1 # Decrement count as this task won't run
                    self._task_results["failed"] += 1
                    # Optionally update GUI status for this specific rule
                    self.main_window.update_file_status(str(rule.input_path), "failed", f"Config Error: {config_err}")
                    continue # Skip to the next rule
                except Exception as engine_err:
                    log.exception(f"Task {i+1} (Rule: {rule.input_path}): Failed to initialize ProcessingEngine for preset '{preset_name_for_task}': {engine_err}. Skipping task.")
                    self._active_tasks_count -= 1 # Decrement count
                    self._task_results["failed"] += 1
                    self.main_window.update_file_status(str(rule.input_path), "failed", f"Engine Init Error: {engine_err}")
                    continue # Skip to the next rule

                if task_engine is None: # Should not happen if exceptions are caught, but safety check
                    log.error(f"Task {i+1} (Rule: {rule.input_path}): Engine is None after initialization attempt. Skipping task.")
                    self._active_tasks_count -= 1 # Decrement count
                    self._task_results["failed"] += 1
                    self.main_window.update_file_status(str(rule.input_path), "failed", "Engine initialization failed (unknown reason).")
                    continue # Skip to the next rule
                # --- End Engine Instantiation ---

                # Pass the required paths and the NEW engine instance to the ProcessingTask constructor
                task = ProcessingTask(
                    engine=task_engine, # Pass the newly created engine
                    rule=rule,
                    workspace_path=workspace_path,
                    output_base_path=output_base_path
                )
                # Connect the task's finished signal to the App's slot
                task.signals.finished.connect(self._on_task_finished)
                log.debug(f"DEBUG: Calling thread_pool.start() for task {i+1}") # <-- Keep this log
                self.thread_pool.start(task)
                log.debug(f"DEBUG: Returned from thread_pool.start() for task {i+1}") # <-- Keep this log
            else:
                # NOTE(review): items that are not SourceRule are skipped but the
                # active-task count is NOT decremented here — confirm whether
                # invalid items can occur; they would stall batch completion.
                log.warning(f"Skipping invalid item (index {i}) in rule list: {type(rule)}") # Added index

        log.info(f"Queued {len(source_rules)} processing tasks (finished loop).") # Added context
        # --- End Restore original logic ---
        # GUI status already updated in MainWindow when signal was emitted

    # --- Slot to handle completion of individual tasks ---
    @Slot(str, str, object)
    def _on_task_finished(self, rule_input_path, status, result_or_error):
        """Handles the 'finished' signal from a ProcessingTask."""
        # rule_input_path: original input path string identifying the task.
        # status: "processed", "skipped", or any "failed*" string from run().
        # result_or_error: engine result on success, error string on failure.
        log.info(f"Task finished signal received for {rule_input_path}. Status: {status}")
        self._active_tasks_count -= 1
        log.debug(f"Active tasks remaining: {self._active_tasks_count}")

        # Update overall results (basic counts for now)
        if status == "processed":
            self._task_results["processed"] += 1
        elif status == "skipped": # Assuming engine might return 'skipped' status eventually
            self._task_results["skipped"] += 1
        else: # Count all other statuses (failed_preparation, failed_processing) as failed
            self._task_results["failed"] += 1

        # Update progress bar
        total_tasks = self.main_window.progress_bar.maximum()
        completed_tasks = total_tasks - self._active_tasks_count
        self.main_window.update_progress_bar(completed_tasks, total_tasks) # Use MainWindow's method

        # Update status for the specific file in the GUI (if needed)
        # self.main_window.update_file_status(rule_input_path, status, str(result_or_error) if result_or_error else "")

        if self._active_tasks_count == 0:
            log.info("All processing tasks finished.")
            # Emit the signal with the final counts
            self.all_tasks_finished.emit(
                self._task_results["processed"],
                self._task_results["skipped"],
                self._task_results["failed"]
            )
        elif self._active_tasks_count < 0:
            log.error("Error: Active task count went below zero!") # Should not happen

    def run(self):
        """Shows the main window."""
        if self.main_window:
            self.main_window.show()
            log.info("Application started. Showing main window.")
        else:
            log.error("Cannot run application, MainWindow not initialized.")
|
|
|
|
|
|
# --- Main CLI Execution Function (Adapted from old main()) ---
|
|
def run_cli(args): # Accept parsed args
    """Run asset processing in CLI mode from already-parsed arguments.

    Validates CLI-only requirements (input paths and preset), resolves and
    creates the output directory, loads the preset Configuration, builds a
    ProcessingEngine, then attempts to process each valid input
    sequentially. Always terminates via sys.exit(): 0 on success, 1 on any
    failure or invalid setup.

    Args:
        args: argparse.Namespace produced by setup_arg_parser().
    """
    # parser = setup_arg_parser() # No longer needed
    # args = parser.parse_args() # Args are passed in

    # --- Validate required CLI arguments ---
    # The parser makes both optional so GUI mode can run without them;
    # CLI mode enforces their presence here.
    if not args.input_paths:
        log.error("CLI Error: Input path(s) are required for CLI mode.")
        sys.exit(1)
    if not args.preset:
        log.error("CLI Error: Preset (-p/--preset) is required for CLI mode.")
        sys.exit(1)
    # --- End Validation ---

    # Logging setup is already done outside this function in the __main__ block

    start_time = time.time()
    log.info("Asset Processor Script Started (CLI Mode)")

    # --- Validate Input Paths ---
    # Keep only existing directories or .zip files; warn and drop the rest.
    valid_inputs = []
    for p_str in args.input_paths:
        p = Path(p_str)
        if p.exists():
            suffix = p.suffix.lower()
            # TODO: Add support for other archive types if needed (.rar, .7z)
            if p.is_dir() or (p.is_file() and suffix == '.zip'):
                valid_inputs.append(p_str) # Store the original string path
            else:
                log.warning(f"Input is not a directory or a supported archive type (.zip), skipping: {p_str}")
        else:
            log.warning(f"Input path not found, skipping: {p_str}")

    if not valid_inputs:
        log.error("No valid input paths found. Exiting.")
        sys.exit(1) # Exit with error code

    # --- Determine Output Directory ---
    output_dir_str = args.output_dir # Get value from args (might be None)
    if not output_dir_str:
        log.debug("Output directory not specified via -o, reading default from app_settings.json via load_base_config().")
        try:
            # NOTE(review): load_base_config is not defined or imported anywhere
            # in this module — reaching this branch raises NameError, which the
            # generic handler below converts to exit(1). Confirm which module
            # provides load_base_config and import it.
            base_config = load_base_config()
            output_dir_str = base_config.get('OUTPUT_BASE_DIR')
            if not output_dir_str:
                log.error("Output directory not specified with -o and 'OUTPUT_BASE_DIR' not found or empty in app_settings.json. Exiting.")
                sys.exit(1)
            log.info(f"Using default output directory from app_settings.json: {output_dir_str}")
        except ConfigurationError as e:
            log.error(f"Error reading base configuration for OUTPUT_BASE_DIR: {e}")
            sys.exit(1)
        except Exception as e:
            log.exception(f"Unexpected error reading base configuration for OUTPUT_BASE_DIR: {e}")
            sys.exit(1)

    # --- Resolve Output Path ---
    output_path_obj = Path(output_dir_str).resolve() # Resolve to absolute path

    # --- Validate and Setup Output Directory ---
    try:
        log.info(f"Ensuring output directory exists: {output_path_obj}")
        output_path_obj.mkdir(parents=True, exist_ok=True)
        output_dir_for_processor = str(output_path_obj)
    except Exception as e:
        log.error(f"Cannot create or access output directory '{output_path_obj}': {e}", exc_info=True)
        sys.exit(1)

    # --- Load Configuration ---
    try:
        config = Configuration(args.preset) # Pass preset name from args
        log.info(f"Configuration loaded for preset: {args.preset}")
    except ConfigurationError as e:
        log.error(f"Error loading configuration for preset '{args.preset}': {e}")
        sys.exit(1)
    except Exception as e:
        log.exception(f"Unexpected error loading configuration: {e}")
        sys.exit(1)

    # --- Initialize Processing Engine ---
    try:
        engine = ProcessingEngine(config)
        log.info("ProcessingEngine initialized for CLI mode.")
    except Exception as e:
        log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}")
        sys.exit(1)

    # --- Execute Processing (Simplified Sequential for now) ---
    # TODO: Re-implement parallel processing using concurrent.futures if needed.
    # TODO: CLI mode needs a way to generate SourceRule objects.
    # For now, we'll pass a simplified structure or assume engine handles it.
    # This part likely needs significant adaptation based on ProcessingEngine.process requirements.
    log.warning("CLI processing currently uses simplified sequential execution.")
    log.warning("SourceRule generation for CLI mode is basic and may need refinement.")

    processed_count = 0
    skipped_count = 0 # Placeholder
    failed_count = 0
    results_list = [] # Placeholder

    # NOTE: the loop body below is intentionally incomplete — it raises
    # NotImplementedError on the first input (see handler), so the CLI path
    # currently always reports a failure.
    for input_path_str in valid_inputs:
        log.info(f"--- Processing Input: {Path(input_path_str).name} ---")
        try:
            # --- Basic SourceRule Creation (Needs Review/Adaptation) ---
            # This is a placeholder. The engine likely needs more detailed file info.
            # We might need to extract file list here like the GUI does.
            input_path_obj = Path(input_path_str)
            # Example: Create a rule assuming the input is a single asset
            # This won't handle multi-asset archives correctly without more logic.
            asset_name = input_path_obj.stem # Basic assumption
            # File list extraction would be needed here for proper FileRule creation
            # file_list = _extract_file_list(input_path_str) # Need to define/import this helper
            # file_rules = [FileRule(file_path=f) for f in file_list] if file_list else []
            # asset_rule = AssetRule(asset_name=asset_name, files=file_rules)
            # rule = SourceRule(input_path=input_path_str, assets=[asset_rule], supplier_identifier=config.settings.get('supplier_identifier')) # Access from config object
            # --- End Placeholder ---

            # --- TEMPORARY: Call engine process with just config and path ---
            # This assumes engine.process can handle this or needs adaptation.
            # If engine.process strictly requires a SourceRule, this will fail.
            # result = engine.process(config=config, input_path=input_path_obj, overwrite=args.overwrite)
            # --- END TEMPORARY ---

            # --- Attempt with Placeholder SourceRule (More likely signature) ---
            # This still requires file list extraction and rule creation logic
            log.error("CLI Processing Logic Incomplete: SourceRule creation and engine call need implementation.")
            # Example (requires file list extraction and rule building):
            # rule = build_basic_source_rule(input_path_str, config) # Hypothetical function
            # if rule:
            #     engine.process(rule) # Assuming process takes one rule
            #     processed_count += 1 # Basic success tracking
            # else:
            #     log.warning(f"Could not create basic rule for {input_path_str}, skipping.")
            #     failed_count += 1
            # --- End Placeholder ---
            raise NotImplementedError("CLI processing logic for SourceRule creation and engine call is not fully implemented.")

        except NotImplementedError as e:
            log.error(f"Stopping CLI run due to incomplete implementation: {e}")
            failed_count += 1
            break # Stop processing further items
        except Exception as e:
            log.exception(f"Error processing input '{Path(input_path_str).name}': {e}")
            failed_count += 1
            results_list.append((input_path_str, "failed", str(e))) # Placeholder result

    # --- Report Summary ---
    duration = time.time() - start_time
    log.info("=" * 40)
    log.info("CLI Processing Summary")
    log.info(f"  Duration: {duration:.2f} seconds")
    log.info(f"  Inputs Attempted: {len(valid_inputs)}")
    log.info(f"  Successfully Processed: {processed_count}")
    log.info(f"  Skipped: {skipped_count}")
    log.info(f"  Failed: {failed_count}")

    exit_code = 0
    if failed_count > 0:
        log.warning("Failures occurred.")
        # Log specific errors if results_list was populated
        for input_path, status, err_msg in results_list:
            if status == "failed":
                log.warning(f"  - {Path(input_path).name}: {err_msg}")
        exit_code = 1 # Exit with error code if failures occurred

    # --- Blender Script Execution (Optional - Copied from old main()) ---
    # This section might need review based on current config/engine
    run_blender = False # Placeholder, add logic if needed
    if run_blender:
        # ... (Blender execution logic from old main() would go here) ...
        log.warning("Blender script execution from CLI not yet re-implemented.")
        pass

    # --- Final Exit ---
    log.info("Asset Processor Script Finished (CLI Mode).")
    sys.exit(exit_code)
|
|
|
|
|
|
if __name__ == "__main__":
    # Parse all arguments first so the --verbose flag is available for logging setup.
    parser = setup_arg_parser()
    args = parser.parse_args()

    # Setup logging based on --verbose flag
    setup_logging(args.verbose)

    # --- Mode selection ---
    # Bug fix: --gui ("Force launch in GUI mode, ignoring other arguments")
    # was accepted by the parser but never consulted here, so `--gui in.zip`
    # incorrectly entered CLI mode. GUI mode now wins whenever --gui is set,
    # or when no CLI-specific argument (input paths / preset) was given.
    # run_cli() still validates that BOTH input paths and preset are present.
    if not args.gui and (args.input_paths or args.preset):
        log.info("CLI arguments detected (input_paths or preset), attempting CLI mode.")
        run_cli(args)  # run_cli terminates the process via sys.exit()
    else:
        if args.gui and (args.input_paths or args.preset):
            log.info("--gui specified; ignoring CLI arguments and starting GUI mode.")
        else:
            log.info("No required CLI arguments detected, starting GUI mode.")
        # --- Run the GUI Application ---
        try:
            qt_app = QApplication(sys.argv)  # Pass original sys.argv
            # Optional: apply style/palette if desired
            qt_app.setStyle('Fusion')
            # palette = qt_app.palette() ... set colors ... qt_app.setPalette(palette)

            # App() loads config, builds the engine and the main window;
            # run() shows the window, then the Qt event loop takes over.
            app_instance = App()
            app_instance.run()

            sys.exit(qt_app.exec())
        except Exception as gui_exc:
            log.exception(f"An error occurred during GUI startup or execution: {gui_exc}")
            sys.exit(1)
|
|
|
|
# --- Old logic removed --- |