# Paste metadata (not Python code — commented out so the file parses):
# 2025-04-30 17:30:51 +02:00
# 642 lines, 28 KiB, Python

# main.py
import argparse
import sys
import time
import os
import logging
from pathlib import Path
from concurrent.futures import ProcessPoolExecutor, as_completed
import platform # To potentially adjust worker count defaults
import subprocess # <<< ADDED IMPORT
import shutil # <<< ADDED IMPORT
from typing import List, Dict, Tuple, Optional # Added for type hinting
# --- Assuming classes are in sibling files ---
try:
from configuration import Configuration, ConfigurationError
from asset_processor import AssetProcessor, AssetProcessingError
import config as core_config_module # <<< IMPORT config.py HERE
except ImportError as e:
# Provide a more helpful error message if imports fail
script_dir = Path(__file__).parent.resolve()
print(f"ERROR: Failed to import necessary classes: {e}")
print(f"Ensure 'configuration.py' and 'asset_processor.py' exist in the directory:")
print(f" {script_dir}")
print("Or that the directory is included in your PYTHONPATH.")
sys.exit(1)
# --- Setup Logging ---
# Keep setup_logging as is, it's called by main() or potentially monitor.py
def setup_logging(verbose: bool):
    """Configure root logging to write to the console (stdout).

    Replaces any handlers already attached to the root logger so that
    repeated calls within one session do not duplicate log lines.

    Args:
        verbose: When True, log at DEBUG level; otherwise at INFO.
    """
    level = logging.DEBUG if verbose else logging.INFO
    # Strip pre-existing handlers to avoid duplicated output on re-runs.
    root = logging.root
    for existing in list(root.handlers):
        root.removeHandler(existing)
    logging.basicConfig(
        level=level,
        format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        # Console only; add a FileHandler here for persistent logs, e.g.
        # logging.FileHandler("asset_processor.log", mode='a', encoding='utf-8')
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    # Announce the effective level via this module's logger.
    logging.getLogger(__name__).info(f"Logging level set to: {logging.getLevelName(level)}")
    # Suppress overly chatty third-party loggers here if needed, e.g.:
    # logging.getLogger('cv2').setLevel(logging.WARNING)
# Module-level logger for this script; level/handlers are applied when
# setup_logging() runs (called from main() or by monitor.py).
log = logging.getLogger(__name__)
# --- Argument Parser Setup ---
# Keep setup_arg_parser as is, it's only used when running main.py directly
def setup_arg_parser():
    """Build and return the CLI argument parser used when running main.py directly."""
    # Sensible default worker count: roughly half the available cores, minimum 1.
    default_workers = 1
    try:
        cpu_total = os.cpu_count()
        if cpu_total:
            default_workers = max(1, cpu_total // 2)
        # A hard cap (e.g. min(default_workers, 8)) is intentionally not applied;
        # the user can always override via --workers.
    except NotImplementedError:
        log.warning("Could not detect CPU count, defaulting workers to 1.")
    parser = argparse.ArgumentParser(
        description="Process asset files (ZIPs or folders) into a standardized library format using presets.",
        # ArgumentDefaultsHelpFormatter appends each option's default to --help output.
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("input_paths", metavar="INPUT_PATH", type=str, nargs='+',
                        help="Path(s) to the input ZIP file(s) or folder(s) containing assets.")
    parser.add_argument("-p", "--preset", type=str, required=True,
                        help="Name of the configuration preset (e.g., 'poliigon') located in the 'presets' directory (without .json extension).")
    parser.add_argument("-o", "--output-dir", type=str, required=False, default=None,
                        help="Override the default base output directory defined in config.py.")
    parser.add_argument("-w", "--workers", type=int, default=default_workers,
                        help="Maximum number of assets to process concurrently in parallel processes.")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable detailed DEBUG level logging for troubleshooting.")
    parser.add_argument("--overwrite", action="store_true",
                        help="Force reprocessing and overwrite existing output asset folders if they exist.")
    parser.add_argument("--nodegroup-blend", type=str, default=None,
                        help="Path to the .blend file for creating/updating node groups. Overrides config.py default.")
    parser.add_argument("--materials-blend", type=str, default=None,
                        help="Path to the .blend file for creating/updating materials. Overrides config.py default.")
    # Potential future flags:
    # parser.add_argument("--log-file", type=str, default=None, help="Path to save log output to a file.")
    return parser
# --- Worker Function ---
def process_single_asset_wrapper(input_path_str: str, preset_name: str, output_dir_str: str, overwrite: bool, verbose: bool, rules=None) -> Tuple[str, str, Optional[str]]:
    """
    Wrapper function for processing a single input path (which might contain multiple assets)
    in a separate process. Handles instantiation of Configuration and AssetProcessor,
    passes the overwrite flag, catches errors, and interprets the multi-asset status dictionary.
    Ensures logging is configured for the worker process.

    Args:
        input_path_str: Path string of the input ZIP/folder to process.
        preset_name: Name of the configuration preset to load.
        output_dir_str: Absolute base output directory for processed assets.
        overwrite: Force reprocessing even if output already exists.
        verbose: Enable DEBUG-level logging in this worker process.
        rules: Optional rules object forwarded to AssetProcessor.process().
            BUGFIX: now defaults to None — it used to be a required positional
            parameter, so callers submitting without it (e.g. run_processing)
            made every worker task fail with a TypeError.

    Returns:
        Tuple[str, str, Optional[str]]:
            - input_path_str: The original input path processed.
            - overall_status_string: A single status string summarizing the outcome
              ("processed", "skipped", "failed", "partial_success").
            - error_message_or_None: An error message if failures occurred, potentially
              listing failed assets.
    """
    # Explicitly configure logging for this worker process (child processes do
    # not inherit the parent's handlers on spawn-based platforms).
    worker_log = logging.getLogger(f"Worker_{os.getpid()}")  # Log with worker PID
    if not logging.root.handlers:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s')
    worker_log.setLevel(logging.DEBUG if verbose else logging.INFO)
    if verbose:
        logging.root.setLevel(logging.DEBUG)
    input_path_obj = Path(input_path_str)
    input_name = input_path_obj.name
    try:
        worker_log.info(f"Starting processing attempt for input: {input_name}")
        config = Configuration(preset_name)
        output_base_path = Path(output_dir_str)
        processor = AssetProcessor(input_path_obj, config, output_base_path, overwrite=overwrite)
        # processor.process() returns a Dict[str, List[str]] keyed by status.
        status_dict = processor.process(rules=rules)
        # --- Interpret the status dictionary ---
        processed_assets = status_dict.get("processed", [])
        skipped_assets = status_dict.get("skipped", [])
        failed_assets = status_dict.get("failed", [])
        overall_status_string = "failed"  # Default: pessimistic until proven otherwise
        error_message = None
        if failed_assets:
            # Any failure inside the input marks the whole input as failed.
            overall_status_string = "failed"
            error_message = f"Failed assets within {input_name}: {', '.join(failed_assets)}"
            worker_log.error(error_message)  # Log the failure details
        elif processed_assets:
            overall_status_string = "processed"
            # A mix of processed and skipped still counts as "processed" overall.
            if skipped_assets:
                worker_log.info(f"Input '{input_name}' processed with some assets skipped. Processed: {processed_assets}, Skipped: {skipped_assets}")
            else:
                worker_log.info(f"Input '{input_name}' processed successfully. Assets: {processed_assets}")
        elif skipped_assets:
            overall_status_string = "skipped"
            worker_log.info(f"Input '{input_name}' skipped (all contained assets already exist). Assets: {skipped_assets}")
        else:
            # Should not happen if input contained files, but handle as failure.
            worker_log.warning(f"Input '{input_name}' resulted in no processed, skipped, or failed assets. Reporting as failed.")
            overall_status_string = "failed"
            error_message = f"No assets processed, skipped, or failed within {input_name}."
        return (input_path_str, overall_status_string, error_message)
    except (ConfigurationError, AssetProcessingError) as e:
        # Known failure modes raised during setup or inside process() itself.
        worker_log.error(f"Processing failed for input '{input_name}': {type(e).__name__}: {e}")
        return (input_path_str, "failed", f"{type(e).__name__}: {e}")
    except Exception as e:
        # Catch-all so a worker never propagates an exception to the pool;
        # the stack trace is logged here instead.
        worker_log.exception(f"Unexpected worker failure processing input '{input_name}': {e}")
        return (input_path_str, "failed", f"Unexpected Worker Error: {e}")
# --- Core Processing Function ---
def run_processing(
    valid_inputs: List[str],
    preset_name: str,
    output_dir_for_processor: str,
    overwrite: bool,
    num_workers: int,
    verbose: bool,
    rules=None  # Optional rules object forwarded to each worker (new, defaulted)
) -> Dict:
    """
    Executes the core asset processing logic using a process pool.

    Args:
        valid_inputs: List of validated input file/directory paths (strings).
        preset_name: Name of the preset to use.
        output_dir_for_processor: Absolute path string for the output base directory.
        overwrite: Boolean flag to force reprocessing.
        num_workers: Maximum number of worker processes.
        verbose: Boolean flag for verbose logging.
        rules: Optional rules object passed through to
            process_single_asset_wrapper (and on to AssetProcessor.process()).
            BUGFIX: previously this function never supplied the worker's
            required 'rules' argument, so every submitted task raised a
            TypeError inside the pool.

    Returns:
        A dictionary containing processing results:
        {
            "processed": int,
            "skipped": int,
            "failed": int,
            "results_list": List[Tuple[str, str, Optional[str]]]  # (input_path, status, error_msg)
        }
        On a pool-level crash, an extra "pool_error" key holds the error text.
    """
    log.info(f"Processing {len(valid_inputs)} asset(s) using preset '{preset_name}' with up to {num_workers} worker(s)...")
    results_list = []
    successful_processed_count = 0
    skipped_count = 0
    failed_count = 0
    # Ensure at least one worker
    num_workers = max(1, num_workers)
    # ProcessPoolExecutor suits CPU-bound AssetProcessor tasks; for I/O-bound
    # work a ThreadPoolExecutor would suffice. All submitted arguments must be
    # pickleable since they cross a process boundary.
    try:
        with ProcessPoolExecutor(max_workers=num_workers) as executor:
            # Submit one task per validated input path.
            futures = {}
            log.debug(f"Submitting {len(valid_inputs)} tasks...")
            for i, input_path in enumerate(valid_inputs):
                log.debug(f"Submitting task {i+1}/{len(valid_inputs)} for: {Path(input_path).name}")
                future = executor.submit(
                    process_single_asset_wrapper,
                    input_path,
                    preset_name,
                    output_dir_for_processor,
                    overwrite,
                    verbose,
                    rules  # Forward the rules object to the worker
                )
                futures[future] = input_path  # Store future -> input_path mapping
            # Tally results as workers finish, in completion order.
            for i, future in enumerate(as_completed(futures), 1):
                input_path = futures[future]
                asset_name = Path(input_path).name
                log.info(f"--- [{i}/{len(valid_inputs)}] Worker finished attempt for: {asset_name} ---")
                try:
                    # Result tuple: (input_path_str, status_string, error_message_or_None)
                    result_tuple = future.result()
                    results_list.append(result_tuple)
                    input_path_res, status, err_msg = result_tuple
                    # Increment counters based on status
                    if status == "processed":
                        successful_processed_count += 1
                    elif status == "skipped":
                        skipped_count += 1
                    elif status == "failed":
                        failed_count += 1
                    else:  # Unknown status string — treat defensively as a failure.
                        log.warning(f"Unknown status '{status}' received for {asset_name}. Counting as failed.")
                        failed_count += 1
                except Exception as e:
                    # future.result() re-raises if the worker process died hard.
                    log.exception(f"Critical worker failure for {asset_name}: {e}")
                    results_list.append((input_path, "failed", f"Worker process crashed: {e}"))
                    failed_count += 1  # Count crashes as failures
    except Exception as pool_exc:
        log.exception(f"An error occurred with the process pool: {pool_exc}")
        # Return partial counts; tasks that never produced a result are
        # counted as failed so the totals still add up for the caller.
        return {
            "processed": successful_processed_count,
            "skipped": skipped_count,
            "failed": failed_count + (len(valid_inputs) - len(results_list)),
            "results_list": results_list,
            "pool_error": str(pool_exc)  # Add pool error info
        }
    return {
        "processed": successful_processed_count,
        "skipped": skipped_count,
        "failed": failed_count,
        "results_list": results_list
    }
# --- Blender Script Execution Helper ---
def run_blender_script(blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str):
    """
    Executes a Python script within Blender in the background.

    Args:
        blender_exe_path: Path to the Blender executable.
        blend_file_path: Path to the .blend file to open.
        python_script_path: Path to the Python script to execute within Blender.
        asset_root_dir: Path to the processed asset library root directory (passed to the script).

    Returns:
        True if the script executed successfully (return code 0), False otherwise.
    """
    log.info(f"Attempting to run Blender script: {Path(python_script_path).name} on {Path(blend_file_path).name}")
    # Ensure paths are absolute strings for subprocess
    blender_exe_path = str(Path(blender_exe_path).resolve())
    blend_file_path = str(Path(blend_file_path).resolve())
    python_script_path = str(Path(python_script_path).resolve())
    asset_root_dir = str(Path(asset_root_dir).resolve())
    # Construct the command arguments:
    #   -b        run Blender headless (no UI)
    #   --python  execute the given Python script inside Blender
    #   --        separator; everything after it is handed to the script's sys.argv
    # NOTE(review): "-S" appears AFTER "--", so Blender itself never parses it —
    # it is delivered to the Python script's argv together with asset_root_dir.
    # If it were meant as a Blender-level option it would have to come before
    # "--"; confirm the create_*.py scripts actually consume this flag (e.g.
    # to trigger saving the .blend file themselves).
    command = [
        blender_exe_path,
        "-b",  # Run in background
        blend_file_path,
        "--python", python_script_path,
        "--",  # Pass subsequent arguments to the script
        asset_root_dir,
        "-S"  # Forwarded to the script's sys.argv (see NOTE above)
    ]
    log.debug(f"Executing Blender command: {' '.join(command)}")  # Log the command for debugging
    try:
        # capture_output=True captures stdout and stderr; text=True decodes
        # them as text; check=False avoids CalledProcessError on non-zero exit
        # codes so we can log Blender's output ourselves.
        result = subprocess.run(command, capture_output=True, text=True, check=False)
        # Log results
        log.info(f"Blender script '{Path(python_script_path).name}' finished with exit code: {result.returncode}")
        if result.stdout:
            log.debug(f"Blender stdout:\n{result.stdout.strip()}")
        if result.stderr:
            # Log stderr as warning or error depending on return code
            if result.returncode != 0:
                log.error(f"Blender stderr:\n{result.stderr.strip()}")
            else:
                log.warning(f"Blender stderr (Return Code 0):\n{result.stderr.strip()}")  # Scripts may print warnings even on success
        return result.returncode == 0
    except FileNotFoundError:
        # The Blender binary itself was missing/invalid.
        log.error(f"Blender executable not found at: {blender_exe_path}")
        return False
    except Exception as e:
        # Any other subprocess failure: log with traceback and report failure.
        log.exception(f"An unexpected error occurred while running Blender script '{Path(python_script_path).name}': {e}")
        return False
# --- Main Execution (for CLI usage) ---
def main():
    """CLI entry point.

    Parses arguments, configures logging, validates inputs and the output
    directory, runs the processing pool, prints a summary, then optionally
    executes Blender scripts to refresh node groups / materials.

    Exits with status 1 on validation errors or if any asset failed to
    process; 0 otherwise.
    """
    parser = setup_arg_parser()
    args = parser.parse_args()
    # Setup logging based on verbosity argument *before* logging status messages
    setup_logging(args.verbose)
    start_time = time.time()
    log.info("Asset Processor Script Started (CLI Mode)")
    # --- Validate Input Paths ---
    valid_inputs = []
    for p_str in args.input_paths:
        p = Path(p_str)
        if p.exists():
            suffix = p.suffix.lower()
            if p.is_dir() or (p.is_file() and suffix in ['.zip', '.rar', '.7z']):
                valid_inputs.append(p_str)  # Store the original string path
            else:
                log.warning(f"Input is not a directory or a supported archive type (.zip, .rar, .7z), skipping: {p_str}")
        else:
            log.warning(f"Input path not found, skipping: {p_str}")
    if not valid_inputs:
        log.error("No valid input paths found. Exiting.")
        sys.exit(1)  # Exit with error code
    # --- Determine Output Directory ---
    output_dir_str = args.output_dir  # Get value from args (might be None)
    if not output_dir_str:
        log.debug("Output directory not specified via -o, reading default from config.py.")
        try:
            output_dir_str = getattr(core_config_module, 'OUTPUT_BASE_DIR', None)
            if not output_dir_str:
                log.error("Output directory not specified with -o and OUTPUT_BASE_DIR not found or empty in config.py. Exiting.")
                sys.exit(1)
            log.info(f"Using default output directory from config.py: {output_dir_str}")
        except Exception as e:
            log.error(f"Could not read OUTPUT_BASE_DIR from config.py: {e}")
            sys.exit(1)
    # --- Resolve Output Path (Handles Relative Paths Explicitly) ---
    output_path_obj: Path
    if os.path.isabs(output_dir_str):
        output_path_obj = Path(output_dir_str)
        log.info(f"Using absolute output directory: {output_path_obj}")
    else:
        # Path() interprets relative paths against CWD by default
        output_path_obj = Path(output_dir_str)
        log.info(f"Using relative output directory '{output_dir_str}'. Resolved against CWD to: {output_path_obj.resolve()}")
    # --- Validate and Setup Output Directory ---
    try:
        # Resolve to ensure we have an absolute path for consistency and creation
        resolved_output_dir = output_path_obj.resolve()
        log.info(f"Ensuring output directory exists: {resolved_output_dir}")
        resolved_output_dir.mkdir(parents=True, exist_ok=True)
        # Use the resolved absolute path string for the processor
        output_dir_for_processor = str(resolved_output_dir)
    except Exception as e:
        # BUGFIX: report output_path_obj here — resolved_output_dir may be
        # unbound if resolve() itself raised, which previously masked the
        # real error with a NameError inside this handler.
        log.error(f"Cannot create or access output directory '{output_path_obj}': {e}", exc_info=True)
        sys.exit(1)
    # --- Check Preset Existence (Basic Check) ---
    preset_dir = Path(__file__).parent / "presets"
    preset_file = preset_dir / f"{args.preset}.json"
    if not preset_file.is_file():
        log.error(f"Preset file not found: {preset_file}")
        log.error(f"Ensure a file named '{args.preset}.json' exists in the directory: {preset_dir.resolve()}")
        sys.exit(1)
    # --- Execute Processing ---
    processing_results = run_processing(
        valid_inputs=valid_inputs,
        preset_name=args.preset,
        output_dir_for_processor=output_dir_for_processor,
        overwrite=args.overwrite,
        num_workers=args.workers,
        verbose=args.verbose  # Pass the verbose flag
    )
    # --- Report Summary ---
    duration = time.time() - start_time
    successful_processed_count = processing_results["processed"]
    skipped_count = processing_results["skipped"]
    failed_count = processing_results["failed"]
    results_list = processing_results["results_list"]
    log.info("=" * 40)
    log.info("Processing Summary")
    log.info(f"  Duration: {duration:.2f} seconds")
    log.info(f"  Assets Attempted: {len(valid_inputs)}")
    log.info(f"  Successfully Processed: {successful_processed_count}")
    log.info(f"  Skipped (Already Existed): {skipped_count}")
    log.info(f"  Failed: {failed_count}")
    if processing_results.get("pool_error"):
        log.error(f"  Process Pool Error: {processing_results['pool_error']}")
        # Ensure failed count reflects pool error if it happened
        if failed_count == 0 and successful_processed_count == 0 and skipped_count == 0:
            failed_count = len(valid_inputs)  # Assume all failed if pool died early
    exit_code = 0
    if failed_count > 0:
        log.warning("Failures occurred:")
        # Iterate through results to show specific errors for failed items
        for input_path, status, err_msg in results_list:
            if status == "failed":
                log.warning(f"  - {Path(input_path).name}: {err_msg}")
        exit_code = 1  # Exit with error code if failures occurred
    else:
        # Skipped assets count as success for the overall run exit code.
        if successful_processed_count > 0 or skipped_count > 0:
            log.info("All assets processed or skipped successfully.")
            exit_code = 0
        else:
            # This case might happen if all inputs were invalid initially
            log.warning("No assets were processed, skipped, or failed (check input validation logs).")
            exit_code = 0  # Still exit 0 as the script itself didn't crash
    # --- Blender Script Execution (Optional) ---
    nodegroup_blend_path = None
    materials_blend_path = None
    blender_exe = None
    # 1. Find Blender Executable: config path first, then system PATH.
    try:
        blender_exe_config = getattr(core_config_module, 'BLENDER_EXECUTABLE_PATH', None)
        if blender_exe_config:
            # Check if the path in config exists
            if Path(blender_exe_config).is_file():
                blender_exe = str(Path(blender_exe_config).resolve())
                log.info(f"Using Blender executable from config: {blender_exe}")
            else:
                # Fall back to PATH lookup if the configured path is invalid
                log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying to find 'blender' in PATH.")
                blender_exe = shutil.which("blender")
                if blender_exe:
                    log.info(f"Found Blender executable in PATH: {blender_exe}")
                else:
                    log.warning("Could not find 'blender' in system PATH.")
        else:
            # Not configured: try PATH directly
            log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying to find 'blender' in PATH.")
            blender_exe = shutil.which("blender")
            if blender_exe:
                log.info(f"Found Blender executable in PATH: {blender_exe}")
            else:
                log.warning("Could not find 'blender' in system PATH.")
        if not blender_exe:
            log.warning("Blender executable not found or configured. Skipping Blender script execution.")
    except Exception as e:
        log.error(f"Error checking Blender executable path: {e}")
        blender_exe = None  # Ensure it's None on error
    # 2. Determine Blend File Paths if Blender Exe is available
    if blender_exe:
        # Nodegroup Blend Path: CLI argument wins over config default.
        nodegroup_blend_arg = args.nodegroup_blend
        if nodegroup_blend_arg:
            p = Path(nodegroup_blend_arg)
            if p.is_file() and p.suffix.lower() == '.blend':
                nodegroup_blend_path = str(p.resolve())
                log.info(f"Using nodegroup blend file from argument: {nodegroup_blend_path}")
            else:
                log.warning(f"Invalid nodegroup blend file path from argument: '{nodegroup_blend_arg}'. Ignoring.")
        else:
            default_ng_path_str = getattr(core_config_module, 'DEFAULT_NODEGROUP_BLEND_PATH', None)
            if default_ng_path_str:
                p = Path(default_ng_path_str)
                if p.is_file() and p.suffix.lower() == '.blend':
                    nodegroup_blend_path = str(p.resolve())
                    log.info(f"Using default nodegroup blend file from config: {nodegroup_blend_path}")
                else:
                    log.warning(f"Invalid default nodegroup blend file path in config: '{default_ng_path_str}'. Ignoring.")
        # Materials Blend Path: same precedence as above.
        materials_blend_arg = args.materials_blend
        if materials_blend_arg:
            p = Path(materials_blend_arg)
            if p.is_file() and p.suffix.lower() == '.blend':
                materials_blend_path = str(p.resolve())
                log.info(f"Using materials blend file from argument: {materials_blend_path}")
            else:
                log.warning(f"Invalid materials blend file path from argument: '{materials_blend_arg}'. Ignoring.")
        else:
            default_mat_path_str = getattr(core_config_module, 'DEFAULT_MATERIALS_BLEND_PATH', None)
            if default_mat_path_str:
                p = Path(default_mat_path_str)
                if p.is_file() and p.suffix.lower() == '.blend':
                    materials_blend_path = str(p.resolve())
                    log.info(f"Using default materials blend file from config: {materials_blend_path}")
                else:
                    log.warning(f"Invalid default materials blend file path in config: '{default_mat_path_str}'. Ignoring.")
    # 3. Execute Scripts if Paths are Valid
    if blender_exe:
        script_dir = Path(__file__).parent / "blenderscripts"
        nodegroup_script_path = script_dir / "create_nodegroups.py"
        materials_script_path = script_dir / "create_materials.py"
        asset_output_root = output_dir_for_processor  # Use the resolved output dir
        if nodegroup_blend_path:
            if nodegroup_script_path.is_file():
                log.info("-" * 40)
                log.info("Starting Blender Node Group Script Execution...")
                success_ng = run_blender_script(
                    blender_exe_path=blender_exe,
                    blend_file_path=nodegroup_blend_path,
                    python_script_path=str(nodegroup_script_path),
                    asset_root_dir=asset_output_root
                )
                if not success_ng:
                    log.error("Blender node group script execution failed.")
                    # Note: a Blender script failure intentionally does not
                    # change exit_code; only asset-processing failures do.
                log.info("Finished Blender Node Group Script Execution.")
                log.info("-" * 40)
            else:
                log.error(f"Node group script not found: {nodegroup_script_path}")
        if materials_blend_path:
            if materials_script_path.is_file():
                log.info("-" * 40)
                log.info("Starting Blender Material Script Execution...")
                success_mat = run_blender_script(
                    blender_exe_path=blender_exe,
                    blend_file_path=materials_blend_path,
                    python_script_path=str(materials_script_path),
                    asset_root_dir=asset_output_root
                )
                if not success_mat:
                    log.error("Blender material script execution failed.")
                    # See note above: Blender failures are logged, not fatal.
                log.info("Finished Blender Material Script Execution.")
                log.info("-" * 40)
            else:
                log.error(f"Material script not found: {materials_script_path}")
    # --- Final Exit ---
    log.info("Asset Processor Script Finished.")
    sys.exit(exit_code)
if __name__ == "__main__":
    # Run main() only when executed directly (not on import). This guard is
    # also required for multiprocessing on spawn-based platforms (e.g.
    # Windows), where worker processes re-import this module.
    main()