diff --git a/.gitignore b/.gitignore index 15dd6a2..b49ad98 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,7 @@ build/ Thumbs.db gui/__pycache__ __pycache__ + +Testfiles +Testfiles/ +Testfiles_ diff --git a/Documentation/00_Overview.md b/Documentation/00_Overview.md index 9c03bbc..897442c 100644 --- a/Documentation/00_Overview.md +++ b/Documentation/00_Overview.md @@ -12,9 +12,9 @@ This documentation strictly excludes details on environment setup, dependency in ## Architecture and Codebase Summary -For developers interested in contributing, the tool's architecture is designed around a **Core Processing Engine** (`asset_processor.py`) that handles the pipeline for single assets. This engine is supported by a **Configuration System** (`configuration.py` and `config.py` with `Presets/*.json`) and a new **Hierarchical Rule System** (`rule_structure.py`) that allows dynamic overrides of static configurations at Source, Asset, and File levels. Multiple interfaces are provided: a **Graphical User Interface** (`gui/`), a **Command-Line Interface** (`main.py`), and a **Directory Monitor** (`monitor.py`). Optional **Blender Integration** (`blenderscripts/`) is also included. Key new files supporting the hierarchical rule system include `rule_structure.py`, `gui/rule_hierarchy_model.py`, and `gui/rule_editor_widget.py`. +For developers interested in contributing, the tool's architecture centers on a **Core Processing Engine** (`processing_engine.py`) executing a pipeline based on a **Hierarchical Rule System** (`rule_structure.py`) and a **Configuration System** (`configuration.py` loading `config/app_settings.json` and `Presets/*.json`). 
The **Graphical User Interface** (`gui/`) has been significantly refactored: `MainWindow` (`main_window.py`) acts as a coordinator, delegating tasks to specialized widgets (`MainPanelWidget`, `PresetEditorWidget`, `LogConsoleWidget`) and background handlers (`RuleBasedPredictionHandler`, `LLMPredictionHandler`, `LLMInteractionHandler`, `AssetRestructureHandler`). The **Directory Monitor** (`monitor.py`) now processes archives asynchronously using a thread pool and utility functions (`utils/prediction_utils.py`, `utils/workspace_utils.py`). The **Command-Line Interface** entry point (`main.py`) primarily launches the GUI, with core CLI functionality currently non-operational. Optional **Blender Integration** (`blenderscripts/`) remains. A new `utils/` directory houses shared helper functions. -The codebase is organized into key directories and files reflecting these components. The `gui/` directory contains all GUI-related code, `Presets/` holds configuration presets, and `blenderscripts/` contains scripts for Blender interaction. The core logic resides in files like `asset_processor.py`, `configuration.py`, `config.py`, `main.py`, and `monitor.py`. The processing pipeline involves steps such as file classification, map processing, channel merging, and metadata generation. +The codebase reflects this structure. The `gui/` directory contains the refactored UI components, `utils/` holds shared utilities, `Presets/` contains JSON presets, and `blenderscripts/` holds Blender scripts. Core logic resides in `processing_engine.py`, `configuration.py`, `rule_structure.py`, `monitor.py`, and `main.py`. The processing pipeline, executed by `processing_engine.py`, relies entirely on the input `SourceRule` and static configuration for steps like map processing, channel merging, and metadata generation. 
## Table of Contents diff --git a/Documentation/01_User_Guide/05_Usage_GUI.md b/Documentation/01_User_Guide/05_Usage_GUI.md index 06912a7..25bebbc 100644 --- a/Documentation/01_User_Guide/05_Usage_GUI.md +++ b/Documentation/01_User_Guide/05_Usage_GUI.md @@ -21,16 +21,18 @@ python -m gui.main_window * **Preset Selector:** Choose the preset to use for *processing* the current queue. This dropdown now includes a new option: "- LLM Interpretation -". Selecting this option will use the experimental LLM Predictor instead of the traditional rule-based prediction system defined in presets. * **Output Directory:** Set the output path (defaults to `config/app_settings.json`, use "Browse...") * **Drag and Drop Area:** Add asset `.zip`, `.rar`, `.7z` files, or folders by dragging and dropping them here. - * **Preview Table:** Shows queued assets in a hierarchical view (Source -> Asset -> File). Initially, this area displays a message prompting you to select a preset. Once a preset is selected from the Preset List, the detailed file preview will load here. The mode of the preview depends on the "View" menu: - * **Detailed Preview (Default):** Lists all files, predicted status (`Mapped`, `Model`, `Extra`, `Unrecognised`, `Ignored`, `Error`), output name, etc., based on the selected *processing* preset. The columns displayed are: Name, Target Asset, Supplier, Asset Type, Item Type. The "Target Asset" column stretches to fill available space, while others resize to content. The previous "Status" and "Output Path" columns have been removed. Text colors are applied to cells based on the status of the individual file they represent. Rows use alternating background colors per asset group for visual separation. - * **Simple View (Preview Disabled):** Lists only top-level input asset paths. + * **Preview Table:** Shows queued assets in a hierarchical view (Source -> Asset -> File). Assets (files, directories, archives) added via drag-and-drop appear immediately in the table. 
+ * If no preset is selected ("-- Select a Preset --"), added items (including files within directories/archives) are displayed with empty prediction fields (Target Asset, Asset Type, Item Type), which can be manually edited. + * If a valid preset or LLM mode is selected, the table populates with prediction results as they become available. + * The table always displays the detailed view structure with columns: Name, Target Asset, Supplier, Asset Type, Item Type. The "Target Asset" column stretches to fill available space. + * **Coloring:** The *text color* of file items is determined by their Item Type (colors defined in `config/app_settings.json`). The *background color* of file items is a 30% darker shade of their parent asset's background, helping to visually group files within an asset. Asset rows themselves may use alternating background colors based on the application theme. * **Progress Bar:** Shows overall processing progress. * **Blender Post-Processing:** Checkbox to enable Blender scripts. If enabled, shows fields and browse buttons for target `.blend` files (defaults from `config/app_settings.json`). * **Options & Controls (Bottom):** * `Overwrite Existing`: Checkbox to force reprocessing. * `Workers`: Spinbox for concurrent processes. * `Clear Queue`: Button to clear the queue and preview. - * `Start Processing`: Button to start processing the queue. This button is disabled until a valid preset is selected from the Preset List. + * `Start Processing`: Button to start processing the queue. This button is enabled as long as there are items listed in the Preview Table. When clicked, any items that do not have a value assigned in the "Target Asset" column will be automatically ignored for that processing run. * `Cancel`: Button to attempt stopping processing. * **Re-interpret Selected with LLM:** This button appears when the "- LLM Interpretation -" preset is selected. 
It allows you to re-process only the currently selected items in the Preview Table using the LLM, without affecting other items in the queue. This is useful for refining predictions on specific assets. * **Status Bar:** Displays current status, errors, and completion messages. During LLM processing, the status bar will show messages indicating the progress of the LLM requests. diff --git a/Documentation/02_Developer_Guide/01_Architecture.md b/Documentation/02_Developer_Guide/01_Architecture.md index 203e5c5..4ff04e9 100644 --- a/Documentation/02_Developer_Guide/01_Architecture.md +++ b/Documentation/02_Developer_Guide/01_Architecture.md @@ -6,17 +6,17 @@ This document provides a high-level overview of the Asset Processor Tool's archi The Asset Processor Tool is designed to process 3D asset source files into a standardized library format. Its high-level architecture consists of: -1. **Core Processing Engine (`processing_engine.py`):** The primary component responsible for executing the asset processing pipeline for a single input asset based on a provided `SourceRule` object and static configuration. The older `asset_processor.py` remains in the codebase for reference but is no longer used in the main processing flow. -2. **Prediction System:** Responsible for analyzing input files and generating the initial `SourceRule` hierarchy with predicted values. This system now includes two alternative components: - * **Rule-Based Predictor (`prediction_handler.py`):** Uses predefined rules from presets to classify files and determine initial processing parameters. - * **LLM Predictor (`gui/llm_prediction_handler.py`):** An experimental alternative that uses a Large Language Model (LLM) to interpret file contents and context to predict processing parameters. Its role is to generate `SourceRule` objects based on LLM output, which are then used by the processing pipeline. +1. 
**Core Processing Engine (`processing_engine.py`):** The primary component responsible for executing the asset processing pipeline for a single input asset based on a provided `SourceRule` object and static configuration. The previous `asset_processor.py` has been removed. +2. **Prediction System:** Responsible for analyzing input files and generating the initial `SourceRule` hierarchy with predicted values. This system utilizes a base handler (`gui/base_prediction_handler.py::BasePredictionHandler`) with specific implementations: + * **Rule-Based Predictor (`gui/prediction_handler.py::RuleBasedPredictionHandler`):** Uses predefined rules from presets to classify files and determine initial processing parameters. + * **LLM Predictor (`gui/llm_prediction_handler.py::LLMPredictionHandler`):** An experimental alternative that uses a Large Language Model (LLM) to interpret file contents and context to predict processing parameters. 3. **Configuration System (`Configuration`):** Handles loading core settings (including centralized type definitions and LLM-specific configuration) and merging them with supplier-specific rules defined in JSON presets and the persistent `config/suppliers.json` file. 4. **Multiple Interfaces:** Provides different ways to interact with the tool: * Graphical User Interface (GUI) - * Command-Line Interface (CLI) + * Command-Line Interface (CLI) - *Note: The primary CLI execution logic (`run_cli` in `main.py`) is currently non-functional/commented out post-refactoring.* * Directory Monitor for automated processing. -The GUI now acts as the primary source of truth for processing rules, generating and managing the `SourceRule` hierarchy before sending it to the processing engine. It also accumulates prediction results from multiple input sources before updating the view. The CLI and Monitor interfaces can also generate `SourceRule` objects to bypass the GUI for automated workflows. -5. 
**Optional Integration:** Includes scripts and logic for integrating with external software, specifically Blender, to automate material and node group creation. +The GUI acts as the primary source of truth for processing rules, coordinating the generation and management of the `SourceRule` hierarchy before sending it to the processing engine. It accumulates prediction results from multiple input sources before updating the view. The Monitor interface can also generate `SourceRule` objects (using `utils/prediction_utils.py`) to bypass the GUI for automated workflows. +5. **Optional Integration:** Includes scripts (`blenderscripts/`) for integrating with Blender. Logic for executing these scripts was intended to be centralized in `utils/blender_utils.py`, but this utility has not yet been implemented. ## Hierarchical Rule System @@ -35,22 +35,33 @@ This hierarchy allows for fine-grained control over processing parameters. The G * `Presets/*.json`: Supplier-specific JSON files defining rules for file interpretation and initial prediction. * `configuration.py` (`Configuration` class): Loads `config/app_settings.json` settings and merges them with a selected preset, pre-compiling regex patterns for efficiency. This static configuration is used by the processing engine. * `rule_structure.py`: Defines the `SourceRule`, `AssetRule`, and `FileRule` dataclasses used to represent the hierarchical processing rules. -* `gui/`: Directory containing modules for the Graphical User Interface (GUI), built with PySide6. The GUI is responsible for generating and managing the `SourceRule` hierarchy via the Unified View, accumulating prediction results, and interacting with background handlers (`ProcessingHandler`, `PredictionHandler`). 
- * `unified_view_model.py`: Implements the `QAbstractItemModel` for the Unified Hierarchical View, holding the `SourceRule` data, handling inline editing (including direct model restructuring for `target_asset_name_override`), and managing row coloring based on config definitions. - * `delegates.py`: Contains custom `QStyledItemDelegate` implementations for inline editing in the Unified View, including the new `SupplierSearchDelegate` for supplier name auto-completion and management. - * `prediction_handler.py`: Generates the initial `SourceRule` hierarchy with predicted values for a single input source based on its files and the selected preset. It uses the `"standard_type"` from the configuration's `FILE_TYPE_DEFINITIONS` to populate `FileRule.standard_map_type` and implements a two-pass classification logic to handle and prioritize bit-depth variants (e.g., `_DISP16_` vs `_DISP_`). - * `processing_engine.py` (`ProcessingEngine` class): The new core component that executes the processing pipeline for a single `SourceRule` object using the static `Configuration`. A new instance is created per task for state isolation. It contains no internal prediction or fallback logic. Supplier overrides from the GUI are correctly preserved and used by the engine for output path generation and metadata. -* `asset_processor.py` (`AssetProcessor` class): The older processing engine, kept for reference but not used in the main processing flow. -* `main.py`: The entry point for the Command-Line Interface (CLI). It handles argument parsing, logging, parallel processing orchestration, and triggering Blender scripts. It now orchestrates processing by passing `SourceRule` objects to the `ProcessingEngine`. -* `monitor.py`: Implements the directory monitoring feature using `watchdog`. +* `gui/`: Directory containing modules for the Graphical User Interface (GUI), built with PySide6. 
The `MainWindow` (`main_window.py`) acts as a coordinator, orchestrating interactions between various components. Key GUI components include: + * `main_panel_widget.py::MainPanelWidget`: Contains the primary controls for loading sources, selecting presets, viewing/editing rules, and initiating processing. + * `preset_editor_widget.py::PresetEditorWidget`: Provides the interface for managing presets. + * `log_console_widget.py::LogConsoleWidget`: Displays application logs. + * `unified_view_model.py::UnifiedViewModel`: Implements the `QAbstractItemModel` for the hierarchical rule view, holding `SourceRule` data and managing display logic (coloring, etc.). Caches configuration data for performance. + * `rule_hierarchy_model.py::RuleHierarchyModel`: A simpler model used internally by the `UnifiedViewModel` to manage the `SourceRule` data structure. + * `delegates.py`: Contains custom `QStyledItemDelegate` implementations for inline editing in the rule view. + * `asset_restructure_handler.py::AssetRestructureHandler`: Handles complex model updates when a file's target asset is changed via the GUI, ensuring the `SourceRule` hierarchy is correctly modified. + * `base_prediction_handler.py::BasePredictionHandler`: Abstract base class for prediction logic. + * `prediction_handler.py::RuleBasedPredictionHandler`: Generates the initial `SourceRule` hierarchy based on presets and file analysis. Inherits from `BasePredictionHandler`. + * `llm_prediction_handler.py::LLMPredictionHandler`: Experimental predictor using an LLM. Inherits from `BasePredictionHandler`. + * `llm_interaction_handler.py::LLMInteractionHandler`: Manages communication with the LLM service for the LLM predictor. +* `processing_engine.py` (`ProcessingEngine` class): The core component that executes the processing pipeline for a single `SourceRule` object using the static `Configuration`. A new instance is created per task for state isolation. +* `main.py`: The main entry point for the application. 
Primarily launches the GUI. Contains commented-out/non-functional CLI logic (`run_cli`). +* `monitor.py`: Implements the directory monitoring feature using `watchdog`. It now processes archives asynchronously using a `ThreadPoolExecutor`, leveraging `utils/prediction_utils.py` for rule generation and `utils/workspace_utils.py` for workspace management before invoking the `ProcessingEngine`. * `blenderscripts/`: Contains Python scripts designed to be executed *within* Blender for post-processing tasks. +* `utils/`: Directory containing utility modules: + * `workspace_utils.py`: Contains functions like `prepare_processing_workspace` for handling temporary directories and archive extraction. + * `prediction_utils.py`: Contains functions like `generate_source_rule_from_archive` used by the monitor for rule-based prediction. + * `blender_utils.py`: (Intended location for Blender script execution logic, currently not implemented). ## Processing Pipeline (Simplified) The primary processing engine (`processing_engine.py`) executes a series of steps for each asset based on the provided `SourceRule` object and static configuration: -1. Extraction of input to a temporary workspace. -2. Classification of files (map, model, extra, ignored, unrecognised) using preset rules. +1. Extraction of input to a temporary workspace (using `utils/workspace_utils.py`). +2. Classification of files (map, model, extra, ignored, unrecognised) based *only* on the provided `SourceRule` object (classification/prediction happens *before* the engine is called). 3. Determination of base metadata (asset name, category, archetype). 4. Skip check if output exists and overwrite is not forced. 5. Processing of maps (resize, format/bit depth conversion, inversion, stats calculation). @@ -58,6 +69,6 @@ The primary processing engine (`processing_engine.py`) executes a series of step 7. Generation of `metadata.json` file. 8. Organization of processed files into the final output structure. 9.
Cleanup of the temporary workspace. -10. (Optional) Execution of Blender scripts for post-processing. +10. (Optional) Execution of Blender scripts (currently triggered directly, intended to use `utils/blender_utils.py`). -This architecture allows for a modular design, separating configuration, rule generation/management (primarily in the GUI), and core processing execution. The `SourceRule` object serves as a clear data contract between the GUI/prediction layer and the processing engine. Parallel processing is utilized for efficiency, and background threads keep the GUI responsive. \ No newline at end of file +This architecture allows for a modular design, separating configuration, rule generation/management (GUI, Monitor utilities), and core processing execution. The `SourceRule` object serves as a clear data contract between the rule generation layer and the processing engine. Parallel processing (in Monitor) and background threads (in GUI) are utilized for efficiency and responsiveness. 
\ No newline at end of file diff --git a/Documentation/02_Developer_Guide/02_Codebase_Structure.md b/Documentation/02_Developer_Guide/02_Codebase_Structure.md index 99a8022..a969bee 100644 --- a/Documentation/02_Developer_Guide/02_Codebase_Structure.md +++ b/Documentation/02_Developer_Guide/02_Codebase_Structure.md @@ -4,69 +4,90 @@ This document outlines the key files and directories within the Asset Processor ``` Asset_processor_tool/ -├── asset_processor.py # Older core class, kept for reference (not used in main flow) -├── config.py # Core settings, constants, and definitions for allowed asset/file types -├── config/ # Directory for configuration files -│ └── suppliers.json # Persistent list of known supplier names for GUI auto-completion -├── configuration.py # Class for loading and accessing configuration (merges config.py and presets) -├── detailed_documentation_plan.md # (Existing file, potentially outdated) +├── configuration.py # Class for loading and accessing configuration (merges app_settings.json and presets) ├── Dockerfile # Instructions for building the Docker container image -├── documentation_plan.md # Plan for the new documentation structure (this plan) -├── documentation.txt # Original developer documentation (to be migrated) -├── main.py # CLI Entry Point & processing orchestrator (calls processing_engine) -├── monitor.py # Directory monitoring script for automated processing -├── processing_engine.py # New core class handling single asset processing based on SourceRule -├── readme.md # Original main documentation file (to be migrated) -├── readme.md.bak # Backup of readme.md +├── main.py # Main application entry point (primarily GUI launcher) +├── monitor.py # Directory monitoring script for automated processing (async) +├── processing_engine.py # Core class handling single asset processing based on SourceRule ├── requirements-docker.txt # Dependencies specifically for the Docker environment ├── requirements.txt # Python package dependencies 
for standard execution ├── rule_structure.py # Dataclasses for hierarchical rules (SourceRule, AssetRule, FileRule) ├── blenderscripts/ # Scripts for integration with Blender │ ├── create_materials.py # Script to create materials linking to node groups │ └── create_nodegroups.py # Script to create node groups from processed assets -├── Deprecated-POC/ # Directory containing original proof of concept scripts -│ ├── Blender-MaterialsFromNodegroups.py -│ ├── Blender-NodegroupsFromPBRSETS.py -│ └── Standalonebatcher-Main.py -├── Documentation/ # New directory for organized documentation (this structure) +├── config/ # Directory for configuration files +│ ├── app_settings.json # Core settings, constants, and type definitions +│ └── suppliers.json # Persistent list of known supplier names for GUI auto-completion +├── Deprecated/ # Contains old code, documentation, and POC scripts +│ ├── ... +├── Documentation/ # Directory for organized documentation (this structure) │ ├── 00_Overview.md │ ├── 01_User_Guide/ │ └── 02_Developer_Guide/ -├── gui/ # Contains files related to the Graphical User Interface -│ ├── delegates.py # Custom delegates for inline editing in Unified View -│ ├── main_window.py # Main GUI application window and layout -│ ├── processing_handler.py # Handles background processing logic for the GUI -│ ├── prediction_handler.py # Generates initial SourceRule hierarchy with predictions -│ ├── unified_view_model.py # Model for the Unified Hierarchical View -│ └── ... 
# Other GUI components -├── Presets/ # Preset definition files +├── gui/ # Contains files related to the Graphical User Interface (PySide6) +│ ├── asset_restructure_handler.py # Handles model updates for target asset changes +│ ├── base_prediction_handler.py # Abstract base class for prediction logic +│ ├── config_editor_dialog.py # Dialog for editing configuration files +│ ├── delegates.py # Custom delegates for inline editing in rule view +│ ├── llm_interaction_handler.py # Manages communication with LLM service +│ ├── llm_prediction_handler.py # LLM-based prediction handler +│ ├── log_console_widget.py # Widget for displaying logs +│ ├── main_panel_widget.py # Main panel containing core GUI controls +│ ├── main_window.py # Main GUI application window (coordinator) +│ ├── prediction_handler.py # Rule-based prediction handler +│ ├── preset_editor_widget.py # Widget for managing presets +│ ├── preview_table_model.py # Model for the (deprecated?) preview table +│ ├── rule_editor_widget.py # Widget containing the rule hierarchy view and editor +│ ├── rule_hierarchy_model.py # Internal model for rule hierarchy data +│ └── unified_view_model.py # QAbstractItemModel for the rule hierarchy view +├── llm_prototype/ # Files related to the experimental LLM predictor prototype +│ ├── ... +├── Presets/ # Preset definition files (JSON) │ ├── _template.json # Template for creating new presets │ ├── Poliigon.json # Example preset for Poliigon assets │ └── ... # Other presets -├── Project Notes/ # Directory for issue and feature tracking (Markdown files) -│ ├── ... # Various planning and note files -└── Testfiles/ # Directory containing example input assets for testing - └── ... # Example asset ZIPs +├── ProjectNotes/ # Directory for developer notes, plans, etc. (Markdown files) +│ ├── ... +├── PythonCheatsheats/ # Utility Python reference files +│ ├── ... +├── Testfiles/ # Directory containing example input assets for testing +│ ├── ... 
+├── Tickets/ # Directory for issue and feature tracking (Markdown files) +│ ├── ... +└── utils/ # Utility modules shared across the application + ├── prediction_utils.py # Utilities for prediction (e.g., used by monitor) + └── workspace_utils.py # Utilities for managing processing workspaces ``` **Key Files and Directories:** -* `asset_processor.py`: Contains the older `AssetProcessor` class. It is kept for reference but is no longer used in the main processing flow orchestrated by `main.py` or the GUI. -* `config.py`: Stores global default settings, constants, core rules, and centralized definitions for allowed asset and file types (`ASSET_TYPE_DEFINITIONS`, `FILE_TYPE_DEFINITIONS`) used for validation, GUI dropdowns, and coloring. -* `config/`: Directory containing configuration files, such as `suppliers.json`. -* `config/suppliers.json`: A JSON file storing a persistent list of known supplier names, used by the GUI's `SupplierSearchDelegate` for auto-completion. -* `configuration.py`: Defines the `Configuration` class. Responsible for loading core settings from `config.py` and merging them with a specified preset JSON file (`Presets/*.json`). Pre-compiles regex patterns from presets for efficiency. An instance of this class is passed to the `ProcessingEngine`. -* `rule_structure.py`: Defines the `SourceRule`, `AssetRule`, and `FileRule` dataclasses. These structures represent the hierarchical processing rules and are the primary data contract passed from the GUI/prediction layer to the processing engine. -* `processing_engine.py`: Defines the new `ProcessingEngine` class. This is the core component that executes the processing pipeline for a single asset based *solely* on a provided `SourceRule` object and the static `Configuration`. It contains no internal prediction or fallback logic. -* `main.py`: Entry point for the Command-Line Interface (CLI). 
It handles argument parsing, logging setup, parallel processing orchestration (using `concurrent.futures.ProcessPoolExecutor`), and triggering Blender scripts. It now orchestrates processing by generating or receiving `SourceRule` objects and passing them to the `ProcessingEngine`. -* `monitor.py`: Implements the automated directory monitoring feature using the `watchdog` library. Contains the `ZipHandler` class to detect new ZIP files and trigger processing via `main.run_processing`. -* `gui/`: Directory containing all code related to the Graphical User Interface (GUI), built with PySide6. The GUI is responsible for managing user input, generating and editing the `SourceRule` hierarchy, and interacting with background handlers. - * `main_window.py`: Defines the `MainWindow` class, the main application window structure, UI layout, event handling, and menu setup. Integrates the Unified Hierarchical View. Manages GUI-specific logging (`QtLogHandler`). - * `unified_view_model.py`: Implements the `QAbstractItemModel` for the Unified Hierarchical View (`QTreeView`). It holds the `SourceRule` hierarchy and provides data and flags for display and inline editing. - * `delegates.py`: Contains custom `QStyledItemDelegate` implementations (e.g., for `QComboBox`, `QLineEdit`) used by the Unified View to provide inline editors for rule attributes. - * `processing_handler.py`: Defines the `ProcessingHandler` class (runs on a `QThread`). Manages the execution of the `ProcessingEngine` in background processes and communicates status/results back to the GUI. - * `prediction_handler.py`: Defines the `PredictionHandler` class (runs on a `QThread`). Generates the initial `SourceRule` hierarchy with predicted values based on input files and the selected preset. Emits a signal with the generated `SourceRule` list for the GUI. +* `config/`: Directory containing configuration files. 
+ * `app_settings.json`: Stores global default settings, constants, core rules, and centralized definitions for allowed asset and file types (`ASSET_TYPE_DEFINITIONS`, `FILE_TYPE_DEFINITIONS`) used for validation, GUI elements, and coloring. Replaces the old `config.py`. + * `suppliers.json`: A JSON file storing a persistent list of known supplier names, used by the GUI for auto-completion. +* `configuration.py`: Defines the `Configuration` class. Responsible for loading core settings from `config/app_settings.json` and merging them with a specified preset JSON file (`Presets/*.json`). Pre-compiles regex patterns from presets for efficiency. An instance of this class is passed to the `ProcessingEngine`. +* `rule_structure.py`: Defines the `SourceRule`, `AssetRule`, and `FileRule` dataclasses. These structures represent the hierarchical processing rules and are the primary data contract passed from the rule generation layer (GUI, Monitor) to the processing engine. +* `processing_engine.py`: Defines the `ProcessingEngine` class. This is the core component that executes the processing pipeline for a single asset based *solely* on a provided `SourceRule` object and the static `Configuration`. It contains no internal prediction or fallback logic. +* `main.py`: Main entry point for the application. Primarily responsible for initializing and launching the GUI (`gui.main_window.MainWindow`). Contains non-functional/commented-out CLI logic (`run_cli`). +* `monitor.py`: Implements the automated directory monitoring feature using `watchdog`. It now processes detected archives asynchronously using a `ThreadPoolExecutor`. It utilizes `utils.prediction_utils.generate_source_rule_from_archive` for rule-based prediction and `utils.workspace_utils.prepare_processing_workspace` for workspace setup before invoking the `ProcessingEngine`. +* `gui/`: Directory containing all code related to the Graphical User Interface (GUI), built with PySide6. 
The `MainWindow` acts as a coordinator, delegating functionality to specialized widgets and handlers. + * `main_window.py`: Defines the `MainWindow` class. Acts as the main application window and coordinator, connecting signals and slots between different GUI components. + * `main_panel_widget.py`: Defines `MainPanelWidget`, containing the primary user controls (source loading, preset selection, rule view/editor integration, processing buttons). + * `preset_editor_widget.py`: Defines `PresetEditorWidget` for managing presets (loading, saving, editing). + * `log_console_widget.py`: Defines `LogConsoleWidget` for displaying application logs within the GUI. + * `rule_editor_widget.py`: Defines `RuleEditorWidget`, which houses the `QTreeView` for displaying the rule hierarchy. + * `unified_view_model.py`: Defines `UnifiedViewModel` (`QAbstractItemModel`) for the rule hierarchy view. Holds `SourceRule` data, manages display logic (coloring), handles inline editing requests, and caches configuration data for performance. + * `rule_hierarchy_model.py`: Defines `RuleHierarchyModel`, a simpler internal model used by `UnifiedViewModel` to manage the underlying `SourceRule` data structure. + * `delegates.py`: Contains custom `QStyledItemDelegate` implementations used by the `UnifiedViewModel` to provide appropriate inline editors (e.g., dropdowns, text boxes) for different rule attributes. + * `asset_restructure_handler.py`: Defines `AssetRestructureHandler`. Handles the complex logic of modifying the `SourceRule` hierarchy when a user changes a file's target asset via the GUI, ensuring data integrity. Triggered by signals from the model. + * `base_prediction_handler.py`: Defines the abstract `BasePredictionHandler` class, providing a common interface and threading (`QRunnable`) for prediction tasks. + * `prediction_handler.py`: Defines `RuleBasedPredictionHandler` (inherits from `BasePredictionHandler`). 
Generates the initial `SourceRule` hierarchy with predicted values based on input files and the selected preset rules. Runs in a background thread. + * `llm_prediction_handler.py`: Defines `LLMPredictionHandler` (inherits from `BasePredictionHandler`). Experimental handler using an LLM for prediction. Runs in a background thread. + * `llm_interaction_handler.py`: Defines `LLMInteractionHandler`. Manages the communication details (API calls, etc.) with the LLM service, used by `LLMPredictionHandler`. +* `utils/`: Directory containing shared utility modules. + * `workspace_utils.py`: Provides functions for managing processing workspaces, such as creating temporary directories and extracting archives (`prepare_processing_workspace`). Used by `main.py` (ProcessingTask) and `monitor.py`. + * `prediction_utils.py`: Provides utility functions related to prediction, such as generating a `SourceRule` from an archive (`generate_source_rule_from_archive`), used by `monitor.py`. * `blenderscripts/`: Contains Python scripts (`create_nodegroups.py`, `create_materials.py`) designed to be executed *within* Blender for post-processing. -* `Presets/`: Contains supplier-specific configuration files in JSON format, used by the `PredictionHandler` for initial rule generation. +* `Presets/`: Contains supplier-specific configuration files in JSON format, used by the `RuleBasedPredictionHandler` for initial rule generation. * `Testfiles/`: Contains example input assets for testing purposes. -* `Tickets/`: Directory for issue and feature tracking using Markdown files. \ No newline at end of file +* `Tickets/`: Directory for issue and feature tracking using Markdown files. +* `Deprecated/`: Contains older code, documentation, and proof-of-concept scripts that are no longer actively used. 
\ No newline at end of file diff --git a/Documentation/02_Developer_Guide/03_Key_Components.md b/Documentation/02_Developer_Guide/03_Key_Components.md index b53f60f..d547ecf 100644 --- a/Documentation/02_Developer_Guide/03_Key_Components.md +++ b/Documentation/02_Developer_Guide/03_Key_Components.md @@ -6,7 +6,7 @@ This document describes the major classes and modules that form the core of the The `ProcessingEngine` class is the new core component responsible for executing the asset processing pipeline for a *single* input asset. Unlike the older `AssetProcessor`, this engine operates *solely* based on a complete `SourceRule` object provided to its `process()` method and the static `Configuration` object passed during initialization. It contains no internal prediction, classification, or fallback logic. Its key responsibilities include: -* Setting up and cleaning up a temporary workspace for processing. +* Setting up and cleaning up a temporary workspace for processing (potentially using `utils.workspace_utils`). * Extracting or copying input files to the workspace. * Processing files based on the explicit rules and predicted values contained within the input `SourceRule`. * Processing texture maps (resizing, format/bit depth conversion, inversion, stats calculation) using parameters from the `SourceRule` or static `Configuration`. @@ -14,10 +14,6 @@ The `ProcessingEngine` class is the new core component responsible for executing * Generating the `metadata.json` file containing details about the processed asset, incorporating information from the `SourceRule`. * Organizing the final output files into the structured library directory. -## `AssetProcessor` (`asset_processor.py`) - -The `AssetProcessor` class is the older processing engine. It is kept in the codebase for reference but is **no longer used** in the main processing flow orchestrated by `main.py` or the GUI. 
Its original role was similar to the new `ProcessingEngine`, but it included internal prediction, classification, and fallback logic based on hierarchical rules and static configuration. - ## `Rule Structure` (`rule_structure.py`) This module defines the data structures used to represent the hierarchical processing rules: @@ -26,13 +22,13 @@ This module defines the data structures used to represent the hierarchical proce * `AssetRule`: A dataclass representing rules applied at the asset level. It contains nested `FileRule` objects. * `FileRule`: A dataclass representing rules applied at the file level. -These classes hold specific rule parameters (e.g., `supplier_identifier`, `asset_type`, `asset_type_override`, `item_type`, `item_type_override`, `target_asset_name_override`). Attributes like `asset_type` and `item_type_override` now use string types, which are validated against centralized lists in `config.py`. These structures support serialization (Pickle, JSON) to allow them to be passed between different parts of the application, including across process boundaries. +These classes hold specific rule parameters (e.g., `supplier_identifier`, `asset_type`, `asset_type_override`, `item_type`, `item_type_override`, `target_asset_name_override`). Attributes like `asset_type` and `item_type_override` now use string types, which are validated against centralized lists in `config/app_settings.json`. These structures support serialization (Pickle, JSON) to allow them to be passed between different parts of the application, including across process boundaries. ## `Configuration` (`configuration.py`) The `Configuration` class manages the tool's settings. It is responsible for: -* Loading the core default settings defined in `config.py`. +* Loading the core default settings defined in `config/app_settings.json`. * Loading the supplier-specific rules from a selected preset JSON file (`Presets/*.json`). 
* Merging the core settings and preset rules into a single, unified configuration object. * Validating the loaded configuration to ensure required settings are present. @@ -40,86 +36,147 @@ The `Configuration` class manages the tool's settings. It is responsible for: An instance of the `Configuration` class is typically created once per application run (or per processing batch) and passed to the `ProcessingEngine`. -## `MainWindow` (`gui/main_window.py`) +## GUI Components (`gui/`) -The `MainWindow` class is the main application window for the Graphical User Interface (GUI). It handles the overall UI layout and user interaction: +The GUI has been refactored into several key components: -* Defines the main application window structure and layout using PySide6 widgets. -* Arranges the Preset Editor panel (left) and the Unified Hierarchical View (right). -* Setting up the menu bar, including the "View" menu for toggling the Log Console. -* Connecting user interactions (button clicks, drag-and-drop events, edits in the Unified View) to corresponding methods (slots) within the `MainWindow` or other handler classes. -* Managing the display of application logs in the UI console using a custom `QtLogHandler`. -* Interacting with background handlers (`ProcessingHandler`, `PredictionHandler`) via Qt signals and slots to ensure thread-safe updates to the UI during long-running operations. -* Receiving the initial `SourceRule` hierarchy from the `PredictionHandler` and populating the `UnifiedViewModel`. -* Sending the final, potentially user-modified, `SourceRule` list to `main.py` to initiate processing via the `ProcessingEngine`. +### `MainWindow` (`gui/main_window.py`) -## `Unified View Model` (`gui/unified_view_model.py`) +The `MainWindow` class acts as the main application window and **coordinator** for the GUI. Its primary responsibilities now include: + +* Setting up the main window structure and menu bar. 
+* Instantiating and arranging the major GUI widgets: + * `MainPanelWidget` (containing core controls and the rule editor) + * `PresetEditorWidget` + * `LogConsoleWidget` +* Connecting signals and slots between these widgets, the underlying models (`UnifiedViewModel`), and background handlers (`RuleBasedPredictionHandler`, `LLMPredictionHandler`, `LLMInteractionHandler`). +* Managing the overall application state related to GUI interactions (e.g., enabling/disabling controls). +* Handling top-level actions like loading sources (drag-and-drop), initiating predictions, and starting the processing task (via `main.ProcessingTask`). +* Managing the `QThreadPool` for running background tasks (prediction). +* Implementing slots like `_handle_prediction_completion` to update the model/view when prediction results are ready. + +### `MainPanelWidget` (`gui/main_panel_widget.py`) + +This widget contains the central part of the GUI, including: + +* Controls for loading source files/directories. +* The preset selection dropdown. +* Buttons for initiating prediction and processing. +* The `RuleEditorWidget` which houses the hierarchical rule view. + +### `PresetEditorWidget` (`gui/preset_editor_widget.py`) + +This widget provides the interface for managing presets: + +* Loading, saving, and editing preset files (`Presets/*.json`). +* Displaying preset rules and settings. + +### `LogConsoleWidget` (`gui/log_console_widget.py`) + +This widget displays application logs within the GUI: + +* Provides a text area for log messages. +* Integrates with Python's `logging` system via a custom `QtLogHandler`. +* Can be shown/hidden via the main window's "View" menu. + +### `UnifiedViewModel` (`gui/unified_view_model.py`) The `UnifiedViewModel` implements a `QAbstractItemModel` for use with Qt's model-view architecture. 
It is specifically designed to: * Wrap a list of `SourceRule` objects and expose their hierarchical structure (Source -> Asset -> File) to a `QTreeView` (the Unified Hierarchical View). * Provide methods (`data`, `index`, `parent`, `rowCount`, `columnCount`, `flags`, `setData`) required by `QAbstractItemModel` to allow the `QTreeView` to display the rule hierarchy and support inline editing of specific attributes (e.g., `supplier_override`, `asset_type_override`, `item_type_override`, `target_asset_name_override`). -* Handle the direct restructuring of the underlying `SourceRule` hierarchy when `target_asset_name_override` is edited, including moving `FileRule`s and managing `AssetRule` creation/deletion. -* Determine row background colors based on the `asset_type` and `item_type`/`item_type_override` using color metadata from `config.py`. -* Hold the `SourceRule` data that is the single source of truth for the GUI's processing rules. +* Handle requests for data editing (`setData`) by validating input and updating the underlying `RuleHierarchyModel`. **Note:** Complex restructuring logic (e.g., moving files between assets when `target_asset_name_override` changes) is now delegated to the `AssetRestructureHandler`. +* Determine row background colors based on the `asset_type` and `item_type`/`item_type_override` using color metadata from the `Configuration`. +* Hold the `SourceRule` data (via `RuleHierarchyModel`) that is the single source of truth for the GUI's processing rules. +* Cache configuration data (`ASSET_TYPE_DEFINITIONS`, `FILE_TYPE_DEFINITIONS`, color maps) during initialization for improved performance in the `data()` method. +* Includes the `update_rules_for_sources` method, which intelligently merges new prediction results into the existing model data, preserving user overrides where possible. 
-## `Delegates` (`gui/delegates.py`) +### `RuleHierarchyModel` (`gui/rule_hierarchy_model.py`) + +A simpler, non-Qt model used internally by `UnifiedViewModel` to manage the list of `SourceRule` objects and provide methods for accessing and modifying the hierarchy. + +### `AssetRestructureHandler` (`gui/asset_restructure_handler.py`) + +This handler contains the complex logic required to modify the `SourceRule` hierarchy when a file's target asset is changed via the GUI's `UnifiedViewModel`. It: + +* Is triggered by a signal (`targetAssetOverrideChanged`) from the `UnifiedViewModel`. +* Uses dedicated methods on the `RuleHierarchyModel` (`moveFileRule`, `createAssetRule`, `removeAssetRule`) to safely move `FileRule` objects between `AssetRule`s, creating or removing `AssetRule`s as needed. +* Ensures data consistency during these potentially complex restructuring operations. + +### `Delegates` (`gui/delegates.py`) This module contains custom `QStyledItemDelegate` implementations used by the Unified Hierarchical View (`QTreeView`) to provide inline editors for specific data types or rule attributes. Examples include delegates for: -* `ComboBoxDelegate`: For selecting from predefined lists of allowed asset and file types, sourced from `config.py`. +* `ComboBoxDelegate`: For selecting from predefined lists of allowed asset and file types, sourced from the `Configuration` (originally from `config/app_settings.json`). * `LineEditDelegate`: For free-form text editing, such as the `target_asset_name_override`. -* `SupplierSearchDelegate`: A new delegate for the "Supplier" column. It provides a `QLineEdit` with auto-completion suggestions loaded from `config/suppliers.json` and handles adding/saving new suppliers. +* `SupplierSearchDelegate`: For the "Supplier" column. Provides a `QLineEdit` with auto-completion suggestions loaded from `config/suppliers.json` and handles adding/saving new suppliers. 
These delegates handle the presentation and editing of data within the tree view cells, interacting with the `UnifiedViewModel` to get and set data. -## `ProcessingHandler` (`gui/processing_handler.py`) +## Prediction Handlers (`gui/`) -The `ProcessingHandler` class is designed to run in a separate `QThread` within the GUI. Its purpose is to manage the execution of the main asset processing pipeline using the **`ProcessingEngine`** in the background, preventing the GUI from freezing. It: +Prediction logic is handled by classes inheriting from a common base class, running in background threads. -* Manages a `concurrent.futures.ProcessPoolExecutor` to run individual asset processing tasks (`ProcessingEngine.process()`) in separate worker processes. -* Submits processing tasks to the pool, passing the relevant `SourceRule` object and `Configuration` instance to the `ProcessingEngine`. -* Monitors task completion and communicates progress, status updates, and results back to the `MainWindow` using Qt signals. -* Handles the execution of optional Blender scripts via subprocess calls after asset processing is complete. -* Provides logic for cancelling ongoing processing tasks. +### `BasePredictionHandler` (`gui/base_prediction_handler.py`) -## `PredictionHandler` (`gui/prediction_handler.py`) +An abstract base class (`QRunnable`) for prediction handlers. It defines the common structure and signals (`prediction_signal`) used by specific predictor implementations. It's designed to be run in a `QThreadPool`. -The `PredictionHandler` class runs in a separate `QThread` in the GUI and is responsible for generating the initial `SourceRule` hierarchy with predicted values based on the input files and the selected preset *when the rule-based prediction method is selected*. It: +### `RuleBasedPredictionHandler` (`gui/prediction_handler.py`) -* Takes an input source identifier (path), a list of files within that source, and the selected preset name as input. 
-* Uses logic (including accessing preset rules and the `Configuration`'s allowed types) to analyze files and predict initial values for overridable fields in the `SourceRule`, `AssetRule`, and `FileRule` objects (e.g., `supplier_identifier`, `asset_type`, `item_type`, `target_asset_name_override`). -* Constructs a `SourceRule` hierarchy for the single input source. -* Emits a signal (`rule_hierarchy_ready`) with the input source identifier and the generated `SourceRule` object (within a list) to the `MainWindow` for accumulation and eventual population of the `UnifiedViewModel`. +This class (inheriting from `BasePredictionHandler`) is responsible for generating the initial `SourceRule` hierarchy using predefined rules from presets. It: -## `LLMPredictionHandler` (`gui/llm_prediction_handler.py`) +* Takes an input source identifier, file list, and `Configuration` object. +* Analyzes files based on regex patterns and rules defined in the loaded preset. +* Constructs a `SourceRule` hierarchy with predicted values. +* Emits the `prediction_signal` with the generated `SourceRule` object. -The `LLMPredictionHandler` class is an experimental component that runs in a separate `QThread` and provides an alternative to the `PredictionHandler` by using a Large Language Model (LLM) for prediction. Its key responsibilities include: -* Communicating with an external LLM API endpoint (configured via `app_settings.json`). -* Sending relevant file information and context to the LLM based on the `llm_predictor_prompt` and `llm_predictor_examples` settings. -* Parsing the LLM's response to extract predicted values for `SourceRule`, `AssetRule`, and `FileRule` objects. -* Constructs a `SourceRule` hierarchy based on the LLM's interpretation. -* Emits a signal (`llm_prediction_ready`) with the input source identifier and the generated `SourceRule` object (within a list) to the `MainWindow` for accumulation and population of the `UnifiedViewModel`. 
+### `LLMPredictionHandler` (`gui/llm_prediction_handler.py`) -## `UnifiedViewModel` (`gui/unified_view_model.py`) +An experimental predictor (inheriting from `BasePredictionHandler`) that uses a Large Language Model (LLM). It: -*(Note: This section is being moved here from the GUI Internals document for better organization as it's a key component.)* +* Takes an input source identifier, file list, and `Configuration` object. +* Interacts with the `LLMInteractionHandler` to send data to the LLM and receive predictions. +* Parses the LLM response to construct a `SourceRule` hierarchy. +* Emits the `prediction_signal` with the generated `SourceRule` object. -The `UnifiedViewModel` implements a `QAbstractItemModel` for use with Qt's model-view architecture. It is specifically designed to: -* Wrap a list of `SourceRule` objects and expose their hierarchical structure (Source -> Asset -> File) to a `QTreeView` (the Unified Hierarchical View). -* Provide methods (`data`, `index`, `parent`, `rowCount`, `columnCount`, `flags`, `setData`) required by `QAbstractItemModel` to allow the `QTreeView` to display the rule hierarchy and support inline editing of specific attributes (e.g., `supplier_override`, `asset_type_override`, `item_type_override`, `target_asset_name_override`). -* Handle the direct restructuring of the underlying `SourceRule` hierarchy when `target_asset_name_override` is edited, including moving `FileRule`s and managing `AssetRule` creation/deletion. -* Determine row background colors based on the `asset_type` and `item_type`/`item_type_override` using color metadata from the `Configuration`. -* Hold the `SourceRule` data that is the single source of truth for the GUI's processing rules. -* Includes the `update_rules_for_sources` method, which is called by `MainWindow` to update the model's internal `SourceRule` data with new prediction results (from either the `PredictionHandler` or `LLMPredictionHandler`) and trigger the view to refresh. 
+### `LLMInteractionHandler` (`gui/llm_interaction_handler.py`) -## `ZipHandler` (`monitor.py`) +This class manages the specifics of communicating with the configured LLM API: -The `ZipHandler` is a custom event handler used by the `monitor.py` script, built upon the `watchdog` library. It is responsible for: +* Handles constructing prompts based on templates and input data. +* Sends requests to the LLM endpoint. +* Receives and potentially pre-processes the LLM's response before returning it to the `LLMPredictionHandler`. -* Detecting file system events, specifically the creation of new `.zip` files, in the monitored input directory. -* Validating the filename format of detected ZIPs to extract the intended preset name. -* Triggering the main asset processing logic (`main.run_processing`) for valid new ZIP files. -* Managing the movement of processed source ZIP files to 'processed' or 'error' directories. +## Utility Modules (`utils/`) -These key components work together to provide the tool's functionality, separating concerns and utilizing concurrency for performance and responsiveness. The `SourceRule` object serves as a clear data contract between the GUI/prediction layer and the processing engine. \ No newline at end of file +Common utility functions have been extracted into separate modules: + +### `workspace_utils.py` + +Contains functions related to managing the processing workspace: + +* `prepare_processing_workspace`: Creates temporary directories, extracts archive files (ZIP, RAR, 7z), and returns the path to the prepared workspace. Used by `main.ProcessingTask` and `monitor.py`. + +### `prediction_utils.py` + +Contains utility functions supporting prediction tasks: + +* `generate_source_rule_from_archive`: A helper function used by `monitor.py` to perform rule-based prediction directly on an archive file without needing the full GUI setup. 
It extracts files temporarily, runs prediction logic similar to that of `RuleBasedPredictionHandler`, and returns a `SourceRule`. + +## Monitor (`monitor.py`) + +The `monitor.py` script implements the directory monitoring feature. It has been refactored to: + +* Use `watchdog` to detect new archive files in the input directory. +* Use a `ThreadPoolExecutor` to process detected archives asynchronously in a `_process_archive_task` function. +* Within the task, it: + * Loads the necessary `Configuration`. + * Calls `utils.prediction_utils.generate_source_rule_from_archive` to get the `SourceRule`. + * Calls `utils.workspace_utils.prepare_processing_workspace` to set up the workspace. + * Instantiates and runs the `ProcessingEngine`. + * Handles moving the source archive to 'processed' or 'error' directories. + * Cleans up the workspace. + +## Summary + +These key components, along with the refactored GUI structure and new utility modules, work together to provide the tool's functionality. The architecture emphasizes separation of concerns (configuration, rule generation, processing, UI), utilizes background processing for responsiveness (GUI prediction, Monitor tasks), and relies on the `SourceRule` object as the central data structure passed between different stages of the workflow. \ No newline at end of file diff --git a/Documentation/02_Developer_Guide/05_Processing_Pipeline.md b/Documentation/02_Developer_Guide/05_Processing_Pipeline.md index c417bf6..03c1c9d 100644 --- a/Documentation/02_Developer_Guide/05_Processing_Pipeline.md +++ b/Documentation/02_Developer_Guide/05_Processing_Pipeline.md @@ -6,70 +6,61 @@ The `ProcessingEngine.process()` method orchestrates the following pipeline base The pipeline steps are: -1. **Workspace Setup (`_setup_workspace`)**: - * Creates a temporary directory using `tempfile.mkdtemp()` to isolate the processing of the current asset. +1. 
**Workspace Preparation (External)**: + * Before the `ProcessingEngine` is invoked, the calling code (e.g., `main.ProcessingTask`, `monitor._process_archive_task`) is responsible for setting up a temporary workspace. + * This typically involves using `utils.workspace_utils.prepare_processing_workspace`, which creates a temporary directory and extracts the input source (archive or folder) into it. + * The path to this prepared workspace is passed to the `ProcessingEngine` during initialization. -2. **Input Extraction (`_extract_input`)**: - * If the input is a supported archive type (.zip, .rar, .7z), it's extracted into the temporary workspace using the appropriate library (`zipfile`, `rarfile`, or `py7zr`). - * If the input is a directory, its contents are copied into the temporary workspace. - * Includes basic error handling for invalid or password-protected archives. - - 3. **Prediction and Rule Generation (Handled Externally)**: - * Before the `ProcessingEngine` is invoked, either the `PredictionHandler` (rule-based) or the `LLMPredictionHandler` (LLM-based) is used (typically triggered by the GUI) to analyze the input files and generate a `SourceRule` object. - * This `SourceRule` object contains the predicted classifications (`item_type`, `asset_type`, etc.) and any initial overrides based on the chosen prediction method (preset rules or LLM interpretation). - * The GUI allows the user to review and modify these predicted rules before processing begins. - * The final, potentially user-modified, `SourceRule` object is the primary input to the `ProcessingEngine`. - - 4. **File Inventory (`_inventory_and_classify_files`)**: - * Scans the contents of the temporary workspace. - * This step primarily inventories the files present. The *classification* itself (determining `item_type`, etc.) has already been performed by the external prediction handler and is stored within the input `SourceRule`. The engine uses the classifications provided in the `SourceRule`. 
- * Stores the file paths and their associated rules from the `SourceRule` in `self.classified_files`. - - 5. **Base Metadata Determination (`_determine_base_metadata`, `_determine_single_asset_metadata`)**: - * Determines the base asset name, category, and archetype using the explicit values provided in the input `SourceRule` object and the static configuration from the `Configuration` object. Overrides (like `supplier_identifier`, `asset_type`, and `asset_name_override`), including supplier overrides from the GUI, are taken directly from the `SourceRule`. +2. **Prediction and Rule Generation (External)**: + * Also handled before the `ProcessingEngine` is invoked. + * Either the `RuleBasedPredictionHandler`, `LLMPredictionHandler` (triggered by the GUI), or `utils.prediction_utils.generate_source_rule_from_archive` (used by the Monitor) analyzes the input files and generates a `SourceRule` object. + * This `SourceRule` contains predicted classifications and initial overrides. + * If using the GUI, the user can modify these rules. + * The final `SourceRule` object is the primary input to the `ProcessingEngine.process()` method. + +3. **File Inventory (`_inventory_and_classify_files`)**: + * Scans the contents of the *already prepared* temporary workspace. + * This step primarily inventories the files present. The *classification* (determining `item_type`, etc.) is taken directly from the input `SourceRule`. + * Stores the file paths and their associated rules from the `SourceRule` in `self.classified_files`. + +4. **Base Metadata Determination (`_determine_base_metadata`, `_determine_single_asset_metadata`)**: + * Determines the base asset name, category, and archetype using the explicit values provided in the input `SourceRule` and the static `Configuration`. Overrides (like `supplier_identifier`, `asset_type`, `asset_name_override`) are taken directly from the `SourceRule`. 5. 
**Skip Check**: - * If the `overwrite` flag (passed during initialization) is `False`, the tool checks if the final output directory for the determined asset name already exists and contains a `metadata.json` file. - * If both exist, processing for this specific asset is skipped, marked as "skipped", and the pipeline moves to the next asset (if processing multiple assets from one source) or finishes. + * If the `overwrite` flag is `False`, checks if the final output directory already exists and contains `metadata.json`. + * If so, processing for this asset is skipped. 6. **Map Processing (`_process_maps`)**: - * Iterates through the files classified as texture maps for the current asset based on the `SourceRule`. Configuration values used in this step, such as target resolutions, bit depth rules, and output format rules, are retrieved directly from the static `Configuration` object or explicit overrides in the `SourceRule`. - * Loads the image using `cv2.imread` (handling grayscale and unchanged flags). Converts BGR to RGB internally for consistency (except for saving non-EXR formats). - * Handles Glossiness-to-Roughness inversion if necessary (loads gloss, inverts `1.0 - img/norm`, prioritizes gloss source if both exist). - * Resizes the image to target resolutions defined in `IMAGE_RESOULTIONS` (from `Configuration`) using `cv2.resize` (`INTER_LANCZOS4` for downscaling). Upscaling is generally avoided by checks. - * Determines the output bit depth based on `MAP_BIT_DEPTH_RULES` (from `Configuration`) or overrides in the `SourceRule`. - * Determines the output file format (`.jpg`, `.png`, `.exr`) based on a hierarchy of rules defined in the `Configuration` or overrides in the `SourceRule`. - * Converts the NumPy array data type appropriately before saving (e.g., float to uint8/uint16 with scaling). - * Saves the processed map using `cv2.imwrite` (converting RGB back to BGR if saving to non-EXR formats). 
Includes fallback logic (e.g., attempting PNG if saving 16-bit EXR fails). - * Calculates image statistics (Min/Max/Mean) using `_calculate_image_stats` on normalized float64 data for the `CALCULATE_STATS_RESOLUTION` (from `Configuration`). - * Determines the aspect ratio change string (e.g., `"EVEN"`, `"X150"`) using `_normalize_aspect_ratio_change`. - * Stores details about each processed map (path, resolution, format, stats, etc.) in `processed_maps_details_asset`. + * Iterates through files classified as maps in the `SourceRule`. + * Loads images (`cv2.imread`). + * Handles Glossiness-to-Roughness inversion. + * Resizes images based on `Configuration`. + * Determines output bit depth and format based on `Configuration` and `SourceRule`. + * Converts data types and saves images (`cv2.imwrite`). + * Calculates image statistics. + * Stores processed map details. 7. **Map Merging (`_merge_maps_from_source`)**: - * Iterates through the `MAP_MERGE_RULES` defined in the `Configuration`. - * Identifies the required *source* map files needed as input for each merge rule based on the classified files in the `SourceRule`. - * Determines common resolutions available across the required input maps. - * Loads the necessary source map channels for each common resolution (using a helper `_load_and_transform_source` which includes caching). - * Converts inputs to normalized float32 (0-1). - * Injects default channel values (from rule `defaults` in `Configuration` or overrides in `SourceRule`) if an input channel is missing. - * Merges channels using `cv2.merge`. - * Determines output bit depth and format based on rules in `Configuration` or overrides in `SourceRule`. Handles potential JPG 16-bit conflict by forcing 8-bit. - * Saves the merged map using the `_save_image` helper (includes data type/color space conversions and fallback). - * Stores details about each merged map in `merged_maps_details_asset`. + * Iterates through `MAP_MERGE_RULES` in `Configuration`. 
+ * Identifies required source maps based on `SourceRule`. + * Loads source channels, handling missing inputs with defaults from `Configuration` or `SourceRule`. + * Merges channels (`cv2.merge`). + * Determines output format/bit depth and saves the merged map. + * Stores merged map details. 8. **Metadata File Generation (`_generate_metadata_file`)**: - * Collects all determined information for the current asset: base metadata, details from `processed_maps_details_asset` and `merged_maps_details_asset`, list of ignored files, source preset used, etc. This information is derived from the input `SourceRule` and the processing results. - * Writes this collected data into the `metadata.json` file within the temporary workspace using `json.dump`. + * Collects asset metadata, processed/merged map details, ignored files list, etc., primarily from the `SourceRule` and internal processing results. + * Writes data to `metadata.json` in the temporary workspace. 9. **Output Organization (`_organize_output_files`)**: - * Creates the final structured output directory: `///`. The `supplier_name` used here is derived from the `SourceRule`, ensuring that supplier overrides from the GUI are respected in the output path. - * Creates subdirectories `Extra/`, `Unrecognised/`, and `Ignored/` within the asset directory. - * Moves the processed maps, merged maps, model files, `metadata.json`, and files classified as Extra, Unrecognised, or Ignored from the temporary workspace into their respective locations in the final output directory structure. + * Creates the final structured output directory (`///`), using the supplier name from the `SourceRule`. + * Moves processed maps, merged maps, models, metadata, and other classified files from the temporary workspace to the final output directory. -10. **Workspace Cleanup (`_cleanup_workspace`)**: - * Removes the temporary workspace directory and its contents using `shutil.rmtree()`. 
This is called within a `finally` block to ensure cleanup is attempted even if errors occur during processing. +10. **Workspace Cleanup (External)**: + * After the `ProcessingEngine.process()` method completes (successfully or with errors), the *calling code* is responsible for cleaning up the temporary workspace directory created in Step 1. This is often done in a `finally` block where `utils.workspace_utils.prepare_processing_workspace` was called. -11. **(Optional) Blender Script Execution**: - * If triggered via CLI arguments (`--nodegroup-blend`, `--materials-blend`) or GUI controls, the orchestrator (`main.py` or `gui/processing_handler.py`) executes the corresponding Blender scripts (`blenderscripts/*.py`) using `subprocess.run` after the `ProcessingEngine.process()` call completes successfully for an asset batch. See `Developer Guide: Blender Integration Internals` for more details. +11. **(Optional) Blender Script Execution (External)**: + * If triggered (e.g., via CLI arguments or GUI controls), the orchestrating code (e.g., `main.ProcessingTask`) executes the corresponding Blender scripts (`blenderscripts/*.py`) using `subprocess.run` *after* the `ProcessingEngine.process()` call completes successfully. + * *Note: Centralized logic for this was intended for `utils/blender_utils.py`, but this utility has not yet been implemented.* See `Developer Guide: Blender Integration Internals` for more details. This pipeline, executed by the `ProcessingEngine`, provides a clear and explicit processing flow based on the complete rule set provided by the GUI or other interfaces. 
\ No newline at end of file diff --git a/Documentation/02_Developer_Guide/06_GUI_Internals.md b/Documentation/02_Developer_Guide/06_GUI_Internals.md index bde2a9e..88b00f7 100644 --- a/Documentation/02_Developer_Guide/06_GUI_Internals.md +++ b/Documentation/02_Developer_Guide/06_GUI_Internals.md @@ -8,103 +8,156 @@ The GUI is built using `PySide6`, which provides Python bindings for the Qt fram ## Main Window (`gui/main_window.py`) -The `MainWindow` class is the central component of the GUI application. It is responsible for: +The `MainWindow` class acts as the central **coordinator** for the GUI application. It is responsible for: -* Defining the main application window structure and layout using PySide6 widgets. -* Arranging the Preset Editor panel (left) and the **Unified Hierarchical View** (right). -* Setting up the menu bar, including the "View" menu for toggling the Log Console. -* Connecting user interactions (button clicks, drag-and-drop events, edits in the Unified View) to corresponding methods (slots) within the `MainWindow` or other handler classes. -* Managing the display of application logs in the UI console using a custom `QtLogHandler`. -* Interacting with background handlers (`ProcessingHandler`, `PredictionHandler`, `LLMPredictionHandler`) via Qt signals and slots to ensure thread-safe updates to the UI during long-running operations. -* Accumulating prediction results from either the `PredictionHandler` (for rule-based presets) or `LLMPredictionHandler` (for LLM interpretation) for multiple input sources before updating the `UnifiedViewModel`. -* Receiving the initial `SourceRule` hierarchy from the appropriate prediction handler (`rule_hierarchy_ready` or `llm_prediction_ready` signals) and calling the `UnifiedViewModel`'s `update_rules_for_sources` method to populate the view model. -* Sending the final, potentially user-modified, `SourceRule` list to `main.py` to initiate processing via the `ProcessingEngine`. 
-* Handling the selection in the processing preset dropdown (`self.preset_selector`), distinguishing between standard presets and the special `"- LLM Interpretation -"` value. -* Initializing and managing the `self.llm_processing_queue` (a `deque`) when LLM interpretation is selected, adding items to be processed by the LLM. -* Implementing the `_start_llm_prediction` method to initiate the LLM prediction process for the queued items by calling `_process_next_llm_item`. -* Implementing the `_process_next_llm_item` method, which takes the next item from the `llm_processing_queue`, prepares the necessary data, and starts the `LLMPredictionHandler` thread to process that single item. -* Connecting signals from the `LLMPredictionHandler` instance: - * `llm_prediction_ready` signal to a slot (e.g., `_on_llm_prediction_ready`) that receives the generated `SourceRule`, updates the `UnifiedViewModel` (via `update_rules_for_sources`), and calls `_process_next_llm_item` to continue processing the queue. - * `llm_status_update` signal to a slot (e.g., `_on_llm_status_update`) to display LLM processing status messages in the status bar. - * `finished` signal to handle thread cleanup. +* Setting up the main application window structure and menu bar. +* Instantiating and arranging the major GUI widgets: + * `MainPanelWidget` (`gui/main_panel_widget.py`): Contains the core controls, preset selection, and the rule editor. + * `PresetEditorWidget` (`gui/preset_editor_widget.py`): Handles preset loading, saving, and editing. + * `LogConsoleWidget` (`gui/log_console_widget.py`): Displays application logs. +* Instantiating key models and handlers: + * `UnifiedViewModel` (`gui/unified_view_model.py`): The model for the rule hierarchy view. + * `LLMInteractionHandler` (`gui/llm_interaction_handler.py`): Manages communication with the LLM service. +* Connecting signals and slots between these components to orchestrate the application flow. 
+* Handling top-level user interactions like drag-and-drop for loading sources (`add_input_paths`). This method now handles the "placeholder" state (no preset selected) by scanning directories or inspecting archives (ZIP) and creating placeholder `SourceRule`/`AssetRule`/`FileRule` objects to immediately populate the `UnifiedViewModel` with the file structure. +* Initiating predictions based on the selected preset mode (Rule-Based or LLM) when presets change or sources are added. +* Starting the processing task (`_on_process_requested`): This slot now filters the `SourceRule` list obtained from the `UnifiedViewModel`, excluding sources where no asset has a `Target Asset` name assigned, before emitting the `start_backend_processing` signal. It also manages enabling/disabling controls. +* Managing the `QThreadPool` for running background prediction tasks (`RuleBasedPredictionHandler`, `LLMPredictionHandler`). +* Implementing slots to handle results from background tasks: + * `_handle_prediction_completion(source_id, source_rule_list)`: Receives results from either prediction handler via the `prediction_signal`. It calls `self.unified_view_model.update_rules_for_sources()` to update the view model, preserving user overrides where possible. For LLM predictions, it also triggers processing the next item in the queue. + * Slots to handle status updates from the LLM handler. ## Threading and Background Tasks -To keep the UI responsive during intensive operations like asset processing and rule prediction, the GUI utilizes background threads managed by `QThread`. +To keep the UI responsive, prediction tasks run in background threads managed by a `QThreadPool`. -* **`ProcessingHandler` (`gui/processing_handler.py`):** This class is designed to run in a separate `QThread`. It manages the execution of the main asset processing pipeline using the **`ProcessingEngine`** for multiple assets concurrently using `concurrent.futures.ProcessPoolExecutor`. 
It submits individual asset processing tasks to the pool, passing the relevant `SourceRule` object and `Configuration` instance to the `ProcessingEngine`. It monitors task completion and communicates progress, status updates, and results back to the `MainWindow` on the main UI thread using Qt signals. It also handles the execution of optional Blender scripts via subprocess calls after processing. -* **`PredictionHandler` (`gui/prediction_handler.py`):** Runs in a `QThread` when a rule-based preset is selected. Generates the initial `SourceRule` hierarchy based on preset rules and emits `rule_hierarchy_ready`. -* **`LLMPredictionHandler` (`gui/llm_prediction_handler.py`):** Runs in a `QThread` when "- LLM Interpretation -" is selected. Communicates with the LLM API, parses the response, generates the `SourceRule` hierarchy for a *single* input item at a time, and emits `llm_prediction_ready` and `llm_status_update`. +* **`BasePredictionHandler` (`gui/base_prediction_handler.py`):** An abstract `QRunnable` base class defining the common interface and signals (`prediction_signal`, `status_signal`) for prediction tasks. +* **`RuleBasedPredictionHandler` (`gui/prediction_handler.py`):** Inherits from `BasePredictionHandler`. Runs as a `QRunnable` in the thread pool when a rule-based preset is selected. Generates the `SourceRule` hierarchy based on preset rules and emits `prediction_signal`. +* **`LLMPredictionHandler` (`gui/llm_prediction_handler.py`):** Inherits from `BasePredictionHandler`. Runs as a `QRunnable` in the thread pool when "- LLM Interpretation -" is selected. Interacts with `LLMInteractionHandler`, parses the response, generates the `SourceRule` hierarchy for a *single* input item, and emits `prediction_signal` and `status_signal`. +* **`LLMInteractionHandler` (`gui/llm_interaction_handler.py`):** Manages the communication with the LLM service. 
This handler itself may perform network operations but typically runs synchronously within the `LLMPredictionHandler`'s thread. + +*(Note: The actual processing via `ProcessingEngine` is now handled by `main.ProcessingTask`, which runs in a separate process managed outside the GUI's direct threading model, though the GUI initiates it).* ## Communication (Signals and Slots) -Communication between the main UI thread (`MainWindow`) and the background threads (`ProcessingHandler`, `PredictionHandler`, `LLMPredictionHandler`) relies heavily on Qt's signals and slots mechanism. This is a thread-safe way for objects in different threads to communicate. +Communication between the `MainWindow` (main UI thread) and the background prediction tasks relies on Qt's signals and slots. -* Background handlers emit signals to indicate events (e.g., progress updated, file status changed, task finished, prediction ready, LLM status update). -* The `MainWindow` connects slots (methods) to these signals. When a signal is emitted, the connected slot is invoked on the thread that owns the receiving object (the main UI thread for `MainWindow`), ensuring UI updates happen safely. Key signals/slots related to LLM integration: - * `LLMPredictionHandler.llm_prediction_ready(source_id, source_rule_list)` -> `MainWindow._on_llm_prediction_ready(source_id, source_rule_list)` (updates model via `update_rules_for_sources`, processes next queue item) - * `LLMPredictionHandler.llm_status_update(message)` -> `MainWindow._on_llm_status_update(message)` (updates status bar) - * `LLMPredictionHandler.finished` -> `MainWindow._on_llm_thread_finished` (handles thread cleanup) +* Prediction handlers (`RuleBasedPredictionHandler`, `LLMPredictionHandler`) emit signals from the `BasePredictionHandler`: + * `prediction_signal(source_id, source_rule_list)`: Indicates prediction for a source is complete. + * `status_signal(message)`: Provides status updates (primarily from LLM handler). 
+* The `MainWindow` connects slots to these signals: + * `prediction_signal` -> `MainWindow._handle_prediction_completion(source_id, source_rule_list)` + * `status_signal` -> `MainWindow._on_status_update(message)` (updates status bar) +* Signals from the `UnifiedViewModel` (`dataChanged`, `layoutChanged`) trigger updates in the `QTreeView`. +* Signals from the `UnifiedViewModel` (`targetAssetOverrideChanged`) trigger the `AssetRestructureHandler`. -## Preset Editor +## Preset Editor (`gui/preset_editor_widget.py`) -The GUI includes an integrated preset editor panel. This allows users to interactively create, load, modify, and save preset `.json` files directly within the application. The editor typically uses standard UI widgets to display and edit the key fields of the preset structure. +The `PresetEditorWidget` provides a dedicated interface for managing presets. It handles loading, displaying, editing, and saving preset `.json` files. It communicates with the `MainWindow` (e.g., via signals) when a preset is loaded or saved. -## Unified Hierarchical View (`gui/unified_view_model.py`, `gui/delegates.py`) +## Unified Hierarchical View -## Unified Hierarchical View (`gui/unified_view_model.py`, `gui/delegates.py`, `gui/main_window.py`) +The core rule editing interface is built around a `QTreeView` managed within the `MainPanelWidget`, using a custom model and delegates. -The core of the GUI's rule editing interface is the Unified Hierarchical View, implemented using a `QTreeView` with a custom model and delegates. This view is managed within the `MainWindow`. +* **`UnifiedViewModel` (`gui/unified_view_model.py`):** Implements `QAbstractItemModel`. + * Wraps the `RuleHierarchyModel` to expose the `SourceRule` list (Source -> Asset -> File) to the `QTreeView`. + * Provides data for display and flags for editing. + * **Handles `setData` requests:** Validates input and updates the underlying `RuleHierarchyModel`. 
Crucially, it **delegates** complex restructuring (when `target_asset_name_override` changes) to the `AssetRestructureHandler` by emitting the `targetAssetOverrideChanged` signal. + * **Row Coloring:** Provides data for `Qt.ForegroundRole` (text color) based on the `item_type` and the colors defined in `config/app_settings.json`. Provides data for `Qt.BackgroundRole` based on calculating a 30% darker shade of the parent asset's background color. + * **Caching:** Caches configuration data (`ASSET_TYPE_DEFINITIONS`, `FILE_TYPE_DEFINITIONS`, color maps) in `__init__` for performance. + * **`update_rules_for_sources` Method:** Intelligently merges new prediction results or placeholder rules into the existing model data, preserving user overrides where applicable. + * *(Note: The previous concept of switching between "simple" and "detailed" display modes has been removed. The model always represents the full detailed structure.)* +* **`RuleHierarchyModel` (`gui/rule_hierarchy_model.py`):** A non-Qt model holding the actual list of `SourceRule` objects. Provides methods for accessing and modifying the hierarchy (used by `UnifiedViewModel` and `AssetRestructureHandler`). +* **`AssetRestructureHandler` (`gui/asset_restructure_handler.py`):** Contains the logic to modify the `RuleHierarchyModel` when a file's target asset is changed. It listens for the `targetAssetOverrideChanged` signal from the `UnifiedViewModel` and uses methods on the `RuleHierarchyModel` (`moveFileRule`, `createAssetRule`, `removeAssetRule`) to perform the restructuring safely. +* **`Delegates` (`gui/delegates.py`):** Custom `QStyledItemDelegate` implementations provide inline editors: + * **`ComboBoxDelegate`:** For selecting predefined types (from `Configuration`). + * **`LineEditDelegate`:** For free-form text editing. + * **`SupplierSearchDelegate`:** For supplier names with auto-completion (using `config/suppliers.json`). 
-* **`Unified View Model` (`gui/unified_view_model.py`):** This class implements a `QAbstractItemModel` to expose the structure of a list of `SourceRule` objects (Source -> Asset -> File) to the `QTreeView`. It holds the `SourceRule` data that is the single source of truth for the GUI's processing rules. It provides data and flags for display in multiple columns and supports inline editing of specific rule attributes (e.g., asset type, item type override, target asset name override) by interacting with delegates. - * **Column Order and Resizing:** The view currently displays the following columns in order: Name, Target Asset, Supplier, Asset Type, Item Type. The "Target Asset" column is set to stretch to fill available space, while other columns resize to their contents. The previous "Status" and "Output Path" columns have been removed. - * **Direct Model Restructuring:** The `setData` method now includes logic to directly restructure the underlying `SourceRule` hierarchy when the `target_asset_name_override` field of a `FileRule` is edited. This involves moving the `FileRule` to a different `AssetRule` (creating a new one if necessary) and removing the old `AssetRule` if it becomes empty. This replaces the previous mechanism of re-running prediction after an edit. - * **Row Coloring:** Row background colors are dynamically determined based on the `asset_type` (for `AssetRule`s) and `item_type` or `item_type_override` (for `FileRule`s), using the color metadata defined in the `ASSET_TYPE_DEFINITIONS` and `FILE_TYPE_DEFINITIONS` dictionaries sourced from the configuration loaded by `configuration.py` (which includes data from `config/app_settings.json`). `SourceRule` rows have a fixed color. -* **`Delegates` (`gui/delegates.py`):** This module contains custom `QStyledItemDelegate` implementations used by the `QTreeView` to provide inline editors for specific data types or rule attributes. 
- * **`ComboBoxDelegate`:** Used for selecting from predefined lists (e.g., allowed asset types, allowed file types sourced from the configuration loaded by `configuration.py`). - * **`LineEditDelegate`:** Used for free-form text editing (e.g., target asset name override). - * **`SupplierSearchDelegate`:** A new delegate used for the "Supplier" column. It provides a `QLineEdit` with auto-completion suggestions loaded from `config/suppliers.json`. It also handles adding new, unique supplier names entered by the user to the list and saving the updated list back to the JSON file. - -The appropriate prediction handler (`PredictionHandler` or `LLMPredictionHandler`) generates the initial `SourceRule` hierarchy (either for all sources at once or one source at a time for LLM). The `MainWindow` receives this via a signal (`rule_hierarchy_ready` or `llm_prediction_ready`) and calls the `UnifiedViewModel`'s `update_rules_for_sources(source_id, source_rule_list)` method. This method updates the model's internal data structure with the new or updated `SourceRule` object(s) for the given `source_id` and emits the necessary signals (`dataChanged`, `layoutChanged`) to refresh the `QTreeView` display. Edits made in the view directly modify the attributes of the underlying rule objects in the `SourceRule` hierarchy held by the model, with the `UnifiedViewModel` handling the necessary model restructuring and signal emission for view updates. 
- -**Data Flow Diagram (GUI Rule Management):** +**Data Flow Diagram (GUI Rule Management - Refactored):** ```mermaid -graph LR - A[User Input (Drag/Drop, Preset Select)] --> B(MainWindow); - B -- Selects Preset/LLM --> B; - B -- Starts --> C{Prediction Handler (Rule or LLM)}; - C -- rule_hierarchy_ready / llm_prediction_ready --> B; - B -- Calls update_rules_for_sources(source_id, rules) --> D(UnifiedViewModel); - D -- Emits dataChanged/layoutChanged --> E(QTreeView - Unified View); - B -- Sets Model --> E; - E -- Displays Data from --> D; - E -- Uses Delegates from --> F(Delegates); - F -- Interact with --> D; - User -- Edits Rules via --> E; - E -- Updates Data in --> D; - B -- Triggers Processing with Final SourceRule List --> G(main.py / ProcessingHandler); +graph TD + subgraph MainWindow [MainWindow Coordinator] + direction LR + MW_Input[User Input (Drag/Drop, Preset Select)] --> MW(MainWindow); + MW -- Initiates --> PredPool{QThreadPool}; + MW -- Connects Signals --> VM(UnifiedViewModel); + MW -- Connects Signals --> ARH(AssetRestructureHandler); + MW -- Owns/Manages --> MPW(MainPanelWidget); + MW -- Owns/Manages --> PEW(PresetEditorWidget); + MW -- Owns/Manages --> LCW(LogConsoleWidget); + MW -- Owns/Manages --> LLMIH(LLMInteractionHandler); + end + + subgraph MainPanel [MainPanelWidget] + direction TB + MPW_UI[UI Controls (Load, Predict, Process Btns)]; + MPW_UI --> MPW; + MPW -- Contains --> REW(RuleEditorWidget); + end + + subgraph RuleEditor [RuleEditorWidget] + direction TB + REW -- Contains --> TV(QTreeView - Rule View); + end + + subgraph Prediction [Background Prediction] + direction TB + PredPool -- Runs --> RBP(RuleBasedPredictionHandler); + PredPool -- Runs --> LLMP(LLMPredictionHandler); + LLMP -- Uses --> LLMIH; + RBP -- prediction_signal --> MW; + LLMP -- prediction_signal --> MW; + LLMP -- status_signal --> MW; + end + + subgraph ModelView [Model/View Components] + direction TB + TV -- Sets Model --> VM; + TV -- Displays Data From --> VM; + 
TV -- Uses Delegates --> Del(Delegates); + UserEdit[User Edits Rules] --> TV; + TV -- setData --> VM; + VM -- Wraps --> RHM(RuleHierarchyModel); + VM -- dataChanged/layoutChanged --> TV; + VM -- targetAssetOverrideChanged --> ARH; + ARH -- Modifies --> RHM; + Del -- Get/Set Data --> VM; + end + + MW -- _handle_prediction_completion --> VM; + MW -- Triggers Processing --> ProcTask(main.ProcessingTask); + + %% Connections between subgraphs + MPW --> MW; + PEW --> MW; + LCW --> MW; + VM --> MW; + ARH --> MW; + LLMIH --> MW; + REW --> MPW; ``` ## Application Styling -The application style is explicitly set to 'Fusion' in `gui/main_window.py` to provide a more consistent look and feel across different operating systems. A custom `QPalette` is also applied to the application to adjust default colors within the 'Fusion' style. +The application style is explicitly set to 'Fusion' in `gui/main_window.py`. A custom `QPalette` adjusts default colors. -## Logging +## Logging (`gui/log_console_widget.py`) -A custom `QtLogHandler` is used to redirect log messages from the standard Python `logging` module to a text area or console widget within the GUI, allowing users to see detailed application output and errors. +The `LogConsoleWidget` displays logs captured by a custom `QtLogHandler` from Python's `logging` module. ## Cancellation -The GUI provides a "Cancel" button to stop ongoing processing. The `ProcessingHandler` implements logic to handle cancellation requests. This typically involves setting an internal flag and attempting to shut down the `ProcessPoolExecutor`. However, it's important to note that this does not immediately terminate worker processes that are already executing; it primarily prevents new tasks from starting and stops processing results from completed futures once the cancellation flag is checked. +The GUI provides a "Cancel" button. 
Cancellation logic for the actual processing is now likely handled within the `main.ProcessingTask` or the code that manages it, as the `ProcessingHandler` has been removed. The GUI button would signal this external task manager. ## GUI Configuration Editor (`gui/config_editor_dialog.py`) -A dedicated dialog, implemented in `gui/config_editor_dialog.py`, provides a graphical interface for editing the core application settings stored in `config/app_settings.json`. +A dedicated dialog for editing `config/app_settings.json`. -* **Functionality:** This dialog loads the current content of `config/app_settings.json` and presents it in a tabbed layout (e.g., "General", "Output & Naming") using standard GUI widgets mapped to the JSON structure. It supports editing basic fields, tables for definitions (`FILE_TYPE_DEFINITIONS`, `ASSET_TYPE_DEFINITIONS`), and a list/detail view for merge rules (`MAP_MERGE_RULES`). The definitions tables include dynamic color editing features. -* **Limitations:** Currently, editing complex fields like `IMAGE_RESOLUTIONS` or the full details of `MAP_MERGE_RULES` via the UI is not fully supported. -* **Integration:** The `MainWindow` is responsible for creating and displaying an instance of this dialog when the user selects the "Edit" -> "Preferences..." menu option. -* **Persistence:** Changes saved via this editor are written directly to the `config/app_settings.json` file, ensuring they persist across application sessions. However, the `Configuration` class loads settings at application startup, so a restart is required for changes made in the editor to take effect in the application's processing logic. +* **Functionality:** Loads `config/app_settings.json`, presents in tabs, allows editing basic fields, definitions tables (with color editing), and merge rules list/detail. +* **Limitations:** Editing complex fields like `IMAGE_RESOLUTIONS` or full `MAP_MERGE_RULES` details might still be limited. 
+* **Integration:** Launched by `MainWindow` ("Edit" -> "Preferences..."). +* **Persistence:** Saves changes to `config/app_settings.json`. Requires application restart for changes to affect processing logic loaded by the `Configuration` class. -These key components work together to provide the tool's functionality, separating concerns and utilizing concurrency for performance and responsiveness. The Unified Hierarchical View centralizes rule management in the GUI, and the `SourceRule` object serves as a clear data contract passed to the processing engine. \ No newline at end of file +The refactored GUI separates concerns into distinct widgets and handlers, coordinated by the `MainWindow`. Background tasks use `QThreadPool` and `QRunnable`. The `UnifiedViewModel` focuses on data presentation and simple edits, delegating complex restructuring to the `AssetRestructureHandler`. \ No newline at end of file diff --git a/Documentation/02_Developer_Guide/07_Monitor_Internals.md b/Documentation/02_Developer_Guide/07_Monitor_Internals.md index 1694099..6ddeaaa 100644 --- a/Documentation/02_Developer_Guide/07_Monitor_Internals.md +++ b/Documentation/02_Developer_Guide/07_Monitor_Internals.md @@ -4,30 +4,41 @@ This document provides technical details about the implementation of the Directo ## Overview -The `monitor.py` script provides an automated way to process assets by monitoring a specified input directory for new ZIP files. It is built using the `watchdog` library. +The `monitor.py` script provides an automated way to process assets by monitoring a specified input directory for new archive files. It has been refactored to use a `ThreadPoolExecutor` for asynchronous processing. ## Key Components -* **`watchdog` Library:** The script relies on the `watchdog` library for monitoring file system events. Specifically, it uses a `PollingObserver` to watch the `INPUT_DIR` for changes. -* **`ZipHandler` Class:** This is a custom event handler class defined within `monitor.py`. 
It inherits from a `watchdog` event handler class (likely `FileSystemEventHandler` or similar, though not explicitly stated in the source text, it's the standard pattern). Its primary method of interest is the one that handles file creation events (`on_created`). -* **`main.run_processing`:** The monitor script triggers the main asset processing logic by calling the `run_processing` function from the `main.py` module. +* **`watchdog` Library:** Used for monitoring file system events (specifically file creation) in the `INPUT_DIR`. A `PollingObserver` is typically used. +* **`concurrent.futures.ThreadPoolExecutor`:** Manages a pool of worker threads to process detected archives concurrently. The number of workers can often be configured (e.g., via `NUM_WORKERS` environment variable). +* **`_process_archive_task` Function:** The core function executed by the thread pool for each detected archive. It encapsulates the entire processing workflow for a single archive. +* **`utils.prediction_utils.generate_source_rule_from_archive`:** A utility function called by `_process_archive_task` to perform rule-based prediction directly on the archive file and generate the necessary `SourceRule` object. +* **`utils.workspace_utils.prepare_processing_workspace`:** A utility function called by `_process_archive_task` to create a temporary workspace and extract the archive contents into it. +* **`ProcessingEngine` (`processing_engine.py`):** The core engine instantiated and run within `_process_archive_task` to perform the actual asset processing based on the generated `SourceRule`. +* **`Configuration` (`configuration.py`):** Loaded within `_process_archive_task` based on the preset derived from the archive filename. -## Functionality Details +## Functionality Details (Asynchronous Workflow) -1. **Watching:** A `PollingObserver` is set up to monitor the directory specified by the `INPUT_DIR` environment variable. 
Polling is used, checking for changes at a frequency defined by `POLL_INTERVAL`. -2. **Event Handling:** The `ZipHandler` is attached to the observer. When a file is created in the monitored directory, the `on_created` method of the `ZipHandler` is triggered. -3. **ZIP File Detection:** The `on_created` method checks if the newly created file is a `.zip` file. -4. **Filename Parsing:** If it's a ZIP file, the script expects the filename to follow a specific format: `[preset]_filename.zip`. It uses a regular expression (`PRESET_FILENAME_REGEX`, likely defined in `config.py` or similar) to extract the `[preset]` part from the filename. -5. **Preset Validation:** It validates whether the extracted `preset` name corresponds to an existing preset JSON file in the `Presets/` directory. -6. **Triggering Processing:** If the preset is valid, the `monitor.py` script calls `main.run_processing`, passing the path to the detected ZIP file and the extracted preset name. This initiates the main asset processing pipeline for that single asset. A `PROCESS_DELAY` can be configured to wait before triggering processing, potentially allowing large files to finish copying. -7. **Source ZIP Management:** After the processing initiated by `main.run_processing` completes, the original source `.zip` file is moved to either the `PROCESSED_DIR` (if processing was successful or skipped) or the `ERROR_DIR` (if processing failed or the preset was invalid). +1. **Watching:** A `watchdog` observer monitors the `INPUT_DIR` for file creation events. +2. **Event Handling:** When a file is created, an event handler (e.g., `on_created` method) is triggered. +3. **Archive Detection:** The handler checks if the new file is a supported archive type (e.g., `.zip`, `.rar`, `.7z`). +4. **Filename Parsing:** If it's a supported archive, the script attempts to parse the filename to extract the intended preset name (e.g., using a regex like `[preset]_filename.ext`). +5. 
**Preset Validation:** The extracted preset name is validated against existing preset files (`Presets/*.json`). +6. **Task Submission:** If the preset is valid, the path to the archive file and the validated preset name are submitted as a task to the `ThreadPoolExecutor`, which will eventually run the `_process_archive_task` function with these arguments in a worker thread. +7. **`_process_archive_task` Execution (Worker Thread):** + * **Load Configuration:** Loads the `Configuration` object using the provided preset name. + * **Generate SourceRule:** Calls `utils.prediction_utils.generate_source_rule_from_archive`, passing the archive path and `Configuration`. This utility handles temporary extraction (if needed internally) and rule-based prediction, returning the `SourceRule`. + * **Prepare Workspace:** Calls `utils.workspace_utils.prepare_processing_workspace`, passing the archive path. This creates a unique temporary directory and extracts the archive contents. It returns the path to the prepared workspace. This step should ideally be wrapped in a `try...finally` block to ensure cleanup. + * **Instantiate Engine:** Creates an instance of the `ProcessingEngine`, passing the loaded `Configuration` and the prepared workspace path. + * **Run Processing:** Calls the `ProcessingEngine.process()` method, passing the generated `SourceRule`. + * **Handle Results:** Based on the success or failure of the processing, moves the original source archive file from `INPUT_DIR` to either `PROCESSED_DIR` or `ERROR_DIR`. + * **Cleanup Workspace:** Ensures the temporary workspace directory created by `prepare_processing_workspace` is removed (e.g., in the `finally` block). ## Configuration -The monitor's behavior is primarily controlled by environment variables, which are read by the `monitor.py` script. These include `INPUT_DIR`, `OUTPUT_DIR`, `PROCESSED_DIR`, `ERROR_DIR`, `LOG_LEVEL`, `POLL_INTERVAL`, and `NUM_WORKERS`. 
+The monitor's behavior is controlled by environment variables or configuration settings, likely including `INPUT_DIR`, `OUTPUT_DIR`, `PROCESSED_DIR`, `ERROR_DIR`, `LOG_LEVEL`, `POLL_INTERVAL`, and potentially `NUM_WORKERS` to control the size of the `ThreadPoolExecutor`. ## Limitations -* The current implementation of the directory monitor does *not* support triggering the optional Blender script execution after processing. This post-processing step is only available when running the tool via the CLI or GUI. +* The monitor likely still does *not* support triggering the optional Blender script execution as a post-processing step, as this integration point was complex and may have been removed or not yet reimplemented in the refactored workflow. -Understanding the interaction between `watchdog`, the `ZipHandler`, and the call to `main.run_processing` is key to debugging or modifying the directory monitoring functionality. \ No newline at end of file +Understanding the asynchronous nature, the role of the `ThreadPoolExecutor`, the `_process_archive_task` function, and the reliance on utility modules (`prediction_utils`, `workspace_utils`) is key to debugging or modifying the directory monitoring functionality. 
\ No newline at end of file diff --git a/Testfiles/BarkBrown013.zip b/Testfiles/BarkBrown013.zip deleted file mode 100644 index 3ae0ea3..0000000 Binary files a/Testfiles/BarkBrown013.zip and /dev/null differ diff --git a/Testfiles/CarpetMultiLevelLoopPileHerringbone001.zip b/Testfiles/CarpetMultiLevelLoopPileHerringbone001.zip deleted file mode 100644 index 78ed38a..0000000 Binary files a/Testfiles/CarpetMultiLevelLoopPileHerringbone001.zip and /dev/null differ diff --git a/Testfiles/Concrete24.zip b/Testfiles/Concrete24.zip deleted file mode 100644 index cfa968b..0000000 Binary files a/Testfiles/Concrete24.zip and /dev/null differ diff --git a/Testfiles/Dinesen-Test.zip b/Testfiles/Dinesen-Test.zip deleted file mode 100644 index 415dcb8..0000000 Binary files a/Testfiles/Dinesen-Test.zip and /dev/null differ diff --git a/Testfiles/FabricDenim003.zip b/Testfiles/FabricDenim003.zip deleted file mode 100644 index 5752e20..0000000 Binary files a/Testfiles/FabricDenim003.zip and /dev/null differ diff --git a/asset_processor.py b/asset_processor.py deleted file mode 100644 index 18a4e10..0000000 --- a/asset_processor.py +++ /dev/null @@ -1,2763 +0,0 @@ -# asset_processor.py - -import os -import math -import shutil -import tempfile -import zipfile -import logging -import json -import re -import time -from pathlib import Path -from fnmatch import fnmatch # For pattern matching like *.fbx, *_Preview* -from typing import List, Dict, Tuple, Optional # Added for type hinting -from collections import defaultdict # Added for grouping -from rule_structure import SourceRule # Import SourceRule - - -# Attempt to import archive libraries -try: - import rarfile - import py7zr -except ImportError as e: - print(f"ERROR: Missing required archive libraries: {e}") - print("Please install them using:") - print("pip install rarfile py7zr") - # Do not exit here, allow the script to run but extraction will fail for these types - rarfile = None # Set to None so checks can still be made - py7zr 
= None # Set to None - -# Attempt to import image processing libraries -try: - import cv2 - import numpy as np -except ImportError: - print("ERROR: Missing required image processing libraries. Please install opencv-python and numpy:") - print("pip install opencv-python numpy") - exit(1) # Exit if essential libraries are missing - -# Attempt to import OpenEXR - Check if needed for advanced EXR flags/types -try: - import OpenEXR - import Imath - _HAS_OPENEXR = True -except ImportError: - _HAS_OPENEXR = False - # Log this information - basic EXR might still work via OpenCV - logging.debug("Optional 'OpenEXR' python package not found. EXR saving relies on OpenCV's built-in support.") - - -# Assuming Configuration class is in configuration.py -try: - from configuration import Configuration, ConfigurationError -except ImportError: - print("ERROR: Cannot import Configuration class from configuration.py.") - print("Ensure configuration.py is in the same directory or Python path.") - exit(1) - -# Use logger defined in main.py (or configure one here if run standalone) -log = logging.getLogger(__name__) -# Basic config if logger hasn't been set up elsewhere (e.g., during testing) -if not log.hasHandlers(): - logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') # Reverted basicConfig level - - -# --- Custom Exception --- -class AssetProcessingError(Exception): - """Custom exception for errors during asset processing.""" - pass - -# --- Helper Functions --- -def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]: - """ - Calculates target dimensions by first scaling to fit target_max_dim - while maintaining aspect ratio, then finding the nearest power-of-two - value for each resulting dimension (Stretch/Squash to POT). 
- """ - if orig_w <= 0 or orig_h <= 0: - # Fallback to target_max_dim if original dimensions are invalid - pot_dim = get_nearest_pot(target_max_dim) - log.warning(f"Invalid original dimensions ({orig_w}x{orig_h}). Falling back to nearest POT of target_max_dim: {pot_dim}x{pot_dim}") - return (pot_dim, pot_dim) - - # Step 1: Calculate intermediate dimensions maintaining aspect ratio - ratio = orig_w / orig_h - if ratio > 1: # Width is dominant - scaled_w = target_max_dim - scaled_h = max(1, round(scaled_w / ratio)) - else: # Height is dominant or square - scaled_h = target_max_dim - scaled_w = max(1, round(scaled_h * ratio)) - - # Step 2: Find the nearest power of two for each scaled dimension - pot_w = get_nearest_pot(scaled_w) - pot_h = get_nearest_pot(scaled_h) - - log.debug(f"POT Calc: Orig=({orig_w}x{orig_h}), MaxDim={target_max_dim} -> Scaled=({scaled_w}x{scaled_h}) -> POT=({pot_w}x{pot_h})") - - return int(pot_w), int(pot_h) - -def _calculate_image_stats(image_data: np.ndarray) -> dict | None: - """ - Calculates min, max, mean for a given numpy image array. - Handles grayscale and multi-channel images. Converts to float64 for calculation. 
- """ - if image_data is None: - log.warning("Attempted to calculate stats on None image data.") - return None - try: - # Use float64 for calculations to avoid potential overflow/precision issues - data_float = image_data.astype(np.float64) - - # Normalize data_float based on original dtype before calculating stats - if image_data.dtype == np.uint16: - log.debug("Stats calculation: Normalizing uint16 data to 0-1 range.") - data_float /= 65535.0 - elif image_data.dtype == np.uint8: - log.debug("Stats calculation: Normalizing uint8 data to 0-1 range.") - data_float /= 255.0 - # Assuming float inputs are already in 0-1 range or similar - - log.debug(f"Stats calculation: data_float dtype: {data_float.dtype}, shape: {data_float.shape}") - # Log a few sample values to check range after normalization - if data_float.size > 0: - sample_values = data_float.flatten()[:10] # Get first 10 values - log.debug(f"Stats calculation: Sample values (first 10) after normalization: {sample_values.tolist()}") - - - if len(data_float.shape) == 2: # Grayscale (H, W) - min_val = float(np.min(data_float)) - max_val = float(np.max(data_float)) - mean_val = float(np.mean(data_float)) - stats = {"min": min_val, "max": max_val, "mean": mean_val} - log.debug(f"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}") - elif len(data_float.shape) == 3: # Color (H, W, C) - channels = data_float.shape[2] - min_val = [float(v) for v in np.min(data_float, axis=(0, 1))] - max_val = [float(v) for v in np.max(data_float, axis=(0, 1))] - mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))] - # The input data_float is now expected to be in RGB order after conversion in _process_maps - stats = {"min": min_val, "max": max_val, "mean": mean_val} - log.debug(f"Calculated {channels}-Channel Stats (RGB order): Min={min_val}, Max={max_val}, Mean={mean_val}") - else: - log.warning(f"Cannot calculate stats for image with unsupported shape {data_float.shape}") - return None 
- return stats - except Exception as e: - log.error(f"Error calculating image stats: {e}", exc_info=True) # Log exception info - return {"error": str(e)} - - -# --- Helper function --- -def _get_base_map_type(target_map_string: str) -> str: - """Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').""" - match = re.match(r"([a-zA-Z]+)", target_map_string) - if match: - return match.group(1).upper() - return target_map_string.upper() # Fallback if no number suffix - - -def _is_power_of_two(n: int) -> bool: - """Checks if a number is a power of two.""" - return (n > 0) and (n & (n - 1) == 0) - -def get_nearest_pot(value: int) -> int: - """Finds the nearest power of two to the given value.""" - if value <= 0: - return 1 # Or raise error, POT must be positive - if _is_power_of_two(value): - return value - - # Calculate the powers of two below and above the value - lower_pot = 1 << (value.bit_length() - 1) - upper_pot = 1 << value.bit_length() - - # Determine which power of two is closer - if (value - lower_pot) < (upper_pot - value): - return lower_pot - else: - return upper_pot - -# --- Asset Processor Class --- -class AssetProcessor: - """ - Handles the processing pipeline for a single asset (ZIP or folder). - """ - # Define the list of known grayscale map types (adjust as needed) - GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK'] - - def __init__(self, input_path: Path, config: Configuration, output_base_path: Path, overwrite: bool = False): - """ - Initializes the processor for a given input asset. - - Args: - input_path: Path to the input ZIP file or folder. - config: The loaded Configuration object. - output_base_path: The base directory where processed output will be saved. - overwrite: If True, forces reprocessing even if output exists. 
- """ - if not isinstance(input_path, Path): input_path = Path(input_path) - if not isinstance(output_base_path, Path): output_base_path = Path(output_base_path) - if not isinstance(config, Configuration): raise TypeError("config must be a Configuration object.") - - if not input_path.exists(): - raise AssetProcessingError(f"Input path does not exist: {input_path}") - supported_suffixes = ['.zip', '.rar', '.7z'] - if not (input_path.is_dir() or (input_path.is_file() and input_path.suffix.lower() in supported_suffixes)): - raise AssetProcessingError(f"Input path must be a directory or a supported archive file (.zip, .rar, .7z): {input_path}") - - self.input_path: Path = input_path - self.config: Configuration = config - self.output_base_path: Path = output_base_path - self.overwrite: bool = overwrite # Store the overwrite flag - - self.temp_dir: Path | None = None # Path to the temporary working directory - self.classified_files: dict[str, list[dict]] = { - "maps": [], "models": [], "extra": [], "ignored": [] - } - # These will no longer store instance-wide results, but are kept for potential future use or refactoring - # self.processed_maps_details: dict[str, dict[str, dict]] = {} - # self.merged_maps_details: dict[str, dict[str, dict]] = {} - # self.metadata_file_path_temp: Path | None = None - # self.metadata: dict = {} # Metadata is now handled per-asset within the process loop - - log.debug(f"AssetProcessor initialized for: {self.input_path.name}") - - - # --- Helper Method: Get Rule with Fallback --- - def _get_rule_with_fallback(self, rules: SourceRule, rule_key: str, file_path: Path | None = None, asset_name: str | None = None, default=None): - """ - Retrieves a rule value using hierarchical fallback logic: - File-specific > Asset-specific > Source/General Rules > Config Default. 
- """ - # Prioritize File > Asset > Source > Config - # Prioritize File > Asset > Source > Config - # Check File-specific rules by iterating through AssetRule and FileRule objects - if file_path and rules.assets: # Check if file_path is provided and there are assets to search - log.debug(f"Checking File level rules for '{file_path}'...") - for asset_rule in rules.assets: - # Check if the current asset_rule matches the asset_name being processed - # This requires asset_name to be passed correctly to this method - if asset_name and asset_rule.asset_name == asset_name: - log.debug(f" Found matching AssetRule for '{asset_name}'. Checking its files...") - for file_rule in asset_rule.files: - # Check if the file_rule's path matches the file_path being processed - if file_rule.file_path and Path(file_rule.file_path) == file_path: - log.debug(f" Found matching FileRule for '{file_path}'. Checking for rule_key '{rule_key}'.") - # Check if rule_key is in channel_merge_instructions - if rule_key in file_rule.channel_merge_instructions: - log.debug(f" Rule '{rule_key}' found in channel_merge_instructions.") - return file_rule.channel_merge_instructions[rule_key] - # Check if rule_key is a direct attribute of FileRule (e.g., map_type_override) - if hasattr(file_rule, rule_key) and getattr(file_rule, rule_key) is not None: - log.debug(f" Rule '{rule_key}' found as direct attribute in FileRule.") - return getattr(file_rule, rule_key) - log.debug(f" Rule '{rule_key}' not found in matching FileRule for '{file_path}'.") - # Found the file rule, but the key wasn't there, so no need to check other files for this asset - break - # Found the asset rule, checked its files, now break from asset loop - break - - # Check Asset-specific rules by iterating through the list of AssetRule objects - # Check Asset-specific rules by iterating through the list of AssetRule objects - if asset_name and rules.assets: - for asset_rule in rules.assets: - if asset_rule.asset_name == asset_name: - 
log.debug(f"Found matching AssetRule for '{asset_name}'. Checking for rule_key '{rule_key}'.") - # Check if rule_key is a direct attribute of AssetRule - if hasattr(asset_rule, rule_key) and getattr(asset_rule, rule_key) is not None: - log.debug(f"Rule '{rule_key}' found as direct attribute in AssetRule for '{asset_name}'.") - return getattr(asset_rule, rule_key) - # Check if rule_key is in common_metadata - if rule_key in asset_rule.common_metadata: - log.debug(f"Rule '{rule_key}' found in common_metadata for '{asset_name}'.") - return asset_rule.common_metadata[rule_key] - log.debug(f"Rule '{rule_key}' not found in matching AssetRule for '{asset_name}'.") - break # Found the asset rule, but the key wasn't there, so no need to continue searching assets - if rule_key in rules.high_level_sorting_parameters: - log.debug(f"Rule '{rule_key}' found at Source level.") - return rules.high_level_sorting_parameters[rule_key] - # Fallback to config - log.debug(f"Rule '{rule_key}' not found in rules, falling back to config default.") - config_default = getattr(self.config, rule_key, default) - # If the default is a callable method on the config object, call it - if callable(config_default) and hasattr(self.config, rule_key): - log.debug(f"Config default for '{rule_key}' is a callable method. Calling it.") - return config_default() - return config_default - - - # --- New Helper Function: Load and Transform Source --- - def _load_and_transform_source(self, source_path_rel: Path, map_type: str, target_resolution_key: str, is_gloss_source: bool, cache: dict) -> Tuple[Optional[np.ndarray], Optional[np.dtype]]: - """ - Loads a source image file, performs initial prep (BGR->RGB, Gloss->Rough), - resizes it to the target resolution, and caches the result. - - Args: - source_path_rel: Relative path to the source file within the temp directory. - map_type: The standard map type (e.g., "NRM", "ROUGH"). - target_resolution_key: The key for the target resolution (e.g., "4K"). 
- is_gloss_source: Boolean indicating if this source should be treated as gloss for inversion. - cache: The dictionary used for caching loaded/resized data. - - Returns: - Tuple containing: - - Resized NumPy array (float32) or None if loading/processing fails. - - Original source NumPy dtype or None if loading fails. - """ - if not self.temp_dir: - log.error("Temporary directory not set in _load_and_transform_source.") - return None, None - - cache_key = (source_path_rel, target_resolution_key) - if cache_key in cache: - log.debug(f"CACHE HIT: Returning cached data for {source_path_rel} at {target_resolution_key}") - return cache[cache_key] # Return tuple (image_data, source_dtype) - - log.debug(f"CACHE MISS: Loading and transforming {source_path_rel} for {target_resolution_key}") - full_source_path = self.temp_dir / source_path_rel - img_prepared = None - source_dtype = None - - try: - # --- 1. Load Source Image --- - # Determine read flag (Grayscale for specific types, unchanged otherwise) - read_flag = cv2.IMREAD_GRAYSCALE if map_type.upper() in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED - # Special case for MASK: always load unchanged first to check alpha - if map_type.upper() == 'MASK': read_flag = cv2.IMREAD_UNCHANGED - - log.debug(f"Loading source {full_source_path.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}") - img_loaded = cv2.imread(str(full_source_path), read_flag) - if img_loaded is None: - raise AssetProcessingError(f"Failed to load image file: {full_source_path.name} with flag {read_flag}") - source_dtype = img_loaded.dtype - log.debug(f"Loaded source {full_source_path.name}, dtype: {source_dtype}, shape: {img_loaded.shape}") - - # MASK Handling (Extract alpha or convert) - Do this BEFORE general color conversions - if _get_base_map_type(map_type) == 'MASK': - log.debug(f"Processing as MASK type for {source_path_rel.name}.") - shape = img_loaded.shape # Use img_loaded - if len(shape) == 3 and shape[2] 
== 4: - log.debug("MASK processing: Extracting alpha channel (4-channel source).") - img_prepared = img_loaded[:, :, 3] # Extract alpha from img_loaded - elif len(shape) == 3 and shape[2] == 3: - log.debug("MASK processing: Converting BGR to Grayscale (3-channel source).") # OpenCV loads as BGR - img_prepared = cv2.cvtColor(img_loaded, cv2.COLOR_BGR2GRAY) # Convert BGR to Gray - elif len(shape) == 2: - log.debug("MASK processing: Source is already grayscale.") - img_prepared = img_loaded # Keep as is - else: - log.warning(f"MASK processing: Unexpected source shape {shape}. Cannot reliably extract mask.") - img_prepared = None # Cannot process - # MASK should ideally be uint8 for saving later, but keep float for now if inverted? - # Let _save_image handle final conversion based on format rules. - else: - # For non-MASK types, start with the loaded image - img_prepared = img_loaded - # --- 2. Initial Preparation (BGR->RGB, Gloss Inversion, MASK handling) --- - img_prepared = img_loaded # Start with loaded image - - # BGR -> RGB conversion (only for 3-channel images) - if len(img_prepared.shape) == 3 and img_prepared.shape[2] >= 3: # Check for 3 or 4 channels - # Ensure it's not already grayscale before attempting conversion - if read_flag != cv2.IMREAD_GRAYSCALE: - log.debug(f"Converting loaded image from BGR to RGB for {source_path_rel.name}.") - # Handle 4-channel (BGRA) by converting to RGB first - if img_prepared.shape[2] == 4: - img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGRA2RGB) - else: # 3-channel (BGR) - img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGR2RGB) - else: - log.debug(f"Skipping BGR->RGB conversion for {source_path_rel.name} as it was loaded grayscale.") - elif len(img_prepared.shape) == 2: - log.debug(f"Image {source_path_rel.name} is grayscale, no BGR->RGB conversion needed.") - else: - log.warning(f"Unexpected image shape {img_prepared.shape} for {source_path_rel.name} after loading.") - - - # Gloss -> Roughness Inversion - if 
map_type == 'ROUGH' and is_gloss_source: - log.info(f"Performing Gloss->Roughness inversion for {source_path_rel.name}") - # Ensure grayscale before inversion - if len(img_prepared.shape) == 3: - img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY) # Use RGB2GRAY as it's already converted - - # Normalize based on original source dtype before inversion - if source_dtype == np.uint16: - img_float = 1.0 - (img_prepared.astype(np.float32) / 65535.0) - elif source_dtype == np.uint8: - img_float = 1.0 - (img_prepared.astype(np.float32) / 255.0) - else: # Assuming float input is already 0-1 range - img_float = 1.0 - img_prepared.astype(np.float32) - img_prepared = np.clip(img_float, 0.0, 1.0) # Result is float32 - log.debug(f"Inverted gloss map stored as float32 for ROUGH, original dtype: {source_dtype}") - - - # Ensure data is float32 for resizing if it came from gloss inversion - if isinstance(img_prepared, np.ndarray) and img_prepared.dtype != np.float32 and map_type == 'ROUGH' and is_gloss_source: - img_prepared = img_prepared.astype(np.float32) - elif isinstance(img_prepared, np.ndarray) and img_prepared.dtype not in [np.uint8, np.uint16, np.float32, np.float16]: - # Convert other potential types (like bool) to float32 for resizing compatibility - log.warning(f"Converting unexpected dtype {img_prepared.dtype} to float32 before resizing.") - img_prepared = img_prepared.astype(np.float32) - - - # --- 3. 
Resize --- - if img_prepared is None: raise AssetProcessingError("Image data is None after initial prep.") - orig_h, orig_w = img_prepared.shape[:2] - target_dim_px = self.config.image_resolutions.get(target_resolution_key) - if not target_dim_px: - raise AssetProcessingError(f"Target resolution key '{target_resolution_key}' not found in config.") - - # Avoid upscaling check - max_original_dimension = max(orig_w, orig_h) - if target_dim_px > max_original_dimension: - log.warning(f"Target dimension {target_dim_px}px is larger than original {max_original_dimension}px for {source_path_rel}. Skipping resize for {target_resolution_key}.") - # Store None in cache for this specific resolution to avoid retrying - cache[cache_key] = (None, source_dtype) - return None, source_dtype # Indicate resize was skipped - - if orig_w <= 0 or orig_h <= 0: - raise AssetProcessingError(f"Invalid original dimensions ({orig_w}x{orig_h}) for {source_path_rel}.") - - target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim_px) - interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC - log.debug(f"Resizing {source_path_rel.name} from ({orig_w}x{orig_h}) to ({target_w}x{target_h}) for {target_resolution_key}") - img_resized = cv2.resize(img_prepared, (target_w, target_h), interpolation=interpolation) - - # --- 4. Cache and Return --- - # Ensure result is float32 if it came from gloss inversion, otherwise keep resized dtype - final_data_to_cache = img_resized - if map_type == 'ROUGH' and is_gloss_source and final_data_to_cache.dtype != np.float32: - final_data_to_cache = final_data_to_cache.astype(np.float32) - - log.debug(f"CACHING result for {cache_key}. 
Shape: {final_data_to_cache.shape}, Dtype: {final_data_to_cache.dtype}") - cache[cache_key] = (final_data_to_cache, source_dtype) - return final_data_to_cache, source_dtype - - except Exception as e: - log.error(f"Error in _load_and_transform_source for {source_path_rel} at {target_resolution_key}: {e}", exc_info=True) - # Cache None to prevent retrying on error for this specific key - cache[cache_key] = (None, None) - return None, None - - - # --- New Helper Function: Save Image --- - def _save_image(self, image_data: np.ndarray, map_type: str, resolution_key: str, asset_base_name: str, source_info: dict, output_bit_depth_rule: str, temp_dir: Path, - # Parameters passed down from calling methods using _get_rule_with_fallback - output_formats_16bit: Tuple[str, str], output_format_8bit: str, resolution_threshold_for_jpg: int, - force_lossless_map_types: List[str], jpg_quality: int, png_compression_level: int, - target_filename_pattern: str, image_resolutions: Dict[str, int]) -> Optional[Dict]: - """ - Handles saving an image NumPy array to a temporary file, including determining - format, bit depth, performing final conversions, and fallback logic. - - Args: - image_data: NumPy array containing the image data to save. - map_type: The standard map type being saved (e.g., "COL", "NRMRGH"). - resolution_key: The resolution key (e.g., "4K"). - asset_base_name: The sanitized base name of the asset. - source_info: Dictionary containing details about the source(s), e.g., - {'original_extension': '.tif', 'source_bit_depth': 16, 'involved_extensions': {'.tif', '.png'}} - output_bit_depth_rule: Rule for determining output bit depth ('respect', 'force_8bit', 'force_16bit', 'respect_inputs'). - temp_dir: The temporary directory path to save the file in. - output_formats_16bit: Tuple of primary and fallback 16-bit formats (e.g., ('exr', 'png')). - output_format_8bit: Default 8-bit format (e.g., 'jpg'). 
- resolution_threshold_for_jpg: Threshold above which 8-bit images become JPG. - force_lossless_map_types: List of map types to always save losslessly. - jpg_quality: Quality setting for JPG saves. - png_compression_level: Compression level for PNG saves. - target_filename_pattern: Format string for output filenames. - image_resolutions: Dictionary mapping resolution keys to pixel dimensions. - - Returns: - A dictionary containing details of the saved file (path, width, height, - bit_depth, format) or None if saving failed. - """ - if image_data is None: - log.error(f"Cannot save image for {map_type} ({resolution_key}): image_data is None.") - return None - if not temp_dir or not temp_dir.exists(): - log.error(f"Cannot save image for {map_type} ({resolution_key}): temp_dir is invalid.") - return None - - try: - h, w = image_data.shape[:2] - current_dtype = image_data.dtype - log.debug(f"Saving {map_type} ({resolution_key}) for asset '{asset_base_name}'. Input shape: {image_data.shape}, dtype: {current_dtype}") - - # --- 1. Determine Output Bit Depth --- - source_bpc = source_info.get('source_bit_depth', 8) # Default to 8 if missing - max_input_bpc = source_info.get('max_input_bit_depth', source_bpc) # For 'respect_inputs' merge rule - output_dtype_target, output_bit_depth = np.uint8, 8 # Default - - if output_bit_depth_rule == 'force_8bit': - output_dtype_target, output_bit_depth = np.uint8, 8 - elif output_bit_depth_rule == 'force_16bit': - output_dtype_target, output_bit_depth = np.uint16, 16 - elif output_bit_depth_rule == 'respect': # For individual maps - if source_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16 - # Handle float source? Assume 16-bit output if source was float? Needs clarification. - # For now, stick to uint8/16 based on source_bpc. 
- elif output_bit_depth_rule == 'respect_inputs': # For merged maps - if max_input_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16 - else: # Default to 8-bit if rule is unknown - log.warning(f"Unknown output_bit_depth_rule '{output_bit_depth_rule}'. Defaulting to 8-bit.") - output_dtype_target, output_bit_depth = np.uint8, 8 - - log.debug(f"Target output bit depth: {output_bit_depth}-bit (dtype: {output_dtype_target.__name__}) based on rule '{output_bit_depth_rule}'") - - # --- 2. Determine Output Format --- - output_format, output_ext, save_params, needs_float16 = "", "", [], False - # Use passed-in parameters instead of self.config - primary_fmt_16, fallback_fmt_16 = output_formats_16bit - fmt_8bit_config = output_format_8bit - threshold = resolution_threshold_for_jpg - force_lossless = map_type in force_lossless_map_types - original_extension = source_info.get('original_extension', '.png') # Primary source ext - involved_extensions = source_info.get('involved_extensions', {original_extension}) # For merges - target_dim_px = image_resolutions.get(resolution_key, 0) # Get target dimension size - - # Apply format determination logic (similar to old _process_maps/_merge_maps) - if force_lossless: - log.debug(f"Format forced to lossless for map type '{map_type}'.") - if output_bit_depth == 16: - output_format = primary_fmt_16 - if output_format.startswith("exr"): - output_ext, needs_float16 = ".exr", True - save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) - else: # Assume PNG if primary 16-bit isn't EXR - if output_format != "png": log.warning(f"Primary 16-bit format '{output_format}' not PNG/EXR for forced lossless. 
Using fallback '{fallback_fmt_16}'.") - output_format = fallback_fmt_16 if fallback_fmt_16 == "png" else "png" # Ensure PNG - output_ext = ".png" - # Use passed-in parameter - save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]) - else: # 8-bit lossless -> PNG - output_format = "png"; output_ext = ".png" - # Use passed-in parameter - save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_compression_level] - - elif output_bit_depth == 8 and target_dim_px >= threshold: - output_format = 'jpg'; output_ext = '.jpg' - # Use passed-in parameter - save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality]) - log.debug(f"Using JPG format (Quality: {jpg_quality}) for {map_type} at {resolution_key} due to resolution threshold ({target_dim_px} >= {threshold}).") - else: - # Determine highest format involved (for merges) or use original (for individuals) - highest_format_str = 'jpg' # Default lowest - # Check against passed-in map_merge_rules if needed, but currently not used here - relevant_extensions = involved_extensions # Use involved_extensions directly - if '.exr' in relevant_extensions: highest_format_str = 'exr' - elif '.tif' in relevant_extensions: highest_format_str = 'tif' - elif '.png' in relevant_extensions: highest_format_str = 'png' - - if highest_format_str == 'exr': - if output_bit_depth == 16: output_format, output_ext, needs_float16 = "exr", ".exr", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) - else: output_format, output_ext = "png", ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]) # Use param - elif highest_format_str == 'tif': - if output_bit_depth == 16: - output_format = primary_fmt_16 - if output_format.startswith("exr"): output_ext, needs_float16 = ".exr", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) - else: output_format = "png"; output_ext = ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]) # Use param - else: 
            # NOTE(review): this chunk begins mid-way through _save_image (its `def`
            # is above this view). The lines below continue the output-format
            # selection started earlier in the method.
                elif highest_format_str == 'png':
                    if output_bit_depth == 16:
                        output_format = primary_fmt_16
                        if output_format.startswith("exr"): output_ext, needs_float16 = ".exr", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
                        else: output_format = "png"; output_ext = ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]) # Use param
                    else: output_format, output_ext = "png", ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]) # Use param
                else: # Default to configured 8-bit format if highest was JPG or unknown
                    output_format = fmt_8bit_config; output_ext = f".{output_format}"
                    if output_format == "png": save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]) # Use param
                    elif output_format == "jpg": save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality]) # Use param

            # Final check: JPG must be 8-bit (OpenCV cannot write 16-bit JPEG)
            if output_format == "jpg" and output_bit_depth == 16:
                log.warning(f"Output format is JPG, but target bit depth is 16. Forcing 8-bit for {map_type} ({resolution_key}).")
                output_dtype_target, output_bit_depth = np.uint8, 8

            log.debug(f"Determined save format: {output_format}, ext: {output_ext}, bit_depth: {output_bit_depth}, needs_float16: {needs_float16}")

            # --- 3. Final Data Type Conversion ---
            img_to_save = image_data.copy() # Work on a copy
            if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:
                log.debug(f"Converting image data from {img_to_save.dtype} to uint8 for saving.")
                if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)
                elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)
                else: img_to_save = img_to_save.astype(np.uint8) # Direct cast for other types (e.g., bool)
            elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:
                log.debug(f"Converting image data from {img_to_save.dtype} to uint16 for saving.")
                if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257 # Proper 8->16 bit scaling (255*257 == 65535)
                elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)
                else: img_to_save = img_to_save.astype(np.uint16)
            if needs_float16 and img_to_save.dtype != np.float16:
                # EXR half-float output requires float16 input data.
                log.debug(f"Converting image data from {img_to_save.dtype} to float16 for EXR saving.")
                if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)
                elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)
                elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)
                else: log.warning(f"Cannot convert {img_to_save.dtype} to float16 for EXR save."); return None

            # --- 4. Final Color Space Conversion (RGB -> BGR for non-EXR) ---
            # cv2.imwrite expects BGR channel order for most formats; EXR is written as-is.
            img_save_final = img_to_save
            is_3_channel = len(img_to_save.shape) == 3 and img_to_save.shape[2] == 3
            if is_3_channel and not output_format.startswith("exr"):
                log.debug(f"Converting RGB to BGR for saving {map_type} ({resolution_key}) as {output_format}")
                try:
                    img_save_final = cv2.cvtColor(img_to_save, cv2.COLOR_RGB2BGR)
                except Exception as cvt_err:
                    log.error(f"Failed RGB->BGR conversion before save for {map_type} ({resolution_key}): {cvt_err}. Saving original RGB.")
                    img_save_final = img_to_save # Fallback

            # --- 5. Construct Filename & Save ---
            # Use passed-in parameter
            filename = target_filename_pattern.format(
                base_name=asset_base_name,
                map_type=map_type,
                resolution=resolution_key,
                ext=output_ext.lstrip('.')
            )
            output_path_temp = temp_dir / filename
            log.debug(f"Attempting to save: {output_path_temp.name} (Format: {output_format}, Dtype: {img_save_final.dtype})")

            saved_successfully = False
            actual_format_saved = output_format
            try:
                cv2.imwrite(str(output_path_temp), img_save_final, save_params)
                saved_successfully = True
                log.info(f" > Saved {map_type} ({resolution_key}, {output_bit_depth}-bit) as {output_format}")
            except Exception as save_err:
                log.error(f"Save failed ({output_format}) for {map_type} {resolution_key}: {save_err}")
                # --- Try Fallback ---
                # Only fallback path implemented: failed 16-bit EXR -> 16-bit PNG.
                if output_bit_depth == 16 and output_format.startswith("exr") and fallback_fmt_16 != output_format and fallback_fmt_16 == "png":
                    log.warning(f"Attempting fallback PNG save for {map_type} {resolution_key}")
                    actual_format_saved = "png"; output_ext = ".png";
                    # Use passed-in parameter
                    filename = target_filename_pattern.format(base_name=asset_base_name, map_type=map_type, resolution=resolution_key, ext="png")
                    output_path_temp = temp_dir / filename
                    # Use passed-in parameter
                    save_params_fallback = [cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]
                    img_fallback = None; target_fallback_dtype = np.uint16

                    # Convert original data (before float16 conversion) to uint16 for PNG fallback
                    if img_to_save.dtype == np.float16: # This means original was likely float or uint16/8 converted to float16
                        # Need to get back to uint16 - use the pre-float16 converted data if possible?
                        # Safest is to convert the float16 back to uint16
                        img_scaled = np.clip(img_to_save.astype(np.float32) * 65535.0, 0, 65535)
                        img_fallback = img_scaled.astype(target_fallback_dtype)
                    elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already uint16
                    else: log.error(f"Cannot convert {img_to_save.dtype} for PNG fallback."); return None

                    # --- Conditional RGB -> BGR Conversion for fallback ---
                    img_fallback_save_final = img_fallback
                    is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3
                    if is_3_channel_fallback: # PNG is non-EXR
                        log.debug(f"Converting RGB to BGR for fallback PNG save {map_type} ({resolution_key})")
                        try: img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)
                        except Exception as cvt_err_fb: log.error(f"Failed RGB->BGR conversion for fallback PNG: {cvt_err_fb}. Saving original.")

                    try:
                        cv2.imwrite(str(output_path_temp), img_fallback_save_final, save_params_fallback)
                        saved_successfully = True
                        log.info(f" > Saved {map_type} ({resolution_key}) using fallback PNG")
                    except Exception as fallback_err:
                        log.error(f"Fallback PNG save failed for {map_type} {resolution_key}: {fallback_err}", exc_info=True)
                else:
                    log.error(f"No suitable fallback available or applicable for failed save of {map_type} ({resolution_key}) as {output_format}.")

            # --- 6. Return Result ---
            if saved_successfully:
                return {
                    "path": output_path_temp.relative_to(self.temp_dir), # Store relative path
                    "resolution": resolution_key,
                    "width": w, "height": h,
                    "bit_depth": output_bit_depth,
                    "format": actual_format_saved
                }
            else:
                return None # Indicate save failure

        except Exception as e:
            log.error(f"Unexpected error in _save_image for {map_type} ({resolution_key}): {e}", exc_info=True)
            return None

    def process(self, rules: SourceRule) -> Dict[str, List[str]]:
        """
        Executes the full processing pipeline for the input path, handling
        multiple assets within a single input if detected.

        Args:
            rules: Hierarchical SourceRule overrides applied on top of the
                static configuration at every pipeline step.

        Returns:
            Dict[str, List[str]]: A dictionary summarizing the status of each
            detected asset within the input:
            {"processed": [asset_name1, ...],
             "skipped": [asset_name2, ...],
             "failed": [asset_name3, ...]}

        Raises:
            AssetProcessingError: if workspace setup, extraction, or
                classification fails before the per-asset loop starts.
                Failures of individual assets are recorded in the returned
                dict instead of raising.
        """
        log.info(f"Starting processing for input: {self.input_path.name}")
        overall_status = {"processed": [], "skipped": [], "failed": []}
        supplier_name = self.config.supplier_name # Get once
        loaded_data_cache = {} # Initialize cache for this process call; shared across assets so common source images load once

        try:
            self._setup_workspace()
            self._extract_input()
            # Pass rules to classification
            self._inventory_and_classify_files(rules) # Classifies all files in self.classified_files

            # Determine distinct assets and file mapping, pass rules
            distinct_base_names, file_to_base_name_map = self._determine_base_metadata(rules)
            unmatched_files_paths = [p for p, name in file_to_base_name_map.items() if name is None]
            if unmatched_files_paths:
                log.warning(f"Found {len(unmatched_files_paths)} files not matched to any specific asset base name. They will be copied to each asset's Extra folder.")
                log.debug(f"Unmatched files: {[str(p) for p in unmatched_files_paths]}")

            # --- Loop through each detected asset ---
            for current_asset_name in distinct_base_names:
                log.info(f"--- Processing detected asset: '{current_asset_name}' ---")
                # NOTE(review): these three flags are assigned below but never read
                # afterwards in this view — possibly vestigial; confirm before removing.
                asset_processed = False
                asset_skipped = False
                asset_failed = False
                temp_metadata_path_asset = None # Track metadata file for this asset
                map_details_asset = {} # Store map details for this asset

                try:
                    # --- Filter classified files for the current asset ---
                    filtered_classified_files_asset = defaultdict(list)
                    for category, file_list in self.classified_files.items():
                        for file_info in file_list:
                            file_path = file_info.get('source_path')
                            if file_path and file_to_base_name_map.get(file_path) == current_asset_name:
                                filtered_classified_files_asset[category].append(file_info)
                    log.debug(f"Asset '{current_asset_name}': Filtered files - Maps: {len(filtered_classified_files_asset.get('maps',[]))}, Models: {len(filtered_classified_files_asset.get('models',[]))}, Extra: {len(filtered_classified_files_asset.get('extra',[]))}, Ignored: {len(filtered_classified_files_asset.get('ignored',[]))}")

                    # --- Assign Suffixes Per-Asset ---
                    log.debug(f"Asset '{current_asset_name}': Assigning map type suffixes...")
                    asset_maps = filtered_classified_files_asset.get('maps', [])
                    grouped_asset_maps = defaultdict(list)
                    for map_info in asset_maps:
                        # Group by the base map type stored earlier
                        grouped_asset_maps[map_info['map_type']].append(map_info)

                    for base_map_type, maps_in_group in grouped_asset_maps.items():
                        log.debug(f"  Assigning suffixes for base type '{base_map_type}' within asset '{current_asset_name}' ({len(maps_in_group)} maps)")
                        # Sorting is already done by _inventory_and_classify_files, just need to assign suffix
                        respect_variants = base_map_type in self.config.respect_variant_map_types
                        for i, map_info in enumerate(maps_in_group):
                            if respect_variants:
                                final_map_type = f"{base_map_type}-{i + 1}"
                            else:
                                final_map_type = base_map_type
                            log.debug(f"    Updating '{map_info['source_path']}' map_type from '{map_info['map_type']}' to '{final_map_type}'")
                            map_info['map_type'] = final_map_type # Update the map_type in the dictionary

                    # --- Determine Metadata for this specific asset, pass rules ---
                    asset_specific_metadata = self._determine_single_asset_metadata(current_asset_name, filtered_classified_files_asset, rules)
                    current_asset_metadata = {
                        "asset_name": current_asset_name,
                        # Supplier name is determined by rules/config in _determine_single_asset_metadata
                        "supplier_name": asset_specific_metadata.get("supplier_name", supplier_name), # Use determined supplier name
                        "asset_category": asset_specific_metadata.get("asset_category", self.config.default_asset_category),
                        "archetype": asset_specific_metadata.get("archetype", "Unknown"),
                        # Initialize fields that will be populated by processing steps
                        "maps_present": [],
                        "merged_maps": [],
                        "shader_features": [],
                        "source_files_in_extra": [], # Will be populated in _generate_metadata
                        "image_stats_1k": {},
                        "map_details": {}, # Will be populated by _process_maps
                        "aspect_ratio_change_string": "N/A"
                    }

                    # --- Skip Check for this specific asset ---
                    if not self.overwrite:
                        supplier_sanitized = self._sanitize_filename(supplier_name)
                        asset_name_sanitized = self._sanitize_filename(current_asset_name)
                        final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized
                        metadata_file_path = final_dir / self.config.metadata_filename
                        if final_dir.exists() and metadata_file_path.is_file():
                            log.info(f"Output directory and metadata found for asset '{asset_name_sanitized}' and overwrite is False. Skipping this asset.")
                            overall_status["skipped"].append(current_asset_name)
                            asset_skipped = True
                            continue # Skip to the next asset in the loop
                    # NOTE(review): `elif self.overwrite` is redundant — it is always
                    # true when the `if not self.overwrite` branch was not taken.
                    elif self.overwrite:
                        log.info(f"Overwrite flag is set. Processing asset '{current_asset_name}' even if output exists.")

                    # --- Process Individual Maps for this asset, pass rules ---
                    processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps = self._process_individual_maps(
                        filtered_maps_list=filtered_classified_files_asset.get('maps', []),
                        current_asset_metadata=current_asset_metadata, # Pass base metadata
                        loaded_data_cache=loaded_data_cache, # Pass cache
                        rules=rules # Pass rules
                    )
                    # Update current metadata with results
                    current_asset_metadata["image_stats_1k"] = image_stats_asset
                    current_asset_metadata["aspect_ratio_change_string"] = aspect_ratio_change_string_asset
                    # Add newly ignored rough maps to the asset's specific ignored list
                    if ignored_rough_maps:
                        filtered_classified_files_asset['ignored'].extend(ignored_rough_maps)
                    # Store map details (like source bit depth) collected during processing
                    # This was previously stored in self.metadata["map_details"]
                    map_details_asset = {k: v for k, v in current_asset_metadata.pop("map_details", {}).items() if k in processed_maps_details_asset}

                    # --- Merge Maps from Source for this asset, pass rules ---
                    merged_maps_details_asset = self._merge_maps_from_source(
                        processed_maps_details_asset=processed_maps_details_asset, # Still needed for source info lookup? Or pass classified files? Check impl.
                        filtered_classified_files=filtered_classified_files_asset,
                        current_asset_metadata=current_asset_metadata,
                        loaded_data_cache=loaded_data_cache, # Pass cache
                        rules=rules # Pass rules
                    )

                    # --- Generate Metadata for this asset, pass rules ---
                    # Get config values using fallback for _generate_metadata_file
                    preset_name_rule = self._get_rule_with_fallback(rules, 'preset_name', asset_name=current_asset_name, default=self.config.preset_name)
                    metadata_filename_rule = self._get_rule_with_fallback(rules, 'metadata_filename', asset_name=current_asset_name, default=self.config.metadata_filename)

                    temp_metadata_path_asset = self._generate_metadata_file(
                        current_asset_metadata=current_asset_metadata, # Pass the populated dict
                        processed_maps_details_asset=processed_maps_details_asset,
                        merged_maps_details_asset=merged_maps_details_asset,
                        filtered_classified_files_asset=filtered_classified_files_asset,
                        unmatched_files_paths=unmatched_files_paths, # Pass the list of unmatched files
                        map_details_asset=map_details_asset, # Pass the filtered map details
                        rules=rules, # Pass rules
                        # Pass retrieved config values
                        preset_name=preset_name_rule,
                        metadata_filename=metadata_filename_rule
                    )

                    # --- Organize Output Files for this asset ---
                    self._organize_output_files(
                        current_asset_name=current_asset_name,
                        processed_maps_details_asset=processed_maps_details_asset,
                        merged_maps_details_asset=merged_maps_details_asset,
                        filtered_classified_files_asset=filtered_classified_files_asset,
                        unmatched_files_paths=unmatched_files_paths, # Pass unmatched files for copying
                        temp_metadata_path=temp_metadata_path_asset
                    )

                    log.info(f"--- Asset '{current_asset_name}' processed successfully. ---")
                    overall_status["processed"].append(current_asset_name)
                    asset_processed = True

                except Exception as asset_err:
                    log.error(f"--- Failed processing asset '{current_asset_name}': {asset_err} ---", exc_info=True)
                    overall_status["failed"].append(current_asset_name)
                    asset_failed = True
                    # Continue to the next asset even if one fails

            # --- Determine Final Consolidated Status ---
            # This logic remains the same, interpreting the overall_status dict
            final_status = "failed" # Default if nothing else matches
            if overall_status["processed"] and not overall_status["failed"]:
                final_status = "processed"
            elif overall_status["skipped"] and not overall_status["processed"] and not overall_status["failed"]:
                final_status = "skipped"
            elif overall_status["processed"] and overall_status["failed"]:
                final_status = "partial_success" # Indicate some succeeded, some failed
            elif overall_status["processed"] and overall_status["skipped"] and not overall_status["failed"]:
                final_status = "processed" # Consider processed+skipped as processed overall
            elif overall_status["skipped"] and overall_status["failed"] and not overall_status["processed"]:
                final_status = "failed" # If only skips and fails, report as failed
            # Add any other combinations if needed
            # NOTE(review): `final_status` is only used for this log line; the
            # detailed dict below is the actual return value.

            log.info(f"Finished processing input '{self.input_path.name}'. Overall Status: {final_status}. Summary: {overall_status}")
            # Return the detailed status dictionary instead of just a string
            # The wrapper function in main.py will interpret this
            return overall_status

        except Exception as e:
            # Catch errors during initial setup (before asset loop)
            if not isinstance(e, (AssetProcessingError, ConfigurationError)):
                log.exception(f"Asset processing failed unexpectedly for {self.input_path.name} during setup: {e}")
            if not isinstance(e, AssetProcessingError):
                raise AssetProcessingError(f"Failed processing {self.input_path.name}: {e}") from e
            else:
                raise
        finally:
            # Ensure cleanup always happens
            self._cleanup_workspace()

    def _setup_workspace(self):
        """Creates a temporary directory for processing.

        Raises:
            AssetProcessingError: if the temp directory cannot be created.
        """
        try:
            self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config.temp_dir_prefix))
            log.debug(f"Created temporary workspace: {self.temp_dir}")
        except Exception as e:
            raise AssetProcessingError(f"Failed to create temporary workspace: {e}") from e

    def _extract_input(self):
        """Extracts ZIP or copies folder contents to the temporary workspace.

        Supports .zip, .rar (via rarfile/unrar) and .7z (via py7zr) archives,
        or a plain directory whose contents are copied.

        Raises:
            AssetProcessingError: for unsupported inputs, bad/password-protected
                archives, or any unexpected extraction failure.
        """
        if not self.temp_dir:
            raise AssetProcessingError("Temporary workspace not setup before extraction.")

        log.info(f"Preparing source files from {self.input_path.name}...")
        try:
            if self.input_path.is_file():
                suffix = self.input_path.suffix.lower()
                if suffix == '.zip':
                    log.debug(f"Extracting ZIP file: {self.input_path}")
                    with zipfile.ZipFile(self.input_path, 'r') as zip_ref:
                        zip_ref.extractall(self.temp_dir)
                    log.info(f"ZIP extracted to {self.temp_dir}")
                elif suffix == '.rar':
                    log.debug(f"Extracting RAR file: {self.input_path}")
                    # rarfile requires unrar to be installed and in the system's PATH
                    # We assume this is handled by the user's environment setup.
                    # Basic error handling for common rarfile exceptions.
                    try:
                        with rarfile.RarFile(self.input_path, 'r') as rar_ref:
                            rar_ref.extractall(self.temp_dir)
                        log.info(f"RAR extracted to {self.temp_dir}")
                    except rarfile.BadRarFile:
                        raise AssetProcessingError(f"Input file is not a valid RAR archive: {self.input_path.name}")
                    except rarfile.NeedFirstVolume:
                        raise AssetProcessingError(f"RAR archive is part of a multi-volume set, but the first volume is missing: {self.input_path.name}")
                    except rarfile.PasswordRequired:
                        # As per plan, we don't handle passwords at this stage
                        raise AssetProcessingError(f"RAR archive is password protected. Skipping: {self.input_path.name}")
                    except rarfile.NoRarEntry:
                        raise AssetProcessingError(f"RAR archive is empty or corrupted: {self.input_path.name}")
                    except Exception as rar_err:
                        # Catch any other unexpected rarfile errors
                        raise AssetProcessingError(f"Failed to extract RAR archive {self.input_path.name}: {rar_err}") from rar_err

                elif suffix == '.7z':
                    log.debug(f"Extracting 7z file: {self.input_path}")
                    # py7zr handles extraction directly
                    try:
                        with py7zr.SevenZipFile(self.input_path, mode='r') as sz_ref:
                            sz_ref.extractall(path=self.temp_dir)
                        log.info(f"7z extracted to {self.temp_dir}")
                    except py7zr.Bad7zFile:
                        raise AssetProcessingError(f"Input file is not a valid 7z archive: {self.input_path.name}")
                    except py7zr.PasswordRequired:
                        # As per plan, we don't handle passwords at this stage
                        raise AssetProcessingError(f"7z archive is password protected. Skipping: {self.input_path.name}")
                    except Exception as sz_err:
                        # Catch any other unexpected py7zr errors
                        raise AssetProcessingError(f"Failed to extract 7z archive {self.input_path.name}: {sz_err}") from sz_err

                else:
                    # If it's a file but not zip, rar, or 7z, treat it as an error for now
                    # Or could add logic to copy single files? Plan says zip or folder.
                    raise AssetProcessingError(f"Input file is not a supported archive type (.zip, .rar, .7z): {self.input_path.name}")

            elif self.input_path.is_dir():
                log.debug(f"Copying directory contents: {self.input_path}")
                for item in self.input_path.iterdir():
                    destination = self.temp_dir / item.name
                    if item.is_dir():
                        # Use dirs_exist_ok=True for robustness if Python version supports it (3.8+)
                        try:
                            shutil.copytree(item, destination, dirs_exist_ok=True)
                        except TypeError: # Fallback for older Python
                            if not destination.exists():
                                shutil.copytree(item, destination)
                            else:
                                log.warning(f"Subdirectory '{item.name}' already exists in temp dir, skipping copytree (potential issue on older Python).")

                    else:
                        shutil.copy2(item, destination)
                log.info(f"Directory contents copied to {self.temp_dir}")
            else:
                # This case should be caught by __init__ but included for robustness
                raise AssetProcessingError(f"Input path must be a directory or a supported archive file (.zip, .rar, .7z): {self.input_path}")

        except AssetProcessingError:
            # Re-raise our custom exception directly
            raise
        except Exception as e:
            # Wrap any other unexpected exceptions
            raise AssetProcessingError(f"An unexpected error occurred during input extraction for {self.input_path.name}: {e}") from e

    def _inventory_and_classify_files(self, rules: SourceRule):
        """
        Scans workspace, classifies files according to hierarchical rules and preset rules,
        handling 16-bit prioritization and multiple variants of the same base map type.

        Populates self.classified_files with keys "maps", "models", "extra",
        "ignored". Classification order: Extra patterns -> model patterns ->
        keyword map candidates -> standalone 16-bit variants -> 16-bit
        supersession filtering -> sorted final map list -> leftovers to Extra.
        """
        if not self.temp_dir:
            raise AssetProcessingError("Temporary workspace not setup before inventory.")

        log.info("Scanning and classifying files...")
        log.debug("--- Starting File Inventory and Classification (v2) ---")
        all_files_rel = []
        for root, _, files in os.walk(self.temp_dir):
            root_path = Path(root)
            for file in files:
                full_path = root_path / file
                relative_path = full_path.relative_to(self.temp_dir)
                all_files_rel.append(relative_path)

        log.debug(f"Found {len(all_files_rel)} files in workspace: {[str(p) for p in all_files_rel]}")

        # --- Initialization ---
        processed_files = set() # Track relative paths handled (Extra, Models, Ignored, Final Maps)
        potential_map_candidates = [] # List to store potential map file info
        # Reset classified files (important if this method is ever called multiple times)
        self.classified_files = {"maps": [], "models": [], "extra": [], "ignored": []}

        # --- Step 1: Identify Explicit 'Extra' Files ---
        log.debug("Step 1: Checking for files to move to 'Extra' (using regex)...")
        # Get extra regex patterns using fallback logic
        extra_patterns = self._get_rule_with_fallback(rules, 'extra_file_patterns', default=[])
        compiled_extra_regex = [re.compile(p, re.IGNORECASE) for p in extra_patterns] # Compile with ignore case
        log.debug(f"  'Extra' regex patterns (from rules/config): {[p for p in extra_patterns]}")
        for file_rel_path in all_files_rel:
            if file_rel_path in processed_files: continue
            for compiled_regex in compiled_extra_regex:
                if compiled_regex.search(file_rel_path.name):
                    log.debug(f"  REGEX MATCH FOUND: Marking '{file_rel_path}' for 'Extra' folder based on pattern '{compiled_regex.pattern}'.")
                    self.classified_files["extra"].append({'source_path': file_rel_path, 'reason': f'Regex match: {compiled_regex.pattern}'})
                    processed_files.add(file_rel_path)
                    log.debug(f"  Added '{file_rel_path}' to processed files.")
                    break # Stop checking extra patterns for this file

        # --- Step 2: Identify Model Files ---
        log.debug("Step 2: Identifying model files (using regex)...")
        # Get model patterns using fallback logic
        model_patterns = self._get_rule_with_fallback(rules, 'model_file_patterns', default=[])
        compiled_model_regex = [re.compile(p, re.IGNORECASE) for p in model_patterns] # Compile with ignore case
        log.debug(f"  'Model' regex patterns (from rules/config): {[p for p in model_patterns]}")
        for file_rel_path in all_files_rel:
            if file_rel_path in processed_files: continue
            for compiled_regex in compiled_model_regex:
                if compiled_regex.search(file_rel_path.name):
                    log.debug(f"  REGEX MATCH FOUND: Identified '{file_rel_path}' as model file based on pattern '{compiled_regex.pattern}'.")
                    self.classified_files["models"].append({'source_path': file_rel_path})
                    processed_files.add(file_rel_path)
                    log.debug(f"  Added '{file_rel_path}' to processed files.")
                    break # Stop checking model patterns for this file

        # --- Step 3: Gather Potential Map Candidates ---
        log.debug("Step 3: Gathering potential map candidates (iterating files first)...")

        # Get map type mapping rules using fallback logic
        map_type_mapping_rules = self._get_rule_with_fallback(rules, 'map_type_mapping', default=[])
        # Compile regex patterns from the rules (similar to config initialization)
        compiled_map_keyword_regex_tuples = defaultdict(list)
        for rule_index, rule in enumerate(map_type_mapping_rules):
            target_type = rule.get("target_type")
            keywords = rule.get("keywords", [])
            if target_type and keywords:
                for keyword in keywords:
                    # Escape special regex characters in the keyword, then replace '*' with '.*'
                    pattern = re.escape(keyword).replace(re.escape('*'), '.*')
                    # Anchor the pattern to match the whole stem or parts separated by the naming separator
                    # This is a simplified approach; a more robust one might involve tokenizing the stem
                    # For now, check if the pattern exists as a whole word or part of a path segment
                    # Let's use a simple contains check for prediction/initial classification
                    compiled_map_keyword_regex_tuples[target_type].append((re.compile(pattern, re.IGNORECASE), keyword, rule_index))

        log.debug(f"  'Map Type Mapping' rules (from rules/config): {map_type_mapping_rules}")

        for file_rel_path in all_files_rel:
            # Skip files already classified as Extra or Model
            if file_rel_path in processed_files:
                continue

            file_stem = file_rel_path.stem
            match_found = False

            # Iterate through base types and their associated regex tuples
            for base_map_type, regex_tuples in compiled_map_keyword_regex_tuples.items():
                if match_found: break # Stop checking types for this file once matched

                # Get the original keywords list for the current rule index from the *applied* rules
                original_rule = None
                # Find the rule based on the first tuple's rule_index (they should all be the same for this base_map_type)
                if regex_tuples:
                    current_rule_index = regex_tuples[0][2] # Get rule_index from the first tuple
                    if current_rule_index < len(map_type_mapping_rules):
                        rule_candidate = map_type_mapping_rules[current_rule_index]
                        # Verify it's the correct rule by checking target_type
                        if rule_candidate.get("target_type") == base_map_type:
                            original_rule = rule_candidate
                        else:
                            log.warning(f"Rule index mismatch for {base_map_type} at index {current_rule_index} in applied rules. Searching...")
                            # Fallback search if index doesn't match (shouldn't happen ideally)
                            for idx, rule in enumerate(map_type_mapping_rules):
                                if rule.get("target_type") == base_map_type:
                                    original_rule = rule
                                    log.warning(f"Found rule for {base_map_type} at index {idx} instead.")
                                    break

                original_keywords_list = []
                if original_rule and 'keywords' in original_rule:
                    original_keywords_list = original_rule['keywords']
                else:
                    log.warning(f"Could not find original keywords list for rule matching base_map_type '{base_map_type}'. Keyword indexing may fail.")

                for kw_regex, original_keyword, rule_index in regex_tuples:
                    if kw_regex.search(file_stem):
                        # --- DIAGNOSTIC LOGGING START ---
                        log.debug(f"  Checking file '{file_rel_path.name}' against keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'")
                        # --- DIAGNOSTIC LOGGING END ---
                        log.debug(f"  Match found: '{file_rel_path}' matches keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'")

                        # Find the index of the matched keyword within its rule's list
                        keyword_index_in_rule = -1 # Default if not found
                        if original_keywords_list:
                            try:
                                # Use the original_keyword string directly
                                keyword_index_in_rule = original_keywords_list.index(original_keyword)
                            except ValueError:
                                log.warning(f"Keyword '{original_keyword}' not found in its original rule list? {original_keywords_list}")

                            # --- DIAGNOSTIC LOGGING START ---
                            # NOTE(review): duplicate of the diagnostic debug line logged
                            # above for the same match — likely a leftover; confirm
                            # before removing.
                            log.debug(f"  Checking file '{file_rel_path.name}' against keyword '{original_keyword}' (rule {rule_index}, pattern: '{kw_regex.pattern}') for base type '{base_map_type}'")
                            # --- DIAGNOSTIC LOGGING END ---
                        else:
                            log.warning(f"Original keywords list empty for rule {rule_index}, cannot find index for '{original_keyword}'.")

                        # Add candidate only if not already added
                        if not any(c['source_path'] == file_rel_path for c in potential_map_candidates):
                            potential_map_candidates.append({
                                'source_path': file_rel_path,
                                'matched_keyword': original_keyword,
                                'base_map_type': base_map_type,
                                'preset_rule_index': rule_index,
                                'keyword_index_in_rule': keyword_index_in_rule, # <<< STORE THE KEYWORD INDEX
                                'is_16bit_source': False
                            })
                        else:
                            log.warning(f"  '{file_rel_path}' was already added as a candidate? Skipping duplicate add.")

                        match_found = True
                        break # Stop checking regex tuples for this base_type once matched

        log.debug(f"Gathered {len(potential_map_candidates)} potential map candidates based on keywords.")

        # --- Step 3.5: Identify Standalone 16-bit Variants (Not caught by keywords) ---
        log.debug("Step 3.5: Checking for standalone 16-bit variants...")
        # Get bit depth regex map using fallback logic
        bit_depth_patterns = self._get_rule_with_fallback(rules, 'bit_depth_patterns', default={})
        compiled_bit_depth_regex = {
            base_type: re.compile(pattern, re.IGNORECASE)
            for base_type, pattern in bit_depth_patterns.items()
        }
        log.debug(f"  'Bit Depth' regex patterns (from rules/config): {bit_depth_patterns}")
        for file_rel_path in all_files_rel:
            # Skip if already processed or already identified as a candidate
            if file_rel_path in processed_files or any(c['source_path'] == file_rel_path for c in potential_map_candidates):
                continue

            for base_type, compiled_regex in compiled_bit_depth_regex.items():
                log.debug(f"  Step 3.5: Checking file '{file_rel_path.name}' against 16-bit pattern for '{base_type}': {compiled_regex.pattern}") # ADDED LOG
                match = compiled_regex.search(file_rel_path.name) # Store result
                if match:
                    log.debug(f"  --> MATCH FOUND for standalone 16-bit variant: '{file_rel_path}' for base type '{base_type}'") # MODIFIED LOG
                    potential_map_candidates.append({
                        'source_path': file_rel_path,
                        'matched_keyword': 'N/A (16bit variant)', # Placeholder keyword
                        'base_map_type': base_type,
                        'preset_rule_index': 9999, # Assign high index to avoid interfering with keyword priority
                        'is_16bit_source': True # Mark as 16-bit immediately
                    })
                    log.debug(f"  Added candidate: {potential_map_candidates[-1]}")
                    # Don't add to processed_files yet, let Step 4 handle filtering
                    break # Stop checking bit depth patterns for this file

        log.debug(f"Total potential map candidates after checking standalone 16-bit: {len(potential_map_candidates)}")

        # --- Step 4: Prioritize 16-bit Variants & Filter Candidates ---
        log.debug("Step 4: Prioritizing 16-bit variants and filtering candidates...")
        # Use the compiled bit depth regex from Step 3.5
        candidates_to_keep = []
        candidates_to_ignore = [] # Store 8-bit versions superseded by 16-bit

        # Mark 16-bit candidates
        for candidate in potential_map_candidates:
            base_type = candidate['base_map_type']
            # Check if the base type exists in the bit depth map AND the filename matches the regex
            if base_type in compiled_bit_depth_regex:
                if compiled_bit_depth_regex[base_type].search(candidate['source_path'].name):
                    candidate['is_16bit_source'] = True
                    log.debug(f"  Marked '{candidate['source_path']}' as 16-bit source for base type '{base_type}'.")

        # Identify base types that have a 16-bit version present
        prioritized_16bit_bases = {
            candidate['base_map_type'] for candidate in potential_map_candidates if candidate['is_16bit_source']
        }
        log.debug(f"  Base map types with 16-bit variants found: {prioritized_16bit_bases}")

        # Filter: Keep 16-bit versions, or 8-bit versions if no 16-bit exists for that base type
        for candidate in potential_map_candidates:
            if candidate['is_16bit_source']:
                candidates_to_keep.append(candidate)
                log.debug(f"  Keeping 16-bit candidate: {candidate['source_path']} ({candidate['base_map_type']})")
            elif candidate['base_map_type'] not in prioritized_16bit_bases:
                candidates_to_keep.append(candidate)
                log.debug(f"  Keeping 8-bit candidate (no 16-bit found): {candidate['source_path']} ({candidate['base_map_type']})")
            else:
                # This is an 8-bit candidate whose 16-bit counterpart exists
                candidates_to_ignore.append(candidate)
                log.debug(f"  Ignoring 8-bit candidate (16-bit found): {candidate['source_path']} ({candidate['base_map_type']})")

        # Add ignored 8-bit files to the main ignored list
        for ignored_candidate in candidates_to_ignore:
            self.classified_files["ignored"].append({
                'source_path': ignored_candidate['source_path'],
                'reason': f'Superseded by 16bit variant for {ignored_candidate["base_map_type"]}'
            })
            processed_files.add(ignored_candidate['source_path']) # Mark as processed

        log.debug(f"Filtered candidates. Keeping: {len(candidates_to_keep)}, Ignored: {len(candidates_to_ignore)}")

        # --- Step 5: Group, Sort, Assign Suffixes, and Finalize Maps ---
        log.debug("Step 5: Grouping, sorting, assigning suffixes, and finalizing maps...")
        # from collections import defaultdict # Moved import to top of file
        grouped_by_base_type = defaultdict(list)
        for candidate in candidates_to_keep:
            grouped_by_base_type[candidate['base_map_type']].append(candidate)

        final_map_list = []
        for base_map_type, candidates in grouped_by_base_type.items():
            # --- DIAGNOSTIC LOGGING START ---
            candidate_paths_str = [str(c['source_path']) for c in candidates]
            log.debug(f"  [DIAGNOSIS] Processing base_map_type: '{base_map_type}'. Candidates before sort: {candidate_paths_str}")
            # --- DIAGNOSTIC LOGGING END ---
            log.debug(f"  Processing final candidates for base type: '{base_map_type}' ({len(candidates)} candidates)")

            # --- NEW SORTING LOGIC ---
            # Sort candidates based on:
            # 1. The index of the rule object in the preset's map_type_mapping list.
            # 2. The index of the matched keyword within that rule object's 'keywords' list.
            # 3. Alphabetical order of the source file path as a tie-breaker.
            candidates.sort(key=lambda c: (
                c.get('preset_rule_index', 9999), # Use get with fallback for safety
                c.get('keyword_index_in_rule', 9999), # Use get with fallback for safety
                str(c['source_path'])
            ))
            # --- END NEW SORTING LOGIC ---

            # Removed diagnostic log

            # Add sorted candidates to the final list, but without assigning the suffix yet.
            # Suffix assignment will happen per-asset later.
            for final_candidate in candidates: # Use the directly sorted list
                # Store the base map type for now.
                final_map_list.append({
                    "map_type": base_map_type, # Store BASE type only
                    "source_path": final_candidate["source_path"],
                    "source_keyword": final_candidate["matched_keyword"],
                    "is_16bit_source": final_candidate["is_16bit_source"],
                    "original_extension": final_candidate["source_path"].suffix.lower() # Store original extension
                })
                processed_files.add(final_candidate["source_path"]) # Mark final map source as processed

        self.classified_files["maps"] = final_map_list

        # --- Step 6: Classify Remaining Files as 'Unrecognised' (in 'Extra') ---
        log.debug("Step 6: Classifying remaining files as 'Unrecognised'...")
        remaining_count = 0
        for file_rel_path in all_files_rel:
            if file_rel_path not in processed_files:
                log.debug(f"  Marking remaining file '{file_rel_path}' for 'Extra' folder (Unrecognised).")
                self.classified_files["extra"].append({'source_path': file_rel_path, 'reason': 'Unrecognised'})
                remaining_count += 1
                # No need to add to processed_files here, it's the final step
        log.debug(f"  Marked {remaining_count} remaining files as 'Unrecognised'.")

        # --- Final Summary ---
        # Note: self.metadata["source_files_in_extra"] is now populated per-asset in _generate_metadata_file
        log.info(f"File classification complete.")
        log.debug("--- Final Classification Summary (v2) ---")
        map_details_log = [f"{m['map_type']}:{m['source_path']}" for m in self.classified_files["maps"]]
        model_details_log = [str(f['source_path']) for f in self.classified_files["models"]]
        extra_details_log = [f"{str(f['source_path'])} ({f['reason']})" for f in self.classified_files["extra"]]
        ignored_details_log = [f"{str(f['source_path'])} ({f['reason']})" for f in self.classified_files["ignored"]]
        log.debug(f"  Identified Maps ({len(self.classified_files['maps'])}): {map_details_log}")
        log.debug(f"  Model Files ({len(self.classified_files['models'])}): {model_details_log}")
        log.debug(f"  Extra/Unrecognised Files ({len(self.classified_files['extra'])}): {extra_details_log}")
        log.debug(f"  Ignored Files ({len(self.classified_files['ignored'])}): {ignored_details_log}")
        log.debug("--- End File Inventory and Classification (v2) ---")

    def _determine_base_metadata(self, rules: SourceRule) -> Tuple[List[str], Dict[Path, Optional[str]]]:
        """
        Determines distinct asset base names within the input based on preset rules
        and maps each relevant source file to its determined base name.

        Args:
            rules: The hierarchical rules object.

        Returns:
            Tuple[List[str], Dict[Path, Optional[str]]]:
                - A list of unique, sanitized base names found.
                - A dictionary mapping source file relative paths to their determined
                  base name string (or None if no base name could be determined for that file).
        """
        if not self.temp_dir: raise AssetProcessingError("Workspace not setup.")
        log.info("Determining distinct base names and file mapping...")

        # Combine map and model files for base name determination
        relevant_files = self.classified_files.get('maps', []) + self.classified_files.get('models', [])
        if not relevant_files:
            log.warning("No map or model files found to determine base name(s).")
            # Fallback: Use input path name as a single asset
            input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name
            sanitized_input_name = self._sanitize_filename(input_name or "UnknownInput")
            # Map all files (maps, models, extra, ignored) to this fallback name
            all_files_paths = [f['source_path'] for cat in self.classified_files.values() for f in cat if 'source_path' in f]
            file_to_base_name_map = {f_path: sanitized_input_name for f_path in all_files_paths}
            log.info(f"Using input path name '{sanitized_input_name}' as the single asset name.")
            return [sanitized_input_name], file_to_base_name_map

        # --- Determine Base Names from Files ---
        # Get naming rules using fallback logic
        # NOTE(review): this method is truncated at the end of the visible chunk;
        # the assignment below continues past this view.
        naming_rules = self._get_rule_with_fallback(rules, 'source_naming_rules', default={})
        separator =
naming_rules.get('separator', self.config.source_naming_separator) # Fallback to config if not in rules - indices_dict = naming_rules.get('indices', self.config.source_naming_indices) # Fallback to config if not in rules - base_index_raw = indices_dict.get('base_name') - base_index = None - if base_index_raw is not None: - try: - base_index = int(base_index_raw) - except (ValueError, TypeError): - log.warning(f"Could not convert base_name index '{base_index_raw}' to integer (from rules/config). Base name determination might be inaccurate.") - - file_to_base_name_map: Dict[Path, Optional[str]] = {} - potential_base_names_per_file: Dict[Path, str] = {} # Store potential name for each file path - - if isinstance(base_index, int): - log.debug(f"Attempting base name extraction using separator '{separator}' and index {base_index}.") - for file_info in relevant_files: - file_path = file_info['source_path'] - stem = file_path.stem - parts = stem.split(separator) - if len(parts) > base_index: - extracted_name = parts[base_index] - sanitized_name = self._sanitize_filename(extracted_name) - if sanitized_name: # Ensure we don't add empty names - potential_base_names_per_file[file_path] = sanitized_name - log.debug(f" File '{file_path.name}' -> Potential Base Name: '{sanitized_name}'") - else: - log.debug(f" File '{file_path.name}' -> Extracted empty name at index {base_index}. Marking as None.") - file_to_base_name_map[file_path] = None # Explicitly mark as None if extraction yields empty - else: - log.debug(f" File '{file_path.name}' -> Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}. Marking as None.") - file_to_base_name_map[file_path] = None # Mark as None if index is invalid for this file - else: - log.warning("Base name index not configured or invalid. Cannot determine distinct assets based on index. 
Treating as single asset.") - # Fallback to common prefix if no valid index - stems = [f['source_path'].stem for f in relevant_files] - common_prefix_name = os.path.commonprefix(stems) if stems else "" - sanitized_common_name = self._sanitize_filename(common_prefix_name or self.input_path.stem or "UnknownAsset") - log.info(f"Using common prefix '{sanitized_common_name}' as the single asset name.") - # Map all relevant files to this single name - for file_info in relevant_files: - potential_base_names_per_file[file_info['source_path']] = sanitized_common_name - - # --- Consolidate Distinct Names and Final Mapping --- - distinct_base_names_set = set(potential_base_names_per_file.values()) - distinct_base_names = sorted(list(distinct_base_names_set)) # Sort for consistent processing order - - # Populate the final map, including files that didn't match the index rule (marked as None earlier) - for file_info in relevant_files: - file_path = file_info['source_path'] - if file_path not in file_to_base_name_map: # If not already marked as None - file_to_base_name_map[file_path] = potential_base_names_per_file.get(file_path) # Assign determined name or None if somehow missed - - # Add files from 'extra' and 'ignored' to the map, marking them as None for base name - for category in ['extra', 'ignored']: - for file_info in self.classified_files.get(category, []): - file_path = file_info['source_path'] - if file_path not in file_to_base_name_map: # Avoid overwriting if somehow already mapped - file_to_base_name_map[file_path] = None - log.debug(f" File '{file_path.name}' (Category: {category}) -> Marked as None (No Base Name).") - - - if not distinct_base_names: - # This case should be rare due to fallbacks, but handle it. - log.warning("No distinct base names could be determined. 
Using input name as fallback.") - input_name = self.input_path.stem if self.input_path.is_file() else self.input_path.name - fallback_name = self._sanitize_filename(input_name or "FallbackAsset") - distinct_base_names = [fallback_name] - # Remap all files to this single fallback name - file_to_base_name_map = {f_path: fallback_name for f_path in file_to_base_name_map.keys()} - - - log.info(f"Determined {len(distinct_base_names)} distinct asset base name(s): {distinct_base_names}") - log.debug(f"File-to-BaseName Map ({len(file_to_base_name_map)} entries): { {str(k): v for k, v in file_to_base_name_map.items()} }") # Log string paths for readability - - return distinct_base_names, file_to_base_name_map - - def _determine_single_asset_metadata(self, asset_base_name: str, filtered_classified_files: Dict[str, List[Dict]], rules: SourceRule) -> Dict[str, str]: - """ - Determines the asset_category, archetype, and supplier name for a single, - specific asset based on its filtered list of classified files and hierarchical rules. - - Args: - asset_base_name: The determined base name for this specific asset. - filtered_classified_files: A dictionary containing only the classified - files (maps, models, etc.) belonging to this asset. - - Returns: - A dictionary containing {"asset_category": str, "archetype": str}. 
- """ - log.debug(f"Determining category, archetype, and supplier for asset: '{asset_base_name}'") - - # Determine Supplier Name using fallback - determined_supplier_name = self._get_rule_with_fallback(rules, 'supplier_name', asset_name=asset_base_name, default=self.config.supplier_name) - log.debug(f" Determined Supplier Name for '{asset_base_name}': {determined_supplier_name}") - - - determined_category = self._get_rule_with_fallback(rules, 'default_asset_category', asset_name=asset_base_name, default=self.config.default_asset_category) # Start with default from rules/config - determined_archetype = "Unknown" - - # --- Determine Asset Category --- - if filtered_classified_files.get("models"): - determined_category = "Asset" - log.debug(f" Category set to 'Asset' for '{asset_base_name}' due to model file presence.") - else: - # Check for Decal keywords only if not an Asset - # Get decal keywords using fallback - decal_keywords = self._get_rule_with_fallback(rules, 'decal_keywords', asset_name=asset_base_name, default=[]) - found_decal = False - # Check map names first for decal keywords - candidate_files = [f['source_path'] for f in filtered_classified_files.get('maps', [])] - # Fallback to checking extra files if no maps found for this asset - if not candidate_files: - candidate_files = [f['source_path'] for f in filtered_classified_files.get('extra', [])] - - if decal_keywords: - for file_path in candidate_files: - # Check against the specific file's name within this asset's context - for keyword in decal_keywords: - if keyword.lower() in file_path.name.lower(): - determined_category = "Decal" - found_decal = True; break - if found_decal: break - if found_decal: log.debug(f" Category set to 'Decal' for '{asset_base_name}' due to keyword match.") - # If not Asset or Decal, it remains the default (e.g., "Texture") - - log.debug(f" Determined Category for '{asset_base_name}': {determined_category}") - - - # --- Determine Archetype (Usage) --- - # Get archetype 
rules using fallback - archetype_rules = self._get_rule_with_fallback(rules, 'archetype_rules', asset_name=asset_base_name, default=[]) - # Use stems from maps and models belonging *only* to this asset - check_stems = [f['source_path'].stem.lower() for f in filtered_classified_files.get('maps', [])] - check_stems.extend([f['source_path'].stem.lower() for f in filtered_classified_files.get('models', [])]) - # Also check the determined base name itself - check_stems.append(asset_base_name.lower()) - - if check_stems: - best_match_archetype = "Unknown" - # Using simple "first match wins" logic as before - for rule in archetype_rules: - if len(rule) != 2 or not isinstance(rule[1], dict): continue - arch_name, rules_dict = rule - match_any = rules_dict.get("match_any", []) - matched_any_keyword = False - if match_any: - for keyword in match_any: - kw_lower = keyword.lower() - for stem in check_stems: - if kw_lower in stem: # Simple substring check - matched_any_keyword = True - break # Found a match for this keyword - if matched_any_keyword: break # Found a match for this rule's keywords - - if matched_any_keyword: - best_match_archetype = arch_name - log.debug(f" Archetype match '{arch_name}' for '{asset_base_name}' based on keywords: {match_any}") - break # First rule match wins - - determined_archetype = best_match_archetype - - log.debug(f" Determined Archetype for '{asset_base_name}': {determined_archetype}") - - return {"asset_category": determined_category, "archetype": determined_archetype, "supplier_name": determined_supplier_name} - - - - def _process_individual_maps(self, filtered_maps_list: List[Dict], current_asset_metadata: Dict, loaded_data_cache: dict, rules: SourceRule) -> Tuple[Dict[str, Dict[str, Dict]], Dict[str, Dict], str, List[Dict]]: - """ - Processes, resizes, and saves classified map files for a specific asset - that are NOT used as inputs for merge rules, applying hierarchical rules. - Uses helper functions. 
- - Args: - filtered_maps_list: List of map dictionaries belonging to the current asset. - current_asset_metadata: Metadata dictionary for the current asset. - loaded_data_cache: Cache dictionary for loaded/resized source data. - - Returns: - Tuple containing: - - processed_maps_details_asset: Dict mapping map_type to resolution details. - - image_stats_asset: Dict mapping map_type to calculated image statistics. - - aspect_ratio_change_string_asset: String indicating aspect ratio change. - - ignored_rough_maps: List of map dictionaries for native rough maps ignored due to gloss priority. - - """ - if not self.temp_dir: raise AssetProcessingError("Workspace not setup.") - asset_name = current_asset_metadata.get("asset_name", "UnknownAsset") - log.info(f"Processing individual map files for asset '{asset_name}'...") - - # Initialize results specific to this asset - processed_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict) - image_stats_asset: Dict[str, Dict] = {} - map_details_asset: Dict[str, Dict] = {} # Store details like source bit depth, gloss inversion - aspect_ratio_change_string_asset: str = "N/A" - ignored_rough_maps: List[Dict] = [] # Store ignored native rough maps - - # --- Settings retrieval using fallback --- - resolutions = self._get_rule_with_fallback(rules, 'image_resolutions', asset_name=asset_name, default=self.config.image_resolutions) - stats_res_key = self._get_rule_with_fallback(rules, 'calculate_stats_resolution', asset_name=asset_name, default=self.config.calculate_stats_resolution) - stats_target_dim = resolutions.get(stats_res_key) - if not stats_target_dim: log.warning(f"Stats resolution key '{stats_res_key}' not found in rules/config. 
Stats skipped for '{asset_name}'.") - gloss_keywords = self._get_rule_with_fallback(rules, 'source_glossiness_keywords', asset_name=asset_name, default=self.config.source_glossiness_keywords) - # target_pattern = _get_rule_with_fallback('target_filename_pattern', asset_name=asset_name, default=self.config.target_filename_pattern) # Not needed here, handled by _save_image - base_name = asset_name # Use the asset name passed in - - # --- Pre-process Glossiness -> Roughness --- - # This logic needs to stay here to determine which ROUGH source to use - # and potentially ignore the native one. - derived_from_gloss_flag = {} - gloss_map_info_for_rough, native_rough_map_info = None, None - for map_info in filtered_maps_list: - # Use the final assigned map_type (e.g., ROUGH, ROUGH-1) - if map_info['map_type'].startswith('ROUGH'): - is_gloss = any(kw.lower() in map_info['source_path'].stem.lower() for kw in gloss_keywords) - if is_gloss: - # If multiple gloss sources map to ROUGH variants, prioritize the first one? - # For now, assume only one gloss source maps to ROUGH variants. - if gloss_map_info_for_rough is None: gloss_map_info_for_rough = map_info - else: - # If multiple native rough sources map to ROUGH variants, prioritize the first one? - if native_rough_map_info is None: native_rough_map_info = map_info - - rough_source_to_use_info = None # Store the map_info dict of the source to use - if gloss_map_info_for_rough: - rough_source_to_use_info = gloss_map_info_for_rough - derived_from_gloss_flag['ROUGH'] = True # Apply to all ROUGH variants if derived from gloss - if native_rough_map_info: - log.warning(f"Asset '{asset_name}': Both Gloss source ('{gloss_map_info_for_rough['source_path']}') and Rough source ('{native_rough_map_info['source_path']}') found for ROUGH maps. 
Prioritizing Gloss.") - ignored_rough_maps.append({'source_path': native_rough_map_info['source_path'], 'reason': 'Superseded by Gloss->Rough'}) - elif native_rough_map_info: - rough_source_to_use_info = native_rough_map_info - derived_from_gloss_flag['ROUGH'] = False - - # --- Identify maps used in merge rules (using fallback) --- - merge_rules = self._get_rule_with_fallback(rules, 'map_merge_rules', asset_name=asset_name, default=self.config.map_merge_rules) - merge_input_map_types = set() - for rule in merge_rules: - inputs_mapping = rule.get("inputs", {}) - for source_map_type in inputs_mapping.values(): - # Use the base type for checking against merge rules - base_type = _get_base_map_type(source_map_type) - merge_input_map_types.add(base_type) - log.debug(f"Map types used as input for merge rules (from rules/config): {merge_input_map_types}") - - log.info(f"Processing individual map files for asset '{asset_name}'...") - - # --- Aspect Ratio Calculation Setup --- - # We need original dimensions once per asset for aspect ratio. - # Find the first map to process to get its dimensions. 
- first_map_info_for_aspect = next((m for m in filtered_maps_list), None) # Use the original list - orig_w_aspect, orig_h_aspect = None, None - if first_map_info_for_aspect: - # Load just to get dimensions (might hit cache if used later) - # Use the first resolution key as a representative target for loading - first_res_key = next(iter(resolutions)) - temp_img_for_dims, _ = self._load_and_transform_source( - first_map_info_for_aspect['source_path'], - first_map_info_for_aspect['map_type'], # Use original map type for loading - first_res_key, - False, # is_gloss_source doesn't matter for dims - loaded_data_cache # Use the main cache - ) - if temp_img_for_dims is not None: - orig_h_aspect, orig_w_aspect = temp_img_for_dims.shape[:2] - log.debug(f"Got original dimensions ({orig_w_aspect}x{orig_h_aspect}) for aspect ratio calculation from {first_map_info_for_aspect['source_path']}") - else: - log.warning(f"Could not load image {first_map_info_for_aspect['source_path']} to get original dimensions for aspect ratio.") - else: - log.warning("No maps found to process individually, cannot calculate aspect ratio string.") - - - # --- Process Each Individual Map --- - # Iterate through the original list and filter/apply overrides here - for map_info in filtered_maps_list: - source_path_rel = map_info['source_path'] - original_extension = map_info.get('original_extension', '.png') - current_map_type = map_info['map_type'] # Start with the classified type - - # --- Apply map_type_override from FileRule (Step 2) --- - map_type_override = self._get_rule_with_fallback( - rules, - 'map_type_override', - file_path=source_path_rel, - asset_name=asset_name, - default=None - ) - if map_type_override is not None: - log.debug(f"Asset '{asset_name}': Applying map_type_override '{map_type_override}' for source '{source_path_rel.name}' (originally '{current_map_type}').") - map_type = map_type_override # Use the override - else: - map_type = current_map_type # Use the original classified 
type - - # --- Check if this map should be processed individually (Step 3) --- - base_map_type = _get_base_map_type(map_type) # Use the potentially updated map_type - if base_map_type in merge_input_map_types: - log.debug(f"Skipping individual processing for {map_type} (Source: {source_path_rel}) as its base type '{base_map_type}' is used in merge rules (from rules/config).") - continue # Skip to the next map in the loop - - # Skip native rough map if gloss was prioritized (keep this check) - # This check needs to use the *original* source path to see if it was the native rough that was ignored - # The ignored_rough_maps list contains the original map_info dicts. - if map_type.startswith('ROUGH') and any(ignored['source_path'] == source_path_rel for ignored in ignored_rough_maps): - log.debug(f"Skipping individual processing of native rough map '{source_path_rel}' as gloss version was prioritized.") - continue # Skip to the next map in the loop - - - # Determine if this specific map type should use gloss inversion logic - # If ROUGH-1, ROUGH-2 etc derive from gloss, they all use inversion - # This check should use the *final* map_type after override - is_gloss_source_for_this_map = map_type.startswith('ROUGH') and derived_from_gloss_flag.get('ROUGH', False) - - - log.info(f"-- Asset '{asset_name}': Processing Individual Map: {map_type} (Source: {source_path_rel.name}) --") - current_map_details = {"derived_from_gloss": is_gloss_source_for_this_map} - source_bit_depth_found = None # Track if we've found the bit depth for this map type - - try: - # --- Loop through target resolutions --- - for res_key, target_dim_px in resolutions.items(): - log.debug(f"Processing {map_type} for resolution: {res_key}...") - - # --- 1. 
Load and Transform Source (using helper + cache) --- - img_resized, source_dtype = self._load_and_transform_source( - source_path_rel=source_path_rel, - map_type=map_type, # Pass the specific map type (e.g., ROUGH-1) - target_resolution_key=res_key, - is_gloss_source=is_gloss_source_for_this_map, - cache=loaded_data_cache - ) - - if img_resized is None: - log.warning(f"Failed to load/transform source {source_path_rel} for {res_key}. Skipping resolution.") - continue # Skip this resolution - - # Store source bit depth once found - if source_dtype is not None and source_bit_depth_found is None: - source_bit_depth_found = 16 if source_dtype == np.uint16 else (8 if source_dtype == np.uint8 else 8) # Default non-uint to 8 - current_map_details["source_bit_depth"] = source_bit_depth_found - log.debug(f"Stored source bit depth for {map_type}: {source_bit_depth_found}") - - # --- 2. Calculate Stats (if applicable) --- - if res_key == stats_res_key and stats_target_dim: - log.debug(f"Calculating stats for {map_type} using {res_key} image...") - stats = _calculate_image_stats(img_resized) - if stats: image_stats_asset[map_type] = stats - else: log.warning(f"Stats calculation failed for {map_type} at {res_key}.") - - # --- 3. 
Calculate Aspect Ratio Change String (once per asset, using pre-calculated dims) --- - if aspect_ratio_change_string_asset == "N/A" and orig_w_aspect is not None and orig_h_aspect is not None: - target_w_aspect, target_h_aspect = img_resized.shape[1], img_resized.shape[0] # Use current resized dims - try: - aspect_string = self._normalize_aspect_ratio_change(orig_w_aspect, orig_h_aspect, target_w_aspect, target_h_aspect) - aspect_ratio_change_string_asset = aspect_string - log.debug(f"Stored aspect ratio change string using {res_key}: '{aspect_string}'") - except Exception as aspect_err: - log.error(f"Failed to calculate aspect ratio change string using {res_key}: {aspect_err}", exc_info=True) - aspect_ratio_change_string_asset = "Error" - elif aspect_ratio_change_string_asset == "N/A": - # This case happens if we couldn't get original dims - aspect_ratio_change_string_asset = "Unknown" # Set to unknown instead of recalculating - - - # --- 4. Save Image (using helper) --- - source_info = { - 'original_extension': original_extension, - 'source_bit_depth': source_bit_depth_found or 8, # Use found depth or default - 'involved_extensions': {original_extension} # Only self for individual maps - } - # Get bit depth rule using fallback logic - bit_depth_rules_map = self._get_rule_with_fallback(rules, 'output_bit_depth_rules', file_path=source_path_rel, asset_name=asset_name, default={}) - bit_depth_rule = bit_depth_rules_map.get(map_type, 'respect') # Default to 'respect' if map type not in rules/config - - # Get additional config values using fallback for _save_image - output_formats_16bit = self._get_rule_with_fallback(rules, 'get_16bit_output_formats', file_path=source_path_rel, asset_name=asset_name, default=self.config.get_16bit_output_formats()) - output_format_8bit = self._get_rule_with_fallback(rules, 'get_8bit_output_format', file_path=source_path_rel, asset_name=asset_name, default=self.config.get_8bit_output_format()) - resolution_threshold_for_jpg = 
self._get_rule_with_fallback(rules, 'resolution_threshold_for_jpg', file_path=source_path_rel, asset_name=asset_name, default=self.config.resolution_threshold_for_jpg) - force_lossless_map_types = self._get_rule_with_fallback(rules, 'force_lossless_map_types', asset_name=asset_name, default=self.config.force_lossless_map_types) # This rule applies to map type, not individual file path - jpg_quality = self._get_rule_with_fallback(rules, 'jpg_quality', file_path=source_path_rel, asset_name=asset_name, default=self.config.jpg_quality) - png_compression_level = self._get_rule_with_fallback(rules, '_core_settings', asset_name=asset_name, default=self.config._core_settings).get('PNG_COMPRESSION_LEVEL', 6) # This rule applies broadly, not per file - target_filename_pattern = self._get_rule_with_fallback(rules, 'target_filename_pattern', file_path=source_path_rel, asset_name=asset_name, default=self.config.target_filename_pattern) - # image_resolutions is already retrieved at the start of the method - - save_result = self._save_image( - image_data=img_resized, - map_type=map_type, - resolution_key=res_key, - asset_base_name=base_name, - source_info=source_info, - output_bit_depth_rule=bit_depth_rule, - temp_dir=self.temp_dir, - # Pass retrieved config values - output_formats_16bit=output_formats_16bit, - output_format_8bit=output_format_8bit, - resolution_threshold_for_jpg=resolution_threshold_for_jpg, - force_lossless_map_types=force_lossless_map_types, - jpg_quality=jpg_quality, - png_compression_level=png_compression_level, - target_filename_pattern=target_filename_pattern, - image_resolutions=resolutions # Pass the already retrieved resolutions dict - ) - - # --- 5. 
Store Result --- - if save_result: - processed_maps_details_asset.setdefault(map_type, {})[res_key] = save_result - # Update overall map detail (e.g., final format) if needed - current_map_details["output_format"] = save_result.get("format") - else: - log.error(f"Failed to save {map_type} at {res_key}.") - processed_maps_details_asset.setdefault(map_type, {})[f'error_{res_key}'] = "Save failed" - - - except Exception as map_proc_err: - log.error(f"Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}", exc_info=True) - processed_maps_details_asset.setdefault(map_type, {})['error'] = str(map_proc_err) - - # Store collected details for this map type - map_details_asset[map_type] = current_map_details - - # --- Final Metadata Updates (Handled in main process loop) --- - # Update the passed-in current_asset_metadata dictionary directly with map_details - # This avoids returning it and merging later. - current_asset_metadata["map_details"] = map_details_asset - - log.info(f"Finished processing individual map files for asset '{asset_name}'.") - return processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset, ignored_rough_maps - - - - - def _merge_maps_from_source(self, processed_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files: Dict[str, List[Dict]], current_asset_metadata: Dict, loaded_data_cache: dict, rules: SourceRule) -> Dict[str, Dict[str, Dict]]: - """ - Merges channels from different SOURCE maps for a specific asset based on rules - in configuration, using helper functions for loading and saving. - - Args: - processed_maps_details_asset: Details of processed maps (used to find common resolutions). - filtered_classified_files: Classified files dictionary filtered for this asset (used to find source paths). - current_asset_metadata: Metadata dictionary for the current asset. - loaded_data_cache: Cache dictionary for loaded/resized source data. 
- - Returns: - Dict[str, Dict[str, Dict]]: Details of the merged maps created for this asset. - """ - if not self.temp_dir: raise AssetProcessingError("Workspace not setup.") - asset_name = current_asset_metadata.get("asset_name", "UnknownAsset") - # Get gloss keywords using fallback logic - gloss_keywords = self._get_rule_with_fallback(rules, 'source_glossiness_keywords', asset_name=asset_name, default=self.config.source_glossiness_keywords) - - # Get merge rules using fallback logic - merge_rules = self._get_rule_with_fallback(rules, 'map_merge_rules', asset_name=asset_name, default=self.config.map_merge_rules) - log.info(f"Asset '{asset_name}': Applying {len(merge_rules)} map merging rule(s) from source (from rules/config)...") - - # Initialize results for this asset - merged_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict) - - for rule_index, rule in enumerate(merge_rules): - output_map_type = rule.get("output_map_type") - inputs_mapping = rule.get("inputs") # e.g., {"R": "AO", "G": "ROUGH", "B": "METAL"} - defaults = rule.get("defaults", {}) - rule_bit_depth = rule.get("output_bit_depth", "respect_inputs") - - if not output_map_type or not inputs_mapping: - log.warning(f"Asset '{asset_name}': Skipping merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs'. 
Rule: {rule}") - continue - - log.info(f"-- Asset '{asset_name}': Applying merge rule for '{output_map_type}' --") - - # --- Find required SOURCE files and their details for this asset --- - required_input_sources = {} # map_type -> {'source_path': Path, 'original_extension': str, 'is_gloss_source': bool} - possible_to_find_sources = True - for input_type in set(inputs_mapping.values()): # e.g., {"AO", "ROUGH", "METAL"} - found_source_for_type = False - # Search in the filtered classified maps for this asset - for classified_map in filtered_classified_files.get("maps", []): - # Check if the classified map's type matches the required input type - # This needs to handle variants (e.g., ROUGH-1 should match ROUGH) - if classified_map['map_type'].startswith(input_type): - source_path_rel = classified_map.get('source_path') - if not source_path_rel: continue # Skip if path is missing - - # Determine if this source is gloss (only relevant if input_type is ROUGH) - is_gloss = False - if input_type == 'ROUGH': - is_gloss = any(kw.lower() in source_path_rel.stem.lower() for kw in gloss_keywords) - # Prioritize gloss source if both exist (logic from _process_individual_maps) - native_rough_exists = any(m['map_type'].startswith('ROUGH') and not any(gk.lower() in m['source_path'].stem.lower() for gk in gloss_keywords) for m in filtered_classified_files.get("maps", [])) - if is_gloss and native_rough_exists: - log.debug(f"Merge input '{input_type}': Prioritizing gloss source '{source_path_rel}' over native rough.") - elif not is_gloss and native_rough_exists and any(m['map_type'].startswith('ROUGH') and any(gk.lower() in m['source_path'].stem.lower() for gk in gloss_keywords) for m in filtered_classified_files.get("maps", [])): - log.debug(f"Merge input '{input_type}': Skipping native rough source '{source_path_rel}' because gloss source exists.") - continue # Skip this native rough source - - required_input_sources[input_type] = { - 'source_path': source_path_rel, - 
'original_extension': classified_map.get('original_extension', '.png'), - 'is_gloss_source': is_gloss - } - found_source_for_type = True - log.debug(f"Found source for merge input '{input_type}': {source_path_rel} (Gloss: {is_gloss})") - break # Found the first matching source for this input type - if not found_source_for_type: - log.warning(f"Asset '{asset_name}': Required source file for input map type '{input_type}' not found in classified files. Cannot perform merge for '{output_map_type}'.") - possible_to_find_sources = False - break - - if not possible_to_find_sources: - continue # Skip this merge rule - - # --- Determine common resolutions based on *processed* maps (as a proxy for available sizes) --- - # This assumes _process_individual_maps ran first and populated processed_maps_details_asset - possible_resolutions_per_input = [] - # Get resolutions using fallback - resolutions = self._get_rule_with_fallback(rules, 'image_resolutions', asset_name=asset_name, default=self.config.image_resolutions) - - for input_type in set(inputs_mapping.values()): - if input_type in processed_maps_details_asset: - res_keys = {res for res, details in processed_maps_details_asset[input_type].items() if isinstance(details, dict) and 'error' not in details} - if not res_keys: - log.warning(f"Asset '{asset_name}': Input map type '{input_type}' for merge rule '{output_map_type}' has no successfully processed resolutions (needed for size check).") - possible_resolutions_per_input = [] - break - possible_resolutions_per_input.append(res_keys) - else: - # This case might happen if the input map is *only* used for merging - # We need a way to determine available resolutions without relying on prior processing. - # For now, we'll rely on the check above ensuring the source exists. - # We'll load the source at *all* target resolutions and let _load_and_transform_source handle skipping if upscale is needed. 
- log.debug(f"Input map type '{input_type}' for merge rule '{output_map_type}' might not have been processed individually. Will attempt loading source for all target resolutions.") - # Add all configured resolutions as possibilities for this input - possible_resolutions_per_input.append(set(resolutions.keys())) - - - if not possible_resolutions_per_input: - log.warning(f"Asset '{asset_name}': Cannot determine common resolutions for '{output_map_type}'. Skipping rule.") - continue - - common_resolutions = set.intersection(*possible_resolutions_per_input) - - if not common_resolutions: - log.warning(f"Asset '{asset_name}': No common resolutions found among required inputs {set(inputs_mapping.values())} for merge rule '{output_map_type}'. Skipping rule.") - continue - log.debug(f"Asset '{asset_name}': Common resolutions for '{output_map_type}': {common_resolutions}") - - # --- Loop through common resolutions --- - res_order = {k: self.config.image_resolutions[k] for k in common_resolutions if k in self.config.image_resolutions} - if not res_order: - log.warning(f"Asset '{asset_name}': Common resolutions {common_resolutions} do not match config. 
Skipping merge for '{output_map_type}'.") - continue - - sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True) - base_name = asset_name # Use current asset's name - - for current_res_key in sorted_res_keys: - log.debug(f"Asset '{asset_name}': Merging '{output_map_type}' for resolution: {current_res_key}") - try: - loaded_inputs_data = {} # map_type -> loaded numpy array - source_info_for_save = {'involved_extensions': set(), 'max_input_bit_depth': 8} - - # --- Load required SOURCE maps using helper --- - possible_to_load = True - target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B'] - - for map_type in set(inputs_mapping.values()): # e.g., {"AO", "ROUGH", "METAL"} - source_details = required_input_sources.get(map_type) - if not source_details: - log.error(f"Internal Error: Source details missing for '{map_type}' during merge load.") - possible_to_load = False; break - - source_path_rel = source_details['source_path'] - is_gloss = source_details['is_gloss_source'] - original_ext = source_details['original_extension'] - source_info_for_save['involved_extensions'].add(original_ext) - - log.debug(f"Loading source '{source_path_rel}' for merge input '{map_type}' at {current_res_key} (Gloss: {is_gloss})") - img_resized, source_dtype = self._load_and_transform_source( - source_path_rel=source_path_rel, - map_type=map_type, # Pass the base map type (e.g., ROUGH) - target_resolution_key=current_res_key, - is_gloss_source=is_gloss, - cache=loaded_data_cache - ) - - if img_resized is None: - log.warning(f"Asset '{asset_name}': Failed to load/transform source '{source_path_rel}' for merge input '{map_type}' at {current_res_key}. 
Skipping resolution.") - possible_to_load = False; break - - loaded_inputs_data[map_type] = img_resized - - # Track max source bit depth - if source_dtype == np.uint16: - source_info_for_save['max_input_bit_depth'] = max(source_info_for_save['max_input_bit_depth'], 16) - # Add other dtype checks if needed (e.g., float32 -> 16?) - - if not possible_to_load: continue - - # --- Calculate Stats for ROUGH source if used and at stats resolution --- - stats_res_key = self.config.calculate_stats_resolution - if current_res_key == stats_res_key: - log.debug(f"Asset '{asset_name}': Checking for ROUGH source stats for '{output_map_type}' at {stats_res_key}") - for target_channel, source_map_type in inputs_mapping.items(): - if source_map_type == 'ROUGH' and source_map_type in loaded_inputs_data: - log.debug(f"Asset '{asset_name}': Calculating stats for ROUGH source (mapped to channel '{target_channel}') for '{output_map_type}' at {stats_res_key}") - rough_image_data = loaded_inputs_data[source_map_type] - rough_stats = _calculate_image_stats(rough_image_data) - if rough_stats: - # Ensure the nested dictionary structure exists - if "merged_map_channel_stats" not in current_asset_metadata: - current_asset_metadata["merged_map_channel_stats"] = {} - if output_map_type not in current_asset_metadata["merged_map_channel_stats"]: - current_asset_metadata["merged_map_channel_stats"][output_map_type] = {} - if target_channel not in current_asset_metadata["merged_map_channel_stats"][output_map_type]: - current_asset_metadata["merged_map_channel_stats"][output_map_type][target_channel] = {} - - current_asset_metadata["merged_map_channel_stats"][output_map_type][target_channel][stats_res_key] = rough_stats - log.debug(f"Asset '{asset_name}': Stored ROUGH stats for '{output_map_type}' channel '{target_channel}' at {stats_res_key}: {rough_stats}") - else: - log.warning(f"Asset '{asset_name}': Failed to calculate ROUGH stats for '{output_map_type}' channel '{target_channel}' at 
{stats_res_key}.") - - # --- Determine dimensions --- - # All loaded inputs should have the same dimensions for this resolution - first_map_type = next(iter(loaded_inputs_data)) - h, w = loaded_inputs_data[first_map_type].shape[:2] - num_target_channels = len(target_channels) - - # --- Prepare and Merge Channels --- - merged_channels_float32 = [] - for target_channel in target_channels: # e.g., 'R', 'G', 'B' - source_map_type = inputs_mapping.get(target_channel) # e.g., "AO", "ROUGH", "METAL" - channel_data_float32 = None - - if source_map_type and source_map_type in loaded_inputs_data: - img_input = loaded_inputs_data[source_map_type] # Get the loaded NumPy array - - # Ensure input is float32 0-1 range for merging - if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0 - elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0 - elif img_input.dtype == np.float16: img_float = img_input.astype(np.float32) # Assume float16 is 0-1 - else: img_float = img_input.astype(np.float32) # Assume other floats are 0-1 - - num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1 - - # Extract the correct channel - if num_source_channels >= 3: - if target_channel == 'R': channel_data_float32 = img_float[:, :, 0] - elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1] - elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2] - elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3] - else: log.warning(f"Target channel '{target_channel}' invalid for 3/4 channel source '{source_map_type}'.") - elif num_source_channels == 1 or len(img_float.shape) == 2: - # If source is grayscale, use it for R, G, B, or A target channels - channel_data_float32 = img_float.reshape(h, w) - else: - log.warning(f"Unexpected shape {img_float.shape} for source '{source_map_type}'.") - - # Apply default if channel data couldn't be extracted - if 
channel_data_float32 is None: - default_val = defaults.get(target_channel) - if default_val is None: - raise AssetProcessingError(f"Missing input/default for target channel '{target_channel}' in merge rule '{output_map_type}'.") - log.debug(f"Using default value {default_val} for target channel '{target_channel}' in '{output_map_type}'.") - channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32) - - merged_channels_float32.append(channel_data_float32) - - if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: - raise AssetProcessingError(f"Channel count mismatch during merge for '{output_map_type}'. Expected {num_target_channels}, got {len(merged_channels_float32)}.") - - merged_image_float32 = cv2.merge(merged_channels_float32) - log.debug(f"Merged channels for '{output_map_type}' ({current_res_key}). Result shape: {merged_image_float32.shape}, dtype: {merged_image_float32.dtype}") - - # --- Save Merged Map using Helper --- - # Get additional config values using fallback for _save_image - output_formats_16bit = self._get_rule_with_fallback(rules, 'get_16bit_output_formats', asset_name=asset_name, default=self.config.get_16bit_output_formats()) - output_format_8bit = self._get_rule_with_fallback(rules, 'get_8bit_output_format', asset_name=asset_name, default=self.config.get_8bit_output_format()) - resolution_threshold_for_jpg = self._get_rule_with_fallback(rules, 'resolution_threshold_for_jpg', asset_name=asset_name, default=self.config.resolution_threshold_for_jpg) - force_lossless_map_types = self._get_rule_with_fallback(rules, 'force_lossless_map_types', asset_name=asset_name, default=self.config.force_lossless_map_types) - jpg_quality = self._get_rule_with_fallback(rules, 'jpg_quality', asset_name=asset_name, default=self.config.jpg_quality) - png_compression_level = self._get_rule_with_fallback(rules, '_core_settings', asset_name=asset_name, default=self.config._core_settings).get('PNG_COMPRESSION_LEVEL', 6) 
- target_filename_pattern = self._get_rule_with_fallback(rules, 'target_filename_pattern', asset_name=asset_name, default=self.config.target_filename_pattern) - # image_resolutions is already retrieved at the start of the method - - save_result = self._save_image( - image_data=merged_image_float32, # Pass the merged float32 data - map_type=output_map_type, - resolution_key=current_res_key, - asset_base_name=base_name, - source_info=source_info_for_save, # Pass collected source info - output_bit_depth_rule=rule_bit_depth, # Pass the rule's requirement - temp_dir=self.temp_dir, - # Pass retrieved config values - output_formats_16bit=output_formats_16bit, - output_format_8bit=output_format_8bit, - resolution_threshold_for_jpg=resolution_threshold_for_jpg, - force_lossless_map_types=force_lossless_map_types, - jpg_quality=jpg_quality, - png_compression_level=png_compression_level, - target_filename_pattern=target_filename_pattern, - image_resolutions=resolutions # Pass the already retrieved resolutions dict - ) - - # --- Record details locally --- - if save_result: - merged_maps_details_asset[output_map_type][current_res_key] = save_result - else: - log.error(f"Asset '{asset_name}': Failed to save merged map '{output_map_type}' at resolution '{current_res_key}'.") - merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = "Save failed via helper" - - - except Exception as merge_res_err: - log.error(f"Asset '{asset_name}': Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}", exc_info=True) - # Store error locally for this asset - merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err) - - log.info(f"Asset '{asset_name}': Finished applying map merging rules.") - # Return the details for this asset - return merged_maps_details_asset - - - - def _generate_metadata_file(self, current_asset_metadata: Dict, processed_maps_details_asset: Dict[str, Dict[str, 
    def _generate_metadata_file(self, current_asset_metadata: Dict, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], map_details_asset: Dict[str, Dict], rules: SourceRule,
                                # Parameters passed down from calling method using _get_rule_with_fallback
                                preset_name: str, metadata_filename: str) -> Path:
        """
        Gathers metadata for a specific asset and writes it to a temporary JSON file,
        applying hierarchical rules for metadata fields.

        Args:
            current_asset_metadata: Base metadata for this asset (name, category, archetype, etc.).
            processed_maps_details_asset: Details of processed maps for this asset.
            merged_maps_details_asset: Details of merged maps for this asset.
            filtered_classified_files_asset: Classified files belonging only to this asset.
            unmatched_files_paths: List of relative paths for files not matched to any base name.
            map_details_asset: Dictionary containing details like source bit depth, gloss inversion per map type.
            rules: The hierarchical rules object.
            preset_name: The name of the preset being used (retrieved via fallback).
            metadata_filename: The standard filename for metadata (retrieved via fallback).

        Returns:
            Path: The path to the generated temporary metadata file.

        Raises:
            AssetProcessingError: If the workspace is not set up or the JSON file cannot be written.
        """
        if not self.temp_dir: raise AssetProcessingError("Workspace not setup.")
        asset_name = current_asset_metadata.get("asset_name")
        if not asset_name or asset_name == "UnknownAssetName":
            log.warning("Asset name unknown during metadata generation, file may be incomplete or incorrectly named.")
            asset_name = "UnknownAsset_Metadata"  # Fallback for filename

        log.info(f"Generating metadata file for asset '{asset_name}'...")

        # Start with the base metadata passed in for this asset
        final_metadata = current_asset_metadata.copy()

        # Apply hierarchical rules for specific metadata fields
        final_metadata["supplier_name"] = self._get_rule_with_fallback(rules, 'supplier_name', asset_name=asset_name, default=final_metadata.get("supplier_name", self.config.supplier_name))
        final_metadata["asset_category"] = self._get_rule_with_fallback(rules, 'default_asset_category', asset_name=asset_name, default=final_metadata.get("asset_category", self.config.default_asset_category))
        final_metadata["archetype"] = self._get_rule_with_fallback(rules, 'archetype', asset_name=asset_name, default=final_metadata.get("archetype", "Unknown"))  # Archetype is determined earlier, but allow override

        # Populate map details from the specific asset's processing results
        # Add merged map channel stats
        final_metadata["merged_map_channel_stats"] = current_asset_metadata.get("merged_map_channel_stats", {})  # Get from passed metadata

        # Record only resolutions that processed without an 'error' marker
        final_metadata["processed_map_resolutions"] = {}
        for map_type, res_dict in processed_maps_details_asset.items():
            keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]
            if keys: final_metadata["processed_map_resolutions"][map_type] = sorted(keys)

        final_metadata["merged_map_resolutions"] = {}
        for map_type, res_dict in merged_maps_details_asset.items():
            keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d]
            if keys: final_metadata["merged_map_resolutions"][map_type] = sorted(keys)

        # Determine maps present based on successful processing for this asset
        final_metadata["maps_present"] = sorted(list(processed_maps_details_asset.keys()))
        final_metadata["merged_maps"] = sorted(list(merged_maps_details_asset.keys()))

        # Determine shader features based on this asset's maps
        features = set()
        for map_type, details in map_details_asset.items():  # Use map_details_asset passed in
            if map_type in ["SSS", "FUZZ", "MASK"]: features.add(map_type)
            if details.get("derived_from_gloss"): features.add("InvertedGloss")
            res_details = processed_maps_details_asset.get(map_type, {})
            if any(res_info.get("bit_depth") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f"16bit_{map_type}")
        final_metadata["shader_features"] = sorted(list(features))

        # Determine source files in this asset's Extra folder
        # Includes:
        # - Files originally classified as 'Extra' or 'Unrecognised' belonging to this asset.
        # - Files originally classified as 'Ignored' belonging to this asset.
        # - All 'unmatched' files (belonging to no specific asset).
        source_files_in_extra_set = set()
        for category in ['extra', 'ignored']:
            for file_info in filtered_classified_files_asset.get(category, []):
                source_files_in_extra_set.add(str(file_info['source_path']))
        # Add all unmatched files
        for file_path in unmatched_files_paths:
            source_files_in_extra_set.add(str(file_path))
        final_metadata["source_files_in_extra"] = sorted(list(source_files_in_extra_set))

        # Add image stats and map details specific to this asset
        final_metadata["image_stats_1k"] = current_asset_metadata.get("image_stats_1k", {})  # Get from passed metadata
        final_metadata["map_details"] = map_details_asset  # Use map_details_asset passed in
        final_metadata["aspect_ratio_change_string"] = current_asset_metadata.get("aspect_ratio_change_string", "N/A")  # Get from passed metadata

        # Add processing info
        final_metadata["_processing_info"] = {
            "preset_used": preset_name,  # Use passed-in parameter
            "timestamp_utc": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "input_source": str(self.input_path.name),  # Add original input source
        }

        # Sort lists just before writing
        for key in ["maps_present", "merged_maps", "shader_features", "source_files_in_extra"]:
            if key in final_metadata and isinstance(final_metadata[key], list): final_metadata[key].sort()

        # Use asset name in temporary filename to avoid conflicts
        # Use passed-in parameter for the base metadata filename
        temp_metadata_filename = f"{asset_name}_{metadata_filename}"
        output_path = self.temp_dir / temp_metadata_filename
        log.debug(f"Writing metadata for asset '{asset_name}' to temporary file: {output_path}")
        try:
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True)
            log.info(f"Metadata file '{metadata_filename}' generated successfully for asset '{asset_name}'.")
            return output_path  # Return the path to the temporary file
        except Exception as e:
            raise AssetProcessingError(f"Failed to write metadata file {output_path} for asset '{asset_name}': {e}") from e
to write metadata file {output_path} for asset '{asset_name}': {e}") from e - - - def _normalize_aspect_ratio_change(self, original_width, original_height, resized_width, resized_height, decimals=2): - """ - Calculates the aspect ratio change string (e.g., "EVEN", "X133") based on original prototype logic. - Returns the string representation. - """ - if original_width <= 0 or original_height <= 0: - log.warning("Cannot calculate aspect ratio change with zero original dimensions.") - return "InvalidInput" - - # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks) - if resized_width <= 0 or resized_height <= 0: - log.warning("Cannot calculate aspect ratio change with zero resized dimensions.") - return "InvalidResize" - - # Original logic from user feedback - width_change_percentage = ((resized_width - original_width) / original_width) * 100 - height_change_percentage = ((resized_height - original_height) / original_height) * 100 - - normalized_width_change = width_change_percentage / 100 - normalized_height_change = height_change_percentage / 100 - - normalized_width_change = min(max(normalized_width_change + 1, 0), 2) - normalized_height_change = min(max(normalized_height_change + 1, 0), 2) - - # Handle potential zero division if one dimension change is exactly -100% (normalized to 0) - # If both are 0, aspect ratio is maintained. If one is 0, the other dominates. 
- if normalized_width_change == 0 and normalized_height_change == 0: - closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1 - elif normalized_width_change == 0: - closest_value_to_one = abs(normalized_height_change) - elif normalized_height_change == 0: - closest_value_to_one = abs(normalized_width_change) - else: - closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change)) - - # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0 - epsilon = 1e-9 - scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one - - scaled_normalized_width_change = scale_factor * normalized_width_change - scaled_normalized_height_change = scale_factor * normalized_height_change - - output_width = round(scaled_normalized_width_change, decimals) - output_height = round(scaled_normalized_height_change, decimals) - - # Convert to int if exactly 1.0 after rounding - if abs(output_width - 1.0) < epsilon: output_width = 1 - if abs(output_height - 1.0) < epsilon: output_height = 1 - - # Determine output string - if original_width == original_height or abs(output_width - output_height) < epsilon: - output = "EVEN" - elif output_width != 1 and output_height == 1: - output = f"X{str(output_width).replace('.', '')}" - elif output_height != 1 and output_width == 1: - output = f"Y{str(output_height).replace('.', '')}" - else: - # Both changed relative to each other - output = f"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}" - - log.debug(f"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'") - return output - - def _sanitize_filename(self, name: str) -> str: - """Removes or replaces characters invalid for filenames/directory names.""" - # ... (Implementation from Response #51) ... 
- if not isinstance(name, str): name = str(name) - name = re.sub(r'[^\w.\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot - name = re.sub(r'_+', '_', name) - name = name.strip('_') - if not name: name = "invalid_name" - return name - - def _organize_output_files(self, current_asset_name: str, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], temp_metadata_path: Path): - """ - Moves/copies processed files for a specific asset from the temp dir to the final output structure. - - Args: - current_asset_name: The sanitized name of the asset being organized. - processed_maps_details_asset: Details of processed maps for this asset. - merged_maps_details_asset: Details of merged maps for this asset. - filtered_classified_files_asset: Classified files dictionary filtered for this asset. - unmatched_files_paths: List of relative paths for files not matched to any base name. - temp_metadata_path: Path to the temporary metadata file for this asset. 
    def _organize_output_files(self, current_asset_name: str, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], filtered_classified_files_asset: Dict[str, List[Dict]], unmatched_files_paths: List[Path], temp_metadata_path: Path):
        """
        Moves/copies processed files for a specific asset from the temp dir to the final output structure.

        Args:
            current_asset_name: The sanitized name of the asset being organized.
            processed_maps_details_asset: Details of processed maps for this asset.
            merged_maps_details_asset: Details of merged maps for this asset.
            filtered_classified_files_asset: Classified files dictionary filtered for this asset.
            unmatched_files_paths: List of relative paths for files not matched to any base name.
            temp_metadata_path: Path to the temporary metadata file for this asset.

        Raises:
            AssetProcessingError: If the workspace/asset name/supplier is missing or the
                final directory cannot be prepared. Per-file move/copy failures are
                logged but do not abort the whole organization pass.
        """
        if not self.temp_dir or not self.temp_dir.exists(): raise AssetProcessingError("Temp workspace missing.")
        if not current_asset_name or current_asset_name == "UnknownAssetName": raise AssetProcessingError("Asset name missing for organization.")
        supplier_name = self.config.supplier_name  # Get supplier name from config
        if not supplier_name: raise AssetProcessingError("Supplier name missing from config.")

        # Final layout: <output_base>/<supplier>/<asset>/
        supplier_sanitized = self._sanitize_filename(supplier_name)
        asset_name_sanitized = self._sanitize_filename(current_asset_name)  # Already sanitized, but ensure consistency
        final_dir = self.output_base_path / supplier_sanitized / asset_name_sanitized
        log.info(f"Organizing output files for asset '{asset_name_sanitized}' into: {final_dir}")

        try:
            # Handle overwrite logic specifically for this asset's directory
            if final_dir.exists() and self.overwrite:
                log.warning(f"Output directory exists for '{asset_name_sanitized}' and overwrite is True. Removing existing directory: {final_dir}")
                try:
                    shutil.rmtree(final_dir)
                except Exception as rm_err:
                    raise AssetProcessingError(f"Failed to remove existing output directory {final_dir} for asset '{asset_name_sanitized}' during overwrite: {rm_err}") from rm_err
            # Note: Skip check should prevent this if overwrite is False, but mkdir handles exist_ok=True

            final_dir.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            # Re-wrap only non-domain errors; AssetProcessingError propagates unchanged.
            if not isinstance(e, AssetProcessingError):
                raise AssetProcessingError(f"Failed to create final dir {final_dir} for asset '{asset_name_sanitized}': {e}") from e
            else:
                raise

        # --- Helper for moving files ---
        # Keep track of files successfully moved to avoid copying them later as 'unmatched'
        moved_source_files = set()
        def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str):
            # Best-effort move from temp workspace to dest_dir; failures are logged, not raised.
            if not src_rel_path: log.warning(f"Asset '{asset_name_sanitized}': Missing src path for {file_desc}."); return
            source_abs = self.temp_dir / src_rel_path
            # Use the original filename from the source path for the destination
            dest_abs = dest_dir / src_rel_path.name
            try:
                if source_abs.exists():
                    log.debug(f"Asset '{asset_name_sanitized}': Moving {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/")
                    dest_dir.mkdir(parents=True, exist_ok=True)
                    shutil.move(str(source_abs), str(dest_abs))
                    moved_source_files.add(src_rel_path)  # Track successfully moved source files
                else: log.warning(f"Asset '{asset_name_sanitized}': Source file missing for {file_desc}: {source_abs}")
            except Exception as e: log.error(f"Asset '{asset_name_sanitized}': Failed moving {file_desc} '{source_abs.name}': {e}", exc_info=True)

        # --- Helper for copying files (for unmatched extras) ---
        def _safe_copy(src_rel_path: Path | None, dest_dir: Path, file_desc: str):
            # Best-effort copy; skips files already moved or already present at the destination.
            if not src_rel_path: log.warning(f"Asset '{asset_name_sanitized}': Missing src path for {file_desc} copy."); return
            # Skip copying if this source file was already moved (e.g., it was an 'Extra' for this specific asset)
            if src_rel_path in moved_source_files:
                log.debug(f"Asset '{asset_name_sanitized}': Skipping copy of {file_desc} '{src_rel_path.name}' as it was already moved.")
                return
            source_abs = self.temp_dir / src_rel_path
            dest_abs = dest_dir / src_rel_path.name
            try:
                if source_abs.exists():
                    # Avoid copying if the exact destination file already exists (e.g., from a previous asset's copy)
                    if dest_abs.exists():
                        log.debug(f"Asset '{asset_name_sanitized}': Destination file already exists for {file_desc} copy: {dest_abs.name}. Skipping copy.")
                        return
                    log.debug(f"Asset '{asset_name_sanitized}': Copying {file_desc}: {source_abs.name} -> {dest_dir.relative_to(self.output_base_path)}/")
                    dest_dir.mkdir(parents=True, exist_ok=True)
                    shutil.copy2(str(source_abs), str(dest_abs))  # Use copy2 to preserve metadata
                else: log.warning(f"Asset '{asset_name_sanitized}': Source file missing for {file_desc} copy: {source_abs}")
            except Exception as e: log.error(f"Asset '{asset_name_sanitized}': Failed copying {file_desc} '{source_abs.name}': {e}", exc_info=True)

        # --- Move Processed/Merged Maps ---
        for details_dict in [processed_maps_details_asset, merged_maps_details_asset]:
            for map_type, res_dict in details_dict.items():
                if 'error' in res_dict: continue
                for res_key, details in res_dict.items():
                    if isinstance(details, dict) and 'path' in details:
                        _safe_move(details['path'], final_dir, f"{map_type} ({res_key})")

        # --- Move Models specific to this asset ---
        for model_info in filtered_classified_files_asset.get('models', []):
            _safe_move(model_info.get('source_path'), final_dir, "model file")

        # --- Move Metadata File ---
        if temp_metadata_path and temp_metadata_path.exists():
            final_metadata_path = final_dir / self.config.metadata_filename  # Use standard name
            try:
                log.debug(f"Asset '{asset_name_sanitized}': Moving metadata file: {temp_metadata_path.name} -> {final_metadata_path.relative_to(self.output_base_path)}")
                shutil.move(str(temp_metadata_path), str(final_metadata_path))
                # No need to add metadata path to moved_source_files as it's uniquely generated
            except Exception as e:
                log.error(f"Asset '{asset_name_sanitized}': Failed moving metadata file '{temp_metadata_path.name}': {e}", exc_info=True)
        else:
            log.warning(f"Asset '{asset_name_sanitized}': Temporary metadata file path missing or file does not exist: {temp_metadata_path}")

        # --- Handle Extra/Ignored/Unmatched Files ---
        extra_subdir_name = self.config.extra_files_subdir
        extra_dir = final_dir / extra_subdir_name
        if filtered_classified_files_asset.get('extra') or filtered_classified_files_asset.get('ignored') or unmatched_files_paths:
            try:
                extra_dir.mkdir(parents=True, exist_ok=True)

                # Move asset-specific Extra/Ignored files
                files_to_move_extra = filtered_classified_files_asset.get('extra', []) + filtered_classified_files_asset.get('ignored', [])
                if files_to_move_extra:
                    log.debug(f"Asset '{asset_name_sanitized}': Moving {len(files_to_move_extra)} asset-specific files to '{extra_subdir_name}/'...")
                    for file_info in files_to_move_extra:
                        _safe_move(file_info.get('source_path'), extra_dir, f"extra/ignored file ({file_info.get('reason', 'Unknown')})")

                # Copy unmatched files
                if unmatched_files_paths:
                    log.debug(f"Asset '{asset_name_sanitized}': Copying {len(unmatched_files_paths)} unmatched files to '{extra_subdir_name}/'...")
                    for file_path in unmatched_files_paths:
                        _safe_copy(file_path, extra_dir, "unmatched file")

            except Exception as e: log.error(f"Asset '{asset_name_sanitized}': Failed creating/moving/copying to Extra dir {extra_dir}: {e}", exc_info=True)

        log.info(f"Finished organizing output for asset '{asset_name_sanitized}'.")


    def _cleanup_workspace(self):
        """Removes the temporary workspace directory if it exists."""
        # ... (Implementation from Response #45) ...
        # Failure to delete is logged only; cleanup must never abort the pipeline.
        if self.temp_dir and self.temp_dir.exists():
            try:
                log.debug(f"Cleaning up temporary workspace: {self.temp_dir}")
                shutil.rmtree(self.temp_dir)
                self.temp_dir = None
                log.debug("Temporary workspace cleaned up successfully.")
            except Exception as e:
                log.error(f"Failed to remove temporary workspace {self.temp_dir}: {e}", exc_info=True)
- Does not perform full processing or image loading. - - Returns: - tuple[str | None, str | None, dict[str, str] | None]: - (sanitized_supplier_name, sanitized_asset_name, file_predictions_dict) - where file_predictions_dict maps input filename -> predicted output filename. - Returns None if prediction fails critically. - """ - log.debug(f"Predicting output structure and filenames for: {self.input_path.name}") - try: - # 1. Get Supplier Name - supplier_name = self.config.supplier_name - if not supplier_name: - log.warning("Supplier name not found in configuration during prediction.") - return None - - # 2. List Input Filenames/Stems - candidate_stems = set() # Use set for unique stems - filenames = [] - if self.input_path.is_file() and self.input_path.suffix.lower() == '.zip': - try: - with zipfile.ZipFile(self.input_path, 'r') as zip_ref: - # Get only filenames, ignore directories - filenames = [Path(f).name for f in zip_ref.namelist() if not f.endswith('/')] - except zipfile.BadZipFile: - log.error(f"Bad ZIP file during prediction: {self.input_path.name}") - return None - except Exception as zip_err: - log.error(f"Error reading ZIP file list during prediction for {self.input_path.name}: {zip_err}") - return None # Cannot proceed if we can't list files - elif self.input_path.is_dir(): - try: - for item in self.input_path.iterdir(): - if item.is_file(): # Only consider files directly in the folder for prediction simplicity - filenames.append(item.name) - # Note: Not walking subdirs for prediction to keep it fast - except Exception as dir_err: - log.error(f"Error listing directory contents during prediction for {self.input_path.name}: {dir_err}") - return None - - if not filenames: - log.warning(f"No files found in input for prediction: {self.input_path.name}") - return None # Return None if no files found - - - # 3. 
Lightweight Classification for Stems and Potential Maps using rules - map_type_mapping_rules = self._get_rule_with_fallback('map_type_mapping', default=[]) - model_patterns = self._get_rule_with_fallback('model_file_patterns', default=[]) # Use model_file_patterns from rules/config - # Get naming rules using fallback logic for separator - naming_rules = self._get_rule_with_fallback('source_naming_rules', default={}) - separator = naming_rules.get('separator', self.config.source_naming_separator) # Fallback to config if not in rules - - processed_filenames = set() # Track full filenames processed - potential_map_files = {} # Store fname -> potential map_type - - for fname in filenames: - if fname in processed_filenames: continue - - fstem = Path(fname).stem - fstem_lower = fstem.lower() - name_parts = fstem_lower.split(separator) - - - # Check map rules first (using rules/config) - map_matched = False - for mapping_rule in map_type_mapping_rules: - # Ensure the rule has the expected structure (list of keywords, target type) - if not isinstance(mapping_rule, dict) or 'keywords' not in mapping_rule or 'target_type' not in mapping_rule: - log.warning(f"Prediction: Skipping invalid map type mapping rule: {mapping_rule}") - continue - - source_keywords = mapping_rule.get('keywords', []) - standard_map_type = mapping_rule.get('target_type') - - if standard_map_type not in self.config.standard_map_types: continue # Check against standard types from config - - for keyword in source_keywords: - kw_lower = keyword.lower().strip('*') - # Check if the keyword exists as a whole part in the split stem - if any(part == kw_lower for part in name_parts): - candidate_stems.add(fstem) # Add unique stem - potential_map_files[fname] = standard_map_type # Store potential type - processed_filenames.add(fname) - map_matched = True - break # Found keyword match for this rule - if map_matched: break # Found a rule match for this file - if map_matched: continue # Move to next filename if 
identified as map - - # Check model patterns if not a map - for pattern in model_patterns: - if fnmatch(fname.lower(), pattern.lower()): - candidate_stems.add(fstem) # Still add stem for base name determination - processed_filenames.add(fname) - # Don't add models to potential_map_files - break # Found model match - - # Note: Files matching neither maps nor models are ignored for prediction details - - log.debug(f"[PREDICTION] Potential map files identified: {potential_map_files}") # DEBUG PREDICTION - candidate_stems_list = list(candidate_stems) # Convert set to list for commonprefix - log.debug(f"[PREDICTION] Candidate stems identified: {candidate_stems_list}") # DEBUG PREDICTION - if not candidate_stems_list: - log.warning(f"Prediction: No relevant map/model stems found in {self.input_path.name}. Using input name as fallback.") - # Fallback: Use the input path's name itself if no stems found - base_name_fallback = self.input_path.stem if self.input_path.is_file() else self.input_path.name - determined_base_name = base_name_fallback - else: - - # 4. 
Replicate _determine_base_metadata logic for base name using rules - determined_base_name = "UnknownAssetName" - # Use the separator and indices_dict retrieved earlier using fallback logic - base_index_raw = indices_dict.get('base_name') - log.debug(f"[PREDICTION] Base Name Determination: Separator='{separator}', Indices Dict={indices_dict}, Raw Base Index='{base_index_raw}' (from rules/config)") # DEBUG PREDICTION - - base_index = None - if base_index_raw is not None: - try: - base_index = int(base_index_raw) # Use explicit conversion like in main logic - except (ValueError, TypeError): - log.warning(f"[PREDICTION] Could not convert base_name index '{base_index_raw}' to integer.") - - if isinstance(base_index, int): - potential_base_names = set() - for stem in candidate_stems_list: # Iterate over the list - parts = stem.split(separator) - log.debug(f"[PREDICTION] Processing stem: '{stem}', Parts: {parts}") # DEBUG PREDICTION - if len(parts) > base_index: - extracted_name = parts[base_index] - potential_base_names.add(extracted_name) - log.debug(f"[PREDICTION] Extracted potential base name: '{extracted_name}' using index {base_index}") # DEBUG PREDICTION - else: - log.debug(f"[PREDICTION] Stem '{stem}' has too few parts ({len(parts)}) for index {base_index}.") # DEBUG PREDICTION - if len(potential_base_names) == 1: - determined_base_name = potential_base_names.pop() - log.debug(f"[PREDICTION] Determined base name '{determined_base_name}' from structured parts (index {base_index}).") # DEBUG PREDICTION - elif len(potential_base_names) > 1: - log.debug(f"[PREDICTION] Multiple potential base names found from index {base_index}: {potential_base_names}. 
Falling back to common prefix.") # DEBUG PREDICTION - determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here - determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') - # else: Use common prefix below - - if determined_base_name == "UnknownAssetName" or not determined_base_name: - log.debug("[PREDICTION] Falling back to common prefix for base name determination (structured parts failed or no index).") # DEBUG PREDICTION - determined_base_name = os.path.commonprefix(candidate_stems_list) # Use list here - determined_base_name = determined_base_name.strip(separator + ' _').rstrip(separator + ' _') - - # 5. Sanitize Names - final_base_name = self._sanitize_filename(determined_base_name or "UnknownAssetName") - log.debug(f"[PREDICTION] Final determined base name for prediction: '{final_base_name}'") # DEBUG PREDICTION - final_supplier_name = self._sanitize_filename(supplier_name) - - # 6. Predict Output Filenames using rules - target_pattern = self._get_rule_with_fallback('target_filename_pattern', default=self.config.target_filename_pattern) - # Get resolutions using fallback - resolutions = self._get_rule_with_fallback('image_resolutions', default=self.config.image_resolutions) - highest_res_key = "Res?" # Placeholder resolution for prediction - if resolutions: - highest_res_key = max(resolutions, key=resolutions.get) - - for input_fname, map_type in potential_map_files.items(): - # Assume PNG for prediction, extension might change based on bit depth rules later - # but this gives a good idea of the renaming. - # A more complex prediction could check bit depth rules. 
- predicted_ext = "png" # Simple assumption for preview - try: - predicted_fname = target_pattern.format( - base_name=final_base_name, - map_type=map_type, - resolution=highest_res_key, # Use placeholder resolution - ext=predicted_ext - ) - file_predictions[input_fname] = predicted_fname - except KeyError as fmt_err: - log.warning(f"Prediction: Error formatting filename for {input_fname} (KeyError: {fmt_err}). Skipping file prediction.") - file_predictions[input_fname] = "[Filename Format Error]" - - - log.debug(f"Predicted structure: Supplier='{final_supplier_name}', Asset='{final_base_name}', Files={len(file_predictions)}") - return final_supplier_name, final_base_name, file_predictions - - except Exception as e: - log.error(f"Error during output structure prediction for {self.input_path.name}: {e}", exc_info=True) - return None - - - # --- New Detailed Prediction Method --- - def get_detailed_file_predictions(self, rules: SourceRule) -> list[dict] | None: - """ - Performs extraction and classification to provide a detailed list of all - files found within the input and their predicted status/output name, - handling multiple potential assets within the input. - - Returns: - list[dict] | None: A list of dictionaries, each representing a file: - {'original_path': str, - 'predicted_asset_name': str | None, - 'predicted_output_name': str | None, - 'status': str, - 'details': str | None} - Returns None if a critical error occurs during setup/classification. 
- """ - log.info(f"Getting detailed file predictions for input: {self.input_path.name}") - results = [] - all_files_in_workspace = [] # Keep track of all files found - - try: - # --- Perform necessary setup and classification --- - - self._setup_workspace() - self._extract_input() - # Run classification - this populates self.classified_files, pass rules - self._inventory_and_classify_files(rules) - - - # --- Determine distinct assets and file mapping --- - # This uses the results from _inventory_and_classify_files, pass rules - distinct_base_names, file_to_base_name_map = self._determine_base_metadata(rules) - log.debug(f"Prediction: Determined base names: {distinct_base_names}") - log.debug(f"Prediction: File to base name map: { {str(k):v for k,v in file_to_base_name_map.items()} }") - - # --- Apply Suffixes for Prediction Preview --- - # This logic is similar to the main process method but applied to the classified_files list - log.debug("Prediction: Applying map type suffixes for preview...") - grouped_classified_maps = defaultdict(list) - for map_info in self.classified_files.get('maps', []): - # Group by the base map type - grouped_classified_maps[map_info['map_type']].append(map_info) - - # Create a new list for maps with updated types for prediction - maps_with_predicted_types = [] - for base_map_type, maps_in_group in grouped_classified_maps.items(): - respect_variants = base_map_type in self.config.respect_variant_map_types - # Sort maps within the group for consistent suffixing (using the same key as in _inventory_and_classify_files) - maps_in_group.sort(key=lambda c: ( - c.get('preset_rule_index', 9999), - c.get('keyword_index_in_rule', 9999) if 'keyword_index_in_rule' in c else 9999, # Handle potential missing key - str(c['source_path']) - )) - - for i, map_info in enumerate(maps_in_group): - predicted_map_type = f"{base_map_type}-{i + 1}" if respect_variants else base_map_type - # Create a copy to avoid modifying the original classified_files list in 
place - map_info_copy = map_info.copy() - map_info_copy['predicted_map_type'] = predicted_map_type # Store the predicted type - maps_with_predicted_types.append(map_info_copy) - - # Replace the original maps list with the one containing predicted types for the next step - # Note: This is a temporary list for prediction generation, not modifying the instance's classified_files permanently - # self.classified_files["maps"] = maps_with_predicted_types # Avoid modifying instance state - - - # --- Prepare for filename prediction using rules --- - target_pattern = self._get_rule_with_fallback(rules, 'target_filename_pattern', default=self.config.target_filename_pattern) - # Get resolutions using fallback - resolutions = self._get_rule_with_fallback(rules, 'image_resolutions', default=self.config.image_resolutions) - highest_res_key = "Res?" # Placeholder resolution for prediction - if resolutions: - highest_res_key = max(resolutions, key=resolutions.get) - - # --- Process all classified files (including maps with predicted types) --- - all_classified_files_with_category = [] - # Add maps with predicted types first - for map_info in maps_with_predicted_types: - map_info['category'] = 'maps' # Ensure category is set - all_classified_files_with_category.append(map_info) - if 'source_path' in map_info: - all_files_in_workspace.append(map_info['source_path']) - - # Add other categories (models, extra, ignored) - for category in ['models', 'extra', 'ignored']: - for file_info in self.classified_files.get(category, []): - file_info['category'] = category - all_classified_files_with_category.append(file_info) - if 'source_path' in file_info: - all_files_in_workspace.append(file_info['source_path']) - - - # --- Generate results for each file --- - processed_paths = set() # Track paths already added to results - for file_info in all_classified_files_with_category: - original_path = file_info.get("source_path") - if not original_path or original_path in processed_paths: - continue 
# Skip if path missing or already processed - - original_path_str = str(original_path) - processed_paths.add(original_path) # Mark as processed - - # Determine predicted asset name and status - predicted_asset_name = file_to_base_name_map.get(original_path) # Can be None - category = file_info['category'] # maps, models, extra, ignored - reason = file_info.get('reason') # Specific reason for extra/ignored - status = "Unknown" - details = None - predicted_output_name = None # Usually original name, except for maps - - if category == "maps": - status = "Mapped" - # Use the predicted_map_type for the preview display - map_type_for_preview = file_info.get("predicted_map_type", file_info.get("map_type", "UnknownType")) - details = f"[{map_type_for_preview}]" - if file_info.get("is_16bit_source"): details += " (16-bit)" - # Predict map output name using its determined asset name and predicted map type - if predicted_asset_name: - try: - predicted_ext = "png" # Assume PNG for prediction simplicity - predicted_output_name = target_pattern.format( - base_name=predicted_asset_name, - map_type=map_type_for_preview, # Use the predicted type here - resolution=highest_res_key, - ext=predicted_ext - ) - except KeyError as fmt_err: - log.warning(f"Prediction format error for map {original_path_str}: {fmt_err}") - predicted_output_name = "[Format Error]" - details += f" (Format Key Error: {fmt_err})" - except Exception as pred_err: - log.warning(f"Prediction error for map {original_path_str}: {pred_err}") - predicted_output_name = "[Prediction Error]" - details += f" (Error: {pred_err})" - else: - # Should not happen for maps if _determine_base_metadata worked correctly - log.warning(f"Map file '{original_path_str}' has no predicted asset name.") - predicted_output_name = "[No Asset Name]" - - elif category == "models": - status = "Model" - details = "[Model]" - predicted_output_name = original_path.name # Models keep original name - - elif category == "ignored": - status = 
"Ignored" - details = f"Ignored ({reason or 'Unknown reason'})" - predicted_output_name = None # Ignored files have no output - - elif category == "extra": - if predicted_asset_name is None: - # This is an "Unmatched Extra" file (includes Unrecognised and explicit Extras without a base name) - status = "Unmatched Extra" - details = f"[Unmatched Extra ({reason or 'N/A'})]" # Include original reason if available - elif reason == 'Unrecognised': - # Unrecognised but belongs to a specific asset - status = "Unrecognised" - details = "[Unrecognised]" - else: - # Explicitly matched an 'extra' pattern and belongs to an asset - status = "Extra" - details = f"Extra ({reason})" - predicted_output_name = original_path.name # Extra files keep original name - - else: - log.warning(f"Unknown category '{category}' encountered during prediction for {original_path_str}") - status = "Error" - details = f"[Unknown Category: {category}]" - predicted_output_name = original_path.name - - - results.append({ - "original_path": original_path_str, - "predicted_asset_name": predicted_asset_name, # May be None - "predicted_output_name": predicted_output_name, - "status": status, - "details": details - }) - - # Add any files found during walk but missed by classification (should be rare) - # These are likely unmatched as well. - for file_path in all_files_in_workspace: - if file_path not in processed_paths: - log.warning(f"File found in workspace but not classified: {file_path}. Adding as Unmatched Extra.") - results.append({ - "original_path": str(file_path), - "predicted_asset_name": None, # Explicitly None as it wasn't mapped - "predicted_output_name": file_path.name, - "status": "Unmatched Extra", - "details": "[Missed Classification]" - }) - - - log.info(f"Detailed prediction complete for input '{self.input_path.name}'. 
Found {len(results)} files.") - # Sort results by original path for consistent display - results.sort(key=lambda x: x.get("original_path", "")) - return results - - except (AssetProcessingError, ConfigurationError, Exception) as e: - log.error(f"Critical error during detailed prediction for {self.input_path.name}: {e}", exc_info=True) - return None # Indicate critical failure - finally: - # Ensure cleanup always happens - self._cleanup_workspace() - - -# --- End of AssetProcessor Class --- \ No newline at end of file diff --git a/config/app_settings.json b/config/app_settings.json index 0b46dbe..4287481 100644 --- a/config/app_settings.json +++ b/config/app_settings.json @@ -1,7 +1,7 @@ { "ASSET_TYPE_DEFINITIONS": { "Surface": { - "description": "Standard PBR material set for a surface.", + "description": "A single Standard PBR material set for a surface.", "color": "#1f3e5d", "examples": [ "WoodFloor01", @@ -10,7 +10,7 @@ }, "Model": { "description": "A set that contains models, can include PBR textureset", - "color": "#FFA500", + "color": "#b67300", "examples": [ "Chair.fbx", "Character.obj" @@ -18,7 +18,7 @@ }, "Decal": { "description": "A alphamasked textureset", - "color": "#90EE90", + "color": "#68ac68", "examples": [ "Graffiti01", "LeakStain03", @@ -27,7 +27,7 @@ }, "Atlas": { "description": "A texture sheet containing multiple smaller textures.", - "color": "#FFC0CB", + "color": "#955b8b", "examples": [ "FoliageAtlas", "UITextureSheet" @@ -35,7 +35,7 @@ }, "UtilityMap": { "description": "A useful image-asset consisting of only a single texture. 
Therefor each Utilitymap can only contain a single item.", - "color": "#D3D3D3", + "color": "#706b87", "examples": [ "FlowMap", "CurvatureMap", @@ -48,7 +48,7 @@ "FILE_TYPE_DEFINITIONS": { "MAP_COL": { "description": "Color/Albedo Map", - "color": "#3d3021", + "color": "#ffaa00", "examples": [ "_col.", "_basecolor.", @@ -60,7 +60,7 @@ }, "MAP_NRM": { "description": "Normal Map", - "color": "#23263d", + "color": "#cca2f1", "examples": [ "_nrm.", "_normal." @@ -70,7 +70,7 @@ }, "MAP_METAL": { "description": "Metalness Map", - "color": "#1f1f1f", + "color": "#dcf4f2", "examples": [ "_metal.", "_met." @@ -80,7 +80,7 @@ }, "MAP_ROUGH": { "description": "Roughness Map", - "color": "#3d1f11", + "color": "#bfd6bf", "examples": [ "_rough.", "_rgh.", @@ -91,7 +91,7 @@ }, "MAP_AO": { "description": "Ambient Occlusion Map", - "color": "#3d3d3d", + "color": "#e3c7c7", "examples": [ "_ao.", "_ambientocclusion." @@ -101,7 +101,7 @@ }, "MAP_DISP": { "description": "Displacement/Height Map", - "color": "#35343d", + "color": "#c6ddd5", "examples": [ "_disp.", "_height." @@ -111,7 +111,7 @@ }, "MAP_REFL": { "description": "Reflection/Specular Map", - "color": "#363d3d", + "color": "#c2c2b9", "examples": [ "_refl.", "_specular." @@ -121,7 +121,7 @@ }, "MAP_SSS": { "description": "Subsurface Scattering Map", - "color": "#3d342c", + "color": "#a0d394", "examples": [ "_sss.", "_subsurface." @@ -131,7 +131,7 @@ }, "MAP_FUZZ": { "description": "Fuzz/Sheen Map", - "color": "#3d261d", + "color": "#a2d1da", "examples": [ "_fuzz.", "_sheen." @@ -141,7 +141,7 @@ }, "MAP_IDMAP": { "description": "ID Map (for masking)", - "color": "#3d2121", + "color": "#ca8fb4", "examples": [ "_id.", "_matid." @@ -151,7 +151,7 @@ }, "MAP_MASK": { "description": "Generic Mask Map", - "color": "#3d3d3d", + "color": "#c6e2bf", "examples": [ "_mask." 
], @@ -160,7 +160,7 @@ }, "MAP_IMPERFECTION": { "description": "Imperfection Map (scratches, dust)", - "color": "#3d3a24", + "color": "#e6d1a6", "examples": [ "_imp.", "_imperfection.", @@ -175,7 +175,7 @@ }, "MODEL": { "description": "3D Model File", - "color": "#3d2700", + "color": "#3db2bd", "examples": [ ".fbx", ".obj" @@ -185,7 +185,7 @@ }, "EXTRA": { "description": "asset previews or metadata", - "color": "#2f363d", + "color": "#8c8c8c", "examples": [ ".txt", ".zip", @@ -200,7 +200,7 @@ }, "FILE_IGNORE": { "description": "File to be ignored", - "color": "#243d3d", + "color": "#673d35", "examples": [ "Thumbs.db", ".DS_Store" @@ -234,7 +234,7 @@ "BLENDER_EXECUTABLE_PATH": "C:/Program Files/Blender Foundation/Blender 4.4/blender.exe", "PNG_COMPRESSION_LEVEL": 6, "JPG_QUALITY": 98, - "RESOLUTION_THRESHOLD_FOR_JPG": 4096, + "RESOLUTION_THRESHOLD_FOR_JPG": 999999, "IMAGE_RESOLUTIONS": { "8K": 8192, "4K": 4096, @@ -263,64 +263,315 @@ ], "CALCULATE_STATS_RESOLUTION": "1K", "DEFAULT_ASSET_CATEGORY": "Surface", - "TEMP_DIR_PREFIX": "_PROCESS_ASSET_" -, + "TEMP_DIR_PREFIX": "_PROCESS_ASSET_", "llm_predictor_examples": [ - { - "input": "MessyTextures/Concrete_Damage_Set/concrete_col.png\nMessyTextures/Concrete_Damage_Set/concrete_N.png\nMessyTextures/Concrete_Damage_Set/concrete_rough.jpg\nMessyTextures/Concrete_Damage_Set/height_map_concrete.tif\nMessyTextures/Concrete_Damage_Set/Thumbs.db\nMessyTextures/Fabric_Pattern/pattern_01_diffuse.tga\nMessyTextures/Fabric_Pattern/pattern_01_ao.png\nMessyTextures/Fabric_Pattern/pattern_01_normal.png\nMessyTextures/Fabric_Pattern/notes.txt\nMessyTextures/Fabric_Pattern/variant_blue_diffuse.tga", - "output": { - "predicted_assets": [ - { - "suggested_asset_name": "Concrete_Damage_Set", - "predicted_asset_type": "Surface", - "files": [ - {"file_path": "MessyTextures/Concrete_Damage_Set/concrete_col.png", "predicted_file_type": "MAP_COL"}, - {"file_path": "MessyTextures/Concrete_Damage_Set/concrete_N.png", "predicted_file_type": 
"MAP_NRM"}, - {"file_path": "MessyTextures/Concrete_Damage_Set/concrete_rough.jpg", "predicted_file_type": "MAP_ROUGH"}, - {"file_path": "MessyTextures/Concrete_Damage_Set/height_map_concrete.tif", "predicted_file_type": "MAP_DISP"}, - {"file_path": "MessyTextures/Concrete_Damage_Set/Thumbs.db", "predicted_file_type": "FILE_IGNORE"} - ] - }, - { - "suggested_asset_name": "Fabric_Pattern_01", - "predicted_asset_type": "Surface", - "files": [ - {"file_path": "MessyTextures/Fabric_Pattern/pattern_01_diffuse.tga", "predicted_file_type": "MAP_COL"}, - {"file_path": "MessyTextures/Fabric_Pattern/pattern_01_ao.png", "predicted_file_type": "MAP_AO"}, - {"file_path": "MessyTextures/Fabric_Pattern/pattern_01_normal.png", "predicted_file_type": "MAP_NRM"}, - {"file_path": "MessyTextures/Fabric_Pattern/variant_blue_diffuse.tga", "predicted_file_type": "MAP_COL"}, - {"file_path": "MessyTextures/Fabric_Pattern/variant_blue_flat.tga", "predicted_file_type": "EXTRA"}, - {"file_path": "MessyTextures/Fabric_Pattern/notes.txt", "predicted_file_type": "EXTRA"} - ] + { + "input": "MessyTextures/Concrete_Damage_Set/concrete_col.png\nMessyTextures/Concrete_Damage_Set/concrete_N.png\nMessyTextures/Concrete_Damage_Set/concrete_rough.jpg\nMessyTextures/Concrete_Damage_Set/height_map_concrete.tif\nMessyTextures/Concrete_Damage_Set/Thumbs.db\nMessyTextures/Fabric_Pattern/pattern_01_diffuse.tga\nMessyTextures/Fabric_Pattern/pattern_01_ao.png\nMessyTextures/Fabric_Pattern/pattern_01_normal.png\nMessyTextures/Fabric_Pattern/notes.txt\nMessyTextures/Fabric_Pattern/variant_blue_diffuse.tga\nMessyTextures/Fabric_Pattern/fabric_flat.jpg", + "output": { + "predicted_assets": [ + { + "suggested_asset_name": "Concrete_Damage_01", + "predicted_asset_type": "Surface", + "files": [ + { + "file_path": "MessyTextures/Concrete_Damage_Set/concrete_col.png", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "MessyTextures/Concrete_Damage_Set/concrete_N.png", + "predicted_file_type": "MAP_NRM" + }, + { 
+ "file_path": "MessyTextures/Concrete_Damage_Set/concrete_rough.jpg", + "predicted_file_type": "MAP_ROUGH" + }, + { + "file_path": "MessyTextures/Concrete_Damage_Set/height_map_concrete.tif", + "predicted_file_type": "MAP_DISP" + }, + { + "file_path": "MessyTextures/Concrete_Damage_Set/Thumbs.db", + "predicted_file_type": "FILE_IGNORE" + } + ] + }, + { + "suggested_asset_name": "Fabric_Pattern_01", + "predicted_asset_type": "Surface", + "files": [ + { + "file_path": "MessyTextures/Fabric_Pattern/pattern_01_diffuse.tga", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "MessyTextures/Fabric_Pattern/pattern_01_ao.png", + "predicted_file_type": "MAP_AO" + }, + { + "file_path": "MessyTextures/Fabric_Pattern/pattern_01_normal.png", + "predicted_file_type": "MAP_NRM" + }, + { + "file_path": "MessyTextures/Fabric_Pattern/variant_blue_diffuse.tga", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "MessyTextures/Fabric_Pattern/fabric_flat.jpg", + "predicted_file_type": "EXTRA" + }, + { + "file_path": "MessyTextures/Fabric_Pattern/notes.txt", + "predicted_file_type": "EXTRA" + } + ] + } + ] } - ] - } - }, - { - "input": "SciFi_Drone/Drone_Model.fbx\nSciFi_Drone/Textures/Drone_BaseColor.png\nSciFi_Drone/Textures/Drone_Metallic.png\nSciFi_Drone/Textures/Drone_Roughness.png\nSciFi_Drone/Textures/Drone_Normal.png\nSciFi_Drone/Textures/Drone_Emissive.jpg\nSciFi_Drone/ReferenceImages/concept.jpg", - "output": { - "predicted_assets": [ - { - "suggested_asset_name": "SciFi_Drone", - "predicted_asset_type": "Model", - "files": [ - {"file_path": "SciFi_Drone/Drone_Model.fbx", "predicted_file_type": "MODEL"}, - {"file_path": "SciFi_Drone/Textures/Drone_BaseColor.png", "predicted_file_type": "MAP_COL"}, - {"file_path": "SciFi_Drone/Textures/Drone_Metallic.png", "predicted_file_type": "MAP_METAL"}, - {"file_path": "SciFi_Drone/Textures/Drone_Roughness.png", "predicted_file_type": "MAP_ROUGH"}, - {"file_path": "SciFi_Drone/Textures/Drone_Normal.png", 
"predicted_file_type": "MAP_NRM"}, - {"file_path": "SciFi_Drone/Textures/Drone_Emissive.jpg", "predicted_file_type": "EXTRA"}, - {"file_path": "SciFi_Drone/ReferenceImages/concept.jpg", "predicted_file_type": "EXTRA"} - ] + }, + { + "input": "SciFi_Drone/Drone_Model.fbx\nSciFi_Drone/Textures/Drone_BaseColor.png\nSciFi_Drone/Textures/Drone_Metallic.png\nSciFi_Drone/Textures/Drone_Roughness.png\nSciFi_Drone/Textures/Drone_Normal.png\nSciFi_Drone/Textures/Drone_Emissive.jpg\nSciFi_Drone/ReferenceImages/concept.jpg", + "output": { + "predicted_assets": [ + { + "suggested_asset_name": "SciFi_Drone", + "predicted_asset_type": "Model", + "files": [ + { + "file_path": "SciFi_Drone/Drone_Model.fbx", + "predicted_file_type": "MODEL" + }, + { + "file_path": "SciFi_Drone/Textures/Drone_BaseColor.png", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "SciFi_Drone/Textures/Drone_Metallic.png", + "predicted_file_type": "MAP_METAL" + }, + { + "file_path": "SciFi_Drone/Textures/Drone_Roughness.png", + "predicted_file_type": "MAP_ROUGH" + }, + { + "file_path": "SciFi_Drone/Textures/Drone_Normal.png", + "predicted_file_type": "MAP_NRM" + }, + { + "file_path": "SciFi_Drone/Textures/Drone_Emissive.jpg", + "predicted_file_type": "EXTRA" + }, + { + "file_path": "SciFi_Drone/ReferenceImages/concept.jpg", + "predicted_file_type": "EXTRA" + } + ] + } + ] + } + }, + { + "input": "21_hairs_deposits.tif\n22_hairs_fabric.tif\n23_hairs_fibres.tif\n24_hairs_fibres.tif\n25_bonus_isolatedFingerprints.tif\n26_bonus_isolatedPalmprint.tif\n27_metal_aluminum.tif\n28_metal_castIron.tif\n29_scratcehes_deposits_shapes.tif\n30_scratches_deposits.tif", + "output": { + "predicted_assets": [ + { + "suggested_asset_name": "21-Hairs-Deposits", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "21_hairs_deposits.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "22-Hairs-Fabric", + "predicted_asset_type": "UtilityMap", + "files": [ + { + 
"file_path": "22_hairs_fabric.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "23-Hairs-Deposits", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "23_hairs_fibres.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "24-Hairs-Fibres", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "24_hairs_fibres.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "27-MetalAluminium", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "27_metal_aluminum.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "28-MetalCastiron", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "28_metal_castIron.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "29-Scratches-Deposits-Shapes", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "29_scratcehes_deposits_shapes.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "30-Scrathes-Deposits", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "30_scratches_deposits.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "Bonus-IsolatedFingerprints", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "25_bonus_isolatedFingerprints.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + }, + { + "suggested_asset_name": "Bonus-IsolatedPalmprint", + "predicted_asset_type": "UtilityMap", + "files": [ + { + "file_path": "26_bonus_isolatedPalmprint.tif", + "predicted_file_type": "MAP_IMPERFECTION" + } + ] + } + ] + } + }, + { + "input": 
"Part1/TextureSupply_Boards001_A_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_A_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_B_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_B_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_C_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_C_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_D_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_D_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_E_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_E_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_F_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_F_28x300cm-Normal.jpg", + "output": { + "predicted_assets": [ + { + "suggested_asset_name": "Boards001_A", + "predicted_asset_type": "Surface", + "files": [ + { + "file_path": "Part1/TextureSupply_Boards001_A_28x300cm-Albedo.jpg", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "Part1/TextureSupply_Boards001_A_28x300cm-Normal.jpg", + "predicted_file_type": "MAP_NRM" + } + ] + }, + { + "suggested_asset_name": "Boards001_B", + "predicted_asset_type": "Surface", + "files": [ + { + "file_path": "Part1/TextureSupply_Boards001_B_28x300cm-Albedo.jpg", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "Part1/TextureSupply_Boards001_B_28x300cm-Normal.jpg", + "predicted_file_type": "MAP_NRM" + } + ] + }, + { + "suggested_asset_name": "Boards001_C", + "predicted_asset_type": "Surface", + "files": [ + { + "file_path": "Part1/TextureSupply_Boards001_C_28x300cm-Albedo.jpg", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "Part1/TextureSupply_Boards001_C_28x300cm-Normal.jpg", + "predicted_file_type": "MAP_NRM" + } + ] + }, + { + "suggested_asset_name": "Boards001_D", + "predicted_asset_type": "Surface", + "files": [ + { + "file_path": "Part1/TextureSupply_Boards001_D_28x300cm-Albedo.jpg", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "Part1/TextureSupply_Boards001_D_28x300cm-Normal.jpg", + "predicted_file_type": "MAP_NRM" + } + ] + }, 
+ { + "suggested_asset_name": "Boards001_E", + "predicted_asset_type": "Surface", + "files": [ + { + "file_path": "Part1/TextureSupply_Boards001_E_28x300cm-Albedo.jpg", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "Part1/TextureSupply_Boards001_E_28x300cm-Normal.jpg", + "predicted_file_type": "MAP_NRM" + } + ] + }, + { + "suggested_asset_name": "Boards001_F", + "predicted_asset_type": "Surface", + "files": [ + { + "file_path": "Part1/TextureSupply_Boards001_F_28x300cm-Albedo.jpg", + "predicted_file_type": "MAP_COL" + }, + { + "file_path": "Part1/TextureSupply_Boards001_F_28x300cm-Normal.jpg", + "predicted_file_type": "MAP_NRM" + } + ] + } + ] } - ] } - } ], - "llm_endpoint_url": "http://100.65.14.122:1234/v1/chat/completions", - "llm_api_key": "", - "llm_model_name": "local-model", - "llm_temperature": 0.5, - "llm_request_timeout": 120, - "llm_predictor_prompt": "You are an expert asset classification system. Your task is to analyze a list of file paths from a directory, identify a pattern and then group them into logical assets, assigning an asset type and file type to each file.\\n\\n**Definitions:**\\n\\n* **Asset Types:** These define the overall category of an asset. Use one of the following keys for `predicted_asset_type`:\\n ```json\\n {ASSET_TYPE_DEFINITIONS}\\n ```\\n\\n* **File Types:** These define the specific purpose of each file. Use one of the following keys for `predicted_file_type`:\\n ```json\\n {FILE_TYPE_DEFINITIONS}\\n ```\\n\\n**Task:**\\n\\nGiven the following file list:\\n\\n```text\\n{FILE_LIST}\\n```\\n\\nAnalyze the file paths and names. Group the files into logical assets. For each asset, determine the most appropriate `predicted_asset_type` from the definitions above. For each file within an asset, determine the most appropriate `predicted_file_type` from the definitions above. Files that should be ignored (like Thumbs.db) should use the `FILE_IGNORE` type. 
Files that don't fit a standard map type but belong to the asset should use `EXTRA`.\\n\\n**Output Format:**\\n\\nYour response MUST be ONLY a single, perfectly valid JSON object adhering strictly to the structure below. Do NOT include any text before or after the JSON object. Ensure all strings are correctly quoted and escaped, and there are no trailing commas.\\n\\nCRITICAL: Ensure the output is strictly valid JSON parsable by standard libraries. This means NO comments (like // or /* */), NO trailing commas, and correct quoting/escaping of all strings.\\n\\n```json\\n{\\n \"predicted_assets\": [\\n {\\n \"suggested_asset_name\": \"string\", // Your best guess for a concise asset name based on file paths/names\\n \"predicted_asset_type\": \"string\", // Key from Asset Types definitions\\n \"files\": [\\n {\\n \"file_path\": \"string\", // Exact relative path from the input list\\n \"predicted_file_type\": \"string\" // Key from File Types definitions\\n },\\n // ... more files\\n ]\\n },\\n // ... more assets\\n ]\\n}\\n```\\n\\n**Examples:**\\n\\nHere are examples of input file lists and the desired JSON output:\\n\\n```json\\n[\\n {EXAMPLE_INPUT_OUTPUT_PAIRS}\\n]\\n```\\n\\nNow, process the provided file list and generate the JSON output." + "llm_endpoint_url": "http://100.65.14.122:1234/v1/chat/completions", + "llm_api_key": "", + "llm_model_name": "", + "llm_temperature": 0.5, + "llm_request_timeout": 120, + "llm_predictor_prompt": "You are an expert asset classification system. Your task is to analyze a list of file paths from a directory, identify patterns based on directory structure and filenames, and then group related files into logical assets. For each grouped asset, you must suggest a concise asset name, determine the overall asset type, and for each file within that asset, assign its specific file type.\n\nDefinitions:\n\nAsset Types: These define the overall category of an asset. 
Use one of the following keys for predicted_asset_type:\njson\n{ASSET_TYPE_DEFINITIONS}\n\n\nFile Types: These define the specific purpose of each file. Use one of the following keys for predicted_file_type:\njson\n{FILE_TYPE_DEFINITIONS}\n\n\nCore Task & Grouping Logic:\n\n1. Analyze Input: Examine the provided FILE_LIST. Pay close attention to directory paths and filenames (including prefixes, suffixes, separators like underscores or hyphens, and file extensions).\n2. Identify Potential Assets: Look for patterns that indicate files belong together:\n - Common Base Name: Files sharing a significant common prefix before map-type identifiers (e.g., Concrete_Damage_Set/concrete_ followed by col.png, N.png, rough.jpg).\n - Directory Grouping: Files located within the same immediate directory are often related, especially if their names follow a pattern (e.g., all files directly under SciFi_Drone/Textures/).\n - Model Association: If a MODEL file type (like .fbx, .obj) is present, group it with texture files that share its base name or are located in a plausible associated directory (like Textures/).\n - Single-File Assets (Utility Maps): Files whose names strongly suggest a UtilityMap type (e.g., scratches.tif, FlowMap.png, 21_hairs_deposits.tif) should typically form their own asset, unless they clearly belong to a larger PBR set based on naming conventions. Remember UtilityMap assets usually contain only one file as per their definition.\n - Variations: Files indicating variations (e.g., _A, _B or _variant_blue) should be grouped logically.\n - If variations represent complete, distinct sets (like Boards001_A and Boards001_B in the examples), create separate assets for each variation.\n - If variations seem like alternative maps or supplementary files for a single core asset (like pattern_01_diffuse.tga and variant_blue_diffuse.tga in the examples), group them under one asset. Use the base name (e.g., Fabric_Pattern_01) for the asset.\n3. 
Group Files: Based on the identified patterns, group the file paths into logical predicted_assets.\n4. Determine Asset Type: For each asset group, determine the most appropriate predicted_asset_type by considering the types of files it contains (e.g., presence of a .fbx suggests Model; multiple PBR maps like MAP_COL, MAP_NRM, MAP_ROUGH suggest Surface; a single imperfection map suggests UtilityMap). Refer to the ASSET_TYPE_DEFINITIONS.\n5. Suggest Asset Name: For each asset, generate a suggested_asset_name. This should be concise and derived from the common base filename or the immediate parent directory name. Clean up the name (e.g., use CamelCase or underscores consistently, remove redundant info like dimensions if not essential).\n6. Assign File Types: For each file_path within an asset, determine the most appropriate predicted_file_type based on its name, extension, and context within the asset. Use the keys from FILE_TYPE_DEFINITIONS.\n - Use FILE_IGNORE for files that should be ignored (e.g., Thumbs.db, .DS_Store).\n - Use EXTRA for files that belong to the asset but don't fit a standard map type (e.g., previews, text files, non-standard maps like Emissive unless you add a specific type for it).\n\nInput File List:\n\ntext\n{FILE_LIST}\n\n\nOutput Format:\n\nYour response MUST be ONLY a single, perfectly valid JSON object adhering strictly to the structure below. Do NOT include any text, explanations, or introductory phrases before or after the JSON object. 
Ensure all strings are correctly quoted and escaped, and there are NO trailing commas or comments (//, /* */).\n\nCRITICAL: The output must be strictly valid JSON parsable by standard libraries.\n\njson\n{\n \"predicted_assets\": [\n {\n \"suggested_asset_name\": \"string\", // Concise asset name derived from common file parts or directory\n \"predicted_asset_type\": \"string\", // Key from Asset Types definitions\n \"files\": [\n {\n \"file_path\": \"string\", // Exact relative path from the input list\n \"predicted_file_type\": \"string\" // Key from File Types definitions\n },\n // ... more files\n ]\n },\n // ... more assets\n ]\n}\n\n\nExamples:\n\nHere are examples of input file lists and the desired JSON output, illustrating the grouping logic:\n\njson\n[\n {EXAMPLE_INPUT_OUTPUT_PAIRS}\n]\n\n\nNow, process the provided FILE_LIST and generate ONLY the JSON output according to these instructions." } \ No newline at end of file diff --git a/gui/asset_restructure_handler.py b/gui/asset_restructure_handler.py new file mode 100644 index 0000000..a116053 --- /dev/null +++ b/gui/asset_restructure_handler.py @@ -0,0 +1,138 @@ +# gui/asset_restructure_handler.py +import logging +from PySide6.QtCore import QObject, Slot, QModelIndex +from PySide6.QtGui import QColor # Might be needed if copying logic directly, though unlikely now +from pathlib import Path +from .unified_view_model import UnifiedViewModel # Use relative import +from rule_structure import SourceRule, AssetRule, FileRule + +log = logging.getLogger(__name__) + +class AssetRestructureHandler(QObject): + """ + Handles the model restructuring logic triggered by changes + to FileRule target asset overrides in the UnifiedViewModel. 
+ """ + def __init__(self, model: UnifiedViewModel, parent=None): + super().__init__(parent) + if not isinstance(model, UnifiedViewModel): + raise TypeError("AssetRestructureHandler requires a UnifiedViewModel instance.") + self.model = model + log.debug("AssetRestructureHandler initialized.") + + @Slot(QModelIndex, object) + def handle_target_asset_override(self, index: QModelIndex, new_target_path: object): + """ + Slot connected to UnifiedViewModel.targetAssetOverrideChanged. + Orchestrates model changes based on the new target asset path. + + Args: + index: The QModelIndex of the FileRule whose override changed. + new_target_path: The new target asset path (string or None). + """ + log.debug(f"Handler received targetAssetOverrideChanged: Index=({index.row()},{index.column()}), New Path='{new_target_path}'") + + if not index.isValid(): + log.warning("Handler received invalid index. Aborting.") + return + + file_item = self.model.getItem(index) + if not isinstance(file_item, FileRule): + log.warning(f"Handler received index for non-FileRule item: {type(file_item)}. Aborting.") + return + + # Ensure new_target_path is a string or None + new_target_name = str(new_target_path).strip() if new_target_path is not None else None + if new_target_name == "": new_target_name = None # Treat empty string as None + + # --- Get necessary context --- + old_parent_asset = getattr(file_item, 'parent_asset', None) + if not old_parent_asset: + log.error(f"Handler: File item '{Path(file_item.file_path).name}' has no parent asset. Cannot restructure.") + # Note: Data change already happened in setData, cannot easily revert here. + return + + source_rule = getattr(old_parent_asset, 'parent_source', None) + if not source_rule: + log.error(f"Handler: Could not find SourceRule for parent asset '{old_parent_asset.asset_name}'. 
Cannot restructure.") + return + + # --- Logic based on the new target name --- + target_parent_asset = None + target_parent_index = QModelIndex() + move_occurred = False + + # 1. Find existing target parent AssetRule within the same SourceRule + if new_target_name: + for i, asset in enumerate(source_rule.assets): + if asset.asset_name == new_target_name: + target_parent_asset = asset + # Get index for the target parent + try: + source_rule_row = self.model._source_rules.index(source_rule) + source_rule_index = self.model.createIndex(source_rule_row, 0, source_rule) + target_parent_index = self.model.index(i, 0, source_rule_index) + if not target_parent_index.isValid(): + log.error(f"Handler: Failed to create valid index for existing target parent '{new_target_name}'.") + target_parent_asset = None # Reset if index is invalid + except ValueError: + log.error(f"Handler: Could not find SourceRule index while looking for target parent '{new_target_name}'.") + target_parent_asset = None # Reset if index is invalid + break # Found the asset + + # 2. Handle Move or Creation + if target_parent_asset: + # --- Move to Existing Parent --- + if target_parent_asset != old_parent_asset: + log.info(f"Handler: Moving file '{Path(file_item.file_path).name}' to existing asset '{target_parent_asset.asset_name}'.") + if self.model.moveFileRule(index, target_parent_index): + move_occurred = True + else: + log.error(f"Handler: Model failed to move file rule to existing asset '{target_parent_asset.asset_name}'.") + # Consider how to handle failure - maybe log and continue to cleanup? + else: + # Target is the same as the old parent. No move needed. + log.debug(f"Handler: Target asset '{new_target_name}' is the same as the current parent. 
No move required.") + pass # No move needed, but might still need cleanup if old parent becomes empty later (unlikely in this specific case) + + elif new_target_name: # Only create if a *new* specific target name was given + # --- Create New Parent AssetRule and Move --- + log.info(f"Handler: Creating new asset '{new_target_name}' and moving file '{Path(file_item.file_path).name}'.") + # Create the new asset rule using the model's method + new_asset_index = self.model.createAssetRule(source_rule, new_target_name, copy_from_asset=old_parent_asset) + + if new_asset_index.isValid(): + # Now move the file to the newly created asset + if self.model.moveFileRule(index, new_asset_index): + move_occurred = True + target_parent_asset = new_asset_index.internalPointer() # Update for cleanup check + else: + log.error(f"Handler: Model failed to move file rule to newly created asset '{new_target_name}'.") + # If move fails after creation, should we remove the created asset? Maybe. + # For now, just log the error. + else: + log.error(f"Handler: Model failed to create new asset rule '{new_target_name}'. Cannot move file.") + + else: # new_target_name is None or empty + # --- Moving back to original/default parent (Clearing Override) --- + # The file *should* already be under its original parent if the override was just cleared. + # However, if it was previously moved *away* from its original parent due to an override, + # clearing the override *should* ideally move it back. + # This logic is complex: we need to know the *original* parent before any overrides. + # The current structure doesn't explicitly store this. + # For now, assume clearing the override means it stays in its *current* parent, + # and we only handle cleanup if that parent becomes empty. + # A more robust solution might involve finding the asset matching the file's *directory* name. + log.debug(f"Handler: Target asset override cleared for '{Path(file_item.file_path).name}'. 
File remains in parent '{old_parent_asset.asset_name}'.") + # No move occurs in this simplified interpretation. + + # 3. Cleanup Empty Old Parent (only if a move occurred) + # Check the old_parent_asset *after* the potential move + if move_occurred and old_parent_asset and not old_parent_asset.files: + log.info(f"Handler: Attempting to remove empty old parent asset '{old_parent_asset.asset_name}'.") + if not self.model.removeAssetRule(old_parent_asset): + log.warning(f"Handler: Model failed to remove empty old parent asset '{old_parent_asset.asset_name}'.") + elif move_occurred: + log.debug(f"Handler: Old parent asset '{old_parent_asset.asset_name}' still contains files. No removal needed.") + + log.debug(f"Handler finished processing targetAssetOverrideChanged for '{Path(file_item.file_path).name}'.") \ No newline at end of file diff --git a/gui/base_prediction_handler.py b/gui/base_prediction_handler.py new file mode 100644 index 0000000..daa11f9 --- /dev/null +++ b/gui/base_prediction_handler.py @@ -0,0 +1,133 @@ +# gui/base_prediction_handler.py +import logging +import time +from abc import ABC, abstractmethod +from pathlib import Path +from typing import List, Any + +from PySide6.QtCore import QObject, Signal, Slot, QThread + +# Assuming rule_structure defines SourceRule +try: + from rule_structure import SourceRule +except ImportError: + print("ERROR (BasePredictionHandler): Failed to import SourceRule. Predictions might fail.") + # Define a placeholder if the import fails to allow type hinting + class SourceRule: pass + +from abc import ABCMeta +from PySide6.QtCore import QObject # Ensure QObject is imported if not already + +# Combine metaclasses to avoid conflict between QObject and ABC +class QtABCMeta(type(QObject), ABCMeta): + pass +log = logging.getLogger(__name__) + +class BasePredictionHandler(QObject, ABC, metaclass=QtABCMeta): + """ + Abstract base class for prediction handlers that generate SourceRule hierarchies. 
+ Designed to be run in a separate QThread. + """ + # --- Standardized Signals --- + # Emitted when prediction is successfully completed. + # Args: input_source_identifier (str), results (List[SourceRule]) + prediction_ready = Signal(str, list) + + # Emitted when an error occurs during prediction. + # Args: input_source_identifier (str), error_message (str) + prediction_error = Signal(str, str) + + # Emitted for status updates during the prediction process. + # Args: status_message (str) + status_update = Signal(str) + + def __init__(self, input_source_identifier: str, parent: QObject = None): + """ + Initializes the base handler. + + Args: + input_source_identifier: The unique identifier for the input source (e.g., file path). + parent: The parent QObject. + """ + super().__init__(parent) + self.input_source_identifier = input_source_identifier + self._is_running = False + self._is_cancelled = False # Added cancellation flag + + @property + def is_running(self) -> bool: + """Returns True if the handler is currently processing.""" + return self._is_running + + @Slot() + def run(self): + """ + Main execution slot intended to be connected to QThread.started. + Handles the overall process: setup, execution, error handling, signaling. + """ + if self._is_running: + log.warning(f"Handler for '{self.input_source_identifier}' is already running. 
Aborting.") + return + if self._is_cancelled: + log.info(f"Handler for '{self.input_source_identifier}' was cancelled before starting.") + # Optionally emit an error or specific signal for cancellation before start + return + + self._is_running = True + self._is_cancelled = False # Ensure cancel flag is reset at start + thread_id = QThread.currentThread() # Use currentThread() for PySide6 + log.info(f"[{time.time():.4f}][T:{thread_id}] Starting prediction run for: {self.input_source_identifier}") + self.status_update.emit(f"Starting analysis for '{Path(self.input_source_identifier).name}'...") + + try: + # --- Execute Core Logic --- + results = self._perform_prediction() + + if self._is_cancelled: + log.info(f"Prediction cancelled during execution for: {self.input_source_identifier}") + self.prediction_error.emit(self.input_source_identifier, "Prediction cancelled by user.") + else: + # --- Emit Success Signal --- + log.info(f"[{time.time():.4f}][T:{thread_id}] Prediction successful for '{self.input_source_identifier}'. Emitting results.") + self.prediction_ready.emit(self.input_source_identifier, results) + self.status_update.emit(f"Analysis complete for '{Path(self.input_source_identifier).name}'.") + + except Exception as e: + # --- Emit Error Signal --- + log.exception(f"[{time.time():.4f}][T:{thread_id}] Error during prediction for '{self.input_source_identifier}': {e}") + error_msg = f"Error analyzing '{Path(self.input_source_identifier).name}': {e}" + self.prediction_error.emit(self.input_source_identifier, error_msg) + # Status update might be redundant if error is shown elsewhere, but can be useful + # self.status_update.emit(f"Error: {e}") + + finally: + # --- Cleanup --- + self._is_running = False + log.info(f"[{time.time():.4f}][T:{thread_id}] Finished prediction run for: {self.input_source_identifier}") + # Note: The thread itself should be managed (quit/deleteLater) by the caller + # based on the signals emitted (prediction_ready, prediction_error). 
+ + @Slot() + def cancel(self): + """ + Sets the cancellation flag. The running process should check this flag periodically. + """ + log.info(f"Cancellation requested for handler: {self.input_source_identifier}") + self._is_cancelled = True + self.status_update.emit(f"Cancellation requested for '{Path(self.input_source_identifier).name}'...") + + + @abstractmethod + def _perform_prediction(self) -> List[SourceRule]: + """ + Abstract method to be implemented by concrete subclasses. + This method contains the specific logic for generating the SourceRule list. + It should periodically check `self._is_cancelled`. + + Returns: + A list of SourceRule objects representing the prediction results. + + Raises: + Exception: If any critical error occurs during the prediction process. + """ + pass \ No newline at end of file diff --git a/gui/delegates.py b/gui/delegates.py index bd29cdc..bcd58ed 100644 --- a/gui/delegates.py +++ b/gui/delegates.py @@ -1,8 +1,10 @@ +from pathlib import Path # gui/delegates.py from PySide6.QtWidgets import QStyledItemDelegate, QLineEdit, QComboBox from PySide6.QtCore import Qt, QModelIndex -# Import the new config dictionaries -from configuration import load_base_config # Import load_base_config +# Import Configuration and ConfigurationError +from configuration import Configuration, ConfigurationError, load_base_config # Keep load_base_config for SupplierSearchDelegate +from PySide6.QtWidgets import QListWidgetItem # Import QListWidgetItem import json import logging @@ -40,29 +42,49 @@ class LineEditDelegate(QStyledItemDelegate): class ComboBoxDelegate(QStyledItemDelegate): """ Delegate for editing string values from a predefined list using a QComboBox. - Determines the list source based on column index. + Determines the list source based on column index by accessing the + UnifiedViewModel directly. 
""" + # REMOVED main_window parameter + def __init__(self, parent=None): + super().__init__(parent) + # REMOVED self.main_window store + def createEditor(self, parent, option, index: QModelIndex): # Creates the QComboBox editor widget. editor = QComboBox(parent) column = index.column() - model = index.model() # Get the model instance + model = index.model() # GET model from index # Add a "clear" option first, associating None with it. editor.addItem("---", None) # UserData = None - # Populate based on column using keys from config dictionaries - items_keys = None - try: - base_config = load_base_config() # Load base config - if column == 2: # Asset-Type Override (AssetRule) - items_keys = list(base_config.get('ASSET_TYPE_DEFINITIONS', {}).keys()) # Access from base_config - elif column == 4: # Item-Type Override (FileRule) - items_keys = list(base_config.get('FILE_TYPE_DEFINITIONS', {}).keys()) # Access from base_config - except Exception as e: - log.error(f"Error loading base config for ComboBoxDelegate: {e}") - items_keys = [] # Fallback to empty list on error + # Populate based on column by accessing the model's cached keys + items_keys = [] # Default to empty list + # --- Get keys directly from the UnifiedViewModel --- + # Check if the model is the correct type and has the attributes + if hasattr(model, '_asset_type_keys') and hasattr(model, '_file_type_keys'): + try: + # Use column constants from the model if available + COL_ASSET_TYPE = getattr(model, 'COL_ASSET_TYPE', 3) # Default fallback + COL_ITEM_TYPE = getattr(model, 'COL_ITEM_TYPE', 4) # Default fallback + + if column == COL_ASSET_TYPE: + items_keys = model._asset_type_keys # Use cached keys + elif column == COL_ITEM_TYPE: + items_keys = model._file_type_keys # Use cached keys + # else: # Handle other columns if necessary (optional) + # log.debug(f"ComboBoxDelegate applied to unexpected column: {column}") + + except Exception as e: + log.error(f"Error getting keys from UnifiedViewModel in 
ComboBoxDelegate: {e}") + items_keys = [] # Fallback on error + else: + log.warning("ComboBoxDelegate: Model is not a UnifiedViewModel or is missing key attributes (_asset_type_keys, _file_type_keys). Dropdown may be empty.") + # --- End key retrieval from model --- + + # REMOVED the entire block that loaded Configuration based on main_window preset if items_keys: for item_key in sorted(items_keys): # Sort keys alphabetically for consistency diff --git a/gui/llm_interaction_handler.py b/gui/llm_interaction_handler.py new file mode 100644 index 0000000..489ecc7 --- /dev/null +++ b/gui/llm_interaction_handler.py @@ -0,0 +1,340 @@ +import os +import logging +from pathlib import Path + +from PySide6.QtCore import QObject, Signal, QThread, Slot, QTimer + +# --- Backend Imports --- +# Assuming these might be needed based on MainWindow's usage +try: + from configuration import Configuration, ConfigurationError, load_base_config + from .llm_prediction_handler import LLMPredictionHandler # Backend handler + from rule_structure import SourceRule # For signal emission type hint +except ImportError as e: + logging.getLogger(__name__).critical(f"Failed to import backend modules for LLMInteractionHandler: {e}") + LLMPredictionHandler = None + load_base_config = None + ConfigurationError = Exception + SourceRule = None # Define as None if import fails + +log = logging.getLogger(__name__) + +class LLMInteractionHandler(QObject): + """ + Handles the logic for interacting with the LLM prediction service, + including managing the queue, thread, and communication. 
+ """ + # Signals to communicate results/status back to MainWindow or other components + llm_prediction_ready = Signal(str, list) # input_path, List[SourceRule] + llm_prediction_error = Signal(str, str) # input_path, error_message + llm_status_update = Signal(str) # status_message + llm_processing_state_changed = Signal(bool) # is_processing (True when busy, False when idle) + + def __init__(self, main_window_ref, parent=None): + """ + Initializes the handler. + + Args: + main_window_ref: A reference to the MainWindow instance for accessing + shared components like status bar or models if needed. + parent: The parent QObject. + """ + super().__init__(parent) + self.main_window = main_window_ref # Store reference if needed for status updates etc. + self.llm_processing_queue = [] # Unified queue for initial adds and re-interpretations + self.llm_prediction_thread = None + self.llm_prediction_handler = None + self._is_processing = False # Internal flag to track processing state + + def _set_processing_state(self, processing: bool): + """Updates the internal processing state and emits a signal.""" + if self._is_processing != processing: + self._is_processing = processing + log.debug(f"LLM Handler processing state changed to: {processing}") + self.llm_processing_state_changed.emit(processing) + + @Slot(str, list) + def queue_llm_request(self, input_path: str, file_list: list | None): + """Adds a request to the LLM processing queue.""" + log.debug(f"Queueing LLM request for '{input_path}'. Current queue size: {len(self.llm_processing_queue)}") + # Avoid duplicates? Check if already in queue + is_in_queue = any(item[0] == input_path for item in self.llm_processing_queue) + if not is_in_queue: + self.llm_processing_queue.append((input_path, file_list)) + log.info(f"Added '{input_path}' to LLM queue. 
New size: {len(self.llm_processing_queue)}") + # If not currently processing, start the queue + if not self._is_processing: + # Use QTimer.singleShot to avoid immediate processing if called rapidly + QTimer.singleShot(0, self._process_next_llm_item) + else: + log.debug(f"Skipping duplicate add to LLM queue for: {input_path}") + + @Slot(list) + def queue_llm_requests_batch(self, requests: list[tuple[str, list | None]]): + """Adds multiple requests to the LLM processing queue.""" + added_count = 0 + for input_path, file_list in requests: + is_in_queue = any(item[0] == input_path for item in self.llm_processing_queue) + if not is_in_queue: + self.llm_processing_queue.append((input_path, file_list)) + added_count += 1 + else: + log.debug(f"Skipping duplicate add to LLM queue for: {input_path}") + + if added_count > 0: + log.info(f"Added {added_count} requests to LLM queue. New size: {len(self.llm_processing_queue)}") + # If not currently processing, start the queue + if not self._is_processing: + QTimer.singleShot(0, self._process_next_llm_item) + + # --- Methods to be moved from MainWindow --- + + @Slot() + def _reset_llm_thread_references(self): + """Resets LLM thread and handler references after the thread finishes.""" + log.debug("--> Entered LLMInteractionHandler._reset_llm_thread_references") + log.debug("Resetting LLM prediction thread and handler references.") + self.llm_prediction_thread = None + self.llm_prediction_handler = None + # --- Process next item now that the previous thread is fully finished --- + log.debug("Previous LLM thread finished. 
Triggering processing for next item by calling _process_next_llm_item...") + self._set_processing_state(False) # Mark processing as finished *before* trying next item + # Use QTimer.singleShot to yield control briefly before starting next item + QTimer.singleShot(0, self._process_next_llm_item) + log.debug("<-- Exiting LLMInteractionHandler._reset_llm_thread_references") + + + def _start_llm_prediction(self, input_path_str: str, file_list: list = None): + """ + Sets up and starts the LLMPredictionHandler in a separate thread. + Emits signals for results, errors, or status updates. + If file_list is not provided, it will be extracted. + """ + log.debug(f"Attempting to start LLM prediction for: {input_path_str}") + # Extract file list if not provided (needed for re-interpretation calls) + if file_list is None: + log.debug(f"File list not provided for {input_path_str}, extracting...") + # Need access to MainWindow's _extract_file_list or reimplement + # For now, assume MainWindow provides it or pass it during queueing + # Let's assume file_list is always provided correctly for now. + # If extraction fails before queueing, it won't reach here. + # If extraction needs to happen here, MainWindow ref is needed. + # Re-evaluating: MainWindow._extract_file_list is complex. + # It's better if the caller (MainWindow) extracts and passes the list. + # We'll modify queue_llm_request to require a non-None list eventually, + # or pass the main_window ref to call its extraction method. + # Let's pass main_window ref for now. + if hasattr(self.main_window, '_extract_file_list'): + file_list = self.main_window._extract_file_list(input_path_str) + if file_list is None: + error_msg = f"Failed to extract file list for {input_path_str} in _start_llm_prediction." 
+ log.error(error_msg) + self.llm_status_update.emit(f"Error extracting files for {os.path.basename(input_path_str)}") + self.llm_prediction_error.emit(input_path_str, error_msg) # Signal error + # If called as part of a queue, we need to ensure the next item is processed. + # _reset_llm_thread_references handles this via the finished signal, + # but if the thread never starts, we need to trigger manually. + # This case should ideally be caught before calling _start_llm_prediction. + # We'll assume the queue logic handles failed extraction before calling this. + return # Stop if extraction failed + else: + error_msg = f"MainWindow reference does not have _extract_file_list method." + log.error(error_msg) + self.llm_status_update.emit(f"Internal Error: Cannot extract files for {os.path.basename(input_path_str)}") + self.llm_prediction_error.emit(input_path_str, error_msg) + return # Stop + + input_path_obj = Path(input_path_str) # Still needed for basename + + if not file_list: + error_msg = f"LLM Error: No files found/extracted for {input_path_str}" + log.error(error_msg) + self.llm_status_update.emit(f"LLM Error: No files found for {input_path_obj.name}") + self.llm_prediction_error.emit(input_path_str, error_msg) + return + + # --- Load Base Config for LLM Settings --- + if load_base_config is None: + log.critical("LLM Error: load_base_config function not available.") + self.llm_status_update.emit("LLM Error: Cannot load base configuration.") + self.llm_prediction_error.emit(input_path_str, "load_base_config function not available.") + return + try: + base_config = load_base_config() + if not base_config: + raise ConfigurationError("Failed to load base configuration (app_settings.json).") + + llm_settings = { + "llm_endpoint_url": base_config.get('llm_endpoint_url'), + "api_key": base_config.get('llm_api_key'), + "model_name": base_config.get('llm_model_name', 'gemini-pro'), + "prompt_template_content": base_config.get('llm_predictor_prompt'), + "asset_types": 
base_config.get('ASSET_TYPE_DEFINITIONS', {}), + "file_types": base_config.get('FILE_TYPE_DEFINITIONS', {}), + "examples": base_config.get('llm_predictor_examples', []) + } + except ConfigurationError as e: + log.error(f"LLM Configuration Error: {e}") + self.llm_status_update.emit(f"LLM Config Error: {e}") + self.llm_prediction_error.emit(input_path_str, f"LLM Configuration Error: {e}") + # Optionally show a QMessageBox via main_window ref if critical + # self.main_window.show_critical_error("LLM Config Error", str(e)) + return + except Exception as e: + log.exception(f"Unexpected error loading LLM configuration: {e}") + self.llm_status_update.emit(f"LLM Config Error: {e}") + self.llm_prediction_error.emit(input_path_str, f"Unexpected error loading LLM config: {e}") + return + # --- End Config Loading --- + + if LLMPredictionHandler is None: + log.critical("LLMPredictionHandler class not available.") + self.llm_status_update.emit("LLM Error: Prediction handler component missing.") + self.llm_prediction_error.emit(input_path_str, "LLMPredictionHandler class not available.") + return + + # Clean up previous thread/handler if any exist (should not happen if queue logic is correct) + if self.llm_prediction_thread and self.llm_prediction_thread.isRunning(): + log.warning("Warning: Previous LLM prediction thread still running when trying to start new one. This indicates a potential logic error.") + # Attempt graceful shutdown (might need more robust handling) + if self.llm_prediction_handler: + # Assuming LLMPredictionHandler has a cancel method or similar + if hasattr(self.llm_prediction_handler, 'cancel'): + self.llm_prediction_handler.cancel() + self.llm_prediction_thread.quit() + if not self.llm_prediction_thread.wait(1000): # Wait 1 sec + log.warning("LLM thread did not quit gracefully. 
Forcing termination.") + self.llm_prediction_thread.terminate() + self.llm_prediction_thread.wait() # Wait after terminate + # Reset references after ensuring termination + self.llm_prediction_thread = None + self.llm_prediction_handler = None + + + log.info(f"Starting LLM prediction thread for source: {input_path_str} with {len(file_list)} files.") + self.llm_status_update.emit(f"Starting LLM interpretation for {input_path_obj.name}...") + + self.llm_prediction_thread = QThread(self.main_window) # Parent thread to main window's thread? Or self? Let's try self. + self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, llm_settings) + self.llm_prediction_handler.moveToThread(self.llm_prediction_thread) + + # Connect signals from handler to *internal* slots or directly emit signals + self.llm_prediction_handler.prediction_ready.connect(self._handle_llm_result) + self.llm_prediction_handler.prediction_error.connect(self._handle_llm_error) + self.llm_prediction_handler.status_update.connect(self.llm_status_update) # Pass status through + + # Connect thread signals + self.llm_prediction_thread.started.connect(self.llm_prediction_handler.run) + # Clean up thread and handler when finished + self.llm_prediction_thread.finished.connect(self._reset_llm_thread_references) + self.llm_prediction_thread.finished.connect(self.llm_prediction_handler.deleteLater) + self.llm_prediction_thread.finished.connect(self.llm_prediction_thread.deleteLater) + # Also ensure thread quits when handler signals completion/error + self.llm_prediction_handler.prediction_ready.connect(self.llm_prediction_thread.quit) + self.llm_prediction_handler.prediction_error.connect(self.llm_prediction_thread.quit) + + self.llm_prediction_thread.start() + log.debug(f"LLM prediction thread started for {input_path_str}.") + + + def is_processing(self) -> bool: + """Safely checks if the LLM prediction thread is currently running.""" + # Use the internal flag, which is more reliable than 
checking thread directly + # due to potential race conditions during cleanup. + # The thread check can be a fallback. + is_running_flag = self._is_processing + # Also check thread as a safeguard, though the flag should be primary + try: + is_thread_alive = self.llm_prediction_thread is not None and self.llm_prediction_thread.isRunning() + if is_running_flag != is_thread_alive: + # This might indicate the flag wasn't updated correctly, log it. + log.warning(f"LLM Handler processing flag ({is_running_flag}) mismatch with thread state ({is_thread_alive}). Flag is primary.") + return is_running_flag + except RuntimeError: + log.debug("is_processing: Caught RuntimeError checking isRunning (thread likely deleted).") + # If thread died unexpectedly, the flag might be stale. Reset it. + if self._is_processing: + self._set_processing_state(False) + return False + + + def _process_next_llm_item(self): + """Processes the next directory in the unified LLM processing queue.""" + log.debug(f"--> Entered _process_next_llm_item. Queue size: {len(self.llm_processing_queue)}") + + if self.is_processing(): + log.info("LLM processing already running. Waiting for current item to finish.") + # Do not pop from queue if already running, wait for _reset_llm_thread_references to call this again + return + + if not self.llm_processing_queue: + log.info("LLM processing queue is empty. 
Finishing.") + self.llm_status_update.emit("LLM processing complete.") + self._set_processing_state(False) # Ensure state is set to idle + log.debug("<-- Exiting _process_next_llm_item (queue empty)") + return + + # Set state to busy *before* starting + self._set_processing_state(True) + + # Get next item *without* removing it yet + next_item = self.llm_processing_queue[0] # Peek at the first item + next_dir, file_list = next_item # Unpack the tuple + + # --- Update Status/Progress --- + total_in_queue_now = len(self.llm_processing_queue) + status_msg = f"LLM Processing {os.path.basename(next_dir)} ({total_in_queue_now} remaining)..." + self.llm_status_update.emit(status_msg) + log.info(status_msg) + + # --- Start Prediction (which might fail) --- + try: + # Pass the potentially None file_list. _start_llm_prediction handles extraction if needed. + self._start_llm_prediction(next_dir, file_list=file_list) + # --- Pop item *after* successfully starting prediction --- + self.llm_processing_queue.pop(0) + log.debug(f"Successfully started LLM prediction for {next_dir} and removed from queue.") + except Exception as e: + log.exception(f"Error occurred *during* _start_llm_prediction call for {next_dir}: {e}") + error_msg = f"Error starting LLM for {os.path.basename(next_dir)}: {e}" + self.llm_status_update.emit(error_msg) + self.llm_prediction_error.emit(next_dir, error_msg) # Signal the error + # --- Remove the failed item from the queue --- + try: + failed_item = self.llm_processing_queue.pop(0) + log.warning(f"Removed failed item {failed_item} from LLM queue due to start error.") + except IndexError: + log.error("Attempted to pop failed item from already empty LLM queue after start error.") + # --- Attempt to process the *next* item --- + # Reset processing state since this one failed *before* the thread finished signal could + self._set_processing_state(False) + # Use QTimer.singleShot to avoid deep recursion + QTimer.singleShot(100, self._process_next_llm_item) # Try 
next item after a short delay + + # --- Internal Slots to Handle Results/Errors from LLMPredictionHandler --- + @Slot(str, list) + def _handle_llm_result(self, input_path: str, source_rules: list): + """Internal slot to receive results and emit the public signal.""" + log.debug(f"LLM Handler received result for {input_path}. Emitting llm_prediction_ready.") + self.llm_prediction_ready.emit(input_path, source_rules) + # Note: The thread's finished signal calls _reset_llm_thread_references, + # which then calls _process_next_llm_item. + + @Slot(str, str) + def _handle_llm_error(self, input_path: str, error_message: str): + """Internal slot to receive errors and emit the public signal.""" + log.debug(f"LLM Handler received error for {input_path}: {error_message}. Emitting llm_prediction_error.") + self.llm_prediction_error.emit(input_path, error_message) + # Note: The thread's finished signal calls _reset_llm_thread_references, + # which then calls _process_next_llm_item. + + def clear_queue(self): + """Clears the LLM processing queue.""" + log.info(f"Clearing LLM processing queue ({len(self.llm_processing_queue)} items).") + self.llm_processing_queue.clear() + # TODO: Should we also attempt to cancel any *currently* running LLM task? + # This might be complex. For now, just clears the queue of pending items. + if self.is_processing(): + log.warning("LLM queue cleared, but a task is currently running. 
It will complete.") + else: + self.llm_status_update.emit("LLM queue cleared.") \ No newline at end of file diff --git a/gui/llm_prediction_handler.py b/gui/llm_prediction_handler.py index d251cf7..2198de1 100644 --- a/gui/llm_prediction_handler.py +++ b/gui/llm_prediction_handler.py @@ -1,7 +1,11 @@ import os import json import requests -from PySide6.QtCore import QObject, Signal, Slot, QThread +import re # Added import for regex +import logging # Add logging +from pathlib import Path # Add Path for basename +from PySide6.QtCore import QObject, Slot # Keep QObject for parent type hint, Slot for cancel if kept separate +# Removed Signal, QThread as they are handled by BasePredictionHandler or caller from typing import List, Dict, Any # Assuming rule_structure defines SourceRule, AssetRule, FileRule etc. @@ -12,92 +16,115 @@ from rule_structure import SourceRule, AssetRule, FileRule # Ensure AssetRule an # Adjust the import path if necessary # Removed Configuration import, will use load_base_config if needed or passed settings # from configuration import Configuration -from configuration import load_base_config # Keep this for now if needed elsewhere, or remove if settings are always passed +# from configuration import load_base_config # No longer needed here +from .base_prediction_handler import BasePredictionHandler # Import base class -class LLMPredictionHandler(QObject): +log = logging.getLogger(__name__) # Setup logger + +class LLMPredictionHandler(BasePredictionHandler): """ Handles the interaction with an LLM for predicting asset structures - based on a directory's file list. Designed to run in a QThread. + based on a directory's file list. Inherits from BasePredictionHandler. 
""" - # Signal emitted when prediction for a directory is complete - # Arguments: directory_path (str), results (List[SourceRule]) - prediction_ready = Signal(str, list) - # Signal emitted on error - # Arguments: directory_path (str), error_message (str) - prediction_error = Signal(str, str) - # Signal to update status message in the GUI - status_update = Signal(str) + # Signals (prediction_ready, prediction_error, status_update) are inherited - def __init__(self, input_path_str: str, file_list: list, llm_settings: dict, parent: QObject = None): # Accept input_path_str and file_list + def __init__(self, input_source_identifier: str, file_list: list, llm_settings: dict, parent: QObject = None): """ - Initializes the handler. + Initializes the LLM handler. Args: - input_path_str: The absolute path to the original input source (directory or archive). - file_list: A list of relative file paths extracted from the input source. - llm_settings: A dictionary containing necessary LLM configuration. + input_source_identifier: The unique identifier for the input source (e.g., file path). + file_list: A list of *relative* file paths extracted from the input source. + (LLM expects relative paths based on the prompt template). + llm_settings: A dictionary containing necessary LLM configuration + (endpoint_url, api_key, prompt_template_content, etc.). parent: The parent QObject. 
""" - super().__init__(parent) - self.input_path_str = input_path_str # Store original input path - self.file_list = file_list # Store the provided file list + super().__init__(input_source_identifier, parent) + # input_source_identifier is stored by the base class as self.input_source_identifier + self.file_list = file_list # Store the provided relative file list self.llm_settings = llm_settings # Store the settings dictionary self.endpoint_url = self.llm_settings.get('llm_endpoint_url') self.api_key = self.llm_settings.get('llm_api_key') - self._is_cancelled = False - @Slot() - def run(self): - """ - The main execution method to be called when the thread starts. - Orchestrates the prediction process for the given directory. - """ - # Directory check is no longer needed here, input path is just for context - # File list is provided via __init__ + # _is_running and _is_cancelled are handled by the base class + # The run() and cancel() slots are provided by the base class. + # We only need to implement the core logic in _perform_prediction. + + def _perform_prediction(self) -> List[SourceRule]: + """ + Performs the LLM prediction by preparing the prompt, calling the LLM, + and parsing the response. Implements the abstract method from BasePredictionHandler. + + Returns: + A list containing a single SourceRule object based on the LLM response, + or an empty list if prediction fails or yields no results. + + Raises: + ValueError: If required settings (like endpoint URL or prompt template) are missing. + ConnectionError: If the LLM API call fails due to network issues or timeouts. + Exception: For other errors during prompt preparation, API call, or parsing. + """ + log.info(f"Performing LLM prediction for: {self.input_source_identifier}") + base_name = Path(self.input_source_identifier).name + + # Use the file list passed during initialization + if not self.file_list: + log.warning(f"No files provided for LLM prediction for {self.input_source_identifier}. 
Returning empty list.") + self.status_update.emit(f"No files found for {base_name}.") # Use base signal + return [] # Return empty list, not an error + + # Check for cancellation before preparing prompt + if self._is_cancelled: + log.info("LLM prediction cancelled before preparing prompt.") + return [] + + # --- Prepare Prompt --- + self.status_update.emit(f"Preparing LLM input for {base_name}...") try: - self.status_update.emit(f"Preparing LLM input for {os.path.basename(self.input_path_str)}...") - if self._is_cancelled: return - - # Use the file list passed during initialization - if not self.file_list: - self.prediction_ready.emit(self.input_path_str, []) # Emit empty list if no files - return - if self._is_cancelled: return - - prompt = self._prepare_prompt(self.file_list) # Use self.file_list - if self._is_cancelled: return - - self.status_update.emit(f"Calling LLM for {os.path.basename(self.input_path_str)}...") - llm_response_json_str = self._call_llm(prompt) - if self._is_cancelled: return - - self.status_update.emit(f"Parsing LLM response for {os.path.basename(self.input_path_str)}...") - predicted_rules = self._parse_llm_response(llm_response_json_str) - if self._is_cancelled: return - - self.prediction_ready.emit(self.input_path_str, predicted_rules) # Use input_path_str - self.status_update.emit(f"LLM interpretation complete for {os.path.basename(self.input_path_str)}.") - + # Pass relative file list + prompt = self._prepare_prompt(self.file_list) except Exception as e: - error_msg = f"Error during LLM prediction for {self.input_path_str}: {e}" - print(error_msg) # Log the full error - self.prediction_error.emit(self.input_path_str, f"An error occurred: {e}") # Use input_path_str - finally: - # Ensure thread cleanup or final signals if needed - pass + log.exception("Error preparing LLM prompt.") + raise ValueError(f"Error preparing LLM prompt: {e}") from e # Re-raise for base handler - @Slot() - def cancel(self): - """ - Sets the cancellation flag. 
- """ - self._is_cancelled = True - self.status_update.emit(f"Cancellation requested for {os.path.basename(self.input_path_str)}...") # Use input_path_str + if self._is_cancelled: + log.info("LLM prediction cancelled after preparing prompt.") + return [] + + # --- Call LLM --- + self.status_update.emit(f"Calling LLM for {base_name}...") + try: + llm_response_json_str = self._call_llm(prompt) + except Exception as e: + log.exception("Error calling LLM API.") + # Re-raise potentially specific errors (ConnectionError, ValueError) or a generic one + raise RuntimeError(f"Error calling LLM: {e}") from e + + if self._is_cancelled: + log.info("LLM prediction cancelled after calling LLM.") + return [] + + # --- Parse Response --- + self.status_update.emit(f"Parsing LLM response for {base_name}...") + try: + predicted_rules = self._parse_llm_response(llm_response_json_str) + except Exception as e: + log.exception("Error parsing LLM response.") + raise ValueError(f"Error parsing LLM response: {e}") from e # Re-raise for base handler + + if self._is_cancelled: + log.info("LLM prediction cancelled after parsing response.") + return [] + + log.info(f"LLM prediction finished successfully for '{self.input_source_identifier}'.") + # The base class run() method will emit prediction_ready with these results + return predicted_rules -# Removed _get_file_list method as file list is now passed in __init__ + # --- Helper Methods (Keep these internal to this class) --- - def _prepare_prompt(self, file_list: List[str]) -> str: + def _prepare_prompt(self, relative_file_list: List[str]) -> str: """ Prepares the full prompt string to send to the LLM using stored settings. 
""" @@ -124,8 +151,8 @@ class LLMPredictionHandler(QObject): file_defs = json.dumps(self.llm_settings.get('file_types', {}), indent=4) examples = json.dumps(self.llm_settings.get('examples', []), indent=2) - # Format file list as a single string with newlines - file_list_str = "\n".join(file_list) + # Format *relative* file list as a single string with newlines + file_list_str = "\n".join(relative_file_list) # Replace placeholders prompt = prompt_template.replace('{ASSET_TYPE_DEFINITIONS}', asset_defs) @@ -173,75 +200,47 @@ class LLMPredictionHandler(QObject): # "response_format": { "type": "json_object" } # If supported by endpoint } - self.status_update.emit(f"Sending request to LLM at {self.endpoint_url}...") + # Status update emitted by _perform_prediction before calling this + # self.status_update.emit(f"Sending request to LLM at {self.endpoint_url}...") print(f"--- Calling LLM API: {self.endpoint_url} ---") # print(f"--- Payload Preview ---\n{json.dumps(payload, indent=2)[:500]}...\n--- END Payload Preview ---") - try: - # Make the POST request with a timeout (e.g., 120 seconds for potentially long LLM responses) - response = requests.post( - self.endpoint_url, - headers=headers, - json=payload, - # Make the POST request with configured timeout, default to 120 - timeout=self.llm_settings.get("llm_request_timeout", 120) - ) - response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx) + # Note: Exceptions raised here (Timeout, RequestException, ValueError) + # will be caught by the _perform_prediction method's handler. - except requests.exceptions.Timeout: - error_msg = f"LLM request timed out after {self.llm_settings.get('llm_request_timeout', 120)} seconds." 
- print(error_msg) - raise ConnectionError(error_msg) - except requests.exceptions.RequestException as e: - error_msg = f"LLM request failed: {e}" - print(error_msg) - # Attempt to get more detail from response if available - try: - if e.response is not None: - print(f"LLM Response Status Code: {e.response.status_code}") - print(f"LLM Response Text: {e.response.text[:500]}...") # Log partial response text - error_msg += f" (Status: {e.response.status_code})" - except Exception: - pass # Ignore errors during error reporting enhancement - raise ConnectionError(error_msg) # Raise a more generic error for the GUI + # Make the POST request with a timeout + response = requests.post( + self.endpoint_url, + headers=headers, + json=payload, + timeout=self.llm_settings.get("llm_request_timeout", 120) + ) + response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx) # Parse the JSON response - try: - response_data = response.json() - # print(f"--- LLM Raw Response ---\n{json.dumps(response_data, indent=2)}\n--- END Raw Response ---") # Debugging + response_data = response.json() + # print(f"--- LLM Raw Response ---\n{json.dumps(response_data, indent=2)}\n--- END Raw Response ---") # Debugging - # Extract content - structure depends on the API (OpenAI format assumed) - if "choices" in response_data and len(response_data["choices"]) > 0: - message = response_data["choices"][0].get("message", {}) - content = message.get("content") - if content: - # The content itself should be the JSON string we asked for - print("--- LLM Response Content Extracted Successfully ---") - return content.strip() - else: - raise ValueError("LLM response missing 'content' in choices[0].message.") + # Extract content - structure depends on the API (OpenAI format assumed) + if "choices" in response_data and len(response_data["choices"]) > 0: + message = response_data["choices"][0].get("message", {}) + content = message.get("content") + if content: + # The content itself should be the 
JSON string we asked for + log.debug("--- LLM Response Content Extracted Successfully ---") + return content.strip() else: - raise ValueError("LLM response missing 'choices' array or it's empty.") - - except json.JSONDecodeError: - error_msg = f"Failed to decode LLM JSON response. Response text: {response.text[:500]}..." - print(error_msg) - raise ValueError(error_msg) - except Exception as e: - # Capture the potentially problematic response_data in the error message - response_data_str = "Not available" - try: - response_data_str = json.dumps(response_data) if 'response_data' in locals() else response.text[:500] + "..." - except Exception: - pass # Avoid errors during error reporting - error_msg = f"Error parsing LLM response structure: {e}. Response data: {response_data_str}" - print(error_msg) - raise ValueError(error_msg) + raise ValueError("LLM response missing 'content' in choices[0].message.") + else: + raise ValueError("LLM response missing 'choices' array or it's empty.") def _parse_llm_response(self, llm_response_json_str: str) -> List[SourceRule]: """ Parses the LLM's JSON response string into a list of SourceRule objects. """ + # Note: Exceptions (JSONDecodeError, ValueError) raised here + # will be caught by the _perform_prediction method's handler. 
+ # Strip potential markdown code fences before parsing clean_json_str = llm_response_json_str.strip() if clean_json_str.startswith("```json"): @@ -250,102 +249,112 @@ class LLMPredictionHandler(QObject): clean_json_str = clean_json_str[:-3] # Remove ``` clean_json_str = clean_json_str.strip() # Remove any extra whitespace + # --- ADDED: Remove tags --- + clean_json_str = re.sub(r'.*?', '', clean_json_str, flags=re.DOTALL | re.IGNORECASE) + clean_json_str = clean_json_str.strip() # Strip again after potential removal + # --------------------------------- + try: response_data = json.loads(clean_json_str) except json.JSONDecodeError as e: # Log the full cleaned string that caused the error for better debugging error_detail = f"Failed to decode LLM JSON response: {e}\nFull Cleaned Response:\n{clean_json_str}" - print(f"ERROR: {error_detail}") # Print full error detail to console + log.error(f"ERROR: {error_detail}") # Log full error detail to console raise ValueError(error_detail) # Raise the error with full detail + if "predicted_assets" not in response_data or not isinstance(response_data["predicted_assets"], list): raise ValueError("Invalid LLM response format: 'predicted_assets' key missing or not a list.") source_rules = [] # We assume one SourceRule per input source processed by this handler instance - source_rule = SourceRule(input_path=self.input_path_str) # Use input_path_str + # Use self.input_source_identifier from the base class + source_rule = SourceRule(input_path=self.input_source_identifier) # Access valid types from the settings dictionary valid_asset_types = list(self.llm_settings.get('asset_types', {}).keys()) valid_file_types = list(self.llm_settings.get('file_types', {}).keys()) for asset_data in response_data["predicted_assets"]: + # Check for cancellation within the loop + if self._is_cancelled: + log.info("LLM prediction cancelled during response parsing (assets).") + return [] + if not isinstance(asset_data, dict): - print(f"Warning: Skipping 
invalid asset data (not a dict): {asset_data}") + log.warning(f"Skipping invalid asset data (not a dict): {asset_data}") continue asset_name = asset_data.get("suggested_asset_name", "Unnamed_Asset") asset_type = asset_data.get("predicted_asset_type") if asset_type not in valid_asset_types: - print(f"Warning: Invalid predicted_asset_type '{asset_type}' for asset '{asset_name}'. Defaulting or skipping.") - # Decide handling: default to a generic type or skip? For now, skip. - continue # Or assign a default like 'Unknown' if defined + log.warning(f"Invalid predicted_asset_type '{asset_type}' for asset '{asset_name}'. Skipping asset.") + continue # Skip this asset - # --- MODIFIED LINES for AssetRule --- - # Create the AssetRule instance first asset_rule = AssetRule(asset_name=asset_name, asset_type=asset_type) - source_rule.assets.append(asset_rule) # Append to the list + source_rule.assets.append(asset_rule) if "files" not in asset_data or not isinstance(asset_data["files"], list): - print(f"Warning: 'files' key missing or not a list in asset '{asset_name}'. Skipping files for this asset.") + log.warning(f"'files' key missing or not a list in asset '{asset_name}'. Skipping files for this asset.") continue for file_data in asset_data["files"]: + # Check for cancellation within the inner loop + if self._is_cancelled: + log.info("LLM prediction cancelled during response parsing (files).") + return [] + if not isinstance(file_data, dict): - print(f"Warning: Skipping invalid file data (not a dict) in asset '{asset_name}': {file_data}") + log.warning(f"Skipping invalid file data (not a dict) in asset '{asset_name}': {file_data}") continue - file_path_rel = file_data.get("file_path") + file_path_rel = file_data.get("file_path") # LLM provides relative path file_type = file_data.get("predicted_file_type") if not file_path_rel: - print(f"Warning: Missing 'file_path' in file data for asset '{asset_name}'. 
Skipping file.") + log.warning(f"Missing 'file_path' in file data for asset '{asset_name}'. Skipping file.") continue # Convert relative path from LLM (using '/') back to absolute OS-specific path - # Note: LLM gets relative paths, so we join with the handler's base input path - file_path_abs = os.path.join(self.input_path_str, file_path_rel.replace('/', os.sep)) # Use input_path_str + # We need the original input path (directory or archive) to make it absolute + # Use self.input_source_identifier which holds the original path + # IMPORTANT: Ensure the LLM is actually providing paths relative to the *root* of the input source. + try: + # Use Pathlib for safer joining, assuming input_source_identifier is the parent dir/archive path + # If input_source_identifier is an archive file, this logic might need adjustment + # depending on where files were extracted. For now, assume it's the base path. + base_path = Path(self.input_source_identifier) + # If the input was a file (like a zip), use its parent directory as the base for joining relative paths + if base_path.is_file(): + base_path = base_path.parent + # Clean the relative path potentially coming from LLM + clean_rel_path = Path(file_path_rel.strip().replace('\\', '/')) + file_path_abs = str(base_path / clean_rel_path) + except Exception as path_e: + log.warning(f"Error constructing absolute path for '{file_path_rel}' relative to '{self.input_source_identifier}': {path_e}. Skipping file.") + continue + if file_type not in valid_file_types: - print(f"Warning: Invalid predicted_file_type '{file_type}' for file '{file_path_rel}'. Defaulting to EXTRA.") + log.warning(f"Invalid predicted_file_type '{file_type}' for file '{file_path_rel}'. 
Defaulting to EXTRA.") file_type = "EXTRA" # Default to EXTRA if invalid type from LLM - # --- MODIFIED LINES for FileRule --- - # Create the FileRule instance first - file_rule = FileRule(file_path=file_path_abs, item_type=file_type) # Use correct field names - asset_rule.files.append(file_rule) # Append to the list + # Create the FileRule instance + # Add default values for fields not provided by LLM + file_rule = FileRule( + file_path=file_path_abs, + item_type=file_type, + item_type_override=file_type, # Initial override + target_asset_name_override=asset_name, # Default to asset name + output_format_override=None, + is_gloss_source=False, # LLM doesn't predict this + standard_map_type=None, # LLM doesn't predict this directly + resolution_override=None, + channel_merge_instructions={} + ) + asset_rule.files.append(file_rule) source_rules.append(source_rule) return source_rules -# Example of how this might be used in MainWindow (conceptual) -# class MainWindow(QMainWindow): -# # ... other methods ... 
-# def _start_llm_prediction(self, directory_path): -# self.llm_thread = QThread() -# self.llm_handler = LLMPredictionHandler(directory_path, self.config_manager) -# self.llm_handler.moveToThread(self.llm_thread) -# -# # Connect signals -# self.llm_handler.prediction_ready.connect(self._on_llm_prediction_ready) -# self.llm_handler.prediction_error.connect(self._on_llm_prediction_error) -# self.llm_handler.status_update.connect(self.statusBar().showMessage) -# self.llm_thread.started.connect(self.llm_handler.run) -# self.llm_thread.finished.connect(self.llm_thread.deleteLater) -# self.llm_handler.prediction_ready.connect(self.llm_thread.quit) # Quit thread on success -# self.llm_handler.prediction_error.connect(self.llm_thread.quit) # Quit thread on error -# -# self.llm_thread.start() -# -# @Slot(str, list) -# def _on_llm_prediction_ready(self, directory_path, results): -# print(f"LLM Prediction ready for {directory_path}: {len(results)} source rules found.") -# # Process results, update model, etc. -# # Make sure to clean up thread/handler references if needed -# self.llm_handler.deleteLater() # Schedule handler for deletion -# -# @Slot(str, str) -# def _on_llm_prediction_error(self, directory_path, error_message): -# print(f"LLM Prediction error for {directory_path}: {error_message}") -# # Show error to user, clean up thread/handler -# self.llm_handler.deleteLater() \ No newline at end of file +# Removed conceptual example usage comments \ No newline at end of file diff --git a/gui/log_console_widget.py b/gui/log_console_widget.py new file mode 100644 index 0000000..fe7133b --- /dev/null +++ b/gui/log_console_widget.py @@ -0,0 +1,43 @@ +# gui/log_console_widget.py +import logging +from PySide6.QtWidgets import ( + QWidget, QVBoxLayout, QTextEdit, QLabel, QSizePolicy +) +from PySide6.QtCore import Slot + +log = logging.getLogger(__name__) + +class LogConsoleWidget(QWidget): + """ + A dedicated widget to display log messages. 
+ """ + def __init__(self, parent=None): + super().__init__(parent) + self._init_ui() + + def _init_ui(self): + """Initializes the UI elements for the log console.""" + layout = QVBoxLayout(self) + layout.setContentsMargins(0, 5, 0, 0) # Add some top margin + + log_console_label = QLabel("Log Console:") + self.log_console_output = QTextEdit() + self.log_console_output.setReadOnly(True) + # self.log_console_output.setMaximumHeight(150) # Let the parent layout control height + self.log_console_output.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding) # Allow vertical expansion + + layout.addWidget(log_console_label) + layout.addWidget(self.log_console_output) + + # Initially hidden, visibility controlled by MainWindow + self.setVisible(False) + + @Slot(str) + def _append_log_message(self, message): + """Appends a log message to the QTextEdit console.""" + self.log_console_output.append(message) + # Auto-scroll to the bottom + self.log_console_output.verticalScrollBar().setValue(self.log_console_output.verticalScrollBar().maximum()) + + # Note: Visibility is controlled externally via setVisible(), + # so the _toggle_log_console_visibility slot is not needed here. 
\ No newline at end of file diff --git a/gui/main_panel_widget.py b/gui/main_panel_widget.py new file mode 100644 index 0000000..ee88cc0 --- /dev/null +++ b/gui/main_panel_widget.py @@ -0,0 +1,633 @@ +import sys +import os +import json +import logging +import time +from pathlib import Path +from functools import partial + +from PySide6.QtWidgets import QApplication # Added for processEvents +from PySide6.QtWidgets import ( + QWidget, QVBoxLayout, QHBoxLayout, QSplitter, QTableView, + QPushButton, QComboBox, QTableWidget, QTableWidgetItem, QHeaderView, + QProgressBar, QLabel, QFrame, QCheckBox, QSpinBox, QListWidget, QTextEdit, + QLineEdit, QMessageBox, QFileDialog, QInputDialog, QListWidgetItem, QTabWidget, + QFormLayout, QGroupBox, QAbstractItemView, QSizePolicy, QTreeView, QMenu +) +from PySide6.QtCore import Qt, Signal, Slot, QPoint, QModelIndex, QTimer +from PySide6.QtGui import QColor, QAction, QPalette, QClipboard, QGuiApplication # Added QGuiApplication for clipboard + +# --- Local GUI Imports --- +# Import delegates and models needed by the panel +from .delegates import LineEditDelegate, ComboBoxDelegate, SupplierSearchDelegate +from .unified_view_model import UnifiedViewModel # Assuming UnifiedViewModel is passed in + +# --- Backend Imports --- +# Import Rule Structures if needed for context menus etc. 
from rule_structure import SourceRule, AssetRule, FileRule
# Import config loading if defaults are needed directly here (though better passed from MainWindow)
try:
    from configuration import ConfigurationError, load_base_config
except ImportError:
    ConfigurationError = Exception
    load_base_config = None

log = logging.getLogger(__name__)


class MainPanelWidget(QWidget):
    """
    Widget handling the main interaction panel:
    - Output directory selection
    - Asset preview/editing view (Unified View)
    - Blender post-processing options
    - Processing controls (Start, Cancel, Clear, LLM Re-interpret)
    """
    # --- Signals Emitted by the Panel ---
    # Request to add new input paths (e.g., from drag/drop handled by MainWindow)
    # add_paths_requested = Signal(list)  # Maybe not needed if MainWindow handles drop directly

    # Request to start the main processing job.
    # Emits dict with settings: output_dir, overwrite, workers, blender_enabled, ng_path, mat_path
    process_requested = Signal(dict)

    # Request to cancel the ongoing processing job
    cancel_requested = Signal()

    # Request to clear the current queue/view
    clear_queue_requested = Signal()

    # Request to re-interpret selected items using LLM. Emits list of source paths.
    llm_reinterpret_requested = Signal(list)

    # Notify when the output directory changes
    output_dir_changed = Signal(str)

    # Notify when Blender settings change: enabled, ng_path, mat_path
    blender_settings_changed = Signal(bool, str, str)

    def __init__(self, unified_model: UnifiedViewModel, parent=None):
        """
        Initializes the MainPanelWidget.

        Args:
            unified_model: The shared UnifiedViewModel instance.
            parent: The parent widget.
        """
        super().__init__(parent)
        self.unified_model = unified_model
        self.llm_processing_active = False  # Track if LLM is running (set by MainWindow)

        # Get project root for resolving default paths if needed here
        script_dir = Path(__file__).parent
        self.project_root = script_dir.parent

        self._setup_ui()
        self._connect_signals()

    def _setup_ui(self):
        """Sets up the UI elements for the panel."""
        main_layout = QVBoxLayout(self)
        main_layout.setContentsMargins(5, 5, 5, 5)  # Reduce margins

        # --- Output Directory Selection ---
        output_layout = QHBoxLayout()
        self.output_dir_label = QLabel("Output Directory:")
        self.output_path_edit = QLineEdit()
        self.browse_output_button = QPushButton("Browse...")
        output_layout.addWidget(self.output_dir_label)
        output_layout.addWidget(self.output_path_edit, 1)
        output_layout.addWidget(self.browse_output_button)
        main_layout.addLayout(output_layout)

        # --- Set Initial Output Path (Copied from MainWindow) ---
        # Consider passing this default path from MainWindow instead of reloading config here
        if load_base_config:
            try:
                base_config = load_base_config()
                output_base_dir_config = base_config.get('OUTPUT_BASE_DIR', '../Asset_Processor_Output')
                default_output_dir = (self.project_root / output_base_dir_config).resolve()
                self.output_path_edit.setText(str(default_output_dir))
                log.info(f"MainPanelWidget: Default output directory set to: {default_output_dir}")
            except ConfigurationError as e:
                log.error(f"MainPanelWidget: Error reading base configuration for default output directory: {e}")
                self.output_path_edit.setText("")
            except Exception as e:
                log.exception(f"MainPanelWidget: Error setting default output directory: {e}")
                self.output_path_edit.setText("")
        else:
            log.warning("MainPanelWidget: load_base_config not available to set default output path.")
            self.output_path_edit.setText("")

        # --- Unified View Setup ---
        self.unified_view = QTreeView()
        self.unified_view.setModel(self.unified_model)  # Set the passed-in model

        # Instantiate Delegates
        lineEditDelegate = LineEditDelegate(self.unified_view)
        # ComboBoxDelegate needs access to MainWindow's get_llm_source_preset_name,
        # which might require passing MainWindow or a callback here.
        # For now, let's assume it can work without it or we adapt it later.
        # TODO: Revisit ComboBoxDelegate dependency
        comboBoxDelegate = ComboBoxDelegate(self)  # Pass only parent (self)
        supplierSearchDelegate = SupplierSearchDelegate(self)  # Pass parent

        # Set Delegates for Columns
        self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_SUPPLIER, supplierSearchDelegate)
        self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_ASSET_TYPE, comboBoxDelegate)
        self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_TARGET_ASSET, lineEditDelegate)
        self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_ITEM_TYPE, comboBoxDelegate)

        # Configure View Appearance
        self.unified_view.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
        self.unified_view.setAlternatingRowColors(True)
        self.unified_view.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)
        self.unified_view.setEditTriggers(QAbstractItemView.EditTrigger.DoubleClicked | QAbstractItemView.EditTrigger.SelectedClicked | QAbstractItemView.EditTrigger.EditKeyPressed)
        self.unified_view.setSelectionMode(QAbstractItemView.SelectionMode.ExtendedSelection)  # Allow multi-select for re-interpret

        # Configure Header Resize Modes
        header = self.unified_view.header()
        header.setStretchLastSection(False)
        header.setSectionResizeMode(UnifiedViewModel.COL_NAME, QHeaderView.ResizeMode.ResizeToContents)
        header.setSectionResizeMode(UnifiedViewModel.COL_TARGET_ASSET, QHeaderView.ResizeMode.Stretch)
        header.setSectionResizeMode(UnifiedViewModel.COL_SUPPLIER, QHeaderView.ResizeMode.ResizeToContents)
        header.setSectionResizeMode(UnifiedViewModel.COL_ASSET_TYPE, QHeaderView.ResizeMode.ResizeToContents)
        header.setSectionResizeMode(UnifiedViewModel.COL_ITEM_TYPE, QHeaderView.ResizeMode.ResizeToContents)

        # Enable custom context menu
        self.unified_view.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)

        # Add the Unified View to the main layout
        main_layout.addWidget(self.unified_view, 1)  # Give it stretch factor 1

        # --- Progress Bar ---
        self.progress_bar = QProgressBar()
        self.progress_bar.setValue(0)
        self.progress_bar.setTextVisible(True)
        self.progress_bar.setFormat("Idle")  # Initial format
        main_layout.addWidget(self.progress_bar)

        # --- Blender Integration Controls ---
        blender_group = QGroupBox("Blender Post-Processing")
        blender_layout = QVBoxLayout(blender_group)

        self.blender_integration_checkbox = QCheckBox("Run Blender Scripts After Processing")
        self.blender_integration_checkbox.setToolTip("If checked, attempts to run create_nodegroups.py and create_materials.py in Blender.")
        blender_layout.addWidget(self.blender_integration_checkbox)

        # Nodegroup Blend Path
        nodegroup_layout = QHBoxLayout()
        nodegroup_layout.addWidget(QLabel("Nodegroup .blend:"))
        self.nodegroup_blend_path_input = QLineEdit()
        self.browse_nodegroup_blend_button = QPushButton("...")
        self.browse_nodegroup_blend_button.setFixedWidth(30)
        nodegroup_layout.addWidget(self.nodegroup_blend_path_input)
        nodegroup_layout.addWidget(self.browse_nodegroup_blend_button)
        blender_layout.addLayout(nodegroup_layout)

        # Materials Blend Path
        materials_layout = QHBoxLayout()
        materials_layout.addWidget(QLabel("Materials .blend:"))
        self.materials_blend_path_input = QLineEdit()
        self.browse_materials_blend_button = QPushButton("...")
        self.browse_materials_blend_button.setFixedWidth(30)
        materials_layout.addWidget(self.materials_blend_path_input)
        materials_layout.addWidget(self.browse_materials_blend_button)
        blender_layout.addLayout(materials_layout)

        # Initialize paths from config (Copied from MainWindow)
        # Consider passing these defaults from MainWindow
        if load_base_config:
            try:
                base_config = load_base_config()
                default_ng_path = base_config.get('DEFAULT_NODEGROUP_BLEND_PATH', '')
                default_mat_path = base_config.get('DEFAULT_MATERIALS_BLEND_PATH', '')
                self.nodegroup_blend_path_input.setText(default_ng_path if default_ng_path else "")
                self.materials_blend_path_input.setText(default_mat_path if default_mat_path else "")
            except ConfigurationError as e:
                log.error(f"MainPanelWidget: Error reading base configuration for default Blender paths: {e}")
            except Exception as e:
                log.error(f"MainPanelWidget: Error reading default Blender paths from config: {e}")
        else:
            log.warning("MainPanelWidget: load_base_config not available to set default Blender paths.")

        # Disable Blender controls initially if checkbox is unchecked
        self.nodegroup_blend_path_input.setEnabled(False)
        self.browse_nodegroup_blend_button.setEnabled(False)
        self.materials_blend_path_input.setEnabled(False)
        self.browse_materials_blend_button.setEnabled(False)

        main_layout.addWidget(blender_group)  # Add the group box to the main layout

        # --- Bottom Controls ---
        bottom_controls_layout = QHBoxLayout()
        self.overwrite_checkbox = QCheckBox("Overwrite Existing")
        self.overwrite_checkbox.setToolTip("If checked, existing output folders for processed assets will be deleted and replaced.")
        bottom_controls_layout.addWidget(self.overwrite_checkbox)

        self.workers_label = QLabel("Workers:")
        self.workers_spinbox = QSpinBox()
        default_workers = 1
        try:
            cores = os.cpu_count()
            if cores:
                default_workers = max(1, cores // 2)
        except NotImplementedError:
            pass
        self.workers_spinbox.setMinimum(1)
        self.workers_spinbox.setMaximum(os.cpu_count() or 32)
        self.workers_spinbox.setValue(default_workers)
        self.workers_spinbox.setToolTip("Number of assets to process concurrently.")
        bottom_controls_layout.addWidget(self.workers_label)
        bottom_controls_layout.addWidget(self.workers_spinbox)
        bottom_controls_layout.addStretch(1)

        # --- LLM Re-interpret Button ---
        self.llm_reinterpret_button = QPushButton("Re-interpret Selected with LLM")
        self.llm_reinterpret_button.setToolTip("Re-run LLM interpretation on the selected source items.")
        self.llm_reinterpret_button.setEnabled(False)  # Initially disabled
        bottom_controls_layout.addWidget(self.llm_reinterpret_button)

        self.clear_queue_button = QPushButton("Clear Queue")
        self.start_button = QPushButton("Start Processing")
        self.cancel_button = QPushButton("Cancel")
        self.cancel_button.setEnabled(False)

        bottom_controls_layout.addWidget(self.clear_queue_button)
        bottom_controls_layout.addWidget(self.start_button)
        bottom_controls_layout.addWidget(self.cancel_button)
        main_layout.addLayout(bottom_controls_layout)

    def _connect_signals(self):
        """Connect internal UI signals to slots or emit panel signals."""
        # Output Directory
        self.browse_output_button.clicked.connect(self._browse_for_output_directory)
        self.output_path_edit.editingFinished.connect(self._on_output_path_changed)  # Emit signal when user finishes editing

        # Unified View
        self.unified_view.selectionModel().selectionChanged.connect(self._update_llm_reinterpret_button_state)
        self.unified_view.customContextMenuRequested.connect(self._show_unified_view_context_menu)

        # Blender Controls
        self.blender_integration_checkbox.toggled.connect(self._toggle_blender_controls)
        self.browse_nodegroup_blend_button.clicked.connect(self._browse_for_nodegroup_blend)
        self.browse_materials_blend_button.clicked.connect(self._browse_for_materials_blend)
        # Emit signal when paths change
        self.nodegroup_blend_path_input.editingFinished.connect(self._emit_blender_settings_changed)
        self.materials_blend_path_input.editingFinished.connect(self._emit_blender_settings_changed)
        self.blender_integration_checkbox.toggled.connect(self._emit_blender_settings_changed)

        # Bottom Buttons
        self.clear_queue_button.clicked.connect(self.clear_queue_requested)  # Emit signal directly
        self.start_button.clicked.connect(self._on_start_processing_clicked)  # Use slot to gather data
        self.cancel_button.clicked.connect(self.cancel_requested)  # Emit signal directly
        self.llm_reinterpret_button.clicked.connect(self._on_llm_reinterpret_clicked)  # Use slot to gather data

    # --- Slots for Internal UI Logic ---

    @Slot()
    def _browse_for_output_directory(self):
        """Opens a dialog to select the output directory."""
        current_path = self.output_path_edit.text()
        if not current_path or not Path(current_path).is_dir():
            current_path = str(self.project_root)  # Use project root as fallback

        directory = QFileDialog.getExistingDirectory(
            self,
            "Select Output Directory",
            current_path,
            QFileDialog.Option.ShowDirsOnly | QFileDialog.Option.DontResolveSymlinks
        )
        if directory:
            self.output_path_edit.setText(directory)
            self._on_output_path_changed()  # Explicitly call the change handler

    @Slot()
    def _on_output_path_changed(self):
        """Emits the output_dir_changed signal."""
        self.output_dir_changed.emit(self.output_path_edit.text())

    @Slot(bool)
    def _toggle_blender_controls(self, checked):
        """Enable/disable Blender path inputs based on the checkbox state."""
        self.nodegroup_blend_path_input.setEnabled(checked)
        self.browse_nodegroup_blend_button.setEnabled(checked)
        self.materials_blend_path_input.setEnabled(checked)
        self.browse_materials_blend_button.setEnabled(checked)
        # No need to emit here, the checkbox toggle signal is connected separately

    def _browse_for_blend_file(self, line_edit_widget: QLineEdit):
        """Opens a dialog to select a .blend file and updates the line edit."""
        current_path = line_edit_widget.text()
        start_dir = str(Path(current_path).parent) if current_path and Path(current_path).exists() else str(self.project_root)

        file_path, _ = QFileDialog.getOpenFileName(
            self,
            "Select Blender File",
            start_dir,
            "Blender Files (*.blend);;All Files (*)"
        )
        if file_path:
            line_edit_widget.setText(file_path)
            line_edit_widget.editingFinished.emit()  # Trigger editingFinished to emit change signal

    @Slot()
    def _browse_for_nodegroup_blend(self):
        self._browse_for_blend_file(self.nodegroup_blend_path_input)

    @Slot()
    def _browse_for_materials_blend(self):
        self._browse_for_blend_file(self.materials_blend_path_input)

    @Slot()
    def _emit_blender_settings_changed(self):
        """Gathers current Blender settings and emits the blender_settings_changed signal."""
        enabled = self.blender_integration_checkbox.isChecked()
        ng_path = self.nodegroup_blend_path_input.text()
        mat_path = self.materials_blend_path_input.text()
        self.blender_settings_changed.emit(enabled, ng_path, mat_path)

    @Slot()
    def _on_start_processing_clicked(self):
        """Gathers settings and emits the process_requested signal."""
        output_dir = self.output_path_edit.text().strip()
        if not output_dir:
            QMessageBox.warning(self, "Missing Output Directory", "Please select an output directory.")
            return

        # Basic validation (MainWindow should do more thorough validation)
        try:
            Path(output_dir).mkdir(parents=True, exist_ok=True)
        except Exception as e:
            QMessageBox.warning(self, "Invalid Output Directory", f"Cannot use output directory:\n{output_dir}\n\nError: {e}")
            return

        settings = {
            "output_dir": output_dir,
            "overwrite": self.overwrite_checkbox.isChecked(),
            "workers": self.workers_spinbox.value(),
            "blender_enabled": self.blender_integration_checkbox.isChecked(),
            "nodegroup_blend_path": self.nodegroup_blend_path_input.text(),
            "materials_blend_path": self.materials_blend_path_input.text()
        }
        self.process_requested.emit(settings)

    @Slot()
    def _update_llm_reinterpret_button_state(self):
        """Enables/disables the LLM re-interpret button based on selection and LLM status."""
        selection_model = self.unified_view.selectionModel()
        has_selection = selection_model is not None and selection_model.hasSelection()
        # Enable only if there's a selection AND LLM is not currently active
        self.llm_reinterpret_button.setEnabled(has_selection and not self.llm_processing_active)

    @Slot()
    def _on_llm_reinterpret_clicked(self):
        """Gathers selected source paths and emits the llm_reinterpret_requested signal."""
        selected_indexes = self.unified_view.selectionModel().selectedIndexes()
        if not selected_indexes:
            return

        if self.llm_processing_active:
            QMessageBox.warning(self, "Busy", "LLM processing is already in progress. Please wait.")
            return

        unique_source_dirs = set()
        processed_source_paths = set()  # Track processed source paths to avoid duplicates
        for index in selected_indexes:
            if not index.isValid():
                continue
            item_node = index.internalPointer()
            if not item_node:
                continue

            # Traverse up to find the SourceRule node (Simplified traversal)
            source_node = None
            current_node = item_node
            while current_node is not None:
                if isinstance(current_node, SourceRule):
                    source_node = current_node
                    break
                # Simplified parent traversal - adjust if model structure is different
                parent_attr = getattr(current_node, 'parent', None)  # Check for generic 'parent'
                if callable(parent_attr):  # Check if parent is a method (like in QStandardItemModel)
                    current_node = parent_attr()
                elif parent_attr:  # Check if parent is an attribute
                    current_node = parent_attr
                else:  # Try specific parent attributes if generic fails
                    parent_source = getattr(current_node, 'parent_source', None)
                    if parent_source:
                        current_node = parent_source
                    else:
                        parent_asset = getattr(current_node, 'parent_asset', None)
                        if parent_asset:
                            current_node = parent_asset
                        else:  # Reached top or unexpected node type
                            current_node = None

            if source_node and hasattr(source_node, 'input_path') and source_node.input_path:
                source_path_str = source_node.input_path
                if source_path_str in processed_source_paths:
                    continue
                source_path_obj = Path(source_path_str)
                if source_path_obj.is_dir() or (source_path_obj.is_file() and source_path_obj.suffix.lower() == '.zip'):
                    unique_source_dirs.add(source_path_str)
                    processed_source_paths.add(source_path_str)
                else:
                    log.warning(f"Skipping non-directory/zip source for re-interpretation: {source_path_str}")
            # else:  # Reduce log noise
            #     log.warning(f"Could not determine valid SourceRule or input_path for selected index: {index.row()},{index.column()} (Item type: {type(item_node).__name__})")

        if not unique_source_dirs:
            # self.statusBar().showMessage("No valid source directories found for selected items.", 5000)  # Status bar is in MainWindow
            log.warning("No valid source directories found for selected items to re-interpret.")
            return

        self.llm_reinterpret_requested.emit(list(unique_source_dirs))

    @Slot(QPoint)
    def _show_unified_view_context_menu(self, point: QPoint):
        """Shows the context menu for the unified view."""
        index = self.unified_view.indexAt(point)
        if not index.isValid():
            return

        item_node = index.internalPointer()
        is_source_item = isinstance(item_node, SourceRule)

        menu = QMenu(self)

        if is_source_item:
            copy_llm_example_action = QAction("Copy LLM Example to Clipboard", self)
            copy_llm_example_action.setToolTip("Copies a JSON structure representing the input files and predicted output, suitable for LLM examples.")
            copy_llm_example_action.triggered.connect(lambda: self._copy_llm_example_to_clipboard(index))
            menu.addAction(copy_llm_example_action)
            menu.addSeparator()

            # Add other actions...

        if not menu.isEmpty():
            menu.exec(self.unified_view.viewport().mapToGlobal(point))

    @Slot(QModelIndex)
    def _copy_llm_example_to_clipboard(self, index: QModelIndex):
        """Copies a JSON structure for the selected source item to the clipboard."""
        if not index.isValid():
            return
        item_node = index.internalPointer()
        if not isinstance(item_node, SourceRule):
            return

        source_rule: SourceRule = item_node
        log.info(f"Attempting to generate LLM example JSON for source: {source_rule.input_path}")

        all_file_paths = []
        predicted_assets_data = []

        for asset_rule in source_rule.assets:
            asset_files_data = []
            for file_rule in asset_rule.files:
                if file_rule.file_path:
                    all_file_paths.append(file_rule.file_path)
                    asset_files_data.append({
                        "file_path": file_rule.file_path,
                        "predicted_file_type": file_rule.item_type or "UNKNOWN"
                    })
            asset_files_data.sort(key=lambda x: x['file_path'])
            predicted_assets_data.append({
                "suggested_asset_name": asset_rule.asset_name or "UnnamedAsset",
                "predicted_asset_type": asset_rule.asset_type or "UNKNOWN",
                "files": asset_files_data
            })

        predicted_assets_data.sort(key=lambda x: x['suggested_asset_name'])
        all_file_paths.sort()

        if not all_file_paths:
            log.warning(f"No file paths found for source: {source_rule.input_path}. Cannot generate example.")
            # Cannot show status bar message here
            return

        llm_example = {
            "input": "\n".join(all_file_paths),
            "output": {"predicted_assets": predicted_assets_data}
        }

        try:
            json_string = json.dumps(llm_example, indent=2)
            clipboard = QGuiApplication.clipboard()  # Use QGuiApplication
            if clipboard:
                clipboard.setText(json_string)
                log.info(f"Copied LLM example JSON to clipboard for source: {source_rule.input_path}")
                # Cannot show status bar message here
            else:
                log.error("Failed to get system clipboard.")
        except Exception as e:
            log.exception(f"Error copying LLM example JSON to clipboard: {e}")

    # --- Public Slots for MainWindow to Call ---

    @Slot(int, int)
    def update_progress_bar(self, current_count, total_count):
        """Updates the progress bar display."""
        if total_count > 0:
            percentage = int((current_count / total_count) * 100)
            log.debug(f"Updating progress bar: current={current_count}, total={total_count}, calculated_percentage={percentage}")  # DEBUG LOG
            self.progress_bar.setValue(percentage)
            self.progress_bar.setFormat(f"%p% ({current_count}/{total_count})")
            QApplication.processEvents()  # Force GUI update
        else:
            self.progress_bar.setValue(0)
            self.progress_bar.setFormat("0/0")

    @Slot(str)
    def set_progress_bar_text(self, text: str):
        """Sets the text format of the progress bar."""
        self.progress_bar.setFormat(text)
        # Reset value if setting text like "Idle" or "Waiting..."
        if "%" not in text:
            self.progress_bar.setValue(0)

    @Slot(bool)
    def set_controls_enabled(self, enabled: bool):
        """Enables or disables controls within the panel."""
        # Enable/disable most controls based on the 'enabled' flag
        self.output_path_edit.setEnabled(enabled)
        self.browse_output_button.setEnabled(enabled)
        self.unified_view.setEnabled(enabled)
        self.overwrite_checkbox.setEnabled(enabled)
        self.workers_spinbox.setEnabled(enabled)
        self.clear_queue_button.setEnabled(enabled)
        self.blender_integration_checkbox.setEnabled(enabled)

        # Start button is enabled only if controls are generally enabled AND preset mode is active (handled by MainWindow)
        # Cancel button is enabled only when processing is active (handled by MainWindow)
        # LLM button state depends on selection and LLM status (handled by _update_llm_reinterpret_button_state)

        # Blender path inputs depend on both 'enabled' and the checkbox state
        blender_paths_enabled = enabled and self.blender_integration_checkbox.isChecked()
        self.nodegroup_blend_path_input.setEnabled(blender_paths_enabled)
        self.browse_nodegroup_blend_button.setEnabled(blender_paths_enabled)
        self.materials_blend_path_input.setEnabled(blender_paths_enabled)
        self.browse_materials_blend_button.setEnabled(blender_paths_enabled)

        # Update LLM button state explicitly when controls are enabled/disabled
        if enabled:
            self._update_llm_reinterpret_button_state()
        else:
            self.llm_reinterpret_button.setEnabled(False)

    @Slot(bool)
    def set_start_button_enabled(self, enabled: bool):
        """Sets the enabled state of the Start Processing button."""
        self.start_button.setEnabled(enabled)

    @Slot(str)
    def set_start_button_text(self, text: str):
        """Sets the text of the Start Processing button."""
        self.start_button.setText(text)

    @Slot(bool)
    def set_cancel_button_enabled(self, enabled: bool):
        """Sets the enabled state of the Cancel button."""
        self.cancel_button.setEnabled(enabled)

    @Slot(bool)
    def set_llm_processing_status(self, active: bool):
        """Informs the panel whether LLM processing is active."""
        self.llm_processing_active = active
        self._update_llm_reinterpret_button_state()  # Update button state based on new status

    # TODO: Add method to get current output path if needed by MainWindow before processing
    def get_output_directory(self) -> str:
        return self.output_path_edit.text().strip()

    # TODO: Add method to get current Blender settings if needed by MainWindow before processing
    def get_blender_settings(self) -> dict:
        return {
            "enabled": self.blender_integration_checkbox.isChecked(),
            "nodegroup_blend_path": self.nodegroup_blend_path_input.text(),
            "materials_blend_path": self.materials_blend_path_input.text()
        }

    # TODO: Add method to get current worker count if needed by MainWindow before processing
    def get_worker_count(self) -> int:
        return self.workers_spinbox.value()

    # TODO: Add method to get current overwrite setting if needed by MainWindow before processing
    def get_overwrite_setting(self) -> bool:
        return self.overwrite_checkbox.isChecked()

    # --- Delegate Dependency ---
    # This method might be needed by ComboBoxDelegate if it relies on MainWindow's logic
    def get_llm_source_preset_name(self) -> str | None:
        """
        Placeholder for providing context to delegates.
        Ideally, the required info (like last preset name) should be passed
        from MainWindow when the delegate needs it, or the delegate's dependency
        should be refactored.
        """
        log.warning("MainPanelWidget.get_llm_source_preset_name called - needs proper implementation or refactoring.")
        # This needs to get the info from MainWindow, perhaps via a signal/slot or passed reference.
        # Returning None for now.
        return None
+ return None \ No newline at end of file diff --git a/gui/main_window.py b/gui/main_window.py index a5c6c1c..16ff582 100644 --- a/gui/main_window.py +++ b/gui/main_window.py @@ -15,11 +15,16 @@ from PySide6.QtWidgets import ( QProgressBar, QLabel, QFrame, QCheckBox, QSpinBox, QListWidget, QTextEdit, # Added QListWidget, QTextEdit QLineEdit, QMessageBox, QFileDialog, QInputDialog, QListWidgetItem, QTabWidget, # Added more widgets QFormLayout, QGroupBox, QAbstractItemView, QSizePolicy, # Added more layout/widget items - QMenuBar, QMenu # Added for menu + QMenuBar, QMenu, QTreeView # Added for menu, QTreeView ) -from PySide6.QtCore import Qt, QThread, Slot, Signal, QObject, QModelIndex, QItemSelectionModel, QPoint # Added Signal, QObject, QModelIndex, QItemSelectionModel, QPoint +from PySide6.QtCore import Qt, QThread, Slot, Signal, QObject, QModelIndex, QItemSelectionModel, QPoint, QTimer # Added Signal, QObject, QModelIndex, QItemSelectionModel, QPoint, QTimer from PySide6.QtGui import QColor, QAction, QPalette, QClipboard # Add QColor import, QAction, QPalette, QClipboard +# --- Local GUI Imports --- +from .preset_editor_widget import PresetEditorWidget # Import the new widget +from .log_console_widget import LogConsoleWidget # Import the log console widget +from .main_panel_widget import MainPanelWidget # Import the new main panel widget + # --- Backend Imports for Data Structures --- from rule_structure import SourceRule, AssetRule, FileRule # Import Rule Structures # Removed incorrect import of AssetType, ItemType from config @@ -29,10 +34,11 @@ from rule_structure import SourceRule, AssetRule, FileRule # Import Rule Structu # Removed: from gui.preview_table_model import PreviewTableModel, PreviewSortFilterProxyModel # Removed: from gui.rule_hierarchy_model import RuleHierarchyModel from gui.unified_view_model import UnifiedViewModel # Import the new unified model -from gui.delegates import LineEditDelegate, ComboBoxDelegate, SupplierSearchDelegate # Import 
delegates -from gui.delegates import LineEditDelegate, ComboBoxDelegate # Import delegates -from .llm_prediction_handler import LLMPredictionHandler # Added for LLM integration - +# Removed delegate imports, now handled by MainPanelWidget +from .prediction_handler import RuleBasedPredictionHandler # Corrected import path +from .llm_interaction_handler import LLMInteractionHandler # Import the new handler +from .asset_restructure_handler import AssetRestructureHandler # Import the new handler + # --- Backend Imports --- script_dir = Path(__file__).parent project_root = script_dir.parent @@ -41,10 +47,10 @@ if str(project_root) not in sys.path: try: from configuration import Configuration, ConfigurationError, load_base_config # Import Configuration and load_base_config - from asset_processor import AssetProcessor, AssetProcessingError + # Removed unused import: from asset_processor import AssetProcessor, AssetProcessingError # from gui.processing_handler import ProcessingHandler # REMOVED Obsolete Handler - from gui.prediction_handler import PredictionHandler - # Removed: import config as core_config # Import the config module + # from gui.prediction_handler import PredictionHandler # Old import removed + # Removed: import config as core_config # PresetEditorDialog is no longer needed except ImportError as e: print(f"ERROR: Failed to import backend modules: {e}") @@ -54,7 +60,7 @@ except ImportError as e: ConfigurationError = Exception AssetProcessor = None # ProcessingHandler = None # REMOVED Obsolete Handler - PredictionHandler = None + RuleBasedPredictionHandler = None # Updated placeholder name AssetProcessingError = Exception @@ -92,80 +98,17 @@ class QtLogHandler(logging.Handler, QObject): self.handleError(record) -# --- Helper Functions (from PresetEditorDialog) --- -# NOTE: Consider moving these to a utils file if reused elsewhere - -def setup_list_widget_with_controls(parent_layout, label_text, attribute_name, instance): - """Adds a QListWidget with 
Add/Remove buttons to a layout.""" - list_widget = QListWidget() - list_widget.setAlternatingRowColors(True) - # Make items editable by default in the editor context - list_widget.setEditTriggers(QAbstractItemView.EditTrigger.DoubleClicked | QAbstractItemView.EditTrigger.SelectedClicked | QAbstractItemView.EditTrigger.EditKeyPressed) - instance.__setattr__(attribute_name, list_widget) # Store list widget on the instance - - add_button = QPushButton("+") - remove_button = QPushButton("-") - add_button.setFixedWidth(30) - remove_button.setFixedWidth(30) - - button_layout = QVBoxLayout() - button_layout.addWidget(add_button) - button_layout.addWidget(remove_button) - button_layout.addStretch() - - list_layout = QHBoxLayout() - list_layout.addWidget(list_widget) - list_layout.addLayout(button_layout) - - group_box = QGroupBox(label_text) - group_box_layout = QVBoxLayout(group_box) - group_box_layout.addLayout(list_layout) - - parent_layout.addWidget(group_box) - - # Connections (use the instance's methods) - add_button.clicked.connect(partial(instance._editor_add_list_item, list_widget)) - remove_button.clicked.connect(partial(instance._editor_remove_list_item, list_widget)) - list_widget.itemChanged.connect(instance._mark_editor_unsaved) # Mark unsaved on item edit - -def setup_table_widget_with_controls(parent_layout, label_text, attribute_name, columns, instance): - """Adds a QTableWidget with Add/Remove buttons to a layout.""" - table_widget = QTableWidget() - table_widget.setColumnCount(len(columns)) - table_widget.setHorizontalHeaderLabels(columns) - table_widget.setAlternatingRowColors(True) - instance.__setattr__(attribute_name, table_widget) # Store table widget - - add_button = QPushButton("+ Row") - remove_button = QPushButton("- Row") - - button_layout = QHBoxLayout() - button_layout.addStretch() - button_layout.addWidget(add_button) - button_layout.addWidget(remove_button) - - group_box = QGroupBox(label_text) - group_box_layout = QVBoxLayout(group_box) - 
group_box_layout.addWidget(table_widget) - group_box_layout.addLayout(button_layout) - - parent_layout.addWidget(group_box) - - # Connections (use the instance's methods) - add_button.clicked.connect(partial(instance._editor_add_table_row, table_widget)) - remove_button.clicked.connect(partial(instance._editor_remove_table_row, table_widget)) - table_widget.itemChanged.connect(instance._mark_editor_unsaved) # Mark unsaved on item edit - +# --- Helper Functions (Moved to PresetEditorWidget) --- # --- Main Window Class --- class MainWindow(QMainWindow): - # Signal emitted when presets change in the editor panel - presets_changed_signal = Signal() # Signal to trigger prediction handler in its thread start_prediction_signal = Signal(str, list, str) # input_source_identifier, file_list, preset_name # Signal to request processing with the final list of rules - processing_requested = Signal(list) # Emits List[SourceRule] + # processing_requested = Signal(list) # Now handled by MainPanelWidget signal -> _on_process_requested slot + # Signal to trigger the actual processing backend (e.g., in App class) + start_backend_processing = Signal(list, dict) # Emits List[SourceRule], processing_settings def __init__(self): super().__init__() @@ -176,6 +119,7 @@ class MainWindow(QMainWindow): # --- Internal State --- self.current_asset_paths = set() # Store unique paths of assets added self._pending_predictions = set() # Track input paths awaiting prediction results + self._completed_predictions = set() # Track input paths for which prediction has completed self._accumulated_rules = {} # Store {input_path: SourceRule} as results arrive self._source_file_lists = {} # Store {input_path: [file_list]} for context # Removed: self.rule_hierarchy_model = RuleHierarchyModel() @@ -186,20 +130,19 @@ class MainWindow(QMainWindow): self.config_manager = None # Initialize as None # self.llm_reinterpret_queue = [] # Removed, using unified queue self.llm_processing_queue = [] # Unified queue for 
initial adds and re-interpretations - - # --- Editor State --- - self.current_editing_preset_path = None - self.editor_unsaved_changes = False - self._is_loading_editor = False # Flag to prevent signals during load + self._current_output_dir = "" # Store the output directory selected in the panel + self._current_blender_settings = {} # Store blender settings from the panel # --- Threading Setup --- # self.processing_thread = None # REMOVED Obsolete Handler Thread # self.processing_handler = None # REMOVED Obsolete Handler self.prediction_thread = None self.prediction_handler = None - self.llm_prediction_thread = None # Added for LLM - self.llm_prediction_handler = None # Added for LLM - self.setup_threads() + # LLM thread/handler are now managed by LLMInteractionHandler + self.setup_threads() # Sets up rule-based prediction thread + + # --- Instantiate Handlers --- + self.llm_interaction_handler = LLMInteractionHandler(self) # Instantiate the new handler # --- Preview Area (Table) Setup --- REMOVED --- # Models, TableView, and Placeholder are no longer needed here. 
@@ -209,427 +152,63 @@ class MainWindow(QMainWindow): self.splitter = QSplitter(Qt.Orientation.Horizontal) self.setCentralWidget(self.splitter) + # --- Create Models --- + self.unified_model = UnifiedViewModel() # Instantiate the unified model here + # --- Instantiate Handlers that depend on the model --- + self.restructure_handler = AssetRestructureHandler(self.unified_model, self) # Instantiate the restructure handler + # --- Create Panels --- - self.editor_panel = QWidget() - self.main_panel = QWidget() + self.preset_editor_widget = PresetEditorWidget() # Instantiate the preset editor + # Instantiate MainPanelWidget, passing the model and self (MainWindow) for context + self.main_panel_widget = MainPanelWidget(self.unified_model, self) + self.log_console = LogConsoleWidget(self) # Instantiate the log console - self.splitter.addWidget(self.editor_panel) - self.splitter.addWidget(self.main_panel) + self.splitter.addWidget(self.preset_editor_widget) # Add the preset editor + self.splitter.addWidget(self.main_panel_widget) # Add the new main panel widget - # --- Setup UI Elements for each panel --- - self.setup_editor_panel_ui() - self.setup_main_panel_ui() + # --- Setup UI Elements --- + # Main panel UI is handled internally by MainPanelWidget self.setup_menu_bar() # Setup menu bar # --- Status Bar --- self.statusBar().showMessage("Ready") # --- Initial State --- - self._clear_editor() # Clear/disable editor fields initially - self._set_editor_enabled(False) # Disable editor initially - self.populate_presets() # Populate preset list + # self._clear_editor() # Handled internally by PresetEditorWidget + # self._set_editor_enabled(False) # Handled internally by PresetEditorWidget + # self.populate_presets() # Handled internally by PresetEditorWidget self.setup_logging_handler() # Setup the custom log handler - # --- Connect Editor Signals --- - self._connect_editor_change_signals() + # --- Connect Signals from PresetEditorWidget --- + 
self.preset_editor_widget.preset_selection_changed_signal.connect(self._on_preset_selection_changed) + # --- Connect Signals from MainPanelWidget --- + self.main_panel_widget.process_requested.connect(self._on_process_requested) + self.main_panel_widget.cancel_requested.connect(self._on_cancel_requested) + self.main_panel_widget.clear_queue_requested.connect(self._on_clear_queue_requested) + self.main_panel_widget.llm_reinterpret_requested.connect(self._delegate_llm_reinterpret) # Connect to delegator slot + self.main_panel_widget.output_dir_changed.connect(self._on_output_dir_changed) + self.main_panel_widget.blender_settings_changed.connect(self._on_blender_settings_changed) -# --- Adjust Splitter --- + # --- Connect Signals from LLMInteractionHandler --- + self.llm_interaction_handler.llm_prediction_ready.connect(self._on_llm_prediction_ready_from_handler) + self.llm_interaction_handler.llm_prediction_error.connect(self._on_prediction_error) # Use common error slot + self.llm_interaction_handler.llm_status_update.connect(self.show_status_message) # Use common status slot + self.llm_interaction_handler.llm_processing_state_changed.connect(self._on_llm_processing_state_changed) + + # --- Connect Model Signals --- + self.unified_model.targetAssetOverrideChanged.connect(self.restructure_handler.handle_target_asset_override) + + # --- Adjust Splitter --- self.splitter.setSizes([400, 800]) # Initial size ratio # --- UI Setup Methods --- + # setup_editor_panel_ui, _create_editor_general_tab, _create_editor_mapping_tab moved to PresetEditorWidget - def setup_editor_panel_ui(self): - """Sets up the UI elements for the left preset editor panel.""" - editor_layout = QVBoxLayout(self.editor_panel) - editor_layout.setContentsMargins(5, 5, 5, 5) # Reduce margins + # --- setup_main_panel_ui REMOVED --- + # UI setup is now handled internally by MainPanelWidget - # --- Log Console Output (Initially Hidden) --- - self.log_console_widget = QWidget() - log_console_layout = 
QVBoxLayout(self.log_console_widget) - log_console_layout.setContentsMargins(0, 0, 0, 5) # Add some bottom margin - log_console_label = QLabel("Log Console:") - self.log_console_output = QTextEdit() - self.log_console_output.setReadOnly(True) - self.log_console_output.setMaximumHeight(150) # Limit initial height - self.log_console_output.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Maximum) - log_console_layout.addWidget(log_console_label) - log_console_layout.addWidget(self.log_console_output) - self.log_console_widget.setVisible(False) # Start hidden - editor_layout.addWidget(self.log_console_widget) # Add it at the top - - # Preset List and Controls - list_layout = QVBoxLayout() - list_layout.addWidget(QLabel("Presets:")) - self.editor_preset_list = QListWidget() - self.editor_preset_list.currentItemChanged.connect(self._load_selected_preset_for_editing) - list_layout.addWidget(self.editor_preset_list) - - list_button_layout = QHBoxLayout() - self.editor_new_button = QPushButton("New") - self.editor_delete_button = QPushButton("Delete") - self.editor_new_button.clicked.connect(self._new_preset) - self.editor_delete_button.clicked.connect(self._delete_selected_preset) - list_button_layout.addWidget(self.editor_new_button) - list_button_layout.addWidget(self.editor_delete_button) - list_layout.addLayout(list_button_layout) - editor_layout.addLayout(list_layout, 1) # Allow list to stretch - - # Editor Tabs - self.editor_tab_widget = QTabWidget() - self.editor_tab_general_naming = QWidget() - self.editor_tab_mapping_rules = QWidget() - self.editor_tab_widget.addTab(self.editor_tab_general_naming, "General & Naming") - self.editor_tab_widget.addTab(self.editor_tab_mapping_rules, "Mapping & Rules") - self._create_editor_general_tab() - self._create_editor_mapping_tab() - editor_layout.addWidget(self.editor_tab_widget, 3) # Allow tabs to stretch more - - # Save Buttons - save_button_layout = QHBoxLayout() - self.editor_save_button = 
QPushButton("Save") - self.editor_save_as_button = QPushButton("Save As...") - self.editor_save_button.setEnabled(False) # Disabled initially - self.editor_save_button.clicked.connect(self._save_current_preset) - self.editor_save_as_button.clicked.connect(self._save_preset_as) - save_button_layout.addStretch() - save_button_layout.addWidget(self.editor_save_button) - save_button_layout.addWidget(self.editor_save_as_button) - editor_layout.addLayout(save_button_layout) - - def _create_editor_general_tab(self): - """Creates the widgets and layout for the 'General & Naming' editor tab.""" - layout = QVBoxLayout(self.editor_tab_general_naming) - form_layout = QFormLayout() - form_layout.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow) - - # Basic Info - self.editor_preset_name = QLineEdit() - self.editor_supplier_name = QLineEdit() - self.editor_notes = QTextEdit() - self.editor_notes.setAcceptRichText(False) - self.editor_notes.setFixedHeight(60) - form_layout.addRow("Preset Name:", self.editor_preset_name) - form_layout.addRow("Supplier Name:", self.editor_supplier_name) - form_layout.addRow("Notes:", self.editor_notes) - layout.addLayout(form_layout) - - # Source Naming Group - naming_group = QGroupBox("Source File Naming Rules") - naming_layout_outer = QVBoxLayout(naming_group) - naming_layout_form = QFormLayout() - self.editor_separator = QLineEdit() - self.editor_separator.setMaxLength(1) - self.editor_spin_base_name_idx = QSpinBox() - self.editor_spin_base_name_idx.setMinimum(-1) - self.editor_spin_map_type_idx = QSpinBox() - self.editor_spin_map_type_idx.setMinimum(-1) - naming_layout_form.addRow("Separator:", self.editor_separator) - naming_layout_form.addRow("Base Name Index:", self.editor_spin_base_name_idx) - naming_layout_form.addRow("Map Type Index:", self.editor_spin_map_type_idx) - naming_layout_outer.addLayout(naming_layout_form) - # Gloss Keywords List - setup_list_widget_with_controls(naming_layout_outer, "Glossiness Keywords", 
"editor_list_gloss_keywords", self) - # Bit Depth Variants Table - setup_table_widget_with_controls(naming_layout_outer, "16-bit Variant Patterns", "editor_table_bit_depth_variants", ["Map Type", "Pattern"], self) - self.editor_table_bit_depth_variants.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents) - self.editor_table_bit_depth_variants.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch) - layout.addWidget(naming_group) - - # Extra Files Group - setup_list_widget_with_controls(layout, "Move to 'Extra' Folder Patterns", "editor_list_extra_patterns", self) - - layout.addStretch(1) - - def _create_editor_mapping_tab(self): - """Creates the widgets and layout for the 'Mapping & Rules' editor tab.""" - layout = QVBoxLayout(self.editor_tab_mapping_rules) - - # Map Type Mapping Group - setup_table_widget_with_controls(layout, "Map Type Mapping (Standard Type <- Input Keywords)", "editor_table_map_type_mapping", ["Standard Type", "Input Keywords (comma-sep)"], self) - self.editor_table_map_type_mapping.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents) - self.editor_table_map_type_mapping.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch) - - # Category Rules Group - category_group = QGroupBox("Asset Category Rules") - category_layout = QVBoxLayout(category_group) - setup_list_widget_with_controls(category_layout, "Model File Patterns", "editor_list_model_patterns", self) - setup_list_widget_with_controls(category_layout, "Decal Keywords", "editor_list_decal_keywords", self) - layout.addWidget(category_group) - - # Archetype Rules Group - setup_table_widget_with_controls(layout, "Archetype Rules", "editor_table_archetype_rules", ["Archetype Name", "Match Any (comma-sep)", "Match All (comma-sep)"], self) - self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents) - 
self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch) - self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(2, QHeaderView.ResizeMode.Stretch) - - layout.addStretch(1) - - def setup_main_panel_ui(self): - """Sets up the UI elements for the right main processing panel.""" - main_layout = QVBoxLayout(self.main_panel) - main_layout.setContentsMargins(5, 5, 5, 5) # Reduce margins - - # --- Output Directory Selection --- - output_layout = QHBoxLayout() - self.output_dir_label = QLabel("Output Directory:") - self.output_path_edit = QLineEdit() - # Make read-only for now, user must use Browse - # self.output_path_edit.setReadOnly(True) - self.browse_output_button = QPushButton("Browse...") - self.browse_output_button.clicked.connect(self._browse_for_output_directory) - output_layout.addWidget(self.output_dir_label) - output_layout.addWidget(self.output_path_edit, 1) - output_layout.addWidget(self.browse_output_button) - main_layout.addLayout(output_layout) - - # --- Set Initial Output Path --- - try: - # Use load_base_config to get the default output directory - base_config = load_base_config() - output_base_dir_config = base_config.get('OUTPUT_BASE_DIR', '../Asset_Processor_Output') # Default if not found - # Resolve the path relative to the project root - default_output_dir = (project_root / output_base_dir_config).resolve() - self.output_path_edit.setText(str(default_output_dir)) - log.info(f"Default output directory set to: {default_output_dir}") - except ConfigurationError as e: - log.error(f"Error reading base configuration for default output directory: {e}") - self.output_path_edit.setText("") # Clear on error - self.statusBar().showMessage(f"Error setting default output path: {e}", 5000) - except Exception as e: - log.exception(f"Error setting default output directory: {e}") - self.output_path_edit.setText("") # Clear on error - self.statusBar().showMessage(f"Error setting default output 
path: {e}", 5000) - - - # --- Drag and Drop Area --- - self.drag_drop_area = QFrame() - self.drag_drop_area.setFrameShape(QFrame.Shape.StyledPanel) - self.drag_drop_area.setFrameShadow(QFrame.Shadow.Sunken) - drag_drop_layout = QVBoxLayout(self.drag_drop_area) - drag_drop_label = QLabel("Drag and Drop Asset Files/Folders Here") - drag_drop_label.setAlignment(Qt.AlignmentFlag.AlignCenter) - drag_drop_layout.addWidget(drag_drop_label) - self.drag_drop_area.setMinimumHeight(100) - self.setAcceptDrops(True) # Main window handles drops initially - main_layout.addWidget(self.drag_drop_area) - self.drag_drop_area.setVisible(False) # Hide the specific visual drag/drop area - - # --- Unified View Setup --- - from PySide6.QtWidgets import QTreeView # Import QTreeView here if not already imported globally - - self.unified_view = QTreeView() - self.unified_model = UnifiedViewModel() # Instantiate the new model - self.unified_view.setModel(self.unified_model) # Set the model - - # Instantiate Delegates - lineEditDelegate = LineEditDelegate(self.unified_view) - comboBoxDelegate = ComboBoxDelegate(self.unified_view) - supplierSearchDelegate = SupplierSearchDelegate(self.unified_view) # Instantiate the new delegate - - # Set Delegates for Columns (adjust column indices as per UnifiedViewModel) - # Assuming columns are: Name (0), Supplier (1), AssetType (2), TargetAsset (3), ItemType (4) - self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_SUPPLIER, supplierSearchDelegate) # Use the new delegate for Supplier - self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_ASSET_TYPE, comboBoxDelegate) - self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_TARGET_ASSET, lineEditDelegate) - self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_ITEM_TYPE, comboBoxDelegate) - - # Configure View Appearance (optional, customize as needed) - self.unified_view.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding) # Expand horizontally 
and vertically - self.unified_view.setAlternatingRowColors(True) - self.unified_view.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows) - self.unified_view.setEditTriggers(QAbstractItemView.EditTrigger.DoubleClicked | QAbstractItemView.EditTrigger.SelectedClicked | QAbstractItemView.EditTrigger.EditKeyPressed) - - # Configure Header Resize Modes based on new column order - header = self.unified_view.header() - header.setStretchLastSection(False) # Don't stretch the last section by default - header.setSectionResizeMode(UnifiedViewModel.COL_NAME, QHeaderView.ResizeMode.ResizeToContents) - header.setSectionResizeMode(UnifiedViewModel.COL_TARGET_ASSET, QHeaderView.ResizeMode.Stretch) # Stretch Target Asset - header.setSectionResizeMode(UnifiedViewModel.COL_SUPPLIER, QHeaderView.ResizeMode.ResizeToContents) - header.setSectionResizeMode(UnifiedViewModel.COL_ASSET_TYPE, QHeaderView.ResizeMode.ResizeToContents) - header.setSectionResizeMode(UnifiedViewModel.COL_ITEM_TYPE, QHeaderView.ResizeMode.ResizeToContents) - - # Add the Unified View to the main layout - main_layout.addWidget(self.unified_view, 1) # Give it stretch factor 1 - - # Connect selection change signal for LLM button state - self.unified_view.selectionModel().selectionChanged.connect(self._update_llm_reinterpret_button_state) - - # Enable custom context menu - self.unified_view.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu) - self.unified_view.customContextMenuRequested.connect(self._show_unified_view_context_menu) - - # --- REMOVED Old Hierarchy/Rule/Preview Splitter and Contents --- - - # --- Progress Bar --- - self.progress_bar = QProgressBar() - self.progress_bar.setValue(0) - self.progress_bar.setTextVisible(True) - main_layout.addWidget(self.progress_bar) - - # --- Blender Integration Controls --- - blender_group = QGroupBox("Blender Post-Processing") - blender_layout = QVBoxLayout(blender_group) - - self.blender_integration_checkbox = QCheckBox("Run Blender Scripts 
After Processing") - self.blender_integration_checkbox.setToolTip("If checked, attempts to run create_nodegroups.py and create_materials.py in Blender.") - blender_layout.addWidget(self.blender_integration_checkbox) - - # Nodegroup Blend Path - nodegroup_layout = QHBoxLayout() - nodegroup_layout.addWidget(QLabel("Nodegroup .blend:")) - self.nodegroup_blend_path_input = QLineEdit() - self.browse_nodegroup_blend_button = QPushButton("...") - self.browse_nodegroup_blend_button.setFixedWidth(30) - self.browse_nodegroup_blend_button.clicked.connect(self._browse_for_nodegroup_blend) - nodegroup_layout.addWidget(self.nodegroup_blend_path_input) - nodegroup_layout.addWidget(self.browse_nodegroup_blend_button) - blender_layout.addLayout(nodegroup_layout) - - # Materials Blend Path - materials_layout = QHBoxLayout() - materials_layout.addWidget(QLabel("Materials .blend:")) - self.materials_blend_path_input = QLineEdit() - self.browse_materials_blend_button = QPushButton("...") - self.browse_materials_blend_button.setFixedWidth(30) - self.browse_materials_blend_button.clicked.connect(self._browse_for_materials_blend) - materials_layout.addWidget(self.materials_blend_path_input) - materials_layout.addWidget(self.browse_materials_blend_button) - blender_layout.addLayout(materials_layout) - - # Initialize paths from config - try: - # Use load_base_config to get default Blender paths - base_config = load_base_config() - default_ng_path = base_config.get('DEFAULT_NODEGROUP_BLEND_PATH', '') - default_mat_path = base_config.get('DEFAULT_MATERIALS_BLEND_PATH', '') - self.nodegroup_blend_path_input.setText(default_ng_path if default_ng_path else "") - self.materials_blend_path_input.setText(default_mat_path if default_mat_path else "") - except ConfigurationError as e: - log.error(f"Error reading base configuration for default Blender paths: {e}") - except Exception as e: - log.error(f"Error reading default Blender paths from config: {e}") - - - # Disable Blender controls initially if 
checkbox is unchecked - self.nodegroup_blend_path_input.setEnabled(False) - self.browse_nodegroup_blend_button.setEnabled(False) - self.materials_blend_path_input.setEnabled(False) - self.browse_materials_blend_button.setEnabled(False) - self.blender_integration_checkbox.toggled.connect(self._toggle_blender_controls) - - main_layout.addWidget(blender_group) # Add the group box to the main layout - - # --- Bottom Controls --- - bottom_controls_layout = QHBoxLayout() - self.overwrite_checkbox = QCheckBox("Overwrite Existing") - self.overwrite_checkbox.setToolTip("If checked, existing output folders for processed assets will be deleted and replaced.") - bottom_controls_layout.addWidget(self.overwrite_checkbox) - - # self.disable_preview_checkbox = QCheckBox("Disable Detailed Preview") # REMOVED - Moved to View Menu - # self.disable_preview_checkbox.setToolTip("If checked, shows only the list of input assets instead of detailed file predictions.") - # self.disable_preview_checkbox.setChecked(False) # Default is detailed preview enabled - # self.disable_preview_checkbox.toggled.connect(self.update_preview) # Update preview when toggled - # bottom_controls_layout.addWidget(self.disable_preview_checkbox) - - # bottom_controls_layout.addSpacing(20) # Add some space # REMOVED - No longer needed after checkbox removal - - self.workers_label = QLabel("Workers:") - self.workers_spinbox = QSpinBox() - default_workers = 1 - try: - cores = os.cpu_count() - if cores: default_workers = max(1, cores // 2) - except NotImplementedError: pass - self.workers_spinbox.setMinimum(1) - self.workers_spinbox.setMaximum(os.cpu_count() or 32) - self.workers_spinbox.setValue(default_workers) - self.workers_spinbox.setToolTip("Number of assets to process concurrently.") - bottom_controls_layout.addWidget(self.workers_label) - bottom_controls_layout.addWidget(self.workers_spinbox) - bottom_controls_layout.addStretch(1) - - # --- LLM Re-interpret Button --- - self.llm_reinterpret_button = 
QPushButton("Re-interpret Selected with LLM") - self.llm_reinterpret_button.setToolTip("Re-run LLM interpretation on the selected source items.") - self.llm_reinterpret_button.setEnabled(False) # Initially disabled - self.llm_reinterpret_button.clicked.connect(self._on_llm_reinterpret_clicked) - bottom_controls_layout.addWidget(self.llm_reinterpret_button) - # --- End LLM Button --- - - self.clear_queue_button = QPushButton("Clear Queue") # Added Clear button - self.start_button = QPushButton("Start Processing") - self.cancel_button = QPushButton("Cancel") - self.cancel_button.setEnabled(False) - self.clear_queue_button.clicked.connect(self.clear_queue) # Connect signal - self.start_button.clicked.connect(self.start_processing) - self.cancel_button.clicked.connect(self.cancel_processing) - bottom_controls_layout.addWidget(self.clear_queue_button) # Add button to layout - bottom_controls_layout.addWidget(self.start_button) - bottom_controls_layout.addWidget(self.cancel_button) - main_layout.addLayout(bottom_controls_layout) - - # --- Preset Population and Handling --- - - def populate_presets(self): - """Scans presets dir and populates BOTH the editor list and processing combo.""" - log.debug("Populating preset list...") - # Store current list selection - current_list_item = self.editor_preset_list.currentItem() - current_list_selection_text = current_list_item.text() if current_list_item else None - - # Clear list - self.editor_preset_list.clear() - log.debug("Preset list cleared.") - - # Add the "Select a Preset" placeholder item - placeholder_item = QListWidgetItem("--- Select a Preset ---") - # Make it non-selectable and non-editable - placeholder_item.setFlags(placeholder_item.flags() & ~Qt.ItemFlag.ItemIsSelectable & ~Qt.ItemFlag.ItemIsEditable) - # Set unique data to identify the placeholder - placeholder_item.setData(Qt.ItemDataRole.UserRole, "__PLACEHOLDER__") - self.editor_preset_list.addItem(placeholder_item) - log.debug("Added '--- Select a Preset ---' 
placeholder item.") - - # Add LLM Option - llm_item = QListWidgetItem("- LLM Interpretation -") - llm_item.setData(Qt.ItemDataRole.UserRole, "__LLM__") # Special identifier - self.editor_preset_list.addItem(llm_item) - log.debug("Added '- LLM Interpretation -' item.") - - if not PRESETS_DIR.is_dir(): - msg = f"Error: Presets directory not found at {PRESETS_DIR}" - self.statusBar().showMessage(msg) - log.error(msg) - return - - # Exclude template file starting with _ - presets = sorted([f for f in PRESETS_DIR.glob("*.json") if f.is_file() and not f.name.startswith('_')]) - preset_names = [p.stem for p in presets] - - if not presets: - msg = "Warning: No presets found in presets directory." - self.statusBar().showMessage(msg) - log.warning(msg) - else: - # Populate List Widget (for editing) - for preset_path in presets: - item = QListWidgetItem(preset_path.stem) - item.setData(Qt.ItemDataRole.UserRole, preset_path) # Store full path - self.editor_preset_list.addItem(item) - - self.statusBar().showMessage(f"Loaded {len(presets)} presets.") - - # Try to restore list selection - # combo_index = self.preset_combo.findText(current_combo_selection) # REMOVED - # if combo_index != -1: # REMOVED - # self.preset_combo.setCurrentIndex(combo_index) # REMOVED - - # Do NOT attempt to restore list selection by default - self.statusBar().showMessage(f"Loaded {len(presets)} presets.") - - # Select the "Select a Preset" item by default - log.debug("Preset list populated. Selecting '--- Select a Preset ---' item.") - self.editor_preset_list.setCurrentItem(placeholder_item) # Select the placeholder item + # --- Preset Population and Handling (Moved to PresetEditorWidget) --- + # def populate_presets(self): ... # --- Drag and Drop Event Handlers (Unchanged) --- def dragEnterEvent(self, event): @@ -709,15 +288,13 @@ class MainWindow(QMainWindow): self.statusBar().showMessage(f"Added {added_count} asset(s). 
Updating preview...", 3000) # --- Trigger prediction for newly added paths --- - current_editor_item = self.editor_preset_list.currentItem() - is_placeholder = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__" - is_llm = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__LLM__" - selected_preset_text = current_editor_item.text() if current_editor_item and not is_placeholder else None + # Get current mode and preset name from the editor widget + mode, selected_preset_text = self.preset_editor_widget.get_selected_preset_mode() - if is_llm: + if mode == "llm": # --- LLM Prediction Path --- - log.info(f"LLM Interpretation selected. Queueing LLM prediction for {len(newly_added_paths)} new paths.") - llm_requests_added = 0 + log.info(f"LLM Interpretation selected. Preparing LLM prediction for {len(newly_added_paths)} new paths.") + llm_requests_to_queue = [] # Collect requests for batch queueing for input_path_str in newly_added_paths: file_list = self._extract_file_list(input_path_str) if file_list is not None: # Check if extraction was successful @@ -725,45 +302,15 @@ class MainWindow(QMainWindow): # Store file list and mark as pending before adding to queue self._source_file_lists[input_path_str] = file_list self._pending_predictions.add(input_path_str) # Use the same pending set for now - # --- Queue the initial LLM request --- - # --- Queue the unified LLM request --- - self.llm_processing_queue.append((input_path_str, file_list)) - log.debug(f"Queued LLM request for '{input_path_str}'. 
Queue size: {len(self.llm_processing_queue)}") - llm_requests_added += 1 - # --- End Queue --- + llm_requests_to_queue.append((input_path_str, file_list)) else: log.warning(f"Skipping LLM prediction queuing for {input_path_str} due to extraction error.") - # --- Trigger queue processing *after* the loop --- - if llm_requests_added > 0: - # Check if an LLM thread is already running (from re-interpret or previous add) - is_llm_running = False - try: - if self.llm_prediction_thread is not None: - is_llm_running = self.llm_prediction_thread.isRunning() - except RuntimeError: - is_llm_running = False # Treat as not running if deleted - - if not is_llm_running: - log.info("No LLM thread running. Starting unified queue processing.") - self._process_next_llm_item() # Start processing the unified queue - else: - log.info("LLM thread already running. Queue will be processed when current task finishes.") - # --- Trigger queue processing *after* the loop --- - if llm_requests_added > 0: - # Check if an LLM thread is already running (from re-interpret or previous add) - is_llm_running = False - try: - if self.llm_prediction_thread is not None: - is_llm_running = self.llm_prediction_thread.isRunning() - except RuntimeError: - is_llm_running = False # Treat as not running if deleted - - if not is_llm_running: - log.info("No LLM thread running. Starting initial add queue processing.") - self._process_next_llm_initial_add() # Start processing the queue - else: - log.info("LLM thread already running. Queue will be processed when current task finishes.") - elif selected_preset_text: + # --- Delegate queueing to the handler --- + if llm_requests_to_queue: + log.info(f"Delegating {len(llm_requests_to_queue)} LLM requests to the handler.") + self.llm_interaction_handler.queue_llm_requests_batch(llm_requests_to_queue) + # The handler manages starting its own processing internally. 
+ elif mode == "preset" and selected_preset_text: # --- Existing Rule-Based Prediction Path --- log.info(f"Preset '{selected_preset_text}' selected. Triggering prediction for {len(newly_added_paths)} new paths.") # Ensure the prediction thread is running before emitting signals @@ -782,91 +329,156 @@ class MainWindow(QMainWindow): self.start_prediction_signal.emit(input_path_str, file_list, selected_preset_text) else: log.warning(f"Skipping prediction for {input_path_str} due to extraction error.") - else: - log.warning(f"Added {added_count} asset(s), but no valid preset selected. Prediction not triggered.") - self.statusBar().showMessage(f"Added {added_count} asset(s). Select a preset or LLM to generate preview.", 3000) + elif mode == "placeholder": + log.info(f"Added {added_count} asset(s) while placeholder selected. Adding directories with file contents to view.") + rules_to_add = [] + for input_path_str in newly_added_paths: + input_path = Path(input_path_str) + if input_path.is_dir(): + log.debug(f"Processing directory in placeholder mode: {input_path_str}") + file_rules = [] + try: + for item_name in os.listdir(input_path): + item_path = input_path / item_name + if item_path.is_file(): + # Use relative path for the FileRule + relative_path = item_name # Immediate children, so just the name + file_rules.append(FileRule(file_path=relative_path, map_type="")) # Empty map_type + log.debug(f" Found file: {relative_path}") + except OSError as e: + log.warning(f"Could not list directory contents for {input_path_str}: {e}") + # Optionally add the directory itself even if listing fails + # minimal_rule = SourceRule(input_path=input_path_str, assets=[]) + # rules_to_add.append(minimal_rule) + continue # Skip adding children if listing failed + + # Create a SourceRule for the directory, containing one dummy AssetRule holding the FileRules + dummy_asset = AssetRule(asset_name="", asset_type="", files=file_rules) # Empty name/type + source_rule = 
SourceRule(input_path=input_path_str, assets=[dummy_asset]) + rules_to_add.append(source_rule) + log.debug(f"Created SourceRule with {len(file_rules)} child files for directory: {input_path_str}") + + elif input_path.is_file() and input_path.suffix.lower() == '.zip': + # --- Modified: Inspect zip file contents in placeholder mode --- + log.debug(f"Processing zip file in placeholder mode (inspecting contents): {input_path_str}") + file_rules = [] + try: + # Basic check if it's a valid zip file before opening + if not zipfile.is_zipfile(input_path): + log.warning(f"File is not a valid zip archive: {input_path_str}") + self.statusBar().showMessage(f"Warning: Not a valid zip: {input_path.name}", 5000) + # Skip adding this invalid file + continue + + with zipfile.ZipFile(input_path, 'r') as zip_ref: + for name in zip_ref.namelist(): + # Filter out directory entries explicitly marked with '/' + if not name.endswith('/'): + file_rules.append(FileRule(file_path=name)) # Empty map_type for placeholder + log.debug(f" Found file in zip: {name}") + + # Create a SourceRule for the archive, containing one dummy AssetRule holding the FileRules + # This structure allows the UnifiedViewModel to display it hierarchically + dummy_asset = AssetRule(asset_name="", asset_type="", files=file_rules) # Empty name/type + source_rule = SourceRule(input_path=input_path_str, assets=[dummy_asset]) + rules_to_add.append(source_rule) + log.debug(f"Created SourceRule with {len(file_rules)} child files for zip archive: {input_path_str}") + + except zipfile.BadZipFile: + log.error(f"Bad zip file encountered: {input_path_str}") + self.statusBar().showMessage(f"Error: Invalid zip file: {input_path.name}", 5000) + # Skip adding this file on error + continue + except FileNotFoundError: # Should ideally not happen due to earlier checks + log.error(f"File not found during zip processing: {input_path_str}") + self.statusBar().showMessage(f"Error: Input not found: {input_path.name}", 5000) + continue + 
except PermissionError: + log.error(f"Permission denied accessing zip: {input_path_str}") + self.statusBar().showMessage(f"Error: Permission denied for: {input_path.name}", 5000) + continue + except Exception as e: + log.exception(f"Unexpected error processing zip file {input_path_str}: {e}") + self.statusBar().showMessage(f"Error reading zip: {input_path.name}", 5000) + # Skip adding this file on unexpected error + continue + # --- End Modification --- + else: + # This case should ideally not be reached due to earlier checks, but log just in case + log.warning(f"Skipping unexpected item type in placeholder mode: {input_path_str}") + + if rules_to_add: + try: + log.info(f"Updating model with {len(rules_to_add)} SourceRules (placeholder mode with directory contents).") + self.unified_model.update_rules_for_sources(rules_to_add) + # Expand items after model update (Access view via panel) + if hasattr(self, 'main_panel_widget'): + self.main_panel_widget.unified_view.expandToDepth(1) # Expand directory level + self.statusBar().showMessage(f"Added {len(rules_to_add)} item(s) to view. Select preset/LLM for details.", 3000) + except Exception as e: + log.exception(f"Error updating model with placeholder rules: {e}") + self.statusBar().showMessage(f"Error adding items to view: {e}", 5000) + else: + # This might happen if only non-dir/zip items were added or directory listing failed for all + self.statusBar().showMessage(f"Added {added_count} input(s), but no valid items to display in placeholder mode.", 5000) + + else: # Should not happen + log.error(f"Added {added_count} asset(s), but encountered unexpected preset mode: {mode}. Prediction not triggered.") + self.statusBar().showMessage(f"Added {added_count} asset(s). Error determining preset mode.", 3000) # --- REMOVED call to self.update_preview() --- # The preview update is now triggered per-item via the signal emission above, # and also when the preset selection changes (handled in update_preview). 
- def _browse_for_output_directory(self): - """Opens a dialog to select the output directory.""" - current_path = self.output_path_edit.text() - if not current_path or not Path(current_path).is_dir(): - # Fallback to project root or home directory if current path is invalid - current_path = str(project_root) + # --- _browse_for_output_directory REMOVED (Handled by MainPanelWidget) --- - directory = QFileDialog.getExistingDirectory( - self, - "Select Output Directory", - current_path, - QFileDialog.Option.ShowDirsOnly | QFileDialog.Option.DontResolveSymlinks - ) - if directory: - self.output_path_edit.setText(directory) - log.info(f"User selected output directory: {directory}") + # --- Slots for Handling Requests from MainPanelWidget --- + @Slot(dict) + def _on_process_requested(self, settings: dict): + """Handles the process_requested signal from the MainPanelWidget.""" + log.info(f"Received process request signal with settings: {settings}") - # --- Processing Action Methods --- - def start_processing(self): - # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_processing_state_checks_line_710.py if not hasattr(self, 'current_asset_paths') or not self.current_asset_paths: self.statusBar().showMessage("No assets added to process.", 3000) return - input_paths = list(self.current_asset_paths) - if not input_paths: - self.statusBar().showMessage("No assets added to process.", 3000) - return - # --- Get preset from editor list --- - current_editor_item = self.editor_preset_list.currentItem() - # Check if the selected item is the placeholder - is_placeholder = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__" + # --- Get preset mode (still useful for logging/context, but not for blocking processing) --- + mode, selected_preset_name = self.preset_editor_widget.get_selected_preset_mode() + # REMOVED: Check that prevented processing if mode != "preset" + # if mode != "preset": + # 
self.statusBar().showMessage("Please select a valid preset (not LLM or placeholder) to start processing.", 3000) + # log.warning(f"Start processing failed: Mode is '{mode}', not 'preset'.") + # return - if is_placeholder: - self.statusBar().showMessage("Please select a valid preset from the list on the left.", 3000) - log.warning("Start processing failed: Placeholder preset selected.") - return - - # Existing logic to get selected_preset text and proceed - selected_preset = current_editor_item.text() if current_editor_item else None - overwrite = self.overwrite_checkbox.isChecked() - num_workers = self.workers_spinbox.value() - - # --- Get Output Directory from UI and Validate --- - output_dir_str = self.output_path_edit.text().strip() + # --- Validate Output Directory (Received from signal) --- + output_dir_str = settings.get("output_dir") if not output_dir_str: self.statusBar().showMessage("Error: Output directory cannot be empty.", 5000) log.error("Start processing failed: Output directory field is empty.") return try: output_dir = Path(output_dir_str) - # Attempt to create the directory if it doesn't exist output_dir.mkdir(parents=True, exist_ok=True) - # Basic writability check (create and delete a temp file) - # Note: This isn't foolproof due to potential race conditions/permissions issues - # but provides a basic level of validation. 
+ # Basic writability check temp_file = output_dir / f".writable_check_{time.time()}" temp_file.touch() temp_file.unlink() log.info(f"Using validated output directory: {output_dir_str}") + self._current_output_dir = output_dir_str # Store validated path except OSError as e: error_msg = f"Error creating/accessing output directory '{output_dir_str}': {e}" self.statusBar().showMessage(error_msg, 5000) log.error(error_msg) return - except Exception as e: # Catch other potential Path errors or unexpected issues + except Exception as e: error_msg = f"Invalid output directory path '{output_dir_str}': {e}" self.statusBar().showMessage(error_msg, 5000) log.error(error_msg) return # --- End Output Directory Validation --- - log.info(f"Preparing to start processing {len(input_paths)} items to '{output_dir_str}'.") - # --- Get the final list of SourceRule objects from the model --- - # Assuming UnifiedViewModel has a method like get_all_source_rules() try: final_source_rules = self.unified_model.get_all_source_rules() if not final_source_rules: @@ -884,57 +496,131 @@ class MainWindow(QMainWindow): log.info(f"Retrieved {len(final_source_rules)} SourceRule objects from the model.") - # --- Emit signal with the list of rules --- - log.info(f"DEBUG: Emitting processing_requested with rules: {final_source_rules}") # DEBUG LOG -# DEBUG Verify: Log the rules being emitted - rule_paths = [rule.input_path for rule in final_source_rules] - log.info(f"VERIFY: Emitting processing_requested with {len(final_source_rules)} SourceRule(s). Input paths: {rule_paths}") - # test_message = f"Processing requested for {len(final_source_rules)} rules." 
# Reverted - self.processing_requested.emit(final_source_rules) # Emit original list - log.info("Emitted processing_requested signal with the list of SourceRules.") # Reverted log + # --- Filter SourceRules based on Target Asset --- + filtered_source_rules = [] + for source_rule in final_source_rules: + has_valid_target = False + if hasattr(source_rule, 'assets') and source_rule.assets: + for asset_rule in source_rule.assets: + # Check if asset_name (Target Asset) is not None and not empty/whitespace + if asset_rule.asset_name and asset_rule.asset_name.strip(): + has_valid_target = True + break # Found one valid target, no need to check others in this source + if has_valid_target: + filtered_source_rules.append(source_rule) + else: + log.info(f"Filtering out SourceRule '{source_rule.input_path}' as it has no assets with a Target Asset name.") - # --- Update UI (Progress bar, status, buttons) --- - # Note: The actual processing start/progress/finish will now be handled - # by the main application logic connected to the processing_requested signal. - # We might want to show an intermediate status here. - self.progress_bar.setValue(0) - self.progress_bar.setFormat("Waiting for processing start...") - self.statusBar().showMessage(f"Requested processing for {len(final_source_rules)} rule sets...", 0) - # Disable start button, enable cancel (assuming main will handle re-enabling) - self.set_controls_enabled(False) # Disable most controls - self.start_button.setEnabled(False) # Keep start disabled - self.start_button.setText("Processing...") - self.cancel_button.setEnabled(True) # Enable cancel + if not filtered_source_rules: + log.warning("All SourceRules were filtered out. No items have a valid Target Asset. Nothing to process.") + self.statusBar().showMessage("No items have a Target Asset assigned. 
Nothing to process.", 5000) + # Re-enable controls since processing won't start + self.set_controls_enabled(True) + self.main_panel_widget.set_start_button_text("Start Processing") + self.main_panel_widget.set_cancel_button_enabled(False) + self.main_panel_widget.set_progress_bar_text("Idle") + return - # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_direct_processing_call_line_814.py + log.info(f"Processing {len(filtered_source_rules)} SourceRule objects after filtering (originally {len(final_source_rules)}).") - def cancel_processing(self): + # --- Update UI (via MainPanelWidget slots) --- + self.main_panel_widget.set_progress_bar_text("Waiting for processing start...") + self.statusBar().showMessage(f"Requested processing for {len(filtered_source_rules)} rule sets...", 0) + self.set_controls_enabled(False) # Disable controls in both panels + self.main_panel_widget.set_start_button_enabled(False) + self.main_panel_widget.set_start_button_text("Processing...") + self.main_panel_widget.set_cancel_button_enabled(True) + + # --- Emit signal to trigger backend processing --- + # Combine rules and settings for the backend + processing_data = { + "output_dir": output_dir_str, + "overwrite": settings.get("overwrite", False), + "workers": settings.get("workers", 1), + "blender_enabled": settings.get("blender_enabled", False), + "nodegroup_blend_path": settings.get("nodegroup_blend_path", ""), + "materials_blend_path": settings.get("materials_blend_path", "") + } + log.info(f"Emitting start_backend_processing with {len(filtered_source_rules)} rules and settings: {processing_data}") + self.start_backend_processing.emit(filtered_source_rules, processing_data) + + @Slot() + def _on_cancel_requested(self): + """Handles the cancel_requested signal from the MainPanelWidget.""" # TODO: Implement cancellation by signaling the App/main thread to stop the QThreadPool tasks - log.warning("Cancel button clicked, but cancellation logic needs reimplementation.") 
- self.statusBar().showMessage("Cancellation not yet implemented.", 3000) - # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_cancel_processing_logic_line_834.py + log.warning("Cancel requested, but cancellation logic needs reimplementation in main application.") + self.statusBar().showMessage("Cancellation request sent (implementation pending).", 3000) + # Optionally, re-enable controls immediately or wait for confirmation + # self.set_controls_enabled(True) + # self.main_panel_widget.set_cancel_button_enabled(False) + # self.main_panel_widget.set_start_button_text("Start Processing") - def clear_queue(self): - """Clears the current asset queue and the preview table.""" + @Slot() + def _on_clear_queue_requested(self): + """Handles the clear_queue_requested signal from the MainPanelWidget.""" # TODO: Check processing state via App/main thread if needed before clearing - # if self.processing_handler and self.processing_handler.is_running: # OLD HANDLER CHECK + # if self.is_processing_active: # Need a way to check this # self.statusBar().showMessage("Cannot clear queue while processing.", 3000) # return if hasattr(self, 'current_asset_paths') and self.current_asset_paths: log.info(f"Clearing asset queue ({len(self.current_asset_paths)} items).") self.current_asset_paths.clear() - # self.preview_model.clear_data() # Old model removed - self.unified_model.clear_data() # Clear the new model data + self.unified_model.clear_data() # Clear the model data + # Disable start button as model is now empty + if hasattr(self, 'main_panel_widget'): + self.main_panel_widget.set_start_button_enabled(False) # Clear accumulation state self._pending_predictions.clear() self._accumulated_rules.clear() self._source_file_lists.clear() - log.info("Cleared accumulation state (_pending_predictions, _accumulated_rules, _source_file_lists).") + self.llm_interaction_handler.clear_queue() # Clear LLM queue via handler + log.info("Cleared accumulation state and delegated 
LLM queue clear.") self.statusBar().showMessage("Asset queue and prediction state cleared.", 3000) + # Reset progress bar + self.main_panel_widget.set_progress_bar_text("Idle") else: self.statusBar().showMessage("Asset queue is already empty.", 3000) + @Slot(list) + def _delegate_llm_reinterpret(self, source_paths: list): + """ + Slot to receive the llm_reinterpret_requested signal from MainPanelWidget + and delegate the request to the LLMInteractionHandler. + """ + log.info(f"Received LLM re-interpret request for {len(source_paths)} paths. Delegating to handler.") + + if not source_paths: + self.statusBar().showMessage("No valid source directories selected for re-interpretation.", 5000) + return + + # Check handler status before queueing (optional, handler manages internally) + if self.llm_interaction_handler.is_processing(): + QMessageBox.warning(self, "Busy", "LLM interpretation is already in progress. Request added to queue.") + # Proceed to queue anyway, handler manages the queue + + # Prepare requests (input_path, file_list=None for re-interpret) + requests = [(path, None) for path in source_paths] + + # Delegate to the handler + self.llm_interaction_handler.queue_llm_requests_batch(requests) + # Status updates (like "Added X directories to queue") will come from the handler via signals + + @Slot(str) + def _on_output_dir_changed(self, path: str): + """Stores the output directory path when it changes in the panel.""" + self._current_output_dir = path + log.debug(f"MainWindow stored output directory: {path}") + + @Slot(bool, str, str) + def _on_blender_settings_changed(self, enabled: bool, ng_path: str, mat_path: str): + """Stores the Blender settings when they change in the panel.""" + self._current_blender_settings = { + "enabled": enabled, + "nodegroup_blend_path": ng_path, + "materials_blend_path": mat_path + } + log.debug(f"MainWindow stored Blender settings: {self._current_blender_settings}") # --- Preview Update Method --- def update_preview(self): @@ 
-977,22 +663,25 @@ class MainWindow(QMainWindow): # though the handler itself should handle being called multiple times. # A better fix might involve properly resetting is_running in the handler. - if PredictionHandler is None: - log.error("PredictionHandler not loaded. Cannot update preview.") + if RuleBasedPredictionHandler is None: # Check the class itself + log.error("RuleBasedPredictionHandler not loaded. Cannot update preview.") self.statusBar().showMessage("Error: Prediction components not loaded.", 5000) return - # Get preset from editor list - current_editor_item = self.editor_preset_list.currentItem() + # Get preset mode from editor widget + mode, selected_preset_name = self.preset_editor_widget.get_selected_preset_mode() - # Check if the selected item is the placeholder or LLM - is_placeholder = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__" - is_llm = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__LLM__" + if mode == "placeholder": + log.debug("Update preview called with placeholder preset selected. Showing existing raw inputs (detailed view).") + # Model is always detailed now, no need to set simple mode + # self.unified_model.set_display_mode("simple") # REMOVED + # Don't clear data here, _on_preset_selection_changed handles mode switch, + # and add_input_paths handles adding raw data if needed. + # If current_asset_paths is empty, the view will be empty anyway. + if not self.current_asset_paths: + self.unified_model.clear_data() # Clear if no assets are tracked - if is_placeholder: - log.debug("Update preview called with placeholder preset selected. 
Clearing unified view.") - self.unified_model.clear_data() # Clear the new model if placeholder selected - self.statusBar().showMessage("Select a preset from the list on the left to update preview.", 3000) - return # Stop prediction as no valid preset is selected + self.statusBar().showMessage("Select a preset or LLM to generate preview.", 3000) + return # Stop further prediction logic for placeholder # Get asset paths if not hasattr(self, 'current_asset_paths') or not self.current_asset_paths: @@ -1002,88 +691,71 @@ class MainWindow(QMainWindow): input_paths = list(self.current_asset_paths) # --- Handle LLM Mode --- - if is_llm: - log.info(f"[{time.time():.4f}] LLM mode selected. Triggering LLM prediction for {len(input_paths)} assets.") + if mode == "llm": + log.info(f"[{time.time():.4f}] LLM mode selected. Preparing LLM prediction for {len(input_paths)} assets.") self.statusBar().showMessage(f"Starting LLM interpretation for assets...", 0) - # --- Reset Accumulation State (might not be strictly needed for LLM, but good practice) --- + # --- Reset Accumulation State --- log.debug("Clearing accumulated rules and pending predictions for LLM batch.") self._accumulated_rules.clear() - self._pending_predictions.clear() # Clear pending standard predictions + # Reset pending predictions for the new batch + self._pending_predictions = set(input_paths) + self._completed_predictions.clear() # Clear completed from previous runs + log.debug(f"Reset pending predictions for LLM batch: {self._pending_predictions}") - # --- Queue all current assets for LLM processing --- - llm_requests_added = 0 + # --- Prepare requests for the handler --- + llm_requests_to_queue = [] if input_paths: - log.info(f"Queueing LLM prediction for {len(input_paths)} existing assets.") + log.info(f"Preparing LLM prediction requests for {len(input_paths)} existing assets.") for input_path_str in input_paths: - # Check if already in queue to avoid duplicates if user clicks quickly - is_in_queue = 
any(item[0] == input_path_str for item in self.llm_processing_queue) - if is_in_queue: - log.debug(f"Skipping duplicate add to LLM queue for existing asset: {input_path_str}") - continue - + # Duplication check is handled by the handler's queue method file_list = self._extract_file_list(input_path_str) if file_list is not None: log.debug(f"Extracted {len(file_list)} files for LLM prediction from existing asset: {input_path_str}") - # Store file list and mark as pending before adding to queue + # Store file list (still needed for context if prediction fails before handler starts?) self._source_file_lists[input_path_str] = file_list - # self._pending_predictions.add(input_path_str) # Pending is handled by the queue process itself now - self.llm_processing_queue.append((input_path_str, file_list)) - log.debug(f"Queued LLM request for existing asset '{input_path_str}'. Queue size: {len(self.llm_processing_queue)}") - llm_requests_added += 1 + llm_requests_to_queue.append((input_path_str, file_list)) else: log.warning(f"Skipping LLM prediction queuing for existing asset {input_path_str} due to extraction error.") + # If extraction fails, remove from pending immediately + self._pending_predictions.discard(input_path_str) + self.statusBar().showMessage(f"Error extracting files for {Path(input_path_str).name}", 5000) else: log.warning("LLM selected, but no input paths currently in view to process.") self.statusBar().showMessage("LLM selected, but no assets are loaded.", 3000) - # --- Trigger queue processing if items were added and it's not running --- - if llm_requests_added > 0: - is_llm_running = False - try: - if self.llm_prediction_thread is not None: - is_llm_running = self.llm_prediction_thread.isRunning() - except RuntimeError: - is_llm_running = False # Treat as not running if deleted - - if not is_llm_running: - log.info("LLM thread not running. 
Starting unified queue processing from update_preview.") - self._process_next_llm_item() # Start processing the unified queue - else: - log.info("LLM thread already running. Queue will be processed when current task finishes.") - # --- End Trigger --- + # --- Delegate queueing to the handler --- + if llm_requests_to_queue: + log.info(f"Delegating {len(llm_requests_to_queue)} LLM requests to the handler from update_preview.") + self.llm_interaction_handler.queue_llm_requests_batch(llm_requests_to_queue) + # The handler manages starting its own processing internally. # Do not return here; let the function exit normally after handling LLM case. - # The standard prediction path below will be skipped because is_llm is True. + # The standard prediction path below will be skipped because mode is 'llm'. # --- Handle Standard Preset Mode --- - selected_preset = current_editor_item.text() if current_editor_item else None - if not selected_preset: # Should not happen if placeholder/LLM checks passed, but safety check - log.error("Update preview called with invalid state (no preset, not placeholder, not LLM).") - self.unified_model.clear_data() - return + elif mode == "preset" and selected_preset_name: + log.info(f"[{time.time():.4f}] Requesting background preview update for {len(input_paths)} items using Preset='{selected_preset_name}'") + self.statusBar().showMessage(f"Updating preview for '{selected_preset_name}'...", 0) - log.info(f"[{time.time():.4f}] Requesting background preview update for {len(input_paths)} items using Preset='{selected_preset}'") - self.statusBar().showMessage(f"Updating preview for '{selected_preset}'...", 0) + # --- Reset Accumulation State for standard prediction batch --- + log.debug("Clearing accumulated rules for new standard preview batch.") + self._accumulated_rules.clear() + self._pending_predictions = set(input_paths) # Reset pending standard predictions + log.debug(f"Reset pending standard predictions for batch: 
{self._pending_predictions}") - # --- Reset Accumulation State for standard prediction batch --- - log.debug("Clearing accumulated rules for new standard preview batch.") - self._accumulated_rules.clear() - self._pending_predictions = set(input_paths) # Reset pending standard predictions - log.debug(f"Reset pending standard predictions for batch: {self._pending_predictions}") - - # Trigger standard prediction handler - if self.prediction_thread and self.prediction_handler: - self.prediction_thread.start() # Ensure thread is running - log.debug(f"[{time.time():.4f}] Iterating through {len(input_paths)} paths to extract files and emit standard prediction signals.") - for input_path_str in input_paths: - file_list = self._extract_file_list(input_path_str) - if file_list is not None: - log.debug(f"[{time.time():.4f}] Emitting start_prediction_signal for: {input_path_str} with {len(file_list)} files.") - self.start_prediction_signal.emit(input_path_str, file_list, selected_preset) - else: - log.warning(f"[{time.time():.4f}] Skipping standard prediction signal for {input_path_str} due to extraction error.") - else: - log.error(f"[{time.time():.4f}][T:{thread_id}] Failed to trigger standard prediction: Thread or handler not initialized.") - self.statusBar().showMessage("Error: Failed to initialize standard prediction thread.", 5000) + # Trigger standard prediction handler + if self.prediction_thread and self.prediction_handler: + self.prediction_thread.start() # Ensure thread is running + log.debug(f"[{time.time():.4f}] Iterating through {len(input_paths)} paths to extract files and emit standard prediction signals.") + for input_path_str in input_paths: + file_list = self._extract_file_list(input_path_str) + if file_list is not None: + log.debug(f"[{time.time():.4f}] Emitting start_prediction_signal for: {input_path_str} with {len(file_list)} files.") + self.start_prediction_signal.emit(input_path_str, file_list, selected_preset_name) + else: + 
log.warning(f"[{time.time():.4f}] Skipping standard prediction signal for {input_path_str} due to extraction error.") + else: + log.error(f"[{time.time():.4f}][T:{thread_id}] Failed to trigger standard prediction: Thread or handler not initialized.") + self.statusBar().showMessage("Error: Failed to initialize standard prediction thread.", 5000) log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting update_preview.") @@ -1092,23 +764,28 @@ class MainWindow(QMainWindow): def setup_threads(self): # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_processing_thread_setup_line_978.py - # Setup Prediction Thread (Keep this) - if PredictionHandler and self.prediction_thread is None: + # Setup Rule-Based Prediction Thread (Persistent) + if RuleBasedPredictionHandler and self.prediction_thread is None: self.prediction_thread = QThread(self) - self.prediction_handler = PredictionHandler() + # Instantiate the renamed handler + self.prediction_handler = RuleBasedPredictionHandler(input_source_identifier="", original_input_paths=[], preset_name="") # Initial dummy values self.prediction_handler.moveToThread(self.prediction_thread) - # Connect the new signal to the handler's run_prediction slot using QueuedConnection + + # Connect the trigger signal from MainWindow to the handler's execution slot self.start_prediction_signal.connect(self.prediction_handler.run_prediction, Qt.ConnectionType.QueuedConnection) - # Removed: self.prediction_handler.prediction_results_ready.connect(self.on_prediction_results_ready) # Old signal - # Assume PredictionHandler.rule_hierarchy_ready signal is changed to Signal(str, list) -> input_path, rules_list - self.prediction_handler.rule_hierarchy_ready.connect(self._on_rule_hierarchy_ready) # Connect the LIST signal (now with input_path) - # Assume PredictionHandler.prediction_finished signal is changed to Signal(str) -> input_path - self.prediction_handler.prediction_finished.connect(self.on_prediction_finished) # Connect 
finish signal (now with input_path) - self.prediction_handler.status_message.connect(self.show_status_message) - # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_prediction_thread_cleanup_connections_line_1012.py - log.debug("Prediction thread and handler set up to be persistent.") - elif not PredictionHandler: - log.error("PredictionHandler not available. Cannot set up prediction thread.") + + # Connect signals from the handler (inherited from BasePredictionHandler) back to MainWindow slots + self.prediction_handler.prediction_ready.connect(self._on_rule_hierarchy_ready) # Slot signature updated below + self.prediction_handler.prediction_error.connect(self._on_prediction_error) # New slot added below + self.prediction_handler.status_update.connect(self.show_status_message) # Slot signature updated below + + # Keep thread alive (no automatic quit/deleteLater for persistent handler) + log.debug("Rule-Based Prediction thread and handler set up to be persistent.") + self.prediction_thread.start() # Start the thread immediately + elif not RuleBasedPredictionHandler: + log.error("RuleBasedPredictionHandler not available. 
Cannot set up prediction thread.") + + # LLM Thread setup is now handled internally by LLMInteractionHandler # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_reset_processing_thread_references_slot_line_1022.py @@ -1119,65 +796,17 @@ class MainWindow(QMainWindow): # self.prediction_thread = None # Keep references alive # self.prediction_handler = None # Keep references alive - @Slot() - def _reset_llm_thread_references(self): - """Resets LLM thread and handler references after the thread finishes.""" - log.debug("--> Entered _reset_llm_thread_references") - log.debug("Resetting LLM prediction thread and handler references.") - self.llm_prediction_thread = None - self.llm_prediction_handler = None - # Update button state now that thread is confirmed finished - log.debug("Calling _update_llm_reinterpret_button_state...") - self._update_llm_reinterpret_button_state() - # --- Process next item now that the previous thread is fully finished --- - log.debug("Previous LLM thread finished. 
Triggering processing for next item by calling _process_next_llm_item...") - self._process_next_llm_item() - log.debug("<-- Exiting _reset_llm_thread_references") + # _reset_llm_thread_references REMOVED (Handled internally by LLMInteractionHandler) + @Slot(int, int) def update_progress_bar(self, current_count, total_count): - if total_count > 0: - percentage = int((current_count / total_count) * 100) - self.progress_bar.setValue(percentage) - self.progress_bar.setFormat(f"%p% ({current_count}/{total_count})") - else: - self.progress_bar.setValue(0) - self.progress_bar.setFormat("0/0") + """Slot to receive progress updates (e.g., from backend) and forward to the panel.""" + if hasattr(self, 'main_panel_widget'): + self.main_panel_widget.update_progress_bar(current_count, total_count) # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_on_prediction_results_ready_slot_line_987.py -# Slot signature assumes prediction_finished signal is updated to emit input_path: Signal(str) - # Slot signature assumes prediction_finished signal is updated to emit input_path: Signal(str) - @Slot(str) - def on_prediction_finished(self, input_path: str): - """Handles the completion (potentially failure) of a single prediction task.""" - log.info(f"[{time.time():.4f}] --> Prediction finished signal received for: {input_path}") - - # Ensure path is removed from pending even if rule_hierarchy_ready wasn't emitted (e.g., critical error) - if input_path in self._pending_predictions: - log.warning(f"Prediction finished for '{input_path}', but it was still marked as pending. 
Removing.") - self._pending_predictions.discard(input_path) - # Check if this was the last pending item after an error - if not self._pending_predictions: - log.info("Prediction finished, and no more predictions are pending (potentially due to error).") - # self._finalize_model_update() # Removed call to obsolete method - else: - # Update status about remaining items - completed_count = len(self._accumulated_rules) # Note: _accumulated_rules might not be accurate if prediction failed - pending_count = len(self._pending_predictions) - # total_count = completed_count + pending_count # This might be slightly off if some failed without rules - # We don't have the total count of *requested* predictions here easily, - # but we can use the initial number of items added. - total_requested = len(self.current_asset_paths) # Use the total number of items added - status_msg = f"Prediction finished for {Path(input_path).name}. Waiting for {pending_count} more ({completed_count}/{total_requested} requested)..." - self.statusBar().showMessage(status_msg, 5000) - log.debug(status_msg) - else: - log.debug(f"Prediction finished for '{input_path}', which was already processed.") - - # Original status message might be misleading now, handled by accumulation logic. 
- # self.statusBar().showMessage("Preview updated.", 3000) # Removed - - + # REMOVED on_prediction_finished slot - completion is handled by _on_rule_hierarchy_ready or _on_prediction_error @Slot(str, str, str) def update_file_status(self, input_path_str, status, message): # TODO: Update status bar or potentially find rows in table later @@ -1191,555 +820,51 @@ class MainWindow(QMainWindow): @Slot(int, int, int) def on_processing_finished(self, processed_count, skipped_count, failed_count): # This log message might be inaccurate until signal source is updated - log.info(f"GUI received processing_finished signal (Source TBC): P={processed_count}, S={skipped_count}, F={failed_count}") - self.set_controls_enabled(True) - self.cancel_button.setEnabled(False) - self.start_button.setText("Start Processing") + log.info(f"GUI received processing_finished signal: P={processed_count}, S={skipped_count}, F={failed_count}") + # Re-enable controls via the panel widget + self.set_controls_enabled(True) # Enables general controls + self.main_panel_widget.set_cancel_button_enabled(False) + self.main_panel_widget.set_start_button_text("Start Processing") + # Start button enabled state depends on preset mode, handled by _on_preset_selection_changed or set_controls_enabled + # Reset progress bar text + self.main_panel_widget.set_progress_bar_text(f"Finished: {processed_count} processed, {skipped_count} skipped, {failed_count} failed.") - @Slot(str, int) - def show_status_message(self, message, timeout_ms): - if timeout_ms > 0: self.statusBar().showMessage(message, timeout_ms) - else: self.statusBar().showMessage(message) + @Slot(str) # Signature changed: Base class signal only emits message string + def show_status_message(self, message): + # Show message indefinitely until replaced + self.statusBar().showMessage(message) def set_controls_enabled(self, enabled: bool): - """Enables/disables input controls during processing.""" - # Main panel controls - 
self.start_button.setEnabled(enabled) - self.setAcceptDrops(enabled) - self.drag_drop_area.setEnabled(enabled) - # Removed: self.preview_table_view.setEnabled(enabled) - self.unified_view.setEnabled(enabled) # Enable/disable the new Unified View - # Editor panel controls (should generally be enabled unless processing) - self.editor_panel.setEnabled(enabled) # Enable/disable the whole panel - # Blender controls - self.blender_integration_checkbox.setEnabled(enabled) - # Only enable path inputs if checkbox is checked AND main controls are enabled - blender_paths_enabled = enabled and self.blender_integration_checkbox.isChecked() - self.nodegroup_blend_path_input.setEnabled(blender_paths_enabled) - self.browse_nodegroup_blend_button.setEnabled(blender_paths_enabled) - self.materials_blend_path_input.setEnabled(blender_paths_enabled) - self.browse_materials_blend_button.setEnabled(blender_paths_enabled) - - - @Slot(bool) - def _toggle_blender_controls(self, checked): - """Enable/disable Blender path inputs based on the checkbox state.""" - self.nodegroup_blend_path_input.setEnabled(checked) - self.browse_nodegroup_blend_button.setEnabled(checked) - self.materials_blend_path_input.setEnabled(checked) - self.browse_materials_blend_button.setEnabled(checked) - - def _browse_for_blend_file(self, line_edit_widget: QLineEdit): - """Opens a dialog to select a .blend file and updates the line edit.""" - current_path = line_edit_widget.text() - start_dir = str(Path(current_path).parent) if current_path and Path(current_path).exists() else str(project_root) - - file_path, _ = QFileDialog.getOpenFileName( - self, - "Select Blender File", - start_dir, - "Blender Files (*.blend);;All Files (*)" - ) - if file_path: - line_edit_widget.setText(file_path) - log.info(f"User selected blend file: {file_path}") - - def _browse_for_nodegroup_blend(self): - self._browse_for_blend_file(self.nodegroup_blend_path_input) - - def _browse_for_materials_blend(self): - 
self._browse_for_blend_file(self.materials_blend_path_input) - - def _start_llm_prediction(self, input_path_str: str, file_list: list = None): - """Starts the LLM prediction process in a separate thread. - If file_list is not provided, it will be extracted. - """ - # Extract file list if not provided (needed for re-interpretation calls) - if file_list is None: - log.debug(f"File list not provided for {input_path_str}, extracting...") - file_list = self._extract_file_list(input_path_str) - if file_list is None: - log.error(f"Failed to extract file list for {input_path_str} in _start_llm_prediction.") - self.statusBar().showMessage(f"Error extracting files for {os.path.basename(input_path_str)}", 5000) - # If called as part of a queue, we need to process the next item - self._process_next_llm_reinterpret() - return - - # Input path validation is now done before calling this function - input_path_obj = Path(input_path_str) # Still needed for basename - - if not file_list: - self.statusBar().showMessage(f"LLM Error: No files extracted for {input_path_str}", 5000) - log.error(f"LLM Error: Received empty file list for {input_path_str}") - # Ensure path is removed from pending if we error out here - self._pending_predictions.discard(input_path_str) - log.debug(f"Removed '{input_path_str}' from pending predictions due to empty file list.") - return - # --- Load Base Config for LLM Settings --- - if load_base_config is None: # Check if function was imported successfully - log.critical("LLM Error: load_base_config function not available.") - self.statusBar().showMessage("LLM Error: Cannot load base configuration.", 5000) - return - try: - base_config = load_base_config() - if not base_config: - raise ConfigurationError("Failed to load base configuration (app_settings.json).") - - # Extract necessary LLM settings - llm_settings = { - "llm_endpoint_url": base_config.get('llm_endpoint_url'), # Add the endpoint URL - "api_key": base_config.get('llm_api_key'), - "model_name": 
base_config.get('llm_model_name', 'gemini-pro'), - "prompt_template_content": base_config.get('llm_predictor_prompt'), # Get the prompt content directly - "asset_types": base_config.get('ASSET_TYPE_DEFINITIONS', {}), - "file_types": base_config.get('FILE_TYPE_DEFINITIONS', {}), - "examples": base_config.get('llm_predictor_examples', []) - } - - # Validate essential settings - # Removed check for empty API key to support local LLMs without keys - except ConfigurationError as e: - log.error(f"LLM Configuration Error: {e}") - self.statusBar().showMessage(f"LLM Config Error: {e}", 5000) - QMessageBox.warning(self, "LLM Configuration Error", f"Could not load necessary LLM settings from app_settings.json:\n\n{e}") - return - except Exception as e: # Catch other potential errors during loading - log.exception(f"Unexpected error loading LLM configuration: {e}") - self.statusBar().showMessage(f"LLM Config Error: {e}", 5000) - QMessageBox.critical(self, "LLM Configuration Error", f"An unexpected error occurred while loading LLM settings:\n\n{e}") - return - # --- End Config Loading --- - - - # Clean up previous thread/handler if any are still running (basic cleanup) - if self.llm_prediction_thread and self.llm_prediction_thread.isRunning(): - log.warning("Warning: Previous LLM prediction thread still running. 
Attempting to cancel/quit.") - # Add more robust cleanup if needed (e.g., wait loop, force quit) - if self.llm_prediction_handler: - self.llm_prediction_handler.cancel() # Request cancellation - self.llm_prediction_thread.quit() - # self.llm_prediction_thread.wait(1000) # Optional wait - - log.info(f"Starting LLM prediction for source: {input_path_str} with {len(file_list)} files.") - self.statusBar().showMessage(f"Starting LLM interpretation for {os.path.basename(input_path_str)}...") - - self.llm_prediction_thread = QThread(self) - # Pass the input path (for context), the file list, and settings to the handler - self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, llm_settings) # Pass input_path_str, file_list, settings - self.llm_prediction_handler.moveToThread(self.llm_prediction_thread) - # Connect signals from handler to slots in MainWindow - self.llm_prediction_handler.prediction_ready.connect(self._on_llm_prediction_ready) - self.llm_prediction_handler.prediction_error.connect(self._on_llm_prediction_error) - self.llm_prediction_handler.status_update.connect(self.statusBar().showMessage) # Connect status updates - - # Connect thread signals - self.llm_prediction_thread.started.connect(self.llm_prediction_handler.run) - # Clean up thread and handler when finished - # --- Connect thread finished signal to cleanup slot --- - self.llm_prediction_thread.finished.connect(self._reset_llm_thread_references) - # --- End Connect --- - self.llm_prediction_thread.finished.connect(self.llm_prediction_handler.deleteLater) - self.llm_prediction_thread.finished.connect(self.llm_prediction_thread.deleteLater) - # Also ensure thread quits when handler signals completion/error - self.llm_prediction_handler.prediction_ready.connect(self.llm_prediction_thread.quit) - self.llm_prediction_handler.prediction_error.connect(self.llm_prediction_thread.quit) - - # UI disabling is now handled by the calling function (_on_llm_reinterpret_clicked) - # when the 
queue processing starts. - - self.llm_prediction_thread.start() - - # --- Preset Editor Methods (Adapted from PresetEditorDialog) --- - - def _editor_add_list_item(self, list_widget: QListWidget): - """Adds an editable item to the specified list widget in the editor.""" - text, ok = QInputDialog.getText(self, f"Add Item", "Enter value:") - if ok and text: - item = QListWidgetItem(text) - # item.setFlags(item.flags() | Qt.ItemFlag.ItemIsEditable) # Already editable by default - list_widget.addItem(item) - self._mark_editor_unsaved() - - def _editor_remove_list_item(self, list_widget: QListWidget): - """Removes the selected item from the specified list widget in the editor.""" - selected_items = list_widget.selectedItems() - if not selected_items: return - for item in selected_items: list_widget.takeItem(list_widget.row(item)) - self._mark_editor_unsaved() - - def _editor_add_table_row(self, table_widget: QTableWidget): - """Adds an empty row to the specified table widget in the editor.""" - row_count = table_widget.rowCount() - table_widget.insertRow(row_count) - for col in range(table_widget.columnCount()): table_widget.setItem(row_count, col, QTableWidgetItem("")) - self._mark_editor_unsaved() - - def _editor_remove_table_row(self, table_widget: QTableWidget): - """Removes the selected row(s) from the specified table widget in the editor.""" - selected_rows = sorted(list(set(index.row() for index in table_widget.selectedIndexes())), reverse=True) - if not selected_rows: - if table_widget.rowCount() > 0: selected_rows = [table_widget.rowCount() - 1] - else: return - for row in selected_rows: table_widget.removeRow(row) - self._mark_editor_unsaved() - - def _mark_editor_unsaved(self): - """Marks changes in the editor panel as unsaved.""" - if self._is_loading_editor: return - self.editor_unsaved_changes = True - self.editor_save_button.setEnabled(True) - preset_name = Path(self.current_editing_preset_path).name if self.current_editing_preset_path else 'New Preset' - 
self.setWindowTitle(f"Asset Processor Tool - {preset_name}*") - - def _connect_editor_change_signals(self): - """Connect signals from all editor widgets to mark_editor_unsaved.""" - self.editor_preset_name.textChanged.connect(self._mark_editor_unsaved) - self.editor_supplier_name.textChanged.connect(self._mark_editor_unsaved) - self.editor_notes.textChanged.connect(self._mark_editor_unsaved) - self.editor_separator.textChanged.connect(self._mark_editor_unsaved) - self.editor_spin_base_name_idx.valueChanged.connect(self._mark_editor_unsaved) - self.editor_spin_map_type_idx.valueChanged.connect(self._mark_editor_unsaved) - # List/Table widgets are connected via helper functions - - def _check_editor_unsaved_changes(self) -> bool: - """Checks for unsaved changes in the editor and prompts the user. Returns True if should cancel action.""" - if not self.editor_unsaved_changes: return False - reply = QMessageBox.question(self, "Unsaved Preset Changes", - "You have unsaved changes in the preset editor. 
Discard them?", - QMessageBox.StandardButton.Save | QMessageBox.StandardButton.Discard | QMessageBox.StandardButton.Cancel, - QMessageBox.StandardButton.Cancel) - if reply == QMessageBox.StandardButton.Save: return not self._save_current_preset() # Return True (cancel) if save fails - elif reply == QMessageBox.StandardButton.Discard: return False # Discarded, proceed - else: return True # Cancel action - - def _set_editor_enabled(self, enabled: bool): - """Enables or disables all editor widgets.""" - self.editor_tab_widget.setEnabled(enabled) - # Also enable/disable save buttons based on editor state, not just processing state - self.editor_save_button.setEnabled(enabled and self.editor_unsaved_changes) - self.editor_save_as_button.setEnabled(enabled) # Save As is always possible if editor is enabled - - def _clear_editor(self): - """Clears the editor fields and resets state.""" - self._is_loading_editor = True - self.editor_preset_name.clear() - self.editor_supplier_name.clear() - self.editor_notes.clear() - self.editor_separator.clear() - self.editor_spin_base_name_idx.setValue(0) - self.editor_spin_map_type_idx.setValue(1) - self.editor_list_gloss_keywords.clear() - self.editor_table_bit_depth_variants.setRowCount(0) - self.editor_list_extra_patterns.clear() - self.editor_table_map_type_mapping.setRowCount(0) - self.editor_list_model_patterns.clear() - self.editor_list_decal_keywords.clear() - self.editor_table_archetype_rules.setRowCount(0) - self.current_editing_preset_path = None - self.editor_unsaved_changes = False - self.editor_save_button.setEnabled(False) - self.setWindowTitle("Asset Processor Tool") # Reset window title - self._set_editor_enabled(False) - - # Ensure unified view is cleared (handled by model clear) - # No placeholder label to manage for unified view - - self._is_loading_editor = False - - def _populate_editor_from_data(self, preset_data: dict): - """Helper method to populate editor UI widgets from a preset data dictionary.""" - 
self._is_loading_editor = True - try: - self.editor_preset_name.setText(preset_data.get("preset_name", "")) - self.editor_supplier_name.setText(preset_data.get("supplier_name", "")) - self.editor_notes.setText(preset_data.get("notes", "")) - naming_data = preset_data.get("source_naming", {}) - self.editor_separator.setText(naming_data.get("separator", "_")) - indices = naming_data.get("part_indices", {}) - self.editor_spin_base_name_idx.setValue(indices.get("base_name", 0)) - self.editor_spin_map_type_idx.setValue(indices.get("map_type", 1)) - self.editor_list_gloss_keywords.clear() - self.editor_list_gloss_keywords.addItems(naming_data.get("glossiness_keywords", [])) - self.editor_table_bit_depth_variants.setRowCount(0) - bit_depth_vars = naming_data.get("bit_depth_variants", {}) - for i, (map_type, pattern) in enumerate(bit_depth_vars.items()): - self.editor_table_bit_depth_variants.insertRow(i) - self.editor_table_bit_depth_variants.setItem(i, 0, QTableWidgetItem(map_type)) - self.editor_table_bit_depth_variants.setItem(i, 1, QTableWidgetItem(pattern)) - self.editor_list_extra_patterns.clear() - self.editor_list_extra_patterns.addItems(preset_data.get("move_to_extra_patterns", [])) - self.editor_table_map_type_mapping.setRowCount(0) - map_mappings = preset_data.get("map_type_mapping", []) - # --- UPDATED for new dictionary format --- - for i, mapping_dict in enumerate(map_mappings): - if isinstance(mapping_dict, dict) and "target_type" in mapping_dict and "keywords" in mapping_dict: - std_type = mapping_dict["target_type"] - keywords = mapping_dict["keywords"] - self.editor_table_map_type_mapping.insertRow(i) - self.editor_table_map_type_mapping.setItem(i, 0, QTableWidgetItem(std_type)) - # Ensure keywords are strings before joining - keywords_str = [str(k) for k in keywords if isinstance(k, str)] - self.editor_table_map_type_mapping.setItem(i, 1, QTableWidgetItem(", ".join(keywords_str))) - else: - log.warning(f"Skipping invalid map_type_mapping item during 
editor population: {mapping_dict}") - # --- END UPDATE --- - category_rules = preset_data.get("asset_category_rules", {}) - self.editor_list_model_patterns.clear() - self.editor_list_model_patterns.addItems(category_rules.get("model_patterns", [])) - self.editor_list_decal_keywords.clear() - self.editor_list_decal_keywords.addItems(category_rules.get("decal_keywords", [])) - preset_data["asset_category_rules"] = category_rules - arch_rules = [] - for r in range(self.editor_table_archetype_rules.rowCount()): - name_item = self.editor_table_archetype_rules.item(r, 0) - any_item = self.editor_table_archetype_rules.item(r, 1) - all_item = self.editor_table_archetype_rules.item(r, 2) - if name_item and any_item and all_item: - match_any = [k.strip() for k in any_item.text().split(',') if k.strip()] - match_all = [k.strip() for k in all_item.text().split(',') if k.strip()] - arch_rules.append([name_item.text().strip(), {"match_any": match_any, "match_all": match_all}]) - preset_data["archetype_rules"] = arch_rules - return preset_data - finally: - self._is_loading_editor = False - - def _load_preset_for_editing(self, file_path: Path): - """Loads the content of the selected preset file into the editor widgets.""" - if not file_path or not file_path.is_file(): - self._clear_editor() - return - log.info(f"Loading preset into editor: {file_path.name}") - log.info(f"Loading preset into editor: {file_path.name}") - try: - with open(file_path, 'r', encoding='utf-8') as f: preset_data = json.load(f) - self._populate_editor_from_data(preset_data) - self._set_editor_enabled(True) - self.current_editing_preset_path = file_path - self.editor_unsaved_changes = False - self.editor_save_button.setEnabled(False) - self.setWindowTitle(f"Asset Processor Tool - {file_path.name}") - log.info(f"Preset '{file_path.name}' loaded into editor.") - log.debug("Preset loaded. 
Checking visibility states.") - # No placeholder/table view visibility to manage here - except json.JSONDecodeError as json_err: - log.error(f"Invalid JSON in {file_path.name}: {json_err}") - QMessageBox.warning(self, "Load Error", f"Failed to load preset '{file_path.name}'.\nInvalid JSON structure:\n{json_err}") - self._clear_editor() - except Exception as e: - log.exception(f"Error loading preset file {file_path}: {e}") - QMessageBox.critical(self, "Error", f"Could not load preset file:\n{file_path}\n\nError: {e}") - self._clear_editor() - - def _load_selected_preset_for_editing(self, current_item: QListWidgetItem, previous_item: QListWidgetItem): - """Loads the preset currently selected in the editor list.""" - log.debug(f"currentItemChanged signal triggered. current_item: {current_item.text() if current_item else 'None'}, previous_item: {previous_item.text() if previous_item else 'None'}") - - # Check if the selected item is the placeholder or LLM - is_placeholder = current_item and current_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__" - is_llm = current_item and current_item.data(Qt.ItemDataRole.UserRole) == "__LLM__" # Added check - - if self._check_editor_unsaved_changes(): - # If user cancels, revert selection - if previous_item: - log.debug("Unsaved changes check cancelled. Reverting selection.") - self.editor_preset_list.blockSignals(True) - self.editor_preset_list.setCurrentItem(previous_item) - self.editor_preset_list.blockSignals(False) - return - - if is_placeholder: - log.debug("Placeholder item selected. Clearing editor and unified view.") - self._clear_editor() - self.unified_model.clear_data() # Ensure the new model is empty - # No placeholder/table view visibility to manage - self.start_button.setEnabled(False) # Disable start button - return # Stop processing as no real preset is selected - - if is_llm: # Added block - log.debug("LLM Interpretation item selected. 
Clearing editor and triggering preview.") - self._clear_editor() # Clear editor fields - self._set_editor_enabled(False) # Disable editor fields for LLM mode - self.start_button.setEnabled(False) # Disable start processing button for LLM mode - self._update_llm_reinterpret_button_state() # Update re-interpret button state - self.update_preview() # Trigger preview update (which will handle LLM if assets exist) - return # Stop processing here, don't load as preset - - # Existing logic for handling real preset items starts here - if current_item: # This will now only run for actual preset files - log.debug(f"Loading preset for editing: {current_item.text()}") - preset_path = current_item.data(Qt.ItemDataRole.UserRole) - # Ensure preset_path is actually a Path object before calling _load_preset_for_editing - if isinstance(preset_path, Path): - self._load_preset_for_editing(preset_path) - self.start_button.setEnabled(True) # Enable start button for presets - self._update_llm_reinterpret_button_state() # Update re-interpret button state - # --- Trigger preview update after loading editor --- - self.update_preview() - # --- End Trigger --- + """Enables/disables input controls in relevant panels during processing.""" + self.setAcceptDrops(enabled) # Still handle drops in MainWindow + # Enable/disable the editor panel + self.preset_editor_widget.setEnabled(enabled) + # Enable/disable controls within the main panel via its slot + if hasattr(self, 'main_panel_widget'): + self.main_panel_widget.set_controls_enabled(enabled) + # Explicitly set start button state based on preset mode when enabling + if enabled: + # Enable start button only if the model has items + model_has_items = self.unified_model.rowCount() > 0 + self.main_panel_widget.set_start_button_enabled(model_has_items) else: - log.error(f"Invalid data type for preset path: {type(preset_path)}. Expected Path object. 
Clearing editor.") - self._clear_editor() - self.start_button.setEnabled(False) - - # No placeholder/table view visibility to manage - else: - # This case should ideally not be reached if the placeholder is always present - log.debug("No preset selected (unexpected state if placeholder is present). Clearing editor.") - self._clear_editor() # Clear editor if selection is cleared - # No placeholder/table view visibility to manage - - def _gather_editor_data(self) -> dict: - """Gathers data from all editor UI widgets and returns a dictionary.""" - preset_data = {} - preset_data["preset_name"] = self.editor_preset_name.text().strip() - preset_data["supplier_name"] = self.editor_supplier_name.text().strip() - preset_data["notes"] = self.editor_notes.toPlainText().strip() - naming_data = {} - naming_data["separator"] = self.editor_separator.text() - naming_data["part_indices"] = { "base_name": self.editor_spin_base_name_idx.value(), "map_type": self.editor_spin_map_type_idx.value() } - naming_data["glossiness_keywords"] = [self.editor_list_gloss_keywords.item(i).text() for i in range(self.editor_list_gloss_keywords.count())] - naming_data["bit_depth_variants"] = {self.editor_table_bit_depth_variants.item(r, 0).text(): self.editor_table_bit_depth_variants.item(r, 1).text() - for r in range(self.editor_table_bit_depth_variants.rowCount()) if self.editor_table_bit_depth_variants.item(r, 0) and self.editor_table_bit_depth_variants.item(r, 1)} - preset_data["source_naming"] = naming_data - preset_data["move_to_extra_patterns"] = [self.editor_list_extra_patterns.item(i).text() for i in range(self.editor_list_extra_patterns.count())] - # --- UPDATED for new dictionary format --- - map_mappings = [] - for r in range(self.editor_table_map_type_mapping.rowCount()): - type_item = self.editor_table_map_type_mapping.item(r, 0) - keywords_item = self.editor_table_map_type_mapping.item(r, 1) - # Ensure both items exist and have text before processing - if type_item and type_item.text() 
and keywords_item and keywords_item.text(): - target_type = type_item.text().strip() - keywords = [k.strip() for k in keywords_item.text().split(',') if k.strip()] - if target_type and keywords: # Only add if both parts are valid - map_mappings.append({"target_type": target_type, "keywords": keywords}) - else: - log.warning(f"Skipping row {r} in map type mapping table due to empty target type or keywords.") - else: - log.warning(f"Skipping row {r} in map type mapping table due to missing items.") - preset_data["map_type_mapping"] = map_mappings - # --- END UPDATE --- - category_rules = {} - category_rules["model_patterns"] = [self.editor_list_model_patterns.item(i).text() for i in range(self.editor_list_model_patterns.count())] - category_rules["decal_keywords"] = [self.editor_list_decal_keywords.item(i).text() for i in range(self.editor_list_decal_keywords.count())] - preset_data["asset_category_rules"] = category_rules - arch_rules = [] - for r in range(self.editor_table_archetype_rules.rowCount()): - name_item = self.editor_table_archetype_rules.item(r, 0) - any_item = self.editor_table_archetype_rules.item(r, 1) - all_item = self.editor_table_archetype_rules.item(r, 2) - if name_item and any_item and all_item: - match_any = [k.strip() for k in any_item.text().split(',') if k.strip()] - match_all = [k.strip() for k in all_item.text().split(',') if k.strip()] - arch_rules.append([name_item.text().strip(), {"match_any": match_any, "match_all": match_all}]) - preset_data["archetype_rules"] = arch_rules - return preset_data - - def _save_current_preset(self) -> bool: - """Saves the current editor content to the currently loaded file path.""" - if not self.current_editing_preset_path: return self._save_preset_as() - log.info(f"Saving preset: {self.current_editing_preset_path.name}") - try: - preset_data = self._gather_editor_data() - if not preset_data.get("preset_name"): QMessageBox.warning(self, "Save Error", "Preset Name cannot be empty."); return False - if not 
preset_data.get("supplier_name"): QMessageBox.warning(self, "Save Error", "Supplier Name cannot be empty."); return False - content_to_save = json.dumps(preset_data, indent=4, ensure_ascii=False) - with open(self.current_editing_preset_path, 'w', encoding='utf-8') as f: f.write(content_to_save) - self.editor_unsaved_changes = False - self.editor_save_button.setEnabled(False) - self.setWindowTitle(f"Asset Processor Tool - {self.current_editing_preset_path.name}") - self.presets_changed_signal.emit() # Signal that presets changed - log.info("Preset saved successfully.") - # Refresh lists after save - self.populate_presets() - return True - except Exception as e: - log.exception(f"Error saving preset file {self.current_editing_preset_path}: {e}") - QMessageBox.critical(self, "Save Error", f"Could not save preset file:\n{self.current_editing_preset_path}\n\nError: {e}") - return False - - def _save_preset_as(self) -> bool: - """Saves the current editor content to a new file chosen by the user.""" - log.debug("Save As action triggered.") - try: - preset_data = self._gather_editor_data() - new_preset_name = preset_data.get("preset_name") - if not new_preset_name: QMessageBox.warning(self, "Save As Error", "Preset Name cannot be empty."); return False - if not preset_data.get("supplier_name"): QMessageBox.warning(self, "Save As Error", "Supplier Name cannot be empty."); return False - content_to_save = json.dumps(preset_data, indent=4, ensure_ascii=False) - suggested_name = f"{new_preset_name}.json" - default_path = PRESETS_DIR / suggested_name - file_path_str, _ = QFileDialog.getSaveFileName(self, "Save Preset As", str(default_path), "JSON Files (*.json);;All Files (*)") - if not file_path_str: log.debug("Save As cancelled by user."); return False - save_path = Path(file_path_str) - if save_path.suffix.lower() != ".json": save_path = save_path.with_suffix(".json") - if save_path.exists() and save_path != self.current_editing_preset_path: - reply = 
QMessageBox.warning(self, "Confirm Overwrite", f"Preset '{save_path.name}' already exists. Overwrite?", QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.No) - if reply == QMessageBox.StandardButton.No: log.debug("Save As overwrite cancelled."); return False - log.info(f"Saving preset as: {save_path.name}") - with open(save_path, 'w', encoding='utf-8') as f: f.write(content_to_save) - self.current_editing_preset_path = save_path # Update current path - self.editor_unsaved_changes = False - self.editor_save_button.setEnabled(False) - self.setWindowTitle(f"Asset Processor Tool - {save_path.name}") - self.presets_changed_signal.emit() # Signal change - log.info("Preset saved successfully (Save As).") - # Refresh lists and select the new item - self.populate_presets() - return True - except Exception as e: - log.exception(f"Error saving preset file (Save As): {e}") - QMessageBox.critical(self, "Save Error", f"Could not save preset file.\n\nError: {e}") - return False - - def _new_preset(self): - """Clears the editor and loads data from _template.json.""" - log.debug("New Preset action triggered.") - if self._check_editor_unsaved_changes(): return - self._clear_editor() - if TEMPLATE_PATH.is_file(): - log.info("Loading new preset from _template.json") - try: - with open(TEMPLATE_PATH, 'r', encoding='utf-8') as f: template_data = json.load(f) - self._populate_editor_from_data(template_data) - # Override specific fields for a new preset - self.editor_preset_name.setText("NewPreset") - self.setWindowTitle("Asset Processor Tool - New Preset*") - except Exception as e: - log.exception(f"Error loading template preset file {TEMPLATE_PATH}: {e}") - QMessageBox.critical(self, "Error", f"Could not load template preset file:\n{TEMPLATE_PATH}\n\nError: {e}") - self._clear_editor() - self.setWindowTitle("Asset Processor Tool - New Preset*") - self.editor_supplier_name.setText("MySupplier") # Set a default supplier name - else: - 
log.warning("Presets/_template.json not found. Creating empty preset.") - self.setWindowTitle("Asset Processor Tool - New Preset*") - self.editor_preset_name.setText("NewPreset") - self.editor_supplier_name.setText("MySupplier") # Set a default supplier name - self._set_editor_enabled(True) - self.editor_unsaved_changes = True - self.editor_save_button.setEnabled(True) + # Always disable start button when controls are disabled + self.main_panel_widget.set_start_button_enabled(False) - def _delete_selected_preset(self): - """Deletes the currently selected preset file from the editor list after confirmation.""" - current_item = self.editor_preset_list.currentItem() - if not current_item: QMessageBox.information(self, "Delete Preset", "Please select a preset from the list to delete."); return - preset_path = current_item.data(Qt.ItemDataRole.UserRole) - preset_name = preset_path.stem - reply = QMessageBox.warning(self, "Confirm Delete", f"Are you sure you want to permanently delete the preset '{preset_name}'?", QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.No) - if reply == QMessageBox.StandardButton.Yes: - log.info(f"Deleting preset: {preset_path.name}") - try: - preset_path.unlink() - log.info("Preset deleted successfully.") - if self.current_editing_preset_path == preset_path: self._clear_editor() - self.presets_changed_signal.emit() # Signal change - # Refresh lists - self.populate_presets() - except Exception as e: - log.exception(f"Error deleting preset file {preset_path}: {e}") - QMessageBox.critical(self, "Delete Error", f"Could not delete preset file:\n{preset_path}\n\nError: {e}") + # --- _toggle_blender_controls REMOVED (Handled by MainPanelWidget) --- + # --- _browse_for_blend_file REMOVED (Handled by MainPanelWidget) --- + # --- _browse_for_nodegroup_blend REMOVED (Handled by MainPanelWidget) --- + # --- _browse_for_materials_blend REMOVED (Handled by MainPanelWidget) --- + + # _start_llm_prediction REMOVED 
(Handled internally by LLMInteractionHandler) + + # --- Preset Editor Methods (Moved to PresetEditorWidget) --- + # _editor_add_list_item, _editor_remove_list_item, _editor_add_table_row, _editor_remove_table_row, + # _mark_editor_unsaved, _connect_editor_change_signals, _check_editor_unsaved_changes, + # _set_editor_enabled, _clear_editor, _populate_editor_from_data, _load_preset_for_editing, + # _load_selected_preset_for_editing, _gather_editor_data, _save_current_preset, + # _save_preset_as, _new_preset, _delete_selected_preset # --- Menu Bar Setup --- def setup_menu_bar(self): @@ -1764,15 +889,16 @@ class MainWindow(QMainWindow): # Log Console Action self.toggle_log_action = QAction("Show Log Console", self, checkable=True) self.toggle_log_action.setChecked(False) # Start hidden + # Connect to the slot in MainWindow self.toggle_log_action.toggled.connect(self._toggle_log_console_visibility) view_menu.addAction(self.toggle_log_action) # Detailed Preview Action self.toggle_preview_action = QAction("Disable Detailed Preview", self, checkable=True) self.toggle_preview_action.setChecked(False) # Start enabled (detailed view) - # Connect to update_preview, which now checks this action's state - self.toggle_preview_action.toggled.connect(self.update_preview) - view_menu.addAction(self.toggle_preview_action) + # Connect to update_preview, which now checks this action's state (REMOVED - Preview mode toggle removed) + # self.toggle_preview_action.toggled.connect(self.update_preview) + # view_menu.addAction(self.toggle_preview_action) # REMOVED - Preview mode toggle removed # Verbose Logging Action self.toggle_verbose_action = QAction("Verbose Logging (DEBUG)", self, checkable=True) @@ -1792,8 +918,8 @@ class MainWindow(QMainWindow): self.log_handler.setLevel(logging.INFO) # Add handler to the root logger to capture logs from all modules logging.getLogger().addHandler(self.log_handler) - # Connect the signal to the slot - 
self.log_handler.log_record_received.connect(self._append_log_message) + # Connect the signal to the slot in LogConsoleWidget + self.log_handler.log_record_received.connect(self.log_console._append_log_message) log.info("UI Log Handler Initialized.") # Log that the handler is ready # --- Slots for Menu Actions and Logging --- @@ -1816,10 +942,14 @@ class MainWindow(QMainWindow): @Slot(bool) def _toggle_log_console_visibility(self, checked): - """Shows or hides the log console widget based on menu action.""" - if hasattr(self, 'log_console_widget'): - self.log_console_widget.setVisible(checked) + """Shows or hides the log console widget.""" + if hasattr(self, 'log_console'): + self.log_console.setVisible(checked) log.debug(f"Log console visibility set to: {checked}") + else: + log.warning("Attempted to toggle log console visibility, but widget not found.") + + # _toggle_log_console_visibility moved to PresetEditorWidget (connected via menu action) # <-- This comment is now incorrect, slot is above @Slot(bool) def _toggle_verbose_logging(self, checked): @@ -1836,33 +966,13 @@ class MainWindow(QMainWindow): # Update status bar or log console to indicate change self.statusBar().showMessage(f"Logging level set to {logging.getLevelName(new_level)}", 3000) - @Slot(str) - def _append_log_message(self, message): - """Appends a log message to the QTextEdit console.""" - if hasattr(self, 'log_console_output'): - # Optional: Add basic coloring (can be expanded) - # if message.startswith("ERROR"): - # message = f"{message}" - # elif message.startswith("WARNING"): - # message = f"{message}" - - self.log_console_output.append(message) # Use append for plain text - # Optional: Limit history size - # MAX_LINES = 500 - # if self.log_console_output.document().blockCount() > MAX_LINES: - # cursor = self.log_console_output.textCursor() - # cursor.movePosition(QTextCursor.MoveOperation.Start) - # cursor.select(QTextCursor.SelectionType.BlockUnderCursor) - # cursor.removeSelectedText() - 
# cursor.deletePreviousChar() # Remove the newline potentially left behind - # Ensure the view scrolls to the bottom - self.log_console_output.verticalScrollBar().setValue(self.log_console_output.verticalScrollBar().maximum()) - + # _append_log_message moved to PresetEditorWidget (connected via log handler signal) # --- Overridden Close Event --- def closeEvent(self, event): - """Overrides close event to check for unsaved changes in the editor.""" - if self._check_editor_unsaved_changes(): + """Overrides close event to check for unsaved changes in the editor widget.""" + # Call the check method on the preset editor widget instance + if self.preset_editor_widget.check_unsaved_changes(): event.ignore() # Ignore close event if user cancels else: event.accept() # Accept close event @@ -1870,541 +980,205 @@ class MainWindow(QMainWindow): # --- REMOVED Slots for Old Hierarchy and Rule Editor --- # Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_hierarchy_and_rule_editor_slots_line_1553.py -# Slot signature assumes rule_hierarchy_ready signal is updated to emit input_path: Signal(str, list) - # Slot signature matches rule_hierarchy_ready = Signal(list) - @Slot(list) - def _on_rule_hierarchy_ready(self, source_rules_list: list): - """Receives prediction results (a list containing one SourceRule) for a single input path, - finds the corresponding existing rule in the model, updates it while preserving overrides, - and emits dataChanged/layoutChanged signals.""" + # --- Slots for Handling Prediction Results --- - # --- Extract input_path and the new rule from the received list --- - input_path = None - new_source_rule = None - if source_rules_list and isinstance(source_rules_list[0], SourceRule): - new_source_rule = source_rules_list[0] - input_path = new_source_rule.input_path - log.debug(f"--> Entered _on_rule_hierarchy_ready for '{input_path}' with 1 SourceRule") - elif source_rules_list: - log.error(f"Received non-SourceRule object in list: 
{type(source_rules_list[0])}. Cannot process.") + # Slot signature updated to match BasePredictionHandler.prediction_ready: Signal(str, list) + @Slot(str, list) + def _on_rule_hierarchy_ready(self, input_path: str, source_rules_list: list): + """ + Receives rule-based prediction results (a list containing one SourceRule) + for a single input path, updates the model preserving overrides, + and handles completion tracking. + """ + log.debug(f"--> Entered _on_rule_hierarchy_ready for '{input_path}'") + + # --- Validate input --- + if not input_path: + log.error("Received rule hierarchy ready signal with empty input_path. Cannot process.") return - else: - log.warning("Received empty source_rules_list in _on_rule_hierarchy_ready. Prediction likely failed.") - # Try to deduce input_path if possible (e.g., if only one is pending) - if len(self._pending_predictions) == 1: - input_path = list(self._pending_predictions)[0] - log.warning(f"Assuming failed prediction corresponds to pending path: {input_path}") - else: - log.error("Cannot determine input_path for empty/failed prediction result when multiple predictions are pending.") - return - - if input_path is None: - log.error("Could not determine input_path from received source_rules_list or pending state.") - return - - # Log received rule details (even if it's None due to failure) - log.debug(f"_on_rule_hierarchy_ready: Processing result for '{input_path}'. Received Supplier ID='{getattr(new_source_rule, 'supplier_identifier', 'N/A')}', Received Override='{getattr(new_source_rule, 'supplier_override', 'N/A')}'") + # --- Check if still pending --- if input_path not in self._pending_predictions: log.warning(f"Received rule hierarchy for '{input_path}', but it was not in the pending set. Ignoring stale result? 
Pending: {self._pending_predictions}") return # Ignore if not expected - # --- Find existing rule in the model's internal list --- - # Access the model directly - existing_rule = None - existing_rule_index = -1 - model_rules = self.unified_model.get_all_source_rules() # Get current rules from model - for i, rule in enumerate(model_rules): - if rule.input_path == input_path: - existing_rule = rule - existing_rule_index = i - break - - if existing_rule: - log.debug(f"Found existing rule for '{input_path}' in model at index {existing_rule_index}. Updating it.") - if new_source_rule: # Only update if prediction was successful - # Preserve existing user overrides from the rule currently in the model - preserved_supplier_override = existing_rule.supplier_override - # Preserve other potential user overrides if they exist - preserved_asset_overrides = {asset.asset_name: asset.asset_type_override for asset in existing_rule.assets} - preserved_file_overrides = {(file.file_path, 'target'): file.target_asset_name_override for asset in existing_rule.assets for file in asset.files} - preserved_file_overrides.update({(file.file_path, 'item'): file.item_type_override for asset in existing_rule.assets for file in asset.files}) - - # --- Update existing rule with new prediction data --- - existing_rule.supplier_identifier = new_source_rule.supplier_identifier - existing_rule.preset_name = new_source_rule.preset_name - existing_rule.assets = new_source_rule.assets # Replace assets list - - # Re-apply preserved overrides - existing_rule.supplier_override = preserved_supplier_override - for asset in existing_rule.assets: - asset.asset_type_override = preserved_asset_overrides.get(asset.asset_name) - asset.parent_source = existing_rule # Set parent reference - for file in asset.files: - file.target_asset_name_override = preserved_file_overrides.get((file.file_path, 'target')) - file.item_type_override = preserved_file_overrides.get((file.file_path, 'item')) - file.parent_asset = asset # 
Set parent reference - # --- End Update --- - - # Emit dataChanged and layoutChanged for the updated existing rule via the model - start_index = self.unified_model.createIndex(existing_rule_index, 0, existing_rule) - end_index = self.unified_model.createIndex(existing_rule_index, self.unified_model.columnCount() - 1, existing_rule) - log.debug(f"Emitting dataChanged and layoutChanged for updated existing rule at index {existing_rule_index}") - self.unified_model.dataChanged.emit(start_index, end_index) - self.unified_model.layoutChanged.emit() # Signal layout change - else: - log.warning(f"Prediction failed for '{input_path}'. Not updating existing rule data in model.") - + # --- Update Model --- + if source_rules_list: + try: + log.info(f"Updating model with rule-based results for source: {input_path}") +# --- DEBUG: Check model type and attribute existence --- + log.debug(f"DEBUG: Type of self.unified_model: {type(self.unified_model)}") + log.debug(f"DEBUG: hasattr(self.unified_model, 'update_rules_for_sources'): {hasattr(self.unified_model, 'update_rules_for_sources')}") + # --- END DEBUG --- + self.unified_model.update_rules_for_sources(source_rules_list) + log.info("Model update call successful.") + # Expand items after model update (Access view via panel) + if hasattr(self, 'main_panel_widget'): + self.main_panel_widget.unified_view.expandToDepth(1) + except Exception as e: + error_msg = f"Error updating model with rule-based results for {input_path}: {e}" + log.exception(error_msg) + self.statusBar().showMessage(error_msg, 8000) + # Fall through to completion handling even if model update fails else: - # If no existing rule was found (e.g., first time result arrives) - if new_source_rule: # Only add if prediction was successful - log.debug(f"No existing rule found for '{input_path}'. 
Adding new rule to model.") - # Ensure parent references are set - for asset_rule in new_source_rule.assets: - asset_rule.parent_source = new_source_rule - for file_rule in asset_rule.files: - file_rule.parent_asset = asset_rule - # Add to model's internal list and emit signal via model methods - self.unified_model.beginInsertRows(QModelIndex(), len(model_rules), len(model_rules)) - model_rules.append(new_source_rule) # Append to the list obtained from the model - self.unified_model.endInsertRows() - else: - log.warning(f"Prediction failed for '{input_path}' and no existing rule found. Nothing to add to model.") + log.warning(f"Received empty source_rules_list for '{input_path}'. Prediction likely failed. Model not updated.") + # --- Handle Completion --- + self._handle_prediction_completion(input_path) - # --- Remove from pending --- - self._pending_predictions.discard(input_path) - log.debug(f"Removed '{input_path}' from pending predictions. Remaining: {len(self._pending_predictions)} -> {self._pending_predictions}") + # _on_llm_prediction_ready REMOVED (Replaced by _on_llm_prediction_ready_from_handler) - # --- Check for Completion --- - if not self._pending_predictions: - log.info("All pending predictions processed. Model should be up-to-date.") - self.statusBar().showMessage(f"Preview complete.", 5000) # Update status - # Optional: Resize columns after all updates are done - for col in range(self.unified_model.columnCount()): - self.unified_view.resizeColumnToContents(col) - self.unified_view.expandToDepth(1) # Expand Source -> Asset level - else: - # Update status bar with progress - completed_count = len(self.unified_model.get_all_source_rules()) # Count rules in model - pending_count = len(self._pending_predictions) - total_requested = completed_count + pending_count # Estimate total - status_msg = f"Preview updated for {Path(input_path).name}. Waiting for {pending_count} more ({completed_count}/{total_requested} processed)..." 
- self.statusBar().showMessage(status_msg, 5000) - log.debug(status_msg) + # _on_llm_prediction_error REMOVED (Errors now connect to _on_prediction_error) + # --- Slot for LLM Results from Handler --- @Slot(str, list) - def _on_llm_prediction_ready(self, directory_path, source_rules): - """Handles the successful LLM prediction result and processes the next item in the queue.""" - log.info(f"Received LLM prediction for {directory_path}. {len(source_rules)} source rule(s) found.") - self.statusBar().showMessage(f"LLM interpretation complete for {os.path.basename(directory_path)}.", 5000) + def _on_llm_prediction_ready_from_handler(self, input_path: str, source_rules: list): + """ + Handles the successful LLM prediction result received from LLMInteractionHandler. + Updates the model and handles completion tracking. + """ + log.info(f"Received LLM prediction result from handler for {input_path}. {len(source_rules)} source rule(s) found.") + # Status message is likely already updated by the handler signal # --- Update the model --- if source_rules: try: - # Assuming the model has a method like this: - # It should intelligently update/replace rules only for the sources - # contained within the source_rules list (which should correspond - # to the directory_path processed by the handler). - log.info(f"Updating model with rules for sources: {[rule.input_path for rule in source_rules]}") # Corrected source_path to input_path for logging - # --- DIAGNOSTIC LOGGING --- - log.debug(f"DIAGNOSTIC: Type of self.unified_model: {type(self.unified_model)}") - log.debug(f"DIAGNOSTIC: hasattr(self.unified_model, 'update_rules_for_sources'): {hasattr(self.unified_model, 'update_rules_for_sources')}") - # --- END DIAGNOSTIC --- - # Ensure the model method exists and handles the update correctly. - # This might involve finding existing rules for the source_path and replacing them, - # or adding new ones if they don't exist. 
+ log.info(f"Updating model with LLM results for source: {input_path}") self.unified_model.update_rules_for_sources(source_rules) log.info("Model update call successful.") - # --- Expand items after model update --- - self.unified_view.expandToDepth(1) # Expand Source -> Asset level - # --- End Expand --- - except AttributeError as e: # Capture the exception object - # Log the specific attribute error message - error_msg = f"AttributeError: {e}. Attempted to call 'update_rules_for_sources' on object of type {type(self.unified_model)}." - log.error(error_msg) - self.statusBar().showMessage(error_msg, 8000) - # Consider showing a QMessageBox critical error here + # Expand items after model update (Access view via panel) + if hasattr(self, 'main_panel_widget'): + self.main_panel_widget.unified_view.expandToDepth(1) except Exception as e: - error_msg = f"Error updating model with LLM results: {e}" - log.exception(error_msg) # Use log.exception to include traceback + error_msg = f"Error updating model with LLM results for {input_path}: {e}" + log.exception(error_msg) self.statusBar().showMessage(error_msg, 8000) - # Consider showing a QMessageBox critical error here + # Fall through to completion handling even if model update fails else: - log.info(f"No source rules returned by LLM for {directory_path}. Model not updated.") - # UI re-enabling is handled by _process_next_llm_reinterpret when the queue is empty. - - # Clean up references (optional, as deleteLater is connected to finished) - # self.llm_prediction_handler = None # Keep references until queue is done? No, handler/thread are per-item. 
- # self.llm_prediction_thread = None - - # --- Process next item in queue (MOVED TO _reset_llm_thread_references) --- - # self._process_next_llm_item() # Ensure this calls the correct unified method - # Explicitly update button state after successful prediction (handled in _reset_llm_thread_references now) - # self._update_llm_reinterpret_button_state() # Moved to _reset_llm_thread_references + log.info(f"No source rules returned by LLM handler for {input_path}. Model not updated.") + # --- Handle Completion --- + self._handle_prediction_completion(input_path) + + # --- Slot for Common Prediction Errors (Handles both Rule-Based and LLM) --- @Slot(str, str) - def _on_llm_prediction_error(self, directory_path, error_message): - """Handles errors reported by the LLM prediction handler.""" - log.debug(f"--> Entered _on_llm_prediction_error for: {directory_path}") - log.error(f"LLM Prediction Error for {directory_path}: {error_message}") - # Simplify status bar message - simple_error_msg = f"LLM Error ({os.path.basename(directory_path)}): Request failed (see log)." - self.statusBar().showMessage(simple_error_msg, 8000) - # Optionally show a QMessageBox to the user - # QMessageBox.critical(self, "LLM Prediction Error", f"Failed to get LLM prediction for {directory_path}:\n{error_message}") - log.debug(f"<-- Exiting _on_llm_prediction_error for: {directory_path}") + def _on_prediction_error(self, input_path: str, error_message: str): + """Handles errors reported by any prediction handler (RuleBased or LLM).""" + log.error(f"Prediction Error for '{input_path}': {error_message}") + self.statusBar().showMessage(f"Error analyzing {Path(input_path).name}: {error_message}", 8000) - # UI re-enabling is handled by _process_next_llm_reinterpret when the queue is empty. 
- - # Clean up references (optional, as deleteLater is connected to finished) - # self.llm_prediction_handler = None - # self.llm_prediction_thread = None - - # --- Process next item in queue --- - # Even on error, try to process the next directory in the queue (MOVED TO _reset_llm_thread_references) - # self._process_next_llm_item() # Ensure this calls the correct unified method - # Explicitly update button state after prediction error (handled in _reset_llm_thread_references now) - # self._update_llm_reinterpret_button_state() # Moved to _reset_llm_thread_references + # --- Handle Completion (even on error) --- + self._handle_prediction_completion(input_path) - # REMOVED _finalize_model_update method as it's no longer needed - # def _finalize_model_update(self): - # """Combines accumulated rules and updates the UI model and view.""" - # ... (old code removed) ... + # --- NEW Method for Centralized Completion Tracking --- + def _handle_prediction_completion(self, input_path: str): + """ + Centralized method to handle completion tracking for both successful + predictions and errors for a given input path. + """ + log.debug(f"--> Entered _handle_prediction_completion for '{input_path}'") - # --- Slots for LLM Re-interpretation --- + # Remove from pending predictions if it's there + if input_path in self._pending_predictions: + self._pending_predictions.discard(input_path) + self._completed_predictions.add(input_path) # Add to completed set + log.debug(f"Marked '{input_path}' as completed. 
Pending: {len(self._pending_predictions)}, Completed: {len(self._completed_predictions)}") - @Slot() - def _update_llm_reinterpret_button_state(self): - """Enables/disables the LLM re-interpret button based on selection in the unified view.""" - if hasattr(self, 'llm_reinterpret_button') and hasattr(self, 'unified_view'): - # Check if the selection model exists and has a selection - selection_model = self.unified_view.selectionModel() - has_selection = selection_model is not None and selection_model.hasSelection() - # Also check if LLM processing is currently running (safely) - is_llm_running = False - try: - # Check if thread exists and hasn't been deleted yet before calling isRunning - if self.llm_prediction_thread is not None: - is_llm_running = self.llm_prediction_thread.isRunning() - except RuntimeError: - # Handle the case where the C++ object is deleted between checks - log.debug("_update_llm_reinterpret_button_state: Caught RuntimeError checking isRunning (thread likely deleted).") - is_llm_running = False # Treat as not running if deleted - # Enable only if there's a selection AND LLM is not currently running - self.llm_reinterpret_button.setEnabled(has_selection and not is_llm_running) - elif hasattr(self, 'llm_reinterpret_button'): - # Ensure button is disabled if view/model isn't ready - self.llm_reinterpret_button.setEnabled(False) - - - @Slot() - def _on_llm_reinterpret_clicked(self): - """Handles the click of the 'Re-interpret Selected with LLM' button.""" - selected_indexes = self.unified_view.selectionModel().selectedIndexes() - if not selected_indexes: - return - - if self.llm_prediction_thread and self.llm_prediction_thread.isRunning(): - QMessageBox.warning(self, "Busy", "LLM prediction is already in progress. 
Please wait.") - return - - unique_source_dirs = set() - try: - # --- Get unique source directories for selected items --- - log.debug(f"Finding unique source directories from {len(selected_indexes)} selected indexes for LLM re-interpretation.") - processed_source_paths = set() # Track processed source paths to avoid duplicates - for index in selected_indexes: - if not index.isValid(): continue - # Get the node associated with the index - item_node = index.internalPointer() # Use internalPointer() for tree models - if not item_node: continue # Skip if node is invalid - - # Traverse up to find the SourceRule node - current_node = item_node - source_node = None - while current_node is not None: - if isinstance(current_node, SourceRule): - source_node = current_node - break - # Traverse using parent attributes (adjust if model structure differs) - if hasattr(current_node, 'parent_asset'): - current_node = getattr(current_node, 'parent_asset', None) - if hasattr(current_node, 'parent_source'): - current_node = getattr(current_node, 'parent_source', None) - else: # Should not happen if structure is consistent - current_node = None - elif hasattr(current_node, 'parent_source'): - current_node = getattr(current_node, 'parent_source', None) - else: # Reached top or unexpected node type - current_node = None - - - if source_node and hasattr(source_node, 'input_path') and source_node.input_path: - source_path_str = source_node.input_path - # Check if this source path has already been processed for this selection - if source_path_str in processed_source_paths: - continue # Skip if already added - # Ensure it's a directory path suitable for processing (or zip) - source_path_obj = Path(source_path_str) - if source_path_obj.is_dir() or (source_path_obj.is_file() and source_path_obj.suffix.lower() == '.zip'): - unique_source_dirs.add(source_path_str) - processed_source_paths.add(source_path_str) # Mark this source path as processed - else: - # Handle archives if needed, or just 
log/ignore - log.warning(f"Skipping non-directory/zip source for re-interpretation: {source_path_str}") - else: - log.warning(f"Could not determine valid SourceRule or input_path for selected index: {index.row()},{index.column()} (Item type: {type(item_node).__name__})") - - - except Exception as e: - log.exception(f"Error getting source directories for LLM re-interpretation: {e}") - QMessageBox.warning(self, "Error", f"Could not determine source directories for selected items: {e}") - return - - if not unique_source_dirs: - self.statusBar().showMessage("No valid source directories found for selected items.", 5000) - return - - # --- Queue directories and start processing --- - # Add directories to the unified queue, checking for duplicates - items_added_to_queue = 0 - for source_dir in unique_source_dirs: - # Check if the source_dir is already in the queue (avoids duplicate processing requests) - # Note: This checks only the path, assuming file_list is None for re-interpret requests - is_in_queue = any(item[0] == source_dir for item in self.llm_processing_queue) - if not is_in_queue: - # Re-interpretation needs to extract file list again, so pass None for file_list - self.llm_processing_queue.append((source_dir, None)) - items_added_to_queue += 1 + # Check if all initially requested predictions are now complete + if not self._pending_predictions: + log.info("All pending predictions processed. 
Model should be up-to-date.") + self.statusBar().showMessage(f"Preview generation complete.", 5000) + # Re-evaluate start button state based on model content + if hasattr(self, 'main_panel_widget'): + self.main_panel_widget.set_start_button_enabled(self.unified_model.rowCount() > 0) + # Optional: Resize columns after all updates are done (Access view via panel) + if hasattr(self, 'main_panel_widget'): + view = self.main_panel_widget.unified_view + for col in range(self.unified_model.columnCount()): + view.resizeColumnToContents(col) + # Ensure view is expanded after completion + view.expandToDepth(1) else: - log.debug(f"Skipping duplicate add to LLM queue for: {source_dir}") - - if items_added_to_queue > 0: - log.info(f"Added {items_added_to_queue} unique directories to LLM processing queue. Queue size: {len(self.llm_processing_queue)}") + # Update status bar with progress + # Use completed set size for accuracy + completed_count = len(self._completed_predictions) + pending_count = len(self._pending_predictions) + # Estimate total based on initial request size (might be slightly off if items were added/removed) + total_requested = completed_count + pending_count + status_msg = f"Preview updated for {Path(input_path).name}. Waiting for {pending_count} more ({completed_count}/{total_requested} processed)..." + self.statusBar().showMessage(status_msg, 5000) + log.debug(status_msg) else: - log.info(f"No new unique directories added to LLM queue (already present or none selected). Queue size: {len(self.llm_processing_queue)}") + log.warning(f"Received completion signal for '{input_path}', but it was not in the pending set. 
Ignoring?") - # Start processing if not already running - is_llm_running = False - try: - # Safely check if thread exists and is running - if self.llm_prediction_thread is not None: - is_llm_running = self.llm_prediction_thread.isRunning() - except RuntimeError: - log.debug("RuntimeError checking llm_prediction_thread.isRunning() in _on_llm_reinterpret_clicked (likely deleted).") - is_llm_running = False + log.debug(f"<-- Exiting _handle_prediction_completion for '{input_path}'") - if not is_llm_running: - if self.llm_processing_queue: # Only start if queue is not empty - log.info("LLM thread not running. Starting unified queue processing.") - # --- Disable UI --- - self.llm_reinterpret_button.setEnabled(False) - self.editor_preset_list.setEnabled(False) # Keep preset list disabled - # --- End Disable --- - self._process_next_llm_item() # Start processing the first item - else: - log.info("LLM thread not running, but queue is empty. Nothing to start.") + # --- End NEW Method --- + + # --- Slot for Preset Editor Selection Changes --- + @Slot(str, str) + def _on_preset_selection_changed(self, mode: str, preset_name: str | None): + """Handles changes in the preset editor selection (preset, LLM, placeholder).""" + log.info(f"Preset selection changed: mode='{mode}', preset_name='{preset_name}'") + + # Update window title based on selection + if mode == "preset" and preset_name: + # Check for unsaved changes *within the editor widget* + # This might be redundant if the editor handles its own title updates on save/load + # but good for consistency. 
+ unsaved = self.preset_editor_widget.editor_unsaved_changes + self.setWindowTitle(f"Asset Processor Tool - {preset_name}{'*' if unsaved else ''}") + elif mode == "llm": + self.setWindowTitle("Asset Processor Tool - LLM Interpretation") + else: # Placeholder or error + self.setWindowTitle("Asset Processor Tool") + + # Enable/disable start button based on whether the model has items + if hasattr(self, 'main_panel_widget'): + model_has_items = self.unified_model.rowCount() > 0 + self.main_panel_widget.set_start_button_enabled(model_has_items) + # Update LLM button state in the panel using handler's status + self.main_panel_widget.set_llm_processing_status(self.llm_interaction_handler.is_processing()) + + # Display mode is always detailed, no need to set it here + # if mode == "placeholder": + # self.unified_model.set_display_mode("simple") # REMOVED + # else: # "preset" or "llm" + # self.unified_model.set_display_mode("detailed") # REMOVED + + # Trigger preview update based on the new selection + # update_preview will now respect the mode set above + self.update_preview() + + # --- Slot for LLM Processing State Changes from Handler --- + @Slot(bool) + def _on_llm_processing_state_changed(self, is_processing: bool): + """Updates the UI based on the LLM handler's processing state.""" + log.debug(f"Received LLM processing state change from handler: {is_processing}") + # Update UI elements (e.g., disable preset editor, update button in panel) + self.preset_editor_widget.setEnabled(not is_processing) + if hasattr(self, 'main_panel_widget'): + self.main_panel_widget.set_llm_processing_status(is_processing) + + # _is_llm_thread_running REMOVED (Use self.llm_interaction_handler.is_processing()) + # _process_next_llm_item REMOVED (Handled internally by LLMInteractionHandler) + + # --- Context Menu methods REMOVED (Handled by MainPanelWidget) --- + + def get_llm_source_preset_name(self) -> str | None: + """ + Returns the name (stem) of the last valid preset that was loaded + 
before switching to LLM mode or triggering re-interpretation. + Used by delegates to populate dropdowns based on the original context. + Delegates this call to the PresetEditorWidget. + """ + if hasattr(self, 'preset_editor_widget'): + last_name = self.preset_editor_widget.get_last_valid_preset_name() + log.debug(f"get_llm_source_preset_name called, returning from widget: {last_name}") + return last_name else: - log.info(f"LLM thread already running. Added {items_added_to_queue} directories to queue.") - if items_added_to_queue > 0: - self.statusBar().showMessage(f"Added {items_added_to_queue} directories to running LLM queue.", 3000) - - - def _process_next_llm_item(self): - """Processes the next directory in the unified LLM processing queue.""" - log.debug(f"--> Entered _process_next_llm_item. Queue size: {len(self.llm_processing_queue)}") - if not self.llm_processing_queue: - log.info("LLM processing queue is empty. Finishing.") - self.statusBar().showMessage("LLM processing complete.", 5000) - # --- Re-enable UI --- - log.debug("Re-enabling UI controls.") - self._update_llm_reinterpret_button_state() # Update based on selection/state - self.editor_preset_list.setEnabled(True) # Re-enable preset list - # --- End Re-enable --- - log.debug("<-- Exiting _process_next_llm_item (queue empty)") - return - - # Check if already running - crucial for unified queue - is_llm_running = False - try: - # Safely check if thread exists and is running - if self.llm_prediction_thread is not None: - is_llm_running = self.llm_prediction_thread.isRunning() - except RuntimeError: - log.debug("RuntimeError checking llm_prediction_thread.isRunning() in _process_next_llm_item (likely deleted).") - is_llm_running = False - - if is_llm_running: - log.info("LLM processing already running. 
Waiting for current item to finish.") - # Do not pop from queue if already running, wait for _on_llm_prediction_ready/error to call this again - return - - # Ensure UI is disabled while processing starts/continues - # (Might be redundant if called correctly, but good safety) - self.llm_reinterpret_button.setEnabled(False) - self.editor_preset_list.setEnabled(False) - - # Get next item *without* removing it yet, in case _start_llm_prediction fails immediately - if not self.llm_processing_queue: # Double check queue isn't empty after potential wait - log.warning("_process_next_llm_item: Queue became empty unexpectedly.") - # Re-enable UI just in case - self._update_llm_reinterpret_button_state() - self.editor_preset_list.setEnabled(True) - return - - next_item = self.llm_processing_queue[0] # Peek at the first item - next_dir, file_list = next_item # Unpack the tuple (file_list might be None) - - # --- Calculate approximate progress --- - total_in_queue_now = len(self.llm_processing_queue) - status_msg = f"LLM Processing {os.path.basename(next_dir)} (Approx. {total_in_queue_now} remaining)..." - self.statusBar().showMessage(status_msg) - log.info(status_msg) - - # --- Start Prediction (which might fail) --- - try: - # Pass the potentially None file_list. _start_llm_prediction handles extraction if None. 
- self._start_llm_prediction(next_dir, file_list=file_list) - # --- Pop item *after* successfully starting prediction --- - self.llm_processing_queue.pop(0) - log.debug(f"Successfully started LLM prediction for {next_dir} and removed from queue.") - except Exception as e: - log.exception(f"Error occurred *during* _start_llm_prediction call for {next_dir}: {e}") - self.statusBar().showMessage(f"Error starting LLM for {os.path.basename(next_dir)}: {e}", 8000) - # --- Remove the failed item from the queue --- - try: - failed_item = self.llm_processing_queue.pop(0) - log.warning(f"Removed failed item {failed_item} from LLM queue.") - except IndexError: - log.error("Attempted to pop failed item from already empty LLM queue.") - # --- Attempt to process the *next* item --- - # Use QTimer.singleShot to avoid deep recursion if many items fail quickly - from PySide6.QtCore import QTimer - QTimer.singleShot(100, self._process_next_llm_item) # Try next item after a short delay - - # --- Context Menu for Unified View --- - - @Slot(QPoint) - def _show_unified_view_context_menu(self, point: QPoint): - """Shows the context menu for the unified view.""" - index = self.unified_view.indexAt(point) - if not index.isValid(): - return # Clicked on empty area - - # Determine the type of item clicked (Source, Asset, File) - item_node = index.internalPointer() - is_source_item = isinstance(item_node, SourceRule) - - menu = QMenu(self) - - # --- Add "Copy Source Files" action only for SourceRule items --- - if is_source_item: - # Renamed action - copy_llm_example_action = QAction("Copy LLM Example to Clipboard", self) - copy_llm_example_action.setToolTip("Copies a JSON structure representing the input files and predicted output, suitable for LLM examples.") - # Pass the index to the slot using functools.partial or a lambda - copy_llm_example_action.triggered.connect(lambda: self._copy_llm_example_to_clipboard(index)) # Renamed slot - menu.addAction(copy_llm_example_action) - 
menu.addSeparator() # Add separator if other actions might be added - - # --- Add other potential actions here based on item_node type --- - # Example: - # if isinstance(item_node, AssetRule): - # asset_action = QAction("Asset Action...", self) - # menu.addAction(asset_action) - - # Show the menu if any actions were added - if not menu.isEmpty(): - menu.exec(self.unified_view.viewport().mapToGlobal(point)) - - @Slot(QModelIndex) - def _copy_llm_example_to_clipboard(self, index: QModelIndex): - """Copies a JSON structure for the selected source item to the clipboard, - matching the LLM predictor example format.""" - if not index.isValid(): - log.warning("Copy LLM example called with invalid index.") - return - - item_node = index.internalPointer() - - if not isinstance(item_node, SourceRule): - log.warning(f"Copy LLM example called on non-SourceRule item: {type(item_node)}") - self.statusBar().showMessage("Please right-click directly on the Source item.", 3000) - return - - source_rule: SourceRule = item_node - log.info(f"Attempting to generate LLM example JSON for source: {source_rule.input_path}") - - all_file_paths = [] - predicted_assets_data = [] - - # Iterate through assets and files to gather data - for asset_rule in source_rule.assets: - asset_files_data = [] - for file_rule in asset_rule.files: - if file_rule.file_path: - # Add to the overall list for the "input" field - all_file_paths.append(file_rule.file_path) - # Add to the specific asset's file list for the "output" field - asset_files_data.append({ - "file_path": file_rule.file_path, - # Use item_type as the predicted file type - "predicted_file_type": file_rule.item_type or "UNKNOWN" # Use UNKNOWN if None - }) - - # Sort files within the asset for consistency - asset_files_data.sort(key=lambda x: x['file_path']) - - # Add the asset data to the list - predicted_assets_data.append({ - # Use asset_name as the suggested name - "suggested_asset_name": asset_rule.asset_name or "UnnamedAsset", # Use default 
if None - # Use asset_type as the predicted asset type - "predicted_asset_type": asset_rule.asset_type or "UNKNOWN", # Use UNKNOWN if None - "files": asset_files_data - }) - - # Sort assets by name for consistency - predicted_assets_data.sort(key=lambda x: x['suggested_asset_name']) - # Sort all file paths for the input field - all_file_paths.sort() - - if not all_file_paths: - log.warning(f"No file paths found for source: {source_rule.input_path}. Cannot generate example.") - self.statusBar().showMessage(f"No files found for source '{os.path.basename(source_rule.input_path)}'.", 3000) - return - - # Construct the final dictionary - llm_example = { - "input": "\n".join(all_file_paths), - "output": { - "predicted_assets": predicted_assets_data - } - } - - # Serialize to JSON string - try: - json_string = json.dumps(llm_example, indent=2) # Set indent=2 for matching format - except Exception as e: - log.exception(f"Error serializing LLM example data to JSON for source {source_rule.input_path}: {e}") - self.statusBar().showMessage(f"Error generating JSON: {e}", 5000) - return - - # Copy to clipboard - try: - clipboard = QApplication.clipboard() - if clipboard: - clipboard.setText(json_string) - log.info(f"Copied LLM example JSON to clipboard for source: {source_rule.input_path}") - self.statusBar().showMessage("Copied LLM example JSON to clipboard.", 3000) - else: - log.error("Failed to get system clipboard.") - self.statusBar().showMessage("Error: Could not access clipboard.", 5000) - except Exception as e: - log.exception(f"Error copying LLM example JSON to clipboard: {e}") - self.statusBar().showMessage(f"Error copying to clipboard: {e}", 5000) - + log.warning("get_llm_source_preset_name called before preset_editor_widget was initialized.") + return None # --- Main Execution --- def run_gui(): diff --git a/gui/prediction_handler.py b/gui/prediction_handler.py index 8d6f42d..8bc07e9 100644 --- a/gui/prediction_handler.py +++ b/gui/prediction_handler.py @@ -1,4 +1,4 
@@ -# gui/prediction_handler.py +# gui/rule_based_prediction_handler.py import logging from pathlib import Path import time @@ -11,7 +11,8 @@ from collections import defaultdict, Counter # Added Counter from typing import List, Dict, Any # For type hinting # --- PySide6 Imports --- -from PySide6.QtCore import QObject, Signal, QThread, Slot +from PySide6.QtCore import QObject, Slot # Keep QObject for parent type hint, Slot for classify_files if kept as method +# Removed Signal, QThread as they are handled by BasePredictionHandler or caller # --- Backend Imports --- import sys @@ -21,16 +22,13 @@ if str(project_root) not in sys.path: sys.path.insert(0, str(project_root)) try: - from configuration import Configuration, ConfigurationError, load_base_config # Import Configuration, ConfigurationError, and load_base_config - # AssetProcessor might not be needed directly anymore if logic is moved here - # from asset_processor import AssetProcessor, AssetProcessingError - from rule_structure import SourceRule, AssetRule, FileRule # Removed AssetType, ItemType - # Removed: import config as app_config # Import project's config module - # Removed: Import the new dictionaries directly for easier access - # Removed: from config import ASSET_TYPE_DEFINITIONS, FILE_TYPE_DEFINITIONS + from configuration import Configuration, ConfigurationError # load_base_config might not be needed here + from rule_structure import SourceRule, AssetRule, FileRule + from .base_prediction_handler import BasePredictionHandler # Import the base class BACKEND_AVAILABLE = True except ImportError as e: - print(f"ERROR (PredictionHandler): Failed to import backend/config modules: {e}") + # Update error message source + print(f"ERROR (RuleBasedPredictionHandler): Failed to import backend/config/base modules: {e}") # Define placeholders if imports fail Configuration = None load_base_config = None # Placeholder @@ -44,7 +42,7 @@ except ImportError as e: log = logging.getLogger(__name__) # Basic config if 
logger hasn't been set up elsewhere if not log.hasHandlers(): - logging.basicConfig(level=logging.INFO, format='%(levelname)s (PredictHandler): %(message)s') + logging.basicConfig(level=logging.INFO, format='%(levelname)s (RuleBasedPredictHandler): %(message)s') # Helper function for classification (can be moved outside class if preferred) @@ -303,254 +301,191 @@ def classify_files(file_list: List[str], config: Configuration) -> Dict[str, Lis return dict(temp_grouped_files) -class PredictionHandler(QObject): +class RuleBasedPredictionHandler(BasePredictionHandler): """ - Handles running predictions in a separate thread to avoid GUI freezes. + Handles running rule-based predictions in a separate thread using presets. Generates the initial SourceRule hierarchy based on file lists and presets. + Inherits from BasePredictionHandler for common threading and signaling. """ - # --- Signals --- - # Emitted when the hierarchical rule structure is ready for a single source - rule_hierarchy_ready = Signal(list) # Emits a LIST containing ONE SourceRule object - # Emitted when prediction/hierarchy generation for a source is done (emits the input_source_identifier) - prediction_finished = Signal(str) - # Emitted for status updates - status_message = Signal(str, int) - def __init__(self, parent=None): - super().__init__(parent) - self._is_running = False + def __init__(self, input_source_identifier: str, original_input_paths: list[str], preset_name: str, parent: QObject = None): + """ + Initializes the rule-based handler. - @property - def is_running(self): - return self._is_running + Args: + input_source_identifier: The unique identifier for the input source (e.g., file path). + original_input_paths: List of absolute file paths extracted from the source. + preset_name: The name of the preset configuration to use. + parent: The parent QObject. 
+ """ + super().__init__(input_source_identifier, parent) + self.original_input_paths = original_input_paths + self.preset_name = preset_name + # _is_running is handled by the base class + # Keep track of the current request being processed by this persistent handler + self._current_input_path = None + self._current_file_list = None + self._current_preset_name = None - # Removed _predict_single_asset method - - @Slot(str, list, str) # Explicitly define types for the slot + # Re-introduce run_prediction as the main slot to receive requests + @Slot(str, list, str) def run_prediction(self, input_source_identifier: str, original_input_paths: list[str], preset_name: str): """ - Generates the initial SourceRule hierarchy for a given source identifier - (which could be a folder or archive path), extracting the actual file list first. + Generates the initial SourceRule hierarchy for a given source identifier, file list, and preset name. Populates only overridable fields based on classification and preset defaults. - This method is intended to be run in a separate QThread. + This method is intended to be run in the handler's QThread. + Uses the base class signals for reporting results/errors. """ - thread_id = QThread.currentThread() - log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered PredictionHandler.run_prediction.") - # Note: file_list argument is renamed to original_input_paths for clarity, - # but the signal passes the list of source paths, not the content files yet. - # We use input_source_identifier as the primary path to analyze. - log.info(f"VERIFY: PredictionHandler received request. Source: '{input_source_identifier}', Original Paths: {original_input_paths}, Preset: '{preset_name}'") # DEBUG Verify - log.info(f"Source Identifier: '{input_source_identifier}', Preset: '{preset_name}'") - - if self._is_running: - log.warning("Prediction is already running for another source. Aborting this run.") - # Don't emit finished, let the running one complete. 
+ # Check if already running a prediction for a *different* source + # Allow re-triggering for the *same* source if needed (e.g., preset changed) + if self._is_running and self._current_input_path != input_source_identifier: + log.warning(f"RuleBasedPredictionHandler is busy with '{self._current_input_path}'. Ignoring request for '{input_source_identifier}'.") + # Optionally emit an error signal specific to this condition + # self.prediction_error.emit(input_source_identifier, "Handler busy with another prediction.") return - if not BACKEND_AVAILABLE: - log.error("Backend/config modules not available. Cannot run prediction.") - self.status_message.emit("Error: Backend components missing.", 5000) - # self.prediction_finished.emit() # Don't emit finished if never started properly - return - if not preset_name: - log.warning("No preset selected for prediction.") - self.status_message.emit("No preset selected.", 3000) - # self.prediction_finished.emit() - return - # Check the identifier path itself - source_path = Path(input_source_identifier) - if not source_path.exists(): - log.warning(f"Input source path does not exist: '{input_source_identifier}'. 
Skipping prediction.") - self.status_message.emit("Input path not found.", 3000) - self.rule_hierarchy_ready.emit([]) - self.prediction_finished.emit(input_source_identifier) - return - self._is_running = True - self.status_message.emit(f"Analyzing '{source_path.name}'...", 0) + self._is_cancelled = False # Reset cancellation flag for new request + self._current_input_path = input_source_identifier + self._current_file_list = original_input_paths + self._current_preset_name = preset_name - config: Configuration | None = None - # Removed: asset_type_definitions: Dict[str, Dict] = {} - # Removed: file_type_definitions: Dict[str, Dict] = {} # These are ItemType names + log.info(f"Starting rule-based prediction for: {input_source_identifier} using preset: {preset_name}") + self.status_update.emit(f"Starting analysis for '{Path(input_source_identifier).name}'...") # Use base signal - try: - config = Configuration(preset_name) - # Removed: Load allowed types from the project's config module (now dictionaries) - # Removed: if app_config: - # Removed: asset_type_definitions = getattr(app_config, 'ASSET_TYPE_DEFINITIONS', {}) - # Removed: file_type_definitions = getattr(app_config, 'FILE_TYPE_DEFINITIONS', {}) - # Removed: log.debug(f"Loaded AssetType Definitions: {list(asset_type_definitions.keys())}") - # Removed: log.debug(f"Loaded FileType Definitions (ItemTypes): {list(file_type_definitions.keys())}") - # Removed: else: - # Removed: log.warning("Project config module not loaded. 
Cannot get type definitions.") - - except ConfigurationError as e: - log.error(f"Failed to load configuration for preset '{preset_name}': {e}") - self.status_message.emit(f"Error loading preset '{preset_name}': {e}", 5000) - self.prediction_finished.emit(input_source_identifier) - self._is_running = False - return - except Exception as e: - log.exception(f"Unexpected error loading configuration or allowed types for preset '{preset_name}': {e}") - self.status_message.emit(f"Unexpected error loading preset '{preset_name}'.", 5000) - self.prediction_finished.emit(input_source_identifier) - self._is_running = False - return - - log.debug(f"DEBUG: Calling classify_files with file_list: {original_input_paths}") # DEBUG LOG - # --- Perform Classification --- - try: - classified_assets = classify_files(original_input_paths, config) - except Exception as e: - log.exception(f"Error during file classification for source '{input_source_identifier}': {e}") - self.status_message.emit(f"Error classifying files: {e}", 5000) - self.prediction_finished.emit(input_source_identifier) - self._is_running = False - return - - if not classified_assets: - log.warning(f"Classification yielded no assets for source '{input_source_identifier}'.") - self.status_message.emit("No assets identified from files.", 3000) - self.rule_hierarchy_ready.emit([]) # Emit empty list - self.prediction_finished.emit(input_source_identifier) - self._is_running = False - return - - # --- Build the Hierarchy --- source_rules_list = [] try: - # Determine SourceRule level overrides/defaults - # Get supplier name from the config property - supplier_identifier = config.supplier_name # Use the property + if not BACKEND_AVAILABLE: + raise RuntimeError("Backend/config modules not available. 
Cannot run prediction.") - # Create the single SourceRule for this input source - source_rule = SourceRule( - input_path=input_source_identifier, # Use the identifier provided - supplier_identifier=supplier_identifier, # Set overridable field - preset_name=preset_name # Pass the selected preset name - ) - log.debug(f"Created SourceRule for identifier: {input_source_identifier} with supplier: {supplier_identifier}") + if not preset_name: + log.warning("No preset selected for prediction.") + self.status_update.emit("No preset selected.") + # Emit empty list for non-critical issues, signal completion + self.prediction_ready.emit(input_source_identifier, []) + self._is_running = False # Mark as finished + return - asset_rules = [] - # Get allowed asset types from config's internal core settings - asset_type_definitions = config._core_settings.get('ASSET_TYPE_DEFINITIONS', {}) - log.debug(f"Loaded AssetType Definitions from config: {list(asset_type_definitions.keys())}") + source_path = Path(input_source_identifier) + if not source_path.exists(): + log.warning(f"Input source path does not exist: '{input_source_identifier}'. 
Skipping prediction.") + raise FileNotFoundError(f"Input source path not found: {input_source_identifier}") - for asset_name, files_info in classified_assets.items(): - if not files_info: continue # Skip empty asset groups + # --- Load Configuration --- + config = Configuration(preset_name) + log.info(f"Successfully loaded configuration for preset '{preset_name}'.") - # Determine AssetRule level overrides/defaults - item_types_in_asset = {f_info['item_type'] for f_info in files_info} - predicted_asset_type = "Surface" # Default to "Surface" string - material_indicators = {"MAP_COL", "MAP_NRM", "MAP_ROUGH", "MAP_METAL", "MAP_AO", "MAP_DISP", "COL", "NRM", "ROUGH", "METAL", "AO", "DISP"} # Added base types too - if any(it in material_indicators for it in item_types_in_asset if it not in ["EXTRA", "FILE_IGNORE"]): # Exclude non-maps - predicted_asset_type = "Surface" # Predict as "Surface" string + if self._is_cancelled: raise RuntimeError("Prediction cancelled before classification.") - # Ensure the predicted type is allowed, fallback if necessary - if asset_type_definitions and predicted_asset_type not in asset_type_definitions: - log.warning(f"Predicted AssetType '{predicted_asset_type}' for asset '{asset_name}' is not in ASSET_TYPE_DEFINITIONS from config. 
Falling back.") - default_type = config.default_asset_category - if default_type in asset_type_definitions: - predicted_asset_type = default_type - elif asset_type_definitions: - predicted_asset_type = list(asset_type_definitions.keys())[0] - else: - pass # Keep the original prediction if definitions are empty + # --- Perform Classification --- + self.status_update.emit(f"Classifying files for '{source_path.name}'...") + try: + classified_assets = classify_files(original_input_paths, config) + except Exception as e: + log.exception(f"Error during file classification for source '{input_source_identifier}': {e}") + raise RuntimeError(f"Error classifying files: {e}") from e + if self._is_cancelled: raise RuntimeError("Prediction cancelled after classification.") - asset_rule = AssetRule( - asset_name=asset_name, - asset_type=predicted_asset_type, + if not classified_assets: + log.warning(f"Classification yielded no assets for source '{input_source_identifier}'.") + self.status_update.emit("No assets identified from files.") + # Emit empty list, signal completion + self.prediction_ready.emit(input_source_identifier, []) + self._is_running = False # Mark as finished + return + + # --- Build the Hierarchy --- + self.status_update.emit(f"Building rule hierarchy for '{source_path.name}'...") + try: + # (Hierarchy building logic remains the same as before) + supplier_identifier = config.supplier_name + source_rule = SourceRule( + input_path=input_source_identifier, + supplier_identifier=supplier_identifier, + preset_name=preset_name ) - log.debug(f"Created AssetRule for asset: {asset_name} with type: {predicted_asset_type}") - - file_rules = [] + asset_rules = [] + asset_type_definitions = config._core_settings.get('ASSET_TYPE_DEFINITIONS', {}) file_type_definitions = config._core_settings.get('FILE_TYPE_DEFINITIONS', {}) - log.debug(f"Loaded FileType Definitions (ItemTypes) from config: {list(file_type_definitions.keys())}") - for file_info in files_info: - base_item_type 
= file_info['item_type'] - target_asset_name_override = file_info['asset_name'] + for asset_name, files_info in classified_assets.items(): + if self._is_cancelled: raise RuntimeError("Prediction cancelled during hierarchy building (assets).") + if not files_info: continue - # Determine the final item_type string (prefix maps, check if allowed) - final_item_type = base_item_type - if not base_item_type.startswith("MAP_") and base_item_type not in ["FILE_IGNORE", "EXTRA", "MODEL"]: - final_item_type = f"MAP_{base_item_type}" + item_types_in_asset = {f_info['item_type'] for f_info in files_info} + predicted_asset_type = "Surface" + material_indicators = {"MAP_COL", "MAP_NRM", "MAP_ROUGH", "MAP_METAL", "MAP_AO", "MAP_DISP", "COL", "NRM", "ROUGH", "METAL", "AO", "DISP"} + if any(it in material_indicators for it in item_types_in_asset if it not in ["EXTRA", "FILE_IGNORE"]): + predicted_asset_type = "Surface" - # Check if the final type is allowed - if file_type_definitions and final_item_type not in file_type_definitions and base_item_type not in ["FILE_IGNORE", "EXTRA"]: - log.warning(f"Predicted ItemType '{base_item_type}' (checked as '{final_item_type}') for file '{file_info['file_path']}' is not in FILE_TYPE_DEFINITIONS. Setting to FILE_IGNORE.") - final_item_type = "FILE_IGNORE" + if asset_type_definitions and predicted_asset_type not in asset_type_definitions: + log.warning(f"Predicted AssetType '{predicted_asset_type}' for asset '{asset_name}' is not in ASSET_TYPE_DEFINITIONS. 
Falling back.") + default_type = config.default_asset_category + if default_type in asset_type_definitions: predicted_asset_type = default_type + elif asset_type_definitions: predicted_asset_type = list(asset_type_definitions.keys())[0] + asset_rule = AssetRule(asset_name=asset_name, asset_type=predicted_asset_type) + file_rules = [] + for file_info in files_info: + if self._is_cancelled: raise RuntimeError("Prediction cancelled during hierarchy building (files).") - # Retrieve the standard_type - standard_map_type = None - file_type_details = file_type_definitions.get(final_item_type) - if file_type_details: - standard_map_type = file_type_details.get('standard_type') - log.debug(f" Found standard_type '{standard_map_type}' for final_item_type '{final_item_type}'") - else: - file_type_details_alias = file_type_definitions.get(base_item_type) - if file_type_details_alias: - standard_map_type = file_type_details_alias.get('standard_type') - log.debug(f" Found standard_type '{standard_map_type}' via alias lookup for base_item_type '{base_item_type}'") - elif base_item_type in file_type_definitions: - standard_map_type = base_item_type - log.debug(f" Using base_item_type '{base_item_type}' itself as standard_map_type.") + base_item_type = file_info['item_type'] + target_asset_name_override = file_info['asset_name'] + final_item_type = base_item_type + if not base_item_type.startswith("MAP_") and base_item_type not in ["FILE_IGNORE", "EXTRA", "MODEL"]: + final_item_type = f"MAP_{base_item_type}" + + if file_type_definitions and final_item_type not in file_type_definitions and base_item_type not in ["FILE_IGNORE", "EXTRA"]: + log.warning(f"Predicted ItemType '{base_item_type}' (checked as '{final_item_type}') for file '{file_info['file_path']}' is not in FILE_TYPE_DEFINITIONS. 
Setting to FILE_IGNORE.") + final_item_type = "FILE_IGNORE" + + standard_map_type = None + file_type_details = file_type_definitions.get(final_item_type) + if file_type_details: standard_map_type = file_type_details.get('standard_type') else: - log.debug(f" Could not determine standard_map_type for base '{base_item_type}' / final '{final_item_type}'. Setting to None.") + file_type_details_alias = file_type_definitions.get(base_item_type) + if file_type_details_alias: standard_map_type = file_type_details_alias.get('standard_type') + elif base_item_type in file_type_definitions: standard_map_type = base_item_type + is_gloss_source_value = file_info.get('is_gloss_source', False) - output_format_override = None - item_type_override = None + file_rule = FileRule( + file_path=file_info['file_path'], + item_type=final_item_type, + item_type_override=final_item_type, + target_asset_name_override=target_asset_name_override, + output_format_override=None, + is_gloss_source=is_gloss_source_value if isinstance(is_gloss_source_value, bool) else False, + standard_map_type=standard_map_type, + resolution_override=None, + channel_merge_instructions={}, + ) + file_rules.append(file_rule) + asset_rule.files = file_rules + asset_rules.append(asset_rule) + source_rule.assets = asset_rules + source_rules_list.append(source_rule) - log.debug(f" Creating FileRule for: {file_info['file_path']}") - log.debug(f" Base Item Type (from classification): {base_item_type}") - log.debug(f" Final Item Type (for model): {final_item_type}") - log.debug(f" Target Asset Name Override: {target_asset_name_override}") - log.debug(f" Determined Standard Map Type: {standard_map_type}") - is_gloss_source_value = file_info.get('is_gloss_source', 'MISSING') - log.debug(f" Value for 'is_gloss_source' from file_info: {is_gloss_source_value}") + except Exception as e: + log.exception(f"Error building rule hierarchy for source '{input_source_identifier}': {e}") + raise RuntimeError(f"Error building rule 
hierarchy: {e}") from e - - file_rule = FileRule( - file_path=file_info['file_path'], - item_type=final_item_type, - item_type_override=final_item_type, - target_asset_name_override=target_asset_name_override, - output_format_override=output_format_override, - is_gloss_source=is_gloss_source_value if isinstance(is_gloss_source_value, bool) else False, - standard_map_type=standard_map_type, - resolution_override=None, - channel_merge_instructions={}, - ) - file_rules.append(file_rule) - - asset_rule.files = file_rules - asset_rules.append(asset_rule) - - source_rule.assets = asset_rules - log.debug(f"Built SourceRule '{source_rule.input_path}' with {len(asset_rules)} AssetRule(s).") - source_rules_list.append(source_rule) + # --- Emit Success Signal --- + log.info(f"Rule-based prediction finished successfully for '{input_source_identifier}'.") + self.prediction_ready.emit(input_source_identifier, source_rules_list) # Use base signal except Exception as e: - log.exception(f"Error building rule hierarchy for source '{input_source_identifier}': {e}") - self.status_message.emit(f"Error building rules: {e}", 5000) - self.prediction_finished.emit(input_source_identifier) - self._is_running = False - return + # --- Emit Error Signal --- + log.exception(f"Error during rule-based prediction for '{input_source_identifier}': {e}") + error_msg = f"Error analyzing '{Path(input_source_identifier).name}': {e}" + self.prediction_error.emit(input_source_identifier, error_msg) # Use base signal - - # --- Emit Results --- - log.info(f"VERIFY: Emitting rule_hierarchy_ready with {len(source_rules_list)} SourceRule(s).") - for i, rule in enumerate(source_rules_list): - log.debug(f" VERIFY Rule {i}: Input='{rule.input_path}', Assets={len(rule.assets)}") - log.info(f"[{time.time():.4f}][T:{thread_id}] Prediction run finished. 
Emitting hierarchy for '{input_source_identifier}'.") - self.rule_hierarchy_ready.emit(source_rules_list) - log.info(f"[{time.time():.4f}][T:{thread_id}] Emitted rule_hierarchy_ready signal.") - - self.status_message.emit(f"Analysis complete for '{input_source_identifier}'.", 3000) - self.prediction_finished.emit(input_source_identifier) - self._is_running = False - log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting PredictionHandler.run_prediction.") + finally: + # --- Cleanup --- + self._is_running = False + self._current_input_path = None # Clear current task info + self._current_file_list = None + self._current_preset_name = None + log.info(f"Finished rule-based prediction run for: {input_source_identifier}") diff --git a/gui/preset_editor_widget.py b/gui/preset_editor_widget.py new file mode 100644 index 0000000..a63bd4a --- /dev/null +++ b/gui/preset_editor_widget.py @@ -0,0 +1,717 @@ +import sys +import os +import json +import logging +from pathlib import Path +from functools import partial + +from PySide6.QtWidgets import ( + QWidget, QVBoxLayout, QHBoxLayout, QListWidget, QPushButton, QLabel, QTabWidget, + QLineEdit, QTextEdit, QSpinBox, QTableWidget, QGroupBox, QFormLayout, + QHeaderView, QAbstractItemView, QListWidgetItem, QTableWidgetItem, QMessageBox, + QFileDialog, QInputDialog, QSizePolicy +) +from PySide6.QtCore import Qt, Signal, QObject, Slot +from PySide6.QtGui import QAction # Keep QAction if needed for context menus within editor later + +# --- Constants --- +# Assuming project root is parent of the directory containing this file +script_dir = Path(__file__).parent +project_root = script_dir.parent +PRESETS_DIR = project_root / "Presets" # Corrected path +TEMPLATE_PATH = PRESETS_DIR / "_template.json" + +log = logging.getLogger(__name__) + +# --- Preset Editor Widget --- + +class PresetEditorWidget(QWidget): + """ + Widget dedicated to managing and editing presets. + Contains the preset list, editor tabs, and save/load functionality. 
+ """ + # Signal emitted when presets list changes (saved, deleted, new) + presets_changed_signal = Signal() + # Signal emitted when the selected preset (or LLM/Placeholder) changes + # Emits: mode ("preset", "llm", "placeholder"), preset_name (str or None) + preset_selection_changed_signal = Signal(str, str) + + def __init__(self, parent=None): + super().__init__(parent) + + # --- Internal State --- + self._last_valid_preset_name = None # Store the name of the last valid preset loaded + self.current_editing_preset_path = None + self.editor_unsaved_changes = False + self._is_loading_editor = False # Flag to prevent signals during load + + # --- UI Setup --- + self._init_ui() + + # --- Initial State --- + self._clear_editor() # Clear/disable editor fields initially + self._set_editor_enabled(False) # Disable editor initially + self.populate_presets() # Populate preset list + + # --- Connect Editor Signals --- + self._connect_editor_change_signals() + + def _init_ui(self): + """Initializes the UI elements for the preset editor.""" + editor_layout = QVBoxLayout(self) + editor_layout.setContentsMargins(5, 5, 5, 5) # Reduce margins + + # Preset List and Controls + list_layout = QVBoxLayout() + list_layout.addWidget(QLabel("Presets:")) + self.editor_preset_list = QListWidget() + self.editor_preset_list.currentItemChanged.connect(self._load_selected_preset_for_editing) + list_layout.addWidget(self.editor_preset_list) + + list_button_layout = QHBoxLayout() + self.editor_new_button = QPushButton("New") + self.editor_delete_button = QPushButton("Delete") + self.editor_new_button.clicked.connect(self._new_preset) + self.editor_delete_button.clicked.connect(self._delete_selected_preset) + list_button_layout.addWidget(self.editor_new_button) + list_button_layout.addWidget(self.editor_delete_button) + list_layout.addLayout(list_button_layout) + editor_layout.addLayout(list_layout, 1) # Allow list to stretch + + # Editor Tabs + self.editor_tab_widget = QTabWidget() + 
self.editor_tab_general_naming = QWidget() + self.editor_tab_mapping_rules = QWidget() + self.editor_tab_widget.addTab(self.editor_tab_general_naming, "General & Naming") + self.editor_tab_widget.addTab(self.editor_tab_mapping_rules, "Mapping & Rules") + self._create_editor_general_tab() + self._create_editor_mapping_tab() + editor_layout.addWidget(self.editor_tab_widget, 3) # Allow tabs to stretch more + + # Save Buttons + save_button_layout = QHBoxLayout() + self.editor_save_button = QPushButton("Save") + self.editor_save_as_button = QPushButton("Save As...") + self.editor_save_button.setEnabled(False) # Disabled initially + self.editor_save_button.clicked.connect(self._save_current_preset) + self.editor_save_as_button.clicked.connect(self._save_preset_as) + save_button_layout.addStretch() + save_button_layout.addWidget(self.editor_save_button) + save_button_layout.addWidget(self.editor_save_as_button) + editor_layout.addLayout(save_button_layout) + + def _create_editor_general_tab(self): + """Creates the widgets and layout for the 'General & Naming' editor tab.""" + layout = QVBoxLayout(self.editor_tab_general_naming) + form_layout = QFormLayout() + form_layout.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow) + + # Basic Info + self.editor_preset_name = QLineEdit() + self.editor_supplier_name = QLineEdit() + self.editor_notes = QTextEdit() + self.editor_notes.setAcceptRichText(False) + self.editor_notes.setFixedHeight(60) + form_layout.addRow("Preset Name:", self.editor_preset_name) + form_layout.addRow("Supplier Name:", self.editor_supplier_name) + form_layout.addRow("Notes:", self.editor_notes) + layout.addLayout(form_layout) + + # Source Naming Group + naming_group = QGroupBox("Source File Naming Rules") + naming_layout_outer = QVBoxLayout(naming_group) + naming_layout_form = QFormLayout() + self.editor_separator = QLineEdit() + self.editor_separator.setMaxLength(1) + self.editor_spin_base_name_idx = QSpinBox() + 
self.editor_spin_base_name_idx.setMinimum(-1) + self.editor_spin_map_type_idx = QSpinBox() + self.editor_spin_map_type_idx.setMinimum(-1) + naming_layout_form.addRow("Separator:", self.editor_separator) + naming_layout_form.addRow("Base Name Index:", self.editor_spin_base_name_idx) + naming_layout_form.addRow("Map Type Index:", self.editor_spin_map_type_idx) + naming_layout_outer.addLayout(naming_layout_form) + # Gloss Keywords List + self._setup_list_widget_with_controls(naming_layout_outer, "Glossiness Keywords", "editor_list_gloss_keywords") + # Bit Depth Variants Table + self._setup_table_widget_with_controls(naming_layout_outer, "16-bit Variant Patterns", "editor_table_bit_depth_variants", ["Map Type", "Pattern"]) + self.editor_table_bit_depth_variants.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents) + self.editor_table_bit_depth_variants.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch) + layout.addWidget(naming_group) + + # Extra Files Group + self._setup_list_widget_with_controls(layout, "Move to 'Extra' Folder Patterns", "editor_list_extra_patterns") + + layout.addStretch(1) + + def _create_editor_mapping_tab(self): + """Creates the widgets and layout for the 'Mapping & Rules' editor tab.""" + layout = QVBoxLayout(self.editor_tab_mapping_rules) + + # Map Type Mapping Group + self._setup_table_widget_with_controls(layout, "Map Type Mapping (Standard Type <- Input Keywords)", "editor_table_map_type_mapping", ["Standard Type", "Input Keywords (comma-sep)"]) + self.editor_table_map_type_mapping.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents) + self.editor_table_map_type_mapping.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch) + + # Category Rules Group + category_group = QGroupBox("Asset Category Rules") + category_layout = QVBoxLayout(category_group) + self._setup_list_widget_with_controls(category_layout, "Model File Patterns", 
"editor_list_model_patterns") + self._setup_list_widget_with_controls(category_layout, "Decal Keywords", "editor_list_decal_keywords") + layout.addWidget(category_group) + + # Archetype Rules Group + self._setup_table_widget_with_controls(layout, "Archetype Rules", "editor_table_archetype_rules", ["Archetype Name", "Match Any (comma-sep)", "Match All (comma-sep)"]) + self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents) + self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch) + self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(2, QHeaderView.ResizeMode.Stretch) + + layout.addStretch(1) + + # --- Helper Functions for UI Setup (Moved into class) --- + def _setup_list_widget_with_controls(self, parent_layout, label_text, attribute_name): + """Adds a QListWidget with Add/Remove buttons to a layout.""" + list_widget = QListWidget() + list_widget.setAlternatingRowColors(True) + list_widget.setEditTriggers(QAbstractItemView.EditTrigger.DoubleClicked | QAbstractItemView.EditTrigger.SelectedClicked | QAbstractItemView.EditTrigger.EditKeyPressed) + setattr(self, attribute_name, list_widget) # Store list widget on the instance + + add_button = QPushButton("+") + remove_button = QPushButton("-") + add_button.setFixedWidth(30) + remove_button.setFixedWidth(30) + + button_layout = QVBoxLayout() + button_layout.addWidget(add_button) + button_layout.addWidget(remove_button) + button_layout.addStretch() + + list_layout = QHBoxLayout() + list_layout.addWidget(list_widget) + list_layout.addLayout(button_layout) + + group_box = QGroupBox(label_text) + group_box_layout = QVBoxLayout(group_box) + group_box_layout.addLayout(list_layout) + + parent_layout.addWidget(group_box) + + # Connections + add_button.clicked.connect(partial(self._editor_add_list_item, list_widget)) + remove_button.clicked.connect(partial(self._editor_remove_list_item, 
list_widget)) + list_widget.itemChanged.connect(self._mark_editor_unsaved) # Mark unsaved on item edit + + def _setup_table_widget_with_controls(self, parent_layout, label_text, attribute_name, columns): + """Adds a QTableWidget with Add/Remove buttons to a layout.""" + table_widget = QTableWidget() + table_widget.setColumnCount(len(columns)) + table_widget.setHorizontalHeaderLabels(columns) + table_widget.setAlternatingRowColors(True) + setattr(self, attribute_name, table_widget) # Store table widget + + add_button = QPushButton("+ Row") + remove_button = QPushButton("- Row") + + button_layout = QHBoxLayout() + button_layout.addStretch() + button_layout.addWidget(add_button) + button_layout.addWidget(remove_button) + + group_box = QGroupBox(label_text) + group_box_layout = QVBoxLayout(group_box) + group_box_layout.addWidget(table_widget) + group_box_layout.addLayout(button_layout) + + parent_layout.addWidget(group_box) + + # Connections + add_button.clicked.connect(partial(self._editor_add_table_row, table_widget)) + remove_button.clicked.connect(partial(self._editor_remove_table_row, table_widget)) + table_widget.itemChanged.connect(self._mark_editor_unsaved) # Mark unsaved on item edit + + # --- Preset Population and Handling --- + def populate_presets(self): + """Scans presets dir and populates the editor list.""" + log.debug("Populating preset list in PresetEditorWidget...") + current_list_item = self.editor_preset_list.currentItem() + current_list_selection_text = current_list_item.text() if current_list_item else None + + self.editor_preset_list.clear() + log.debug("Preset list cleared.") + + # Add the "Select a Preset" placeholder item + placeholder_item = QListWidgetItem("--- Select a Preset ---") + placeholder_item.setFlags(placeholder_item.flags() & ~Qt.ItemFlag.ItemIsSelectable & ~Qt.ItemFlag.ItemIsEditable) + placeholder_item.setData(Qt.ItemDataRole.UserRole, "__PLACEHOLDER__") + self.editor_preset_list.addItem(placeholder_item) + log.debug("Added '--- 
Select a Preset ---' placeholder item.") + + # Add LLM Option + llm_item = QListWidgetItem("- LLM Interpretation -") + llm_item.setData(Qt.ItemDataRole.UserRole, "__LLM__") # Special identifier + self.editor_preset_list.addItem(llm_item) + log.debug("Added '- LLM Interpretation -' item.") + + if not PRESETS_DIR.is_dir(): + msg = f"Error: Presets directory not found at {PRESETS_DIR}" + log.error(msg) + # Consider emitting a status signal to MainWindow? + return + + presets = sorted([f for f in PRESETS_DIR.glob("*.json") if f.is_file() and not f.name.startswith('_')]) + + if not presets: + msg = "Warning: No presets found in presets directory." + log.warning(msg) + else: + for preset_path in presets: + item = QListWidgetItem(preset_path.stem) + item.setData(Qt.ItemDataRole.UserRole, preset_path) # Store full path + self.editor_preset_list.addItem(item) + log.info(f"Loaded {len(presets)} presets into editor list.") + + # Select the "Select a Preset" item by default + log.debug("Preset list populated. 
Selecting '--- Select a Preset ---' item.") + self.editor_preset_list.setCurrentItem(placeholder_item) # Select the placeholder item + + # --- Preset Editor Methods --- + + def _editor_add_list_item(self, list_widget: QListWidget): + """Adds an editable item to the specified list widget in the editor.""" + text, ok = QInputDialog.getText(self, f"Add Item", "Enter value:") + if ok and text: + item = QListWidgetItem(text) + list_widget.addItem(item) + self._mark_editor_unsaved() + + def _editor_remove_list_item(self, list_widget: QListWidget): + """Removes the selected item from the specified list widget in the editor.""" + selected_items = list_widget.selectedItems() + if not selected_items: return + for item in selected_items: list_widget.takeItem(list_widget.row(item)) + self._mark_editor_unsaved() + + def _editor_add_table_row(self, table_widget: QTableWidget): + """Adds an empty row to the specified table widget in the editor.""" + row_count = table_widget.rowCount() + table_widget.insertRow(row_count) + for col in range(table_widget.columnCount()): table_widget.setItem(row_count, col, QTableWidgetItem("")) + self._mark_editor_unsaved() + + def _editor_remove_table_row(self, table_widget: QTableWidget): + """Removes the selected row(s) from the specified table widget in the editor.""" + selected_rows = sorted(list(set(index.row() for index in table_widget.selectedIndexes())), reverse=True) + if not selected_rows: + if table_widget.rowCount() > 0: selected_rows = [table_widget.rowCount() - 1] + else: return + for row in selected_rows: table_widget.removeRow(row) + self._mark_editor_unsaved() + + def _mark_editor_unsaved(self): + """Marks changes in the editor panel as unsaved.""" + if self._is_loading_editor: return + self.editor_unsaved_changes = True + self.editor_save_button.setEnabled(True) + # Update window title (handled by MainWindow) - maybe emit signal? 
+ # preset_name = Path(self.current_editing_preset_path).name if self.current_editing_preset_path else 'New Preset' + # self.window().setWindowTitle(f"Asset Processor Tool - {preset_name}*") # Access parent window + + def _connect_editor_change_signals(self): + """Connect signals from all editor widgets to mark_editor_unsaved.""" + self.editor_preset_name.textChanged.connect(self._mark_editor_unsaved) + self.editor_supplier_name.textChanged.connect(self._mark_editor_unsaved) + self.editor_notes.textChanged.connect(self._mark_editor_unsaved) + self.editor_separator.textChanged.connect(self._mark_editor_unsaved) + self.editor_spin_base_name_idx.valueChanged.connect(self._mark_editor_unsaved) + self.editor_spin_map_type_idx.valueChanged.connect(self._mark_editor_unsaved) + # List/Table widgets are connected via helper functions + + def check_unsaved_changes(self) -> bool: + """ + Checks for unsaved changes in the editor and prompts the user. + Returns True if the calling action should be cancelled. + (Called by MainWindow's closeEvent or before loading a new preset). + """ + if not self.editor_unsaved_changes: return False # No unsaved changes, proceed + reply = QMessageBox.question(self, "Unsaved Preset Changes", # Use self as parent + "You have unsaved changes in the preset editor. 
Discard them?", + QMessageBox.StandardButton.Save | QMessageBox.StandardButton.Discard | QMessageBox.StandardButton.Cancel, + QMessageBox.StandardButton.Cancel) + if reply == QMessageBox.StandardButton.Save: + save_successful = self._save_current_preset() + return not save_successful # Return True (cancel) if save fails + elif reply == QMessageBox.StandardButton.Discard: + return False # Discarded, proceed + else: # Cancelled + return True # Cancel the original action + + def _set_editor_enabled(self, enabled: bool): + """Enables or disables all editor widgets.""" + self.editor_tab_widget.setEnabled(enabled) + self.editor_save_button.setEnabled(enabled and self.editor_unsaved_changes) + self.editor_save_as_button.setEnabled(enabled) # Save As is always possible if editor is enabled + + def _clear_editor(self): + """Clears the editor fields and resets state.""" + self._is_loading_editor = True + try: + self.editor_preset_name.clear() + self.editor_supplier_name.clear() + self.editor_notes.clear() + self.editor_separator.clear() + self.editor_spin_base_name_idx.setValue(0) + self.editor_spin_map_type_idx.setValue(1) + self.editor_list_gloss_keywords.clear() + self.editor_table_bit_depth_variants.setRowCount(0) + self.editor_list_extra_patterns.clear() + self.editor_table_map_type_mapping.setRowCount(0) + self.editor_list_model_patterns.clear() + self.editor_list_decal_keywords.clear() + self.editor_table_archetype_rules.setRowCount(0) + self.current_editing_preset_path = None + self.editor_unsaved_changes = False + self.editor_save_button.setEnabled(False) + # self.window().setWindowTitle("Asset Processor Tool") # Reset window title (handled by MainWindow) + self._set_editor_enabled(False) + finally: + self._is_loading_editor = False + + def _populate_editor_from_data(self, preset_data: dict): + """Helper method to populate editor UI widgets from a preset data dictionary.""" + self._is_loading_editor = True + try: + 
self.editor_preset_name.setText(preset_data.get("preset_name", "")) + self.editor_supplier_name.setText(preset_data.get("supplier_name", "")) + self.editor_notes.setText(preset_data.get("notes", "")) + naming_data = preset_data.get("source_naming", {}) + self.editor_separator.setText(naming_data.get("separator", "_")) + indices = naming_data.get("part_indices", {}) + self.editor_spin_base_name_idx.setValue(indices.get("base_name", 0)) + self.editor_spin_map_type_idx.setValue(indices.get("map_type", 1)) + self.editor_list_gloss_keywords.clear() + self.editor_list_gloss_keywords.addItems(naming_data.get("glossiness_keywords", [])) + self.editor_table_bit_depth_variants.setRowCount(0) + bit_depth_vars = naming_data.get("bit_depth_variants", {}) + for i, (map_type, pattern) in enumerate(bit_depth_vars.items()): + self.editor_table_bit_depth_variants.insertRow(i) + self.editor_table_bit_depth_variants.setItem(i, 0, QTableWidgetItem(map_type)) + self.editor_table_bit_depth_variants.setItem(i, 1, QTableWidgetItem(pattern)) + self.editor_list_extra_patterns.clear() + self.editor_list_extra_patterns.addItems(preset_data.get("move_to_extra_patterns", [])) + self.editor_table_map_type_mapping.setRowCount(0) + map_mappings = preset_data.get("map_type_mapping", []) + for i, mapping_dict in enumerate(map_mappings): + if isinstance(mapping_dict, dict) and "target_type" in mapping_dict and "keywords" in mapping_dict: + std_type = mapping_dict["target_type"] + keywords = mapping_dict["keywords"] + self.editor_table_map_type_mapping.insertRow(i) + self.editor_table_map_type_mapping.setItem(i, 0, QTableWidgetItem(std_type)) + keywords_str = [str(k) for k in keywords if isinstance(k, str)] + self.editor_table_map_type_mapping.setItem(i, 1, QTableWidgetItem(", ".join(keywords_str))) + else: + log.warning(f"Skipping invalid map_type_mapping item during editor population: {mapping_dict}") + category_rules = preset_data.get("asset_category_rules", {}) + 
self.editor_list_model_patterns.clear() + self.editor_list_model_patterns.addItems(category_rules.get("model_patterns", [])) + self.editor_list_decal_keywords.clear() + self.editor_list_decal_keywords.addItems(category_rules.get("decal_keywords", [])) + # Archetype rules population (assuming table exists) + self.editor_table_archetype_rules.setRowCount(0) + arch_rules_data = preset_data.get("archetype_rules", []) + for i, rule_entry in enumerate(arch_rules_data): + # Handle both list and dict format for backward compatibility? Assuming list for now. + if isinstance(rule_entry, (list, tuple)) and len(rule_entry) == 2: + name, conditions = rule_entry + if isinstance(conditions, dict): + match_any = conditions.get("match_any", []) + match_all = conditions.get("match_all", []) + self.editor_table_archetype_rules.insertRow(i) + self.editor_table_archetype_rules.setItem(i, 0, QTableWidgetItem(str(name))) + self.editor_table_archetype_rules.setItem(i, 1, QTableWidgetItem(", ".join(map(str, match_any)))) + self.editor_table_archetype_rules.setItem(i, 2, QTableWidgetItem(", ".join(map(str, match_all)))) + else: + log.warning(f"Skipping invalid archetype rule condition format: {conditions}") + else: + log.warning(f"Skipping invalid archetype rule format: {rule_entry}") + + finally: + self._is_loading_editor = False + + def _load_preset_for_editing(self, file_path: Path): + """Loads the content of the selected preset file into the editor widgets.""" + if not file_path or not file_path.is_file(): + self._clear_editor() + return + log.info(f"Loading preset into editor: {file_path.name}") + try: + with open(file_path, 'r', encoding='utf-8') as f: preset_data = json.load(f) + self._populate_editor_from_data(preset_data) + self._set_editor_enabled(True) + self.current_editing_preset_path = file_path + self.editor_unsaved_changes = False + self.editor_save_button.setEnabled(False) + # self.window().setWindowTitle(f"Asset Processor Tool - {file_path.name}") # Handled by MainWindow + 
log.info(f"Preset '{file_path.name}' loaded into editor.") + except json.JSONDecodeError as json_err: + log.error(f"Invalid JSON in {file_path.name}: {json_err}") + QMessageBox.warning(self, "Load Error", f"Failed to load preset '{file_path.name}'.\nInvalid JSON structure:\n{json_err}") + self._clear_editor() + except Exception as e: + log.exception(f"Error loading preset file {file_path}: {e}") + QMessageBox.critical(self, "Error", f"Could not load preset file:\n{file_path}\n\nError: {e}") + self._clear_editor() + + @Slot(QListWidgetItem, QListWidgetItem) + def _load_selected_preset_for_editing(self, current_item: QListWidgetItem, previous_item: QListWidgetItem): + """Loads the preset currently selected in the editor list and emits selection change signal.""" + log.debug(f"PresetEditor: currentItemChanged signal triggered. current: {current_item.text() if current_item else 'None'}") + + mode = "placeholder" + preset_name = None + + # Check for unsaved changes before proceeding + if self.check_unsaved_changes(): + # If user cancels, revert selection + if previous_item: + log.debug("Unsaved changes check cancelled. 
Reverting selection.") + self.editor_preset_list.blockSignals(True) + self.editor_preset_list.setCurrentItem(previous_item) + self.editor_preset_list.blockSignals(False) + return # Stop processing + + # Determine mode and preset name based on selection + if current_item: + item_data = current_item.data(Qt.ItemDataRole.UserRole) + if item_data == "__PLACEHOLDER__": + log.debug("Placeholder item selected.") + self._clear_editor() + self._set_editor_enabled(False) + mode = "placeholder" + self._last_valid_preset_name = None # Clear last valid name + elif item_data == "__LLM__": + log.debug("LLM Interpretation item selected.") + self._clear_editor() + self._set_editor_enabled(False) + mode = "llm" + # Keep _last_valid_preset_name as it was + elif isinstance(item_data, Path): + log.debug(f"Loading preset for editing: {current_item.text()}") + preset_path = item_data + self._load_preset_for_editing(preset_path) + self._last_valid_preset_name = preset_path.stem # Store the name + mode = "preset" + preset_name = self._last_valid_preset_name + else: + log.error(f"Invalid data type for preset path: {type(item_data)}. Clearing editor.") + self._clear_editor() + self._set_editor_enabled(False) + mode = "placeholder" # Treat as placeholder on error + self._last_valid_preset_name = None + else: + log.debug("No preset selected. 
Clearing editor.") + self._clear_editor() + self._set_editor_enabled(False) + mode = "placeholder" + self._last_valid_preset_name = None + + # Emit the signal regardless of what was selected + log.debug(f"Emitting preset_selection_changed_signal: mode='{mode}', preset_name='{preset_name}'") + self.preset_selection_changed_signal.emit(mode, preset_name) + + def _gather_editor_data(self) -> dict: + """Gathers data from all editor UI widgets and returns a dictionary.""" + preset_data = {} + preset_data["preset_name"] = self.editor_preset_name.text().strip() + preset_data["supplier_name"] = self.editor_supplier_name.text().strip() + preset_data["notes"] = self.editor_notes.toPlainText().strip() + naming_data = {} + naming_data["separator"] = self.editor_separator.text() + naming_data["part_indices"] = { "base_name": self.editor_spin_base_name_idx.value(), "map_type": self.editor_spin_map_type_idx.value() } + naming_data["glossiness_keywords"] = [self.editor_list_gloss_keywords.item(i).text() for i in range(self.editor_list_gloss_keywords.count())] + naming_data["bit_depth_variants"] = {self.editor_table_bit_depth_variants.item(r, 0).text(): self.editor_table_bit_depth_variants.item(r, 1).text() + for r in range(self.editor_table_bit_depth_variants.rowCount()) if self.editor_table_bit_depth_variants.item(r, 0) and self.editor_table_bit_depth_variants.item(r, 1)} + preset_data["source_naming"] = naming_data + preset_data["move_to_extra_patterns"] = [self.editor_list_extra_patterns.item(i).text() for i in range(self.editor_list_extra_patterns.count())] + map_mappings = [] + for r in range(self.editor_table_map_type_mapping.rowCount()): + type_item = self.editor_table_map_type_mapping.item(r, 0) + keywords_item = self.editor_table_map_type_mapping.item(r, 1) + if type_item and type_item.text() and keywords_item and keywords_item.text(): + target_type = type_item.text().strip() + keywords = [k.strip() for k in keywords_item.text().split(',') if k.strip()] + if target_type 
and keywords: + map_mappings.append({"target_type": target_type, "keywords": keywords}) + else: log.warning(f"Skipping row {r} in map type mapping table due to empty target type or keywords.") + else: log.warning(f"Skipping row {r} in map type mapping table due to missing items.") + preset_data["map_type_mapping"] = map_mappings + category_rules = {} + category_rules["model_patterns"] = [self.editor_list_model_patterns.item(i).text() for i in range(self.editor_list_model_patterns.count())] + category_rules["decal_keywords"] = [self.editor_list_decal_keywords.item(i).text() for i in range(self.editor_list_decal_keywords.count())] + preset_data["asset_category_rules"] = category_rules + arch_rules = [] + for r in range(self.editor_table_archetype_rules.rowCount()): + name_item = self.editor_table_archetype_rules.item(r, 0) + any_item = self.editor_table_archetype_rules.item(r, 1) + all_item = self.editor_table_archetype_rules.item(r, 2) + if name_item and name_item.text() and any_item and all_item: # Check name has text + match_any = [k.strip() for k in any_item.text().split(',') if k.strip()] + match_all = [k.strip() for k in all_item.text().split(',') if k.strip()] + # Only add if name is present and at least one condition list is non-empty? Or allow empty conditions? + # Let's allow empty conditions for now. 
+ arch_rules.append([name_item.text().strip(), {"match_any": match_any, "match_all": match_all}]) + else: + log.warning(f"Skipping row {r} in archetype rules table due to missing items or empty name.") + preset_data["archetype_rules"] = arch_rules + return preset_data + + def _save_current_preset(self) -> bool: + """Saves the current editor content to the currently loaded file path.""" + if not self.current_editing_preset_path: return self._save_preset_as() + log.info(f"Saving preset: {self.current_editing_preset_path.name}") + try: + preset_data = self._gather_editor_data() + if not preset_data.get("preset_name"): QMessageBox.warning(self, "Save Error", "Preset Name cannot be empty."); return False + if not preset_data.get("supplier_name"): QMessageBox.warning(self, "Save Error", "Supplier Name cannot be empty."); return False + content_to_save = json.dumps(preset_data, indent=4, ensure_ascii=False) + with open(self.current_editing_preset_path, 'w', encoding='utf-8') as f: f.write(content_to_save) + self.editor_unsaved_changes = False + self.editor_save_button.setEnabled(False) + # self.window().setWindowTitle(f"Asset Processor Tool - {self.current_editing_preset_path.name}") # Handled by MainWindow + self.presets_changed_signal.emit() # Signal that presets changed + log.info("Preset saved successfully.") + # Refresh list within the editor + self.populate_presets() + # Reselect the saved item + items = self.editor_preset_list.findItems(self.current_editing_preset_path.stem, Qt.MatchFlag.MatchExactly) + if items: self.editor_preset_list.setCurrentItem(items[0]) + return True + except Exception as e: + log.exception(f"Error saving preset file {self.current_editing_preset_path}: {e}") + QMessageBox.critical(self, "Save Error", f"Could not save preset file:\n{self.current_editing_preset_path}\n\nError: {e}") + return False + + def _save_preset_as(self) -> bool: + """Saves the current editor content to a new file chosen by the user.""" + log.debug("Save As action 
triggered.") + try: + preset_data = self._gather_editor_data() + new_preset_name = preset_data.get("preset_name") + if not new_preset_name: QMessageBox.warning(self, "Save As Error", "Preset Name cannot be empty."); return False + if not preset_data.get("supplier_name"): QMessageBox.warning(self, "Save As Error", "Supplier Name cannot be empty."); return False + content_to_save = json.dumps(preset_data, indent=4, ensure_ascii=False) + suggested_name = f"{new_preset_name}.json" + default_path = PRESETS_DIR / suggested_name + file_path_str, _ = QFileDialog.getSaveFileName(self, "Save Preset As", str(default_path), "JSON Files (*.json);;All Files (*)") + if not file_path_str: log.debug("Save As cancelled by user."); return False + save_path = Path(file_path_str) + if save_path.suffix.lower() != ".json": save_path = save_path.with_suffix(".json") + if save_path.exists() and save_path != self.current_editing_preset_path: + reply = QMessageBox.warning(self, "Confirm Overwrite", f"Preset '{save_path.name}' already exists. 
Overwrite?", QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.No) + if reply == QMessageBox.StandardButton.No: log.debug("Save As overwrite cancelled."); return False + log.info(f"Saving preset as: {save_path.name}") + with open(save_path, 'w', encoding='utf-8') as f: f.write(content_to_save) + self.current_editing_preset_path = save_path # Update current path + self.editor_unsaved_changes = False + self.editor_save_button.setEnabled(False) + # self.window().setWindowTitle(f"Asset Processor Tool - {save_path.name}") # Handled by MainWindow + self.presets_changed_signal.emit() # Signal change + log.info("Preset saved successfully (Save As).") + # Refresh list and select the new item + self.populate_presets() + items = self.editor_preset_list.findItems(save_path.stem, Qt.MatchFlag.MatchExactly) + if items: self.editor_preset_list.setCurrentItem(items[0]) + return True + except Exception as e: + log.exception(f"Error saving preset file (Save As): {e}") + QMessageBox.critical(self, "Save Error", f"Could not save preset file.\n\nError: {e}") + return False + + def _new_preset(self): + """Clears the editor and loads data from _template.json.""" + log.debug("New Preset action triggered.") + if self.check_unsaved_changes(): return # Check unsaved changes first + self._clear_editor() + if TEMPLATE_PATH.is_file(): + log.info("Loading new preset from _template.json") + try: + with open(TEMPLATE_PATH, 'r', encoding='utf-8') as f: template_data = json.load(f) + self._populate_editor_from_data(template_data) + # Override specific fields for a new preset + self.editor_preset_name.setText("NewPreset") + # self.window().setWindowTitle("Asset Processor Tool - New Preset*") # Handled by MainWindow + except Exception as e: + log.exception(f"Error loading template preset file {TEMPLATE_PATH}: {e}") + QMessageBox.critical(self, "Error", f"Could not load template preset file:\n{TEMPLATE_PATH}\n\nError: {e}") + self._clear_editor() + # 
self.window().setWindowTitle("Asset Processor Tool - New Preset*") # Handled by MainWindow + self.editor_supplier_name.setText("MySupplier") # Set a default supplier name + else: + log.warning("Presets/_template.json not found. Creating empty preset.") + # self.window().setWindowTitle("Asset Processor Tool - New Preset*") # Handled by MainWindow + self.editor_preset_name.setText("NewPreset") + self.editor_supplier_name.setText("MySupplier") # Set a default supplier name + self._set_editor_enabled(True) + self.editor_unsaved_changes = True + self.editor_save_button.setEnabled(True) + # Select the placeholder item to avoid auto-loading the "NewPreset" + placeholder_item = self.editor_preset_list.findItems("--- Select a Preset ---", Qt.MatchFlag.MatchExactly) + if placeholder_item: + self.editor_preset_list.setCurrentItem(placeholder_item[0]) + # Emit selection change for the new state (effectively placeholder) + self.preset_selection_changed_signal.emit("placeholder", None) + + + def _delete_selected_preset(self): + """Deletes the currently selected preset file from the editor list after confirmation.""" + current_item = self.editor_preset_list.currentItem() + if not current_item: QMessageBox.information(self, "Delete Preset", "Please select a preset from the list to delete."); return + + item_data = current_item.data(Qt.ItemDataRole.UserRole) + # Ensure it's a real preset path before attempting delete + if not isinstance(item_data, Path): + QMessageBox.information(self, "Delete Preset", "Cannot delete placeholder or LLM option.") + return + + preset_path = item_data + preset_name = preset_path.stem + reply = QMessageBox.warning(self, "Confirm Delete", f"Are you sure you want to permanently delete the preset '{preset_name}'?", QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.No) + if reply == QMessageBox.StandardButton.Yes: + log.info(f"Deleting preset: {preset_path.name}") + try: + preset_path.unlink() + log.info("Preset 
deleted successfully.") + if self.current_editing_preset_path == preset_path: self._clear_editor() + self.presets_changed_signal.emit() # Signal change + # Refresh list + self.populate_presets() + except Exception as e: + log.exception(f"Error deleting preset file {preset_path}: {e}") + QMessageBox.critical(self, "Delete Error", f"Could not delete preset file:\n{preset_path}\n\nError: {e}") + + # --- Public Access Methods for MainWindow --- + + def get_selected_preset_mode(self) -> tuple[str, str | None]: + """ + Returns the current selection mode and preset name (if applicable). + Returns: tuple(mode_string, preset_name_string_or_None) + mode_string can be "preset", "llm", "placeholder" + """ + current_item = self.editor_preset_list.currentItem() + if current_item: + item_data = current_item.data(Qt.ItemDataRole.UserRole) + if item_data == "__PLACEHOLDER__": + return "placeholder", None + elif item_data == "__LLM__": + return "llm", None + elif isinstance(item_data, Path): + return "preset", item_data.stem + return "placeholder", None # Default or if no item selected + + def get_last_valid_preset_name(self) -> str | None: + """ + Returns the name (stem) of the last valid preset that was loaded. + Used by delegates to populate dropdowns based on the original context. 
+ """ + return self._last_valid_preset_name + + # --- Slots for MainWindow Interaction --- diff --git a/gui/processing_handler.py b/gui/processing_handler.py deleted file mode 100644 index b72b2ee..0000000 --- a/gui/processing_handler.py +++ /dev/null @@ -1,372 +0,0 @@ -# gui/processing_handler.py -import logging -from pathlib import Path -from concurrent.futures import ProcessPoolExecutor, as_completed -import time # For potential delays if needed - -import subprocess # <<< ADDED IMPORT -import shutil # <<< ADDED IMPORT -from typing import Optional # <<< ADDED IMPORT -from rule_structure import SourceRule # Import SourceRule - -# --- PySide6 Imports --- -# Inherit from QObject to support signals/slots for thread communication -from PySide6.QtCore import QObject, Signal - -# --- Backend Imports --- -# Need to import the worker function and potentially config/processor if needed directly -# Adjust path to ensure modules can be found relative to this file's location -import sys -script_dir = Path(__file__).parent -project_root = script_dir.parent -if str(project_root) not in sys.path: - sys.path.insert(0, str(project_root)) - -try: - # Import the worker function from main.py - from main import process_single_asset_wrapper - # Import exceptions if needed for type hinting or specific handling - from configuration import ConfigurationError, load_base_config # Import ConfigurationError and load_base_config - from asset_processor import AssetProcessingError - # Removed: import config as core_config # <<< ADDED IMPORT - BACKEND_AVAILABLE = True -except ImportError as e: - print(f"ERROR (ProcessingHandler): Failed to import backend modules/worker: {e}") - # Define placeholders if imports fail, so the GUI doesn't crash immediately - process_single_asset_wrapper = None - ConfigurationError = Exception - load_base_config = None # Placeholder - AssetProcessingError = Exception - BACKEND_AVAILABLE = False - -log = logging.getLogger(__name__) -# Basic config if logger hasn't been 
set up elsewhere -if not log.hasHandlers(): - logging.basicConfig(level=logging.INFO, format='%(levelname)s (Handler): %(message)s') - - -class ProcessingHandler(QObject): - """ - Handles the execution of the asset processing pipeline in a way that - can be run in a separate thread and communicate progress via signals. - """ - # --- Signals --- - # Emitted for overall progress bar update - progress_updated = Signal(int, int) # current_count, total_count - # Emitted for updating status of individual files in the list - file_status_updated = Signal(str, str, str) # input_path_str, status ("processing", "processed", "skipped", "failed"), message - # Emitted when the entire batch processing is finished - processing_finished = Signal(int, int, int) # processed_count, skipped_count, failed_count - # Emitted for general status messages to the status bar - status_message = Signal(str, int) # message, timeout_ms - - def __init__(self, parent=None): - super().__init__(parent) - self._executor = None - self._futures = {} # Store future->input_path mapping - self._is_running = False - self._cancel_requested = False - - @property - def is_running(self): - return self._is_running - - # Removed _predict_single_asset method - - @Slot(str, list, str, str, bool, int, - bool, str, str, bool, SourceRule) # Explicitly define types for the slot - def run_processing(self, input_source_identifier: str, original_input_paths: list[str], preset_name: str, output_dir_str: str, overwrite: bool, num_workers: int, - run_blender: bool, nodegroup_blend_path: str, materials_blend_path: str, verbose: bool, rules: SourceRule): # <<< ADDED verbose PARAM - """ - Starts the asset processing task and optionally runs Blender scripts afterwards. - This method should be called when the handler is moved to a separate thread. 
- """ - if self._is_running: - log.warning("Processing is already running.") - self.status_message.emit("Processing already in progress.", 3000) - return - - if not BACKEND_AVAILABLE or not process_single_asset_wrapper: - log.error("Backend modules or worker function not available. Cannot start processing.") - self.status_message.emit("Error: Backend components missing. Cannot process.", 5000) - self.processing_finished.emit(0, 0, len(original_input_paths)) # Emit finished with all failed - return - - self._is_running = True - self._cancel_requested = False - self._futures = {} # Reset futures - total_files = len(original_input_paths) # Use original_input_paths for total count - processed_count = 0 - skipped_count = 0 - failed_count = 0 - completed_count = 0 - - log.info(f"Starting processing run: {total_files} assets, Preset='{preset_name}', Workers={num_workers}, Overwrite={overwrite}") - self.status_message.emit(f"Starting processing for {total_files} items...", 0) # Persistent message - - try: - # Use 'with' statement for ProcessPoolExecutor for cleanup - with ProcessPoolExecutor(max_workers=num_workers) as executor: - self._executor = executor # Store for potential cancellation - - # Submit tasks - for input_path in original_input_paths: # Iterate through the list of input paths - if self._cancel_requested: break # Check before submitting more - log.debug(f"Submitting task for: {input_path}") - # Pass the single SourceRule object to the worker -# --- DEBUG LOG: Inspect FileRule overrides before sending to worker --- - log.debug(f"ProcessingHandler: Inspecting rules for input '{input_path}' before submitting to worker:") - if rules: # Check if rules object exists - for asset_rule in rules.assets: - log.debug(f" Asset: {asset_rule.asset_name}") - for file_rule in asset_rule.files: - log.debug(f" File: {Path(file_rule.file_path).name}, ItemType: {file_rule.item_type}, Override: {file_rule.item_type_override}, StandardMap: {getattr(file_rule, 'standard_map_type', 
'N/A')}") - else: - log.debug(" Rules object is None.") - # --- END DEBUG LOG --- - future = executor.submit(process_single_asset_wrapper, input_path, preset_name, output_dir_str, overwrite, verbose=verbose, rules=rules) # Pass verbose flag from GUI and rules - self._futures[future] = input_path # Map future back to input path - # Optionally emit "processing" status here - self.file_status_updated.emit(input_path, "processing", "") - - if self._cancel_requested: - log.info("Processing cancelled during task submission.") - # Count remaining unsubmitted tasks as failed/cancelled - failed_count = total_files - len(self._futures) - - # Process completed futures - for future in as_completed(self._futures): - completed_count += 1 - input_path = self._futures[future] # Get original path - asset_name = Path(input_path).name - status = "failed" # Default status - error_message = "Unknown error" - - if self._cancel_requested: - # If cancelled after submission, try to get result but count as failed - status = "failed" - error_message = "Cancelled" - failed_count += 1 - # Don't try future.result() if cancelled, it might raise CancelledError - else: - try: - # Get result tuple: (input_path_str, status_string, error_message_or_None) - result_tuple = future.result() - _, status, error_message = result_tuple - error_message = error_message or "" # Ensure it's a string - - # Increment counters based on status - if status == "processed": - processed_count += 1 - elif status == "skipped": - skipped_count += 1 - elif status == "failed": - failed_count += 1 - else: - log.warning(f"Unknown status '{status}' received for {asset_name}. 
Counting as failed.") - failed_count += 1 - error_message = f"Unknown status: {status}" - - except Exception as e: - # Catch errors if the future itself fails (e.g., worker process crashed hard) - log.exception(f"Critical worker failure for {asset_name}: {e}") - failed_count += 1 # Count crashes as failures - status = "failed" - error_message = f"Worker process crashed: {e}" - - # Emit progress signals - self.progress_updated.emit(completed_count, total_files) - self.file_status_updated.emit(input_path, status, error_message) - - # Check for cancellation again after processing each result - if self._cancel_requested: - log.info("Cancellation detected after processing a result.") - # Count remaining unprocessed futures as failed/cancelled - remaining_futures = total_files - completed_count - failed_count += remaining_futures - break # Exit the as_completed loop - - except Exception as pool_exc: - log.exception(f"An error occurred with the process pool: {pool_exc}") - self.status_message.emit(f"Error during processing: {pool_exc}", 5000) - # Mark all remaining as failed - failed_count = total_files - processed_count - skipped_count - - finally: - # --- Blender Script Execution (Optional) --- - if run_blender and not self._cancel_requested: - log.info("Asset processing complete. Checking for Blender script execution.") - self.status_message.emit("Asset processing complete. 
Starting Blender scripts...", 0) - blender_exe = self._find_blender_executable() - if blender_exe: - script_dir = Path(__file__).parent.parent / "blenderscripts" # Go up one level from gui/ - nodegroup_script_path = script_dir / "create_nodegroups.py" - materials_script_path = script_dir / "create_materials.py" - asset_output_root = output_dir_str # Use the same output dir - - # Run Nodegroup Script - if nodegroup_blend_path and Path(nodegroup_blend_path).is_file(): - if nodegroup_script_path.is_file(): - log.info("-" * 20 + " Running Nodegroup Script " + "-" * 20) - self.status_message.emit(f"Running Blender nodegroup script on {Path(nodegroup_blend_path).name}...", 0) - success_ng = self._run_blender_script_subprocess( - blender_exe_path=blender_exe, - blend_file_path=nodegroup_blend_path, - python_script_path=str(nodegroup_script_path), - asset_root_dir=asset_output_root - ) - if not success_ng: - log.error("Blender node group script execution failed.") - self.status_message.emit("Blender nodegroup script failed.", 5000) - else: - log.info("Blender nodegroup script finished successfully.") - self.status_message.emit("Blender nodegroup script finished.", 3000) - else: - log.error(f"Node group script not found: {nodegroup_script_path}") - self.status_message.emit(f"Error: Nodegroup script not found.", 5000) - elif run_blender and nodegroup_blend_path: # Log if path was provided but invalid - log.warning(f"Nodegroup blend path provided but invalid: {nodegroup_blend_path}") - self.status_message.emit(f"Warning: Invalid Nodegroup .blend path.", 5000) - - - # Run Materials Script (only if nodegroup script was attempted or not needed) - if materials_blend_path and Path(materials_blend_path).is_file(): - if materials_script_path.is_file(): - log.info("-" * 20 + " Running Materials Script " + "-" * 20) - self.status_message.emit(f"Running Blender materials script on {Path(materials_blend_path).name}...", 0) - # Pass the nodegroup blend path as the second argument to the 
script - success_mat = self._run_blender_script_subprocess( - blender_exe_path=blender_exe, - blend_file_path=materials_blend_path, - python_script_path=str(materials_script_path), - asset_root_dir=asset_output_root, - nodegroup_blend_file_path_arg=nodegroup_blend_path # Pass the nodegroup path - ) - if not success_mat: - log.error("Blender material script execution failed.") - self.status_message.emit("Blender material script failed.", 5000) - else: - log.info("Blender material script finished successfully.") - self.status_message.emit("Blender material script finished.", 3000) - else: - log.error(f"Material script not found: {materials_script_path}") - self.status_message.emit(f"Error: Material script not found.", 5000) - elif run_blender and materials_blend_path: # Log if path was provided but invalid - log.warning(f"Materials blend path provided but invalid: {materials_blend_path}") - self.status_message.emit(f"Warning: Invalid Materials .blend path.", 5000) - - else: - log.warning("Blender executable not found. Skipping Blender script execution.") - self.status_message.emit("Warning: Blender executable not found. Skipping scripts.", 5000) - elif self._cancel_requested: - log.info("Processing was cancelled. Skipping Blender script execution.") - # --- End Blender Script Execution --- - - final_message = f"Finished. 
Processed: {processed_count}, Skipped: {skipped_count}, Failed: {failed_count}" - log.info(final_message) - self.status_message.emit(final_message, 5000) # Show final summary - self.processing_finished.emit(processed_count, skipped_count, failed_count) - self._is_running = False - self._executor = None - self._futures = {} # Clear futures - - def request_cancel(self): - """Requests cancellation of the ongoing processing task.""" - if not self._is_running: - log.warning("Cancel requested but no processing is running.") - return - - if self._cancel_requested: - log.warning("Cancellation already requested.") - return - - log.info("Cancellation requested.") - self.status_message.emit("Cancellation requested...", 3000) - self._cancel_requested = True - - # Attempt to shutdown the executor - this might cancel pending tasks - # but won't forcefully stop running ones. `cancel_futures=True` is Python 3.9+ - if self._executor: - log.debug("Requesting executor shutdown...") - # For Python 3.9+: self._executor.shutdown(wait=False, cancel_futures=True) - # For older Python: - self._executor.shutdown(wait=False) - # Manually try cancelling futures that haven't started - for future in self._futures: - if not future.running() and not future.done(): - future.cancel() - log.debug("Executor shutdown requested.") - - # Note: True cancellation of running ProcessPoolExecutor tasks is complex. - # This implementation primarily prevents processing further results and - # attempts to cancel pending/unstarted tasks. - - def _find_blender_executable(self) -> Optional[str]: - """Finds the Blender executable path from config or system PATH.""" - try: - # Use load_base_config to get the Blender executable path - if load_base_config: - base_config = load_base_config() - blender_exe_config = base_config.get('BLENDER_EXECUTABLE_PATH', None) - else: - blender_exe_config = None - log.warning("load_base_config not available. 
Cannot read BLENDER_EXECUTABLE_PATH from config.") - - if blender_exe_config: - p = Path(blender_exe_config) - if p.is_file(): - log.info(f"Using Blender executable from config: {p}") - return str(p.resolve()) - else: - log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying PATH.") - else: - log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying PATH.") - - blender_exe = shutil.which("blender") - if blender_exe: - log.info(f"Found Blender executable in PATH: {blender_exe}") - return blender_exe - else: - log.warning("Could not find 'blender' in system PATH.") - return None - except ConfigurationError as e: - log.error(f"Error reading base configuration for Blender executable path: {e}") - return None - except Exception as e: - log.error(f"Error checking Blender executable path: {e}") - return None - - def _run_blender_script_subprocess(self, blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str, nodegroup_blend_file_path_arg: Optional[str] = None) -> bool: - """Internal helper to run a single Blender script via subprocess.""" - command_base = [ - blender_exe_path, - "--factory-startup", - "-b", - blend_file_path, - "--log", "*", # <<< ADDED BLENDER LOGGING FLAG - "--python", python_script_path, - "--", - asset_root_dir, - ] - # Add nodegroup blend file path if provided (for create_materials script) - if nodegroup_blend_file_path_arg: - command = command_base + [nodegroup_blend_file_path_arg] - else: - command = command_base - log.debug(f"Executing Blender command: {' '.join(map(str, command))}") # Ensure all parts are strings for join - try: - # Ensure all parts of the command are strings for subprocess - str_command = [str(part) for part in command] - result = subprocess.run(str_command, capture_output=True, text=True, check=False, encoding='utf-8') # Specify encoding - log.info(f"Blender script '{Path(python_script_path).name}' finished with exit code: {result.returncode}") - if result.stdout: 
log.debug(f"Blender stdout:\n{result.stdout.strip()}") - if result.stderr: - if result.returncode != 0: log.error(f"Blender stderr:\n{result.stderr.strip()}") - else: log.warning(f"Blender stderr (RC=0):\n{result.stderr.strip()}") - return result.returncode == 0 - except FileNotFoundError: - log.error(f"Blender executable not found at: {blender_exe_path}") - return False - except Exception as e: - log.exception(f"Error running Blender script '{Path(python_script_path).name}': {e}") - return False \ No newline at end of file diff --git a/gui/rule_editor_widget.py b/gui/rule_editor_widget.py index 5a55ccf..6b014de 100644 --- a/gui/rule_editor_widget.py +++ b/gui/rule_editor_widget.py @@ -9,158 +9,158 @@ from PySide6.QtCore import Signal, Slot, QObject # from rule_structure import SourceRule, AssetRule, FileRule # Assuming direct import is possible class RuleEditorWidget(QWidget): - """ - A widget to display and edit hierarchical processing rules (Source, Asset, File). - """ - rule_updated = Signal(object) # Signal emitted when a rule is updated - - def __init__(self, asset_types: list[str] | None = None, parent=None): - """ - Initializes the RuleEditorWidget. - - Args: - asset_types (list[str] | None): A list of available asset type names. Defaults to None. - parent: The parent widget. - """ - super().__init__(parent) - self.asset_types = asset_types if asset_types else [] # Store asset types - self.current_rule_type = None - self.current_rule_object = None - - self.layout = QVBoxLayout(self) - self.rule_type_label = QLabel("Select an item in the hierarchy to view/edit rules.") - self.layout.addWidget(self.rule_type_label) - - self.form_layout = QFormLayout() - self.layout.addLayout(self.form_layout) - - self.layout.addStretch() # Add stretch to push content to the top - - self.setLayout(self.layout) - self.clear_editor() - - @Slot(object, str) - def load_rule(self, rule_object, rule_type_name): - """ - Loads a rule object into the editor. 
- - Args: - rule_object: The SourceRule, AssetRule, or FileRule object. - rule_type_name: The name of the rule type ('SourceRule', 'AssetRule', 'FileRule'). - """ - self.clear_editor() - self.current_rule_object = rule_object - self.current_rule_type = rule_type_name - self.rule_type_label.setText(f"Editing: {rule_type_name}") - - if rule_object: - # Dynamically create form fields based on rule object attributes - for attr_name, attr_value in vars(rule_object).items(): - if attr_name.startswith('_'): # Skip private attributes - continue - - label = QLabel(attr_name.replace('_', ' ').title() + ":") - editor_widget = self._create_editor_widget(attr_name, attr_value) - if editor_widget: - self.form_layout.addRow(label, editor_widget) - # Connect signal to update rule object - self._connect_editor_signal(editor_widget, attr_name) - - def _create_editor_widget(self, attr_name, attr_value): - """ - Creates an appropriate editor widget based on the attribute type. - """ - # --- Special Handling for Asset Type Dropdown --- - if self.current_rule_type == 'AssetRule' and attr_name == 'asset_type' and self.asset_types: - widget = QComboBox() - widget.addItems(self.asset_types) - if attr_value in self.asset_types: - widget.setCurrentText(attr_value) - elif self.asset_types: # Select first item if current value is invalid - widget.setCurrentIndex(0) - return widget - # --- Standard Type Handling --- - elif isinstance(attr_value, bool): - widget = QCheckBox() - widget.setChecked(attr_value) - return widget - elif isinstance(attr_value, int): - widget = QSpinBox() - widget.setRange(-2147483648, 2147483647) # Default integer range - widget.setValue(attr_value) - return widget - elif isinstance(attr_value, float): - widget = QDoubleSpinBox() - widget.setRange(-sys.float_info.max, sys.float_info.max) # Default float range - widget.setValue(attr_value) - return widget - elif isinstance(attr_value, (str, type(None))): # Handle None for strings - widget = QLineEdit() - 
widget.setText(str(attr_value) if attr_value is not None else "") - return widget - # Add more types as needed - # elif isinstance(attr_value, list): - # # Example for a simple list of strings - # widget = QLineEdit() - # widget.setText(", ".join(map(str, attr_value))) - # return widget - else: - # For unsupported types, just display the value - label = QLabel(str(attr_value)) - return label - - def _connect_editor_signal(self, editor_widget, attr_name): - """ - Connects the appropriate signal of the editor widget to the update logic. - """ - if isinstance(editor_widget, QLineEdit): - editor_widget.textChanged.connect(lambda text: self._update_rule_attribute(attr_name, text)) - elif isinstance(editor_widget, QCheckBox): - editor_widget.toggled.connect(lambda checked: self._update_rule_attribute(attr_name, checked)) - elif isinstance(editor_widget, QSpinBox): - editor_widget.valueChanged.connect(lambda value: self._update_rule_attribute(attr_name, value)) - elif isinstance(editor_widget, QDoubleSpinBox): - editor_widget.valueChanged.connect(lambda value: self._update_rule_attribute(attr_name, value)) - elif isinstance(editor_widget, QComboBox): - # Use currentTextChanged to get the string value directly - editor_widget.currentTextChanged.connect(lambda text: self._update_rule_attribute(attr_name, text)) - # Add connections for other widget types - - def _update_rule_attribute(self, attr_name, value): - """ - Updates the attribute of the current rule object and emits the signal. 
- """ - if self.current_rule_object: - # Basic type conversion based on the original attribute type - original_value = getattr(self.current_rule_object, attr_name) - try: - if isinstance(original_value, bool): - converted_value = bool(value) - elif isinstance(original_value, int): - converted_value = int(value) - elif isinstance(original_value, float): - converted_value = float(value) - elif isinstance(original_value, (str, type(None))): - converted_value = str(value) if value != "" else None # Convert empty string to None for original None types - else: - converted_value = value # Fallback for other types - setattr(self.current_rule_object, attr_name, converted_value) - self.rule_updated.emit(self.current_rule_object) - # print(f"Updated {attr_name} to {converted_value} in {self.current_rule_type}") # Debugging - except ValueError: - # Handle potential conversion errors (e.g., non-numeric input for int/float) - print(f"Error converting value '{value}' for attribute '{attr_name}'") - # Optionally, revert the editor widget to the original value or show an error indicator - - def clear_editor(self): - """ - Clears the form layout. - """ - self.current_rule_object = None - self.current_rule_type = None - self.rule_type_label.setText("Select an item in the hierarchy to view/edit rules.") - while self.form_layout.rowCount() > 0: - self.form_layout.removeRow(0) + """ + A widget to display and edit hierarchical processing rules (Source, Asset, File). + """ + rule_updated = Signal(object) # Signal emitted when a rule is updated + + def __init__(self, asset_types: list[str] | None = None, parent=None): + """ + Initializes the RuleEditorWidget. + + Args: + asset_types (list[str] | None): A list of available asset type names. Defaults to None. + parent: The parent widget. 
+ """ + super().__init__(parent) + self.asset_types = asset_types if asset_types else [] # Store asset types + self.current_rule_type = None + self.current_rule_object = None + + self.layout = QVBoxLayout(self) + self.rule_type_label = QLabel("Select an item in the hierarchy to view/edit rules.") + self.layout.addWidget(self.rule_type_label) + + self.form_layout = QFormLayout() + self.layout.addLayout(self.form_layout) + + self.layout.addStretch() # Add stretch to push content to the top + + self.setLayout(self.layout) + self.clear_editor() + + @Slot(object, str) + def load_rule(self, rule_object, rule_type_name): + """ + Loads a rule object into the editor. + + Args: + rule_object: The SourceRule, AssetRule, or FileRule object. + rule_type_name: The name of the rule type ('SourceRule', 'AssetRule', 'FileRule'). + """ + self.clear_editor() + self.current_rule_object = rule_object + self.current_rule_type = rule_type_name + self.rule_type_label.setText(f"Editing: {rule_type_name}") + + if rule_object: + # Dynamically create form fields based on rule object attributes + for attr_name, attr_value in vars(rule_object).items(): + if attr_name.startswith('_'): # Skip private attributes + continue + + label = QLabel(attr_name.replace('_', ' ').title() + ":") + editor_widget = self._create_editor_widget(attr_name, attr_value) + if editor_widget: + self.form_layout.addRow(label, editor_widget) + # Connect signal to update rule object + self._connect_editor_signal(editor_widget, attr_name) + + def _create_editor_widget(self, attr_name, attr_value): + """ + Creates an appropriate editor widget based on the attribute type. 
+ """ + # --- Special Handling for Asset Type Dropdown --- + if self.current_rule_type == 'AssetRule' and attr_name == 'asset_type' and self.asset_types: + widget = QComboBox() + widget.addItems(self.asset_types) + if attr_value in self.asset_types: + widget.setCurrentText(attr_value) + elif self.asset_types: # Select first item if current value is invalid + widget.setCurrentIndex(0) + return widget + # --- Standard Type Handling --- + elif isinstance(attr_value, bool): + widget = QCheckBox() + widget.setChecked(attr_value) + return widget + elif isinstance(attr_value, int): + widget = QSpinBox() + widget.setRange(-2147483648, 2147483647) # Default integer range + widget.setValue(attr_value) + return widget + elif isinstance(attr_value, float): + widget = QDoubleSpinBox() + widget.setRange(-sys.float_info.max, sys.float_info.max) # Default float range + widget.setValue(attr_value) + return widget + elif isinstance(attr_value, (str, type(None))): # Handle None for strings + widget = QLineEdit() + widget.setText(str(attr_value) if attr_value is not None else "") + return widget + # Add more types as needed + # elif isinstance(attr_value, list): + # # Example for a simple list of strings + # widget = QLineEdit() + # widget.setText(", ".join(map(str, attr_value))) + # return widget + else: + # For unsupported types, just display the value + label = QLabel(str(attr_value)) + return label + + def _connect_editor_signal(self, editor_widget, attr_name): + """ + Connects the appropriate signal of the editor widget to the update logic. 
+ """ + if isinstance(editor_widget, QLineEdit): + editor_widget.textChanged.connect(lambda text: self._update_rule_attribute(attr_name, text)) + elif isinstance(editor_widget, QCheckBox): + editor_widget.toggled.connect(lambda checked: self._update_rule_attribute(attr_name, checked)) + elif isinstance(editor_widget, QSpinBox): + editor_widget.valueChanged.connect(lambda value: self._update_rule_attribute(attr_name, value)) + elif isinstance(editor_widget, QDoubleSpinBox): + editor_widget.valueChanged.connect(lambda value: self._update_rule_attribute(attr_name, value)) + elif isinstance(editor_widget, QComboBox): + # Use currentTextChanged to get the string value directly + editor_widget.currentTextChanged.connect(lambda text: self._update_rule_attribute(attr_name, text)) + # Add connections for other widget types + + def _update_rule_attribute(self, attr_name, value): + """ + Updates the attribute of the current rule object and emits the signal. + """ + if self.current_rule_object: + # Basic type conversion based on the original attribute type + original_value = getattr(self.current_rule_object, attr_name) + try: + if isinstance(original_value, bool): + converted_value = bool(value) + elif isinstance(original_value, int): + converted_value = int(value) + elif isinstance(original_value, float): + converted_value = float(value) + elif isinstance(original_value, (str, type(None))): + converted_value = str(value) if value != "" else None # Convert empty string to None for original None types + else: + converted_value = value # Fallback for other types + setattr(self.current_rule_object, attr_name, converted_value) + self.rule_updated.emit(self.current_rule_object) + # print(f"Updated {attr_name} to {converted_value} in {self.current_rule_type}") # Debugging + except ValueError: + # Handle potential conversion errors (e.g., non-numeric input for int/float) + print(f"Error converting value '{value}' for attribute '{attr_name}'") + # Optionally, revert the editor widget 
to the original value or show an error indicator + + def clear_editor(self): + """ + Clears the form layout. + """ + self.current_rule_object = None + self.current_rule_type = None + self.rule_type_label.setText("Select an item in the hierarchy to view/edit rules.") + while self.form_layout.rowCount() > 0: + self.form_layout.removeRow(0) if __name__ == '__main__': app = QApplication(sys.argv) diff --git a/gui/unified_view_model.py b/gui/unified_view_model.py index ca09ea5..61f9295 100644 --- a/gui/unified_view_model.py +++ b/gui/unified_view_model.py @@ -1,7 +1,7 @@ # gui/unified_view_model.py import logging # Added for debugging log = logging.getLogger(__name__) # Added for debugging -from PySide6.QtCore import QAbstractItemModel, QModelIndex, Qt, Signal # Added Signal +from PySide6.QtCore import QAbstractItemModel, QModelIndex, Qt, Signal, Slot # Added Signal and Slot from PySide6.QtGui import QColor # Added for background role from pathlib import Path # Added for file_name extraction from rule_structure import SourceRule, AssetRule, FileRule # Removed AssetType, ItemType import @@ -18,6 +18,10 @@ class UnifiedViewModel(QAbstractItemModel): A QAbstractItemModel for displaying and editing the hierarchical structure of SourceRule -> AssetRule -> FileRule. """ + # Signal emitted when a FileRule's target asset override changes. + # Carries the index of the FileRule and the new target asset path (or None). 
+ targetAssetOverrideChanged = Signal(QModelIndex, object) + Columns = [ "Name", "Target Asset", "Supplier", "Asset Type", "Item Type" @@ -34,9 +38,52 @@ class UnifiedViewModel(QAbstractItemModel): def __init__(self, parent=None): super().__init__(parent) self._source_rules = [] # Now stores a list of SourceRule objects + # self._display_mode removed + self._asset_type_colors = {} + self._file_type_colors = {} + self._asset_type_keys = [] # Store asset type keys + self._file_type_keys = [] # Store file type keys + self._load_definitions() # Load colors and keys + + def _load_definitions(self): + """Loads configuration and caches colors and type keys.""" + try: + base_config = load_base_config() + asset_type_defs = base_config.get('ASSET_TYPE_DEFINITIONS', {}) + file_type_defs = base_config.get('FILE_TYPE_DEFINITIONS', {}) + + # Cache Asset Type Definitions (Keys and Colors) + self._asset_type_keys = sorted(list(asset_type_defs.keys())) + for type_name, type_info in asset_type_defs.items(): + hex_color = type_info.get("color") + if hex_color: + try: + self._asset_type_colors[type_name] = QColor(hex_color) + except ValueError: + log.warning(f"Invalid hex color '{hex_color}' for asset type '{type_name}' in config.") + + # Cache File Type Definitions (Keys and Colors) + self._file_type_keys = sorted(list(file_type_defs.keys())) + for type_name, type_info in file_type_defs.items(): + hex_color = type_info.get("color") + if hex_color: + try: + self._file_type_colors[type_name] = QColor(hex_color) + except ValueError: + log.warning(f"Invalid hex color '{hex_color}' for file type '{type_name}' in config.") + + except Exception as e: + log.exception(f"Error loading or caching colors from configuration: {e}") + # Ensure caches/lists are empty if loading fails + self._asset_type_colors = {} + self._file_type_colors = {} + self._asset_type_keys = [] + self._file_type_keys = [] def load_data(self, source_rules_list: list): # Accepts a list """Loads or reloads the model with a 
list of SourceRule objects.""" + # Consider if color cache needs refreshing if config can change dynamically + # self._load_and_cache_colors() # Uncomment if config can change and needs refresh self.beginResetModel() self._source_rules = source_rules_list if source_rules_list else [] # Assign the new list # Ensure back-references for parent lookup are set on the NEW items @@ -56,26 +103,26 @@ class UnifiedViewModel(QAbstractItemModel): def get_all_source_rules(self) -> list: """Returns the internal list of SourceRule objects.""" return self._source_rules + + # set_display_mode removed + def rowCount(self, parent: QModelIndex = QModelIndex()) -> int: """Returns the number of rows under the given parent.""" if not parent.isValid(): # Parent is the invisible root. Children are the SourceRules. return len(self._source_rules) + # Always use detailed logic parent_item = parent.internalPointer() - if isinstance(parent_item, SourceRule): - # Parent is a SourceRule. Children are AssetRules. - return len(parent_item.assets) + return len(parent_item.assets) elif isinstance(parent_item, AssetRule): - # Parent is an AssetRule. Children are FileRules. return len(parent_item.files) elif isinstance(parent_item, FileRule): return 0 # FileRules have no children return 0 # Should not happen for valid items - def columnCount(self, parent: QModelIndex = QModelIndex()) -> int: """Returns the number of columns.""" return len(self.Columns) @@ -143,27 +190,22 @@ class UnifiedViewModel(QAbstractItemModel): # Parent is a valid index, get its item parent_item = parent.internalPointer() + # Always use detailed logic child_item = None if isinstance(parent_item, SourceRule): - # Parent is SourceRule. Children are AssetRules. if row < len(parent_item.assets): child_item = parent_item.assets[row] - # Ensure parent reference is set if not hasattr(child_item, 'parent_source'): child_item.parent_source = parent_item elif isinstance(parent_item, AssetRule): - # Parent is AssetRule. 
Children are FileRules. if row < len(parent_item.files): child_item = parent_item.files[row] - # Ensure parent reference is set if not hasattr(child_item, 'parent_asset'): child_item.parent_asset = parent_item if child_item: - # Create index for the child item under the parent return self.createIndex(row, column, child_item) else: - # Invalid row or parent type has no children (FileRule) return QModelIndex() def data(self, index: QModelIndex, role: int = Qt.DisplayRole): @@ -183,107 +225,79 @@ class UnifiedViewModel(QAbstractItemModel): # Determine effective asset type asset_type = item.asset_type_override if item.asset_type_override else item.asset_type if asset_type: - try: - base_config = load_base_config() # Load base config - asset_type_definitions = base_config.get('ASSET_TYPE_DEFINITIONS', {}) # Get definitions - type_info = asset_type_definitions.get(asset_type) - if type_info: - hex_color = type_info.get("color") - if hex_color: - try: - return QColor(hex_color) - except ValueError: - # Optional: Add logging for invalid hex color - # print(f"Warning: Invalid hex color '{hex_color}' for asset type '{asset_type}' in config.") - return None # Fallback for invalid hex - else: - # Optional: Add logging for missing color key - # print(f"Warning: No color defined for asset type '{asset_type}' in config.") - return None # Fallback if color key missing - else: - # Optional: Add logging for missing asset type definition - # print(f"Warning: Asset type '{asset_type}' not found in ASSET_TYPE_DEFINITIONS.") - return None # Fallback if type not in config - except Exception: # Catch errors during config loading - return None # Fallback on error + # Use cached color + return self._asset_type_colors.get(asset_type) # Returns None if not found else: return None # Fallback if no asset_type determined elif isinstance(item, FileRule): - # Determine effective item type: Prioritize override, then use base type - effective_item_type = item.item_type_override if 
item.item_type_override is not None else item.item_type - if effective_item_type: - try: - base_config = load_base_config() # Load base config - file_type_definitions = base_config.get('FILE_TYPE_DEFINITIONS', {}) # Get definitions - type_info = file_type_definitions.get(effective_item_type) - if type_info: - hex_color = type_info.get("color") - if hex_color: - try: - return QColor(hex_color) - except ValueError: - # Optional: Add logging for invalid hex color - # print(f"Warning: Invalid hex color '{hex_color}' for file type '{item_type}' in config.") - return None # Fallback for invalid hex - else: - # Optional: Add logging for missing color key - # print(f"Warning: No color defined for file type '{item_type}' in config.") - return None # Fallback if color key missing - else: - # File types often don't have specific colors, so no warning needed unless debugging - return None # Fallback if type not in config - except Exception: # Catch errors during config loading - return None # Fallback on error + # --- New Logic: Darkened Parent Background --- + parent_asset = getattr(item, 'parent_asset', None) + if parent_asset: + parent_asset_type = parent_asset.asset_type_override if parent_asset.asset_type_override else parent_asset.asset_type + parent_bg_color = self._asset_type_colors.get(parent_asset_type) if parent_asset_type else None + + if parent_bg_color: + # Darken the parent color by ~30% (factor 130) + return parent_bg_color.darker(130) + else: + # Parent has no specific color, use default background + return None else: - return None # Fallback if no item_type determined + # Should not happen if structure is correct, but fallback to default + return None + # --- End New Logic --- else: # Other item types or if item is None return None + # --- Handle Foreground Role (Text Color) --- + elif role == Qt.ForegroundRole: + if isinstance(item, FileRule): + # Determine effective item type + effective_item_type = item.item_type_override if item.item_type_override is not 
None else item.item_type + if effective_item_type: + # Use cached color for text + return self._file_type_colors.get(effective_item_type) # Returns None if not found + # For SourceRule and AssetRule, return None to use default text color (usually contrasts well) + return None # --- Handle other roles (Display, Edit, etc.) --- if isinstance(item, SourceRule): - if role == Qt.DisplayRole or role == Qt.EditRole: # Combine Display and Edit logic + if role == Qt.DisplayRole or role == Qt.EditRole: if column == self.COL_NAME: + # Always display name return Path(item.input_path).name - elif column == self.COL_SUPPLIER: - # Return override if set, otherwise the original identifier, else empty string + elif column == self.COL_SUPPLIER: # Always handle supplier display_value = item.supplier_override if item.supplier_override is not None else item.supplier_identifier return display_value if display_value is not None else "" - # Other columns return None or "" for SourceRule in Display/Edit roles - return None # Default for SourceRule for other roles/columns + return None # Other columns/roles are blank for SourceRule + # --- Logic for AssetRule and FileRule (previously detailed mode only) --- elif isinstance(item, AssetRule): if role == Qt.DisplayRole: if column == self.COL_NAME: return item.asset_name elif column == self.COL_ASSET_TYPE: display_value = item.asset_type_override if item.asset_type_override is not None else item.asset_type return display_value if display_value else "" - # Removed Status and Output Path columns elif role == Qt.EditRole: if column == self.COL_ASSET_TYPE: return item.asset_type_override - return None # Default for AssetRule + return None elif isinstance(item, FileRule): if role == Qt.DisplayRole: - if column == self.COL_NAME: return Path(item.file_path).name # Display only filename + if column == self.COL_NAME: return Path(item.file_path).name elif column == self.COL_TARGET_ASSET: return item.target_asset_name_override if 
item.target_asset_name_override is not None else "" elif column == self.COL_ITEM_TYPE: - # Reverted Logic: Display override if set, otherwise base type. Shows prefixed keys. override = item.item_type_override initial_type = item.item_type - - if override is not None: - return override - else: - return initial_type if initial_type else "" - # Removed Status and Output Path columns + if override is not None: return override + else: return initial_type if initial_type else "" elif role == Qt.EditRole: - if column == self.COL_TARGET_ASSET: return item.target_asset_name_override if item.target_asset_name_override is not None else "" # Return string or "" - elif column == self.COL_ITEM_TYPE: return item.item_type_override # Return string or None - return None # Default for FileRule + if column == self.COL_TARGET_ASSET: return item.target_asset_name_override if item.target_asset_name_override is not None else "" + elif column == self.COL_ITEM_TYPE: return item.item_type_override + return None - return None # Default return if role/item combination not handled + return None def setData(self, index: QModelIndex, value, role: int = Qt.EditRole) -> bool: """Sets the role data for the item at index to value.""" @@ -335,119 +349,8 @@ class UnifiedViewModel(QAbstractItemModel): old_value = item.target_asset_name_override # Store old value for potential revert/comparison item.target_asset_name_override = new_value changed = True - - # --- Start: New Direct Model Restructuring Logic --- - old_parent_asset = getattr(item, 'parent_asset', None) - if old_parent_asset: # Ensure we have the old parent - source_rule = getattr(old_parent_asset, 'parent_source', None) - if source_rule: # Ensure we have the grandparent - new_target_name = new_value # Can be None or a string - - # Get old parent index and source row - try: - grandparent_row = self._source_rules.index(source_rule) - old_parent_row = source_rule.assets.index(old_parent_asset) - source_row = old_parent_asset.files.index(item) 
- old_parent_index = self.createIndex(old_parent_row, 0, old_parent_asset) - grandparent_index = self.createIndex(grandparent_row, 0, source_rule) # Needed for insert/remove parent - except ValueError: - print("Error: Could not find item, parent, or grandparent in model structure during setData.") - item.target_asset_name_override = old_value # Revert data change - return False # Indicate failure - - target_parent_asset = None - target_parent_index = QModelIndex() - target_parent_row = -1 # Row within source_rule.assets - target_row = -1 # Row within target_parent_asset.files - move_occurred = False # Flag to track if a move happened - - # 1. Find existing target parent - if new_target_name: # Only search if a specific target is given - for i, asset in enumerate(source_rule.assets): - if asset.asset_name == new_target_name: - target_parent_asset = asset - target_parent_row = i - target_parent_index = self.createIndex(target_parent_row, 0, target_parent_asset) - break - - # 2. Handle Move/Creation - if target_parent_asset: - # --- Move to Existing Parent --- - if target_parent_asset != old_parent_asset: # Don't move if target is the same as old parent - target_row = len(target_parent_asset.files) # Append to the end - # print(f"DEBUG: Moving {Path(item.file_path).name} from {old_parent_asset.asset_name} ({source_row}) to {target_parent_asset.asset_name} ({target_row})") - self.beginMoveRows(old_parent_index, source_row, source_row, target_parent_index, target_row) - # Restructure internal data - old_parent_asset.files.pop(source_row) - target_parent_asset.files.append(item) - item.parent_asset = target_parent_asset # Update parent reference - self.endMoveRows() - move_occurred = True - else: - # Target is the same as the old parent. No move needed. 
- pass - - elif new_target_name: # Only create if a *new* specific target name was given - # --- Create New Parent and Move --- - # print(f"DEBUG: Creating new parent '{new_target_name}' and moving {Path(item.file_path).name}") - # Create new AssetRule - new_asset_rule = AssetRule(asset_name=new_target_name) - new_asset_rule.asset_type = old_parent_asset.asset_type # Copy type from old parent - new_asset_rule.asset_type_override = old_parent_asset.asset_type_override # Copy override too - new_asset_rule.parent_source = source_rule # Set parent reference - - # Determine insertion row for the new parent (e.g., append) - new_parent_row = len(source_rule.assets) - # print(f"DEBUG: Inserting new parent at row {new_parent_row} under {Path(source_rule.input_path).name}") - - # Emit signals for inserting the new parent row - self.beginInsertRows(grandparent_index, new_parent_row, new_parent_row) - source_rule.assets.insert(new_parent_row, new_asset_rule) # Insert into data structure - self.endInsertRows() - - # Get index for the newly inserted parent - target_parent_index = self.createIndex(new_parent_row, 0, new_asset_rule) - target_row = 0 # Insert file at the beginning of the new parent (for signal) - - # Emit signals for moving the file row - # print(f"DEBUG: Moving {Path(item.file_path).name} from {old_parent_asset.asset_name} ({source_row}) to new {new_asset_rule.asset_name} ({target_row})") - self.beginMoveRows(old_parent_index, source_row, source_row, target_parent_index, target_row) - # Restructure internal data - old_parent_asset.files.pop(source_row) - new_asset_rule.files.append(item) # Append is fine, target_row=0 was for signal - item.parent_asset = new_asset_rule # Update parent reference - self.endMoveRows() - move_occurred = True - - # Update target_parent_asset for potential cleanup check later - target_parent_asset = new_asset_rule - - else: # new_target_name is None or empty - # No move happens when the override is simply cleared. - pass - - # 3. 
Cleanup Empty Old Parent (only if a move occurred and old parent is empty) - if move_occurred and not old_parent_asset.files: - # print(f"DEBUG: Removing empty old parent {old_parent_asset.asset_name}") - try: - # Find the row of the old parent again, as it might have shifted - old_parent_row_for_removal = source_rule.assets.index(old_parent_asset) - # print(f"DEBUG: Removing parent at row {old_parent_row_for_removal} under {Path(source_rule.input_path).name}") - self.beginRemoveRows(grandparent_index, old_parent_row_for_removal, old_parent_row_for_removal) - source_rule.assets.pop(old_parent_row_for_removal) - self.endRemoveRows() - except ValueError: - print(f"Error: Could not find old parent '{old_parent_asset.asset_name}' for removal.") - # Log error, but continue - else: - print("Error: Could not find grandparent SourceRule during setData restructuring.") - item.target_asset_name_override = old_value # Revert - return False - else: - print("Error: Could not find parent AssetRule during setData restructuring.") - item.target_asset_name_override = old_value # Revert - return False - # --- End: New Direct Model Restructuring Logic --- + # Emit signal that the override changed, let handler deal with restructuring + self.targetAssetOverrideChanged.emit(index, new_value) elif column == self.COL_ITEM_TYPE: # Item-Type Override # Delegate provides string value (e.g., "MAP_COL") or None new_value = str(value) if value is not None else None @@ -515,15 +418,15 @@ class UnifiedViewModel(QAbstractItemModel): item = index.internalPointer() column = index.column() + # Always use detailed mode editability logic can_edit = False - # Determine editability based on item type and column - if isinstance(item, SourceRule): # If SourceRule is displayed/editable - if column == self.COL_SUPPLIER: can_edit = True # Supplier is editable + if isinstance(item, SourceRule): + if column == self.COL_SUPPLIER: can_edit = True elif isinstance(item, AssetRule): - if column == 
self.COL_ASSET_TYPE: can_edit = True # Asset Type is editable + if column == self.COL_ASSET_TYPE: can_edit = True elif isinstance(item, FileRule): - if column == self.COL_TARGET_ASSET: can_edit = True # Target Asset is editable - if column == self.COL_ITEM_TYPE: can_edit = True # Item Type is editable + if column == self.COL_TARGET_ASSET: can_edit = True + if column == self.COL_ITEM_TYPE: can_edit = True if can_edit: return default_flags | Qt.ItemIsEditable @@ -548,98 +451,316 @@ class UnifiedViewModel(QAbstractItemModel): if item: # Ensure internal pointer is not None return item return None # Return None for invalid index or None pointer -# --- Method to update model based on LLM predictions --- - def update_rules_for_sources(self, source_rules: List[SourceRule]): + # --- Method to update model based on prediction results, preserving overrides --- + def update_rules_for_sources(self, new_source_rules: List[SourceRule]): """ - Updates the model's internal data based on a list of SourceRule objects, - typically containing predictions for one or more source directories. + Updates the model's internal data based on a list of new SourceRule objects + (typically from prediction results), merging them with existing data while + preserving user overrides. Args: - source_rules: A list of SourceRule objects containing the new structure. + new_source_rules: A list of SourceRule objects containing the new structure. """ - if not source_rules: - print("UnifiedViewModel: update_rules_for_sources called with empty list.") + if not new_source_rules: + log.warning("UnifiedViewModel: update_rules_for_sources called with empty list.") return - # --- Important: Model Change Signaling --- - # Using Option 2 (per-source update) as it's generally more efficient. 
- print(f"UnifiedViewModel: Updating rules for {len(source_rules)} source(s).") + log.info(f"UnifiedViewModel: Updating rules for {len(new_source_rules)} source(s).") - # --- Node Class Placeholders --- - # Ensure these match your actual node implementation if different. - # These might be imported from another module or defined within this model. - # Example: from .your_node_module import SourceNode, AssetNode, FileNode - # For now, we assume they are available in the scope. + for new_source_rule in new_source_rules: + source_path = new_source_rule.input_path + existing_source_rule = None + existing_source_row = -1 - for rule in source_rules: - source_path = rule.input_path # Use input_path as per SourceRule definition - # --- Find the corresponding SourceRule in the model's internal list --- - # This replaces the placeholder _find_source_node_by_path logic - # We need the *object* and its *index* in self._source_rules - source_rule_obj = None - source_rule_row = -1 - for i, existing_rule in enumerate(self._source_rules): - if existing_rule.input_path == source_path: - source_rule_obj = existing_rule - source_rule_row = i + # 1. Find existing SourceRule in the model + for i, rule in enumerate(self._source_rules): + if rule.input_path == source_path: + existing_source_rule = rule + existing_source_row = i break - if source_rule_obj is None: - # --- ADD NEW RULE LOGIC --- - log.debug(f"No existing rule found for '{source_path}'. Adding new rule to model.") - # Ensure parent references are set within the new rule - for asset_rule in rule.assets: - asset_rule.parent_source = rule # Set parent to the rule being added + if existing_source_rule is None: + # 2. 
Add New SourceRule if not found + log.debug(f"Adding new SourceRule for '{source_path}'") + # Ensure parent references are set within the new rule hierarchy + for asset_rule in new_source_rule.assets: + asset_rule.parent_source = new_source_rule for file_rule in asset_rule.files: file_rule.parent_asset = asset_rule + # Add to model's internal list and emit signal - current_row_count = len(self._source_rules) - self.beginInsertRows(QModelIndex(), current_row_count, current_row_count) - self._source_rules.append(rule) # Append the new rule + insert_row = len(self._source_rules) + self.beginInsertRows(QModelIndex(), insert_row, insert_row) + self._source_rules.append(new_source_rule) self.endInsertRows() - continue # Skip the rest of the loop for this rule as it's newly added - # --- END ADD NEW RULE LOGIC --- + continue # Process next new_source_rule - # Get the QModelIndex corresponding to the source_rule_obj - # This index represents the parent for layout changes. - source_index = self.createIndex(source_rule_row, 0, source_rule_obj) + # 3. Merge Existing SourceRule + log.debug(f"Merging SourceRule for '{source_path}'") + existing_source_index = self.createIndex(existing_source_row, 0, existing_source_rule) + if not existing_source_index.isValid(): + log.error(f"Could not create valid index for existing SourceRule: {source_path}. Skipping.") + continue - if not source_index.isValid(): - print(f"Warning: Could not create valid QModelIndex for SourceRule: {source_path}. 
Skipping update.") - continue + # Update non-override SourceRule fields (e.g., supplier identifier if needed) + if existing_source_rule.supplier_identifier != new_source_rule.supplier_identifier: + # Only update if override is not set, or if you want prediction to always update base identifier + if existing_source_rule.supplier_override is None: + existing_source_rule.supplier_identifier = new_source_rule.supplier_identifier + # Emit dataChanged for the supplier column if it's displayed/editable at source level + supplier_col_index = self.createIndex(existing_source_row, self.COL_SUPPLIER, existing_source_rule) + self.dataChanged.emit(supplier_col_index, supplier_col_index, [Qt.DisplayRole, Qt.EditRole]) - # --- Signal layout change for the specific source node --- - # We are changing the children (AssetRules) of this SourceRule. - # Emit with parent index list and orientation. - self.layoutAboutToBeChanged.emit() # Emit without arguments - # --- Clear existing children (AssetRules) --- - # Directly modify the assets list of the found SourceRule object - source_rule_obj.assets.clear() # Clear the list in place + # --- Merge AssetRules --- + existing_assets_dict = {asset.asset_name: asset for asset in existing_source_rule.assets} + new_assets_dict = {asset.asset_name: asset for asset in new_source_rule.assets} + processed_asset_names = set() - # --- Rebuild children based on the new rule --- - for asset_rule in rule.assets: - # Add the new AssetRule object directly - source_rule_obj.assets.append(asset_rule) - # Set the parent reference on the new asset rule - asset_rule.parent_source = source_rule_obj + # Iterate through new assets to update existing or add new ones + for asset_name, new_asset in new_assets_dict.items(): + processed_asset_names.add(asset_name) + existing_asset = existing_assets_dict.get(asset_name) - # Set parent references for the FileRules within the new AssetRule - for file_rule in asset_rule.files: - file_rule.parent_asset = asset_rule + if 
existing_asset: + # --- Update Existing AssetRule --- + log.debug(f" Merging AssetRule: {asset_name}") + existing_asset_row = existing_source_rule.assets.index(existing_asset) + existing_asset_index = self.createIndex(existing_asset_row, 0, existing_asset) - # --- Signal layout change completion --- - self.layoutChanged.emit() # Emit without arguments - print(f"UnifiedViewModel: Updated children for SourceRule: {source_path}") + # Update non-override fields (e.g., asset_type) + if existing_asset.asset_type != new_asset.asset_type and existing_asset.asset_type_override is None: + existing_asset.asset_type = new_asset.asset_type + asset_type_col_index = self.createIndex(existing_asset_row, self.COL_ASSET_TYPE, existing_asset) + self.dataChanged.emit(asset_type_col_index, asset_type_col_index, [Qt.DisplayRole, Qt.EditRole, Qt.BackgroundRole]) # Include BackgroundRole for color + + # --- Merge FileRules within the AssetRule --- + self._merge_file_rules(existing_asset, new_asset, existing_asset_index) + + else: + # --- Add New AssetRule --- + log.debug(f" Adding new AssetRule: {asset_name}") + new_asset.parent_source = existing_source_rule # Set parent + # Ensure file parents are set + for file_rule in new_asset.files: + file_rule.parent_asset = new_asset + + insert_row = len(existing_source_rule.assets) + self.beginInsertRows(existing_source_index, insert_row, insert_row) + existing_source_rule.assets.append(new_asset) + self.endInsertRows() + + # --- Remove Old AssetRules --- + # Find assets in existing but not in new, and remove them in reverse order + assets_to_remove = [] + for i, existing_asset in reversed(list(enumerate(existing_source_rule.assets))): + if existing_asset.asset_name not in processed_asset_names: + assets_to_remove.append((i, existing_asset.asset_name)) # Store index and name + + for row_index, asset_name_to_remove in assets_to_remove: + log.debug(f" Removing old AssetRule: {asset_name_to_remove}") + self.beginRemoveRows(existing_source_index, 
row_index, row_index) + existing_source_rule.assets.pop(row_index) + self.endRemoveRows() + + + def _merge_file_rules(self, existing_asset: AssetRule, new_asset: AssetRule, parent_asset_index: QModelIndex): + """Helper method to merge FileRules for a given AssetRule.""" + existing_files_dict = {file.file_path: file for file in existing_asset.files} + new_files_dict = {file.file_path: file for file in new_asset.files} + processed_file_paths = set() + + # Iterate through new files to update existing or add new ones + for file_path, new_file in new_files_dict.items(): + processed_file_paths.add(file_path) + existing_file = existing_files_dict.get(file_path) + + if existing_file: + # --- Update Existing FileRule --- + log.debug(f" Merging FileRule: {Path(file_path).name}") + existing_file_row = existing_asset.files.index(existing_file) + existing_file_index = self.createIndex(existing_file_row, 0, existing_file) # Index relative to parent_asset_index + + # Update non-override fields (item_type, standard_map_type) + changed_roles = [] + if existing_file.item_type != new_file.item_type and existing_file.item_type_override is None: + existing_file.item_type = new_file.item_type + changed_roles.extend([Qt.DisplayRole, Qt.EditRole, Qt.BackgroundRole]) # Include BackgroundRole for color + + # Update standard_map_type (assuming it's derived/set during prediction) + # Check if standard_map_type exists on both objects before comparing + new_standard_type = getattr(new_file, 'standard_map_type', None) + old_standard_type = getattr(existing_file, 'standard_map_type', None) + if old_standard_type != new_standard_type: + # Update only if item_type_override is not set, as override dictates standard type + if existing_file.item_type_override is None: + existing_file.standard_map_type = new_standard_type + # standard_map_type might not directly affect display, but item_type change covers it + if Qt.DisplayRole not in changed_roles: # Avoid duplicates + 
changed_roles.extend([Qt.DisplayRole, Qt.EditRole]) + + + # Emit dataChanged only if something actually changed + if changed_roles: + # Emit for all relevant columns potentially affected by type changes + for col in [self.COL_ITEM_TYPE]: # Add other cols if needed + col_index = self.createIndex(existing_file_row, col, existing_file) + self.dataChanged.emit(col_index, col_index, changed_roles) + + else: + # --- Add New FileRule --- + log.debug(f" Adding new FileRule: {Path(file_path).name}") + new_file.parent_asset = existing_asset # Set parent + insert_row = len(existing_asset.files) + self.beginInsertRows(parent_asset_index, insert_row, insert_row) + existing_asset.files.append(new_file) + self.endInsertRows() + + # --- Remove Old FileRules --- + files_to_remove = [] + for i, existing_file in reversed(list(enumerate(existing_asset.files))): + if existing_file.file_path not in processed_file_paths: + files_to_remove.append((i, Path(existing_file.file_path).name)) + + for row_index, file_name_to_remove in files_to_remove: + log.debug(f" Removing old FileRule: {file_name_to_remove}") + self.beginRemoveRows(parent_asset_index, row_index, row_index) + existing_asset.files.pop(row_index) + self.endRemoveRows() + + + # --- Dedicated Model Restructuring Methods --- + + def moveFileRule(self, source_file_index: QModelIndex, target_parent_asset_index: QModelIndex): + """Moves a FileRule (source_file_index) to a different AssetRule parent (target_parent_asset_index).""" + if not source_file_index.isValid() or not target_parent_asset_index.isValid(): + log.error("moveFileRule: Invalid source or target index provided.") + return False + + file_item = source_file_index.internalPointer() + target_parent_asset = target_parent_asset_index.internalPointer() + + if not isinstance(file_item, FileRule) or not isinstance(target_parent_asset, AssetRule): + log.error("moveFileRule: Invalid item types for source or target.") + return False + + old_parent_asset = getattr(file_item, 
'parent_asset', None) + if not old_parent_asset: + log.error(f"moveFileRule: Source file '{Path(file_item.file_path).name}' has no parent asset.") + return False + + if old_parent_asset == target_parent_asset: + log.debug("moveFileRule: Source and target parent are the same. No move needed.") + return True # Technically successful, no change needed + + # Get old parent index + source_rule = getattr(old_parent_asset, 'parent_source', None) + if not source_rule: + log.error(f"moveFileRule: Could not find SourceRule parent for old asset '{old_parent_asset.asset_name}'.") + return False + + try: + old_parent_row = source_rule.assets.index(old_parent_asset) + old_parent_index = self.createIndex(old_parent_row, 0, old_parent_asset) + source_row = old_parent_asset.files.index(file_item) + except ValueError: + log.error("moveFileRule: Could not find old parent or source file within their respective lists.") + return False + + target_row = len(target_parent_asset.files) # Append to the end of the target + + log.debug(f"Moving file '{Path(file_item.file_path).name}' from '{old_parent_asset.asset_name}' (row {source_row}) to '{target_parent_asset.asset_name}' (row {target_row})") + self.beginMoveRows(old_parent_index, source_row, source_row, target_parent_asset_index, target_row) + # Restructure internal data + old_parent_asset.files.pop(source_row) + target_parent_asset.files.append(file_item) + file_item.parent_asset = target_parent_asset # Update parent reference + self.endMoveRows() + return True + + def createAssetRule(self, source_rule: SourceRule, new_asset_name: str, copy_from_asset: AssetRule = None) -> QModelIndex: + """Creates a new AssetRule under the given SourceRule and returns its index.""" + if not isinstance(source_rule, SourceRule) or not new_asset_name: + log.error("createAssetRule: Invalid SourceRule or empty asset name provided.") + return QModelIndex() + + # Check if asset already exists under this source + for asset in source_rule.assets: + if 
asset.asset_name == new_asset_name: + log.warning(f"createAssetRule: Asset '{new_asset_name}' already exists under '{Path(source_rule.input_path).name}'.") + # Return existing index? Or fail? Let's return existing for now. + try: + existing_row = source_rule.assets.index(asset) + return self.createIndex(existing_row, 0, asset) + except ValueError: + log.error("createAssetRule: Found existing asset but failed to get its index.") + return QModelIndex() # Should not happen + + log.debug(f"Creating new AssetRule '{new_asset_name}' under '{Path(source_rule.input_path).name}'") + new_asset_rule = AssetRule(asset_name=new_asset_name) + new_asset_rule.parent_source = source_rule # Set parent reference + + # Optionally copy type info from another asset + if isinstance(copy_from_asset, AssetRule): + new_asset_rule.asset_type = copy_from_asset.asset_type + new_asset_rule.asset_type_override = copy_from_asset.asset_type_override + + # Find parent SourceRule index + try: + grandparent_row = self._source_rules.index(source_rule) + grandparent_index = self.createIndex(grandparent_row, 0, source_rule) + except ValueError: + log.error(f"createAssetRule: Could not find SourceRule '{Path(source_rule.input_path).name}' in the model's root list.") + return QModelIndex() + + # Determine insertion row for the new parent (e.g., append) + new_parent_row = len(source_rule.assets) + + # Emit signals for inserting the new parent row + self.beginInsertRows(grandparent_index, new_parent_row, new_parent_row) + source_rule.assets.insert(new_parent_row, new_asset_rule) # Insert into data structure + self.endInsertRows() + + # Return index for the newly created asset + return self.createIndex(new_parent_row, 0, new_asset_rule) + + + def removeAssetRule(self, asset_rule_to_remove: AssetRule): + """Removes an AssetRule if it's empty.""" + if not isinstance(asset_rule_to_remove, AssetRule): + log.error("removeAssetRule: Invalid AssetRule provided.") + return False + + if asset_rule_to_remove.files: + 
log.warning(f"removeAssetRule: Asset '{asset_rule_to_remove.asset_name}' is not empty. Removal aborted.")
+            return False # Do not remove non-empty assets automatically
+
+        source_rule = getattr(asset_rule_to_remove, 'parent_source', None)
+        if not source_rule:
+            log.error(f"removeAssetRule: Could not find parent SourceRule for asset '{asset_rule_to_remove.asset_name}'.")
+            return False
+
+        # Find parent SourceRule index and the row of the asset to remove
+        try:
+            grandparent_row = self._source_rules.index(source_rule)
+            grandparent_index = self.createIndex(grandparent_row, 0, source_rule)
+            asset_row_for_removal = source_rule.assets.index(asset_rule_to_remove)
+        except ValueError:
+            log.error(f"removeAssetRule: Could not find parent SourceRule or the AssetRule within its parent's list.")
+            return False
+
+        log.debug(f"Removing empty AssetRule '{asset_rule_to_remove.asset_name}' at row {asset_row_for_removal} under '{Path(source_rule.input_path).name}'")
+        self.beginRemoveRows(grandparent_index, asset_row_for_removal, asset_row_for_removal)
+        source_rule.assets.pop(asset_row_for_removal)
+        self.endRemoveRows()
+        return True
+
+    def get_asset_type_keys(self) -> List[str]:
+        """Returns the cached list of asset type keys."""
+        return self._asset_type_keys
+    def get_file_type_keys(self) -> List[str]:
+        """Returns the cached list of file type keys."""
+        return self._file_type_keys
 
     # --- Placeholder for node finding method (Original Request - Replaced by direct list search above) ---
-    # Kept for reference, but the logic above directly searches self._source_rules
-    # def _find_source_node_by_path(self, path: str) -> 'SourceRule | None':
-    #     """Placeholder: Finds a top-level SourceRule by its input_path."""
-    #     # This assumes the model uses separate node objects, which it doesn't.
-    #     # The current implementation uses the Rule objects directly.
- # for i, rule in enumerate(self._source_rules): - # if rule.input_path == path: - # return rule # Return the SourceRule object itself - # return None - \ No newline at end of file + # Kept for reference, but the logic above directly searches self._source_rules \ No newline at end of file diff --git a/llm_prototype/llm_classifier.py b/llm_prototype/llm_classifier.py index c91770d..56db6f7 100644 --- a/llm_prototype/llm_classifier.py +++ b/llm_prototype/llm_classifier.py @@ -4,6 +4,7 @@ import os import json import requests import sys +import re # Add re import # Add the prototype directory to the Python path to import config_llm sys.path.append(os.path.dirname(__file__)) @@ -121,9 +122,9 @@ def call_llm_api(prompt, config): def extract_json_from_response(response_data): """ - Extracts the JSON list part from the LLM's response content by finding - the first '[' and last ']' and parsing the content between them. - Handles responses that might include a thinking block or other text before/after the JSON. + Extracts the main JSON object or list from the LLM's response content. + It handles markdown fences, reasoning tags (e.g., ), and aims + to find the first complete JSON structure ({...} or [...]). 
""" print("Extracting JSON from LLM response...") @@ -133,44 +134,94 @@ def extract_json_from_response(response_data): message = response_data['choices'][0].get('message', {}) assistant_message_content = message.get('content', '') - # Strip markdown code fences if present - if assistant_message_content.strip().startswith("```json"): - assistant_message_content = assistant_message_content.strip()[len("```json"):].strip() - if assistant_message_content.strip().endswith("```"): - assistant_message_content = assistant_message_content.strip()[:-len("```")].strip() - - print("\n--- Processed Assistant Message Content (after stripping fences) ---") - print(assistant_message_content) - print("-------------------------------------------------------------------\n") - if not assistant_message_content: - print("Error: LLM response content is empty or unexpected format.") - print(f"Full response: {response_data}") - # Attempt to return empty list for validation to catch this - return [] + print("Warning: LLM response content is empty or not found in expected structure.") + print(f"Full response data: {response_data}") + return [] # Return empty list if no content - # Find the index of the first '[' and the last ']' - first_bracket_index = assistant_message_content.find('[') - last_bracket_index = assistant_message_content.rfind(']') + content = assistant_message_content.strip() - if first_bracket_index == -1 or last_bracket_index == -1 or last_bracket_index < first_bracket_index: - print("Error: Could not find a valid JSON list structure (matching '[' and ']') in the LLM response content.") - print(f"Response content snippet: {assistant_message_content[:500]}...") # Print snippet - # Attempt to return empty list for validation to catch this - return [] + # 1. Strip markdown code fences (```json ... ``` or ``` ... 
```)
+    content = re.sub(r'^```(?:json)?\s*', '', content, flags=re.IGNORECASE)
+    content = re.sub(r'\s*```$', '', content)
+    content = content.strip()
-    # Extract the potential JSON string between the first '[' and last ']'
-    json_string = assistant_message_content[first_bracket_index : last_bracket_index + 1]
+    print("\n--- Content After Stripping Fences ---")
+    print(content)
+    print("---------------------------------------\n")
-    # Attempt to parse the extracted string as JSON
+    # 2. Remove reasoning tags like <think>...</think> (non-greedy)
+    # Consider making tag removal more general if other tags appear
+    print("\n--- Content BEFORE Removing Tags ---")
+    print(repr(content)) # Using repr() to see hidden characters like newlines
+    content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL | re.IGNORECASE)
+    print("\n--- Content AFTER Removing Tags ---") # Added this line
+    print(repr(content)) # Using repr() to see hidden characters like newlines
+    content = content.strip()
+
+    # Original print statement, now correctly indented and showing the final cleaned content before JSON parsing attempt
+    print("\n--- Final Content Before JSON Parsing Attempt ---")
+    print(content)
+    print("-------------------------------------------------\n")
+
+    if not content:
+        print("Error: LLM response content is empty after stripping fences and tags.")
+        return [] # Return empty list if nothing remains
+
+    # 3.
Find the first opening bracket or brace indicating start of JSON + first_bracket_index = content.find('[') + first_brace_index = content.find('{') + + start_index = -1 + if first_bracket_index != -1 and first_brace_index != -1: + start_index = min(first_bracket_index, first_brace_index) + elif first_bracket_index != -1: + start_index = first_bracket_index + elif first_brace_index != -1: + start_index = first_brace_index + + if start_index == -1: + print("Error: Could not find starting '[' or '{' in the processed content.") + print(f"Processed content snippet: {content[:500]}...") + return [] # Return empty list if no JSON start found + + # 4. Attempt to find the corresponding closing bracket/brace and parse + # This uses json.JSONDecoder.raw_decode to find the first valid JSON object/array. + potential_json_str = content[start_index:] + json_decoder = json.JSONDecoder() try: - parsed_json = json.loads(json_string) + # Use decode with raw_decode to find the first valid JSON object/array + # and its end position in the string. 
+ parsed_json, end_pos = json_decoder.raw_decode(potential_json_str) + print(f"Successfully parsed JSON ending at index {start_index + end_pos}.") + # Optional: Log the extracted part: print(f"Extracted JSON string: {potential_json_str[:end_pos]}") return parsed_json except json.JSONDecodeError as e: - print(f"Error: Could not decode extracted JSON from LLM response: {e}") - print(f"Attempted to parse (snippet): {json_string[:500]}...") # Print snippet - # Attempt to return empty list for validation to catch this - return [] + # This error means no valid JSON object was found starting at start_index + print(f"Error: Could not decode JSON starting from index {start_index}: {e}") + print(f"Content snippet starting at index {start_index}: {potential_json_str[:500]}...") + + # Fallback: Try the original naive approach (first '['/'{' to last ']'/'}') + # This might capture the JSON if it's the last element, even with preceding noise + # that confused raw_decode. + print("Attempting fallback: finding last ']' or '}'...") + last_bracket_index = content.rfind(']') + last_brace_index = content.rfind('}') + end_index = max(last_bracket_index, last_brace_index) + + if end_index > start_index: + fallback_json_string = content[start_index : end_index + 1] + print(f"Fallback attempting to parse: {fallback_json_string[:500]}...") + try: + parsed_json = json.loads(fallback_json_string) + print("Successfully parsed JSON using fallback method.") + return parsed_json + except json.JSONDecodeError as fallback_e: + print(f"Fallback JSON parsing also failed: {fallback_e}") + return [] # Return empty list if both methods fail + else: + print("Fallback failed: Could not find suitable closing bracket/brace.") + return [] # Return empty list if fallback indices are invalid diff --git a/main.py b/main.py index e11a797..83889d9 100644 --- a/main.py +++ b/main.py @@ -26,6 +26,7 @@ try: from processing_engine import ProcessingEngine # <<< ADDED NEW ENGINE IMPORT from rule_structure import 
SourceRule # Import SourceRule for type hinting from gui.main_window import MainWindow # Import MainWindow + from utils.workspace_utils import prepare_processing_workspace # <<< ADDED UTILITY IMPORT except ImportError as e: # Provide a more helpful error message if imports fail script_dir = Path(__file__).parent.resolve() @@ -172,35 +173,16 @@ class ProcessingTask(QRunnable): log.debug(f"DEBUG: Rule passed to ProcessingTask.run: {self.rule}") # DEBUG LOG status = "failed" # Default status result_or_error = None - temp_workspace_dir = None # Initialize outside try + prepared_workspace_path = None # Initialize path for prepared content outside try try: - # --- 1. Prepare Input Workspace --- - original_input_path = Path(self.rule.input_path) - prepared_workspace_path = None + # --- 1. Prepare Input Workspace using Utility Function --- + # The utility function creates the temp dir, prepares it, and returns its path. + # It raises exceptions on failure (FileNotFoundError, ValueError, zipfile.BadZipFile, OSError). 
+ prepared_workspace_path = prepare_processing_workspace(self.rule.input_path) + log.info(f"Workspace prepared successfully at: {prepared_workspace_path}") - if not original_input_path.exists(): - raise FileNotFoundError(f"Original input path does not exist: {original_input_path}") - - # Create a temporary directory for processing - temp_workspace_dir = tempfile.mkdtemp(prefix="asset_proc_") - prepared_workspace_path = Path(temp_workspace_dir) - log.info(f"Created temporary workspace: {prepared_workspace_path}") - - # Check if input is directory or zip file - if original_input_path.is_dir(): - log.info(f"Input is a directory, copying contents to workspace: {original_input_path}") - # Copy directory contents into the temp workspace - shutil.copytree(original_input_path, prepared_workspace_path, dirs_exist_ok=True) - elif original_input_path.is_file() and original_input_path.suffix.lower() == '.zip': - log.info(f"Input is a zip file, extracting to workspace: {original_input_path}") - with zipfile.ZipFile(original_input_path, 'r') as zip_ref: - zip_ref.extractall(prepared_workspace_path) - else: - # Handle unsupported input types if necessary - raise ValueError(f"Unsupported input type: {original_input_path}. Must be a directory or .zip file.") - -# --- DEBUG: List files in prepared workspace --- + # --- DEBUG: List files in prepared workspace --- try: log.debug(f"Listing contents of prepared workspace: {prepared_workspace_path}") for item in prepared_workspace_path.rglob('*'): # Recursively list all items @@ -241,12 +223,13 @@ class ProcessingTask(QRunnable): log.error(f"Worker Thread: Error emitting finished signal for {self.rule.input_path}: {sig_err}") # --- 3. 
Cleanup Workspace --- - if temp_workspace_dir and Path(temp_workspace_dir).exists(): + # Use the path returned by the utility function for cleanup + if prepared_workspace_path and prepared_workspace_path.exists(): try: - log.info(f"Cleaning up temporary workspace: {temp_workspace_dir}") - shutil.rmtree(temp_workspace_dir) + log.info(f"Cleaning up temporary workspace: {prepared_workspace_path}") + shutil.rmtree(prepared_workspace_path) # Use the Path object except OSError as cleanup_error: - log.error(f"Worker Thread: Failed to cleanup temporary workspace {temp_workspace_dir}: {cleanup_error}") + log.error(f"Worker Thread: Failed to cleanup temporary workspace {prepared_workspace_path}: {cleanup_error}") # --- CLI Worker Function (COMMENTED OUT - Replaced by GUI Flow) --- @@ -335,7 +318,8 @@ class App(QObject): if self.processing_engine: self.main_window = MainWindow() # MainWindow now part of the App # Connect the signal from the GUI to the App's slot using QueuedConnection - connection_success = self.main_window.processing_requested.connect(self.on_processing_requested, Qt.ConnectionType.QueuedConnection) + # Connect the signal from the MainWindow (which is triggered by the panel) to the App's slot + connection_success = self.main_window.start_backend_processing.connect(self.on_processing_requested, Qt.ConnectionType.QueuedConnection) log.info(f"DEBUG: Connection result for processing_requested (Queued): {connection_success}") # <-- Modified LOG if not connection_success: log.error("*********************************************************") @@ -348,8 +332,8 @@ class App(QObject): log.error("Fatal: Cannot initialize MainWindow without ProcessingEngine.") sys.exit(1) - @Slot(list) # Slot to receive List[SourceRule] - def on_processing_requested(self, source_rules: list): + @Slot(list, dict) # Slot to receive List[SourceRule] and processing_settings dict + def on_processing_requested(self, source_rules: list, processing_settings: dict): # 
log.info("*********************************************************") # REMOVED log.debug("DEBUG: App.on_processing_requested slot entered.") # DEBUG Verify Entry (Keep this one) # log.info("*********************************************************") # REMOVED @@ -375,14 +359,15 @@ class App(QObject): self._task_results = {"processed": 0, "skipped": 0, "failed": 0} log.debug(f"Initialized active task count to: {self._active_tasks_count}") - # Update GUI progress bar/status - self.main_window.progress_bar.setMaximum(len(source_rules)) - self.main_window.progress_bar.setValue(0) - self.main_window.progress_bar.setFormat(f"0/{len(source_rules)} tasks") + # Update GUI progress bar/status via MainPanelWidget + self.main_window.main_panel_widget.progress_bar.setMaximum(len(source_rules)) + self.main_window.main_panel_widget.progress_bar.setValue(0) + self.main_window.main_panel_widget.progress_bar.setFormat(f"0/{len(source_rules)} tasks") # --- Get paths needed for ProcessingTask --- try: - output_base_path_str = self.main_window.output_path_edit.text().strip() + # Access output path via MainPanelWidget + output_base_path_str = self.main_window.main_panel_widget.output_path_edit.text().strip() if not output_base_path_str: log.error("Cannot queue tasks: Output directory path is empty in the GUI.") self.main_window.statusBar().showMessage("Error: Output directory cannot be empty.", 5000) @@ -406,6 +391,11 @@ class App(QObject): # --- End Get paths --- + # Set max threads based on GUI setting + worker_count = processing_settings.get('workers', 1) + self.thread_pool.setMaxThreadCount(worker_count) + log.info(f"Set thread pool max workers to: {worker_count}") + # Queue tasks in the thread pool log.debug("DEBUG: Entering task queuing loop.") # <-- Keep this log for i, rule in enumerate(source_rules): # Added enumerate for index logging @@ -484,10 +474,10 @@ class App(QObject): else: # Count all other statuses (failed_preparation, failed_processing) as failed 
self._task_results["failed"] += 1 - # Update progress bar - total_tasks = self.main_window.progress_bar.maximum() + # Update progress bar via MainPanelWidget + total_tasks = self.main_window.main_panel_widget.progress_bar.maximum() completed_tasks = total_tasks - self._active_tasks_count - self.main_window.update_progress_bar(completed_tasks, total_tasks) # Use MainWindow's method + self.main_window.main_panel_widget.update_progress_bar(completed_tasks, total_tasks) # Use MainPanelWidget's method # Update status for the specific file in the GUI (if needed) # self.main_window.update_file_status(rule_input_path, status, str(result_or_error) if result_or_error else "") @@ -513,182 +503,182 @@ class App(QObject): # --- Main CLI Execution Function (Adapted from old main()) --- -def run_cli(args): # Accept parsed args - """Uses parsed arguments, sets up logging, runs processing, and reports summary for CLI mode.""" - # parser = setup_arg_parser() # No longer needed - # args = parser.parse_args() # Args are passed in - - # --- Validate required CLI arguments --- - if not args.input_paths: - log.error("CLI Error: Input path(s) are required for CLI mode.") - sys.exit(1) - if not args.preset: - log.error("CLI Error: Preset (-p/--preset) is required for CLI mode.") - sys.exit(1) - # --- End Validation --- - - # Logging setup is already done outside this function in the __main__ block - - start_time = time.time() - log.info("Asset Processor Script Started (CLI Mode)") - - # --- Validate Input Paths --- - valid_inputs = [] - for p_str in args.input_paths: - p = Path(p_str) - if p.exists(): - suffix = p.suffix.lower() - # TODO: Add support for other archive types if needed (.rar, .7z) - if p.is_dir() or (p.is_file() and suffix == '.zip'): - valid_inputs.append(p_str) # Store the original string path - else: - log.warning(f"Input is not a directory or a supported archive type (.zip), skipping: {p_str}") - else: - log.warning(f"Input path not found, skipping: {p_str}") - - if not 
valid_inputs: - log.error("No valid input paths found. Exiting.") - sys.exit(1) # Exit with error code - - # --- Determine Output Directory --- - output_dir_str = args.output_dir # Get value from args (might be None) - if not output_dir_str: - log.debug("Output directory not specified via -o, reading default from app_settings.json via load_base_config().") - try: - base_config = load_base_config() - output_dir_str = base_config.get('OUTPUT_BASE_DIR') - if not output_dir_str: - log.error("Output directory not specified with -o and 'OUTPUT_BASE_DIR' not found or empty in app_settings.json. Exiting.") - sys.exit(1) - log.info(f"Using default output directory from app_settings.json: {output_dir_str}") - except ConfigurationError as e: - log.error(f"Error reading base configuration for OUTPUT_BASE_DIR: {e}") - sys.exit(1) - except Exception as e: - log.exception(f"Unexpected error reading base configuration for OUTPUT_BASE_DIR: {e}") - sys.exit(1) - - # --- Resolve Output Path --- - output_path_obj = Path(output_dir_str).resolve() # Resolve to absolute path - - # --- Validate and Setup Output Directory --- - try: - log.info(f"Ensuring output directory exists: {output_path_obj}") - output_path_obj.mkdir(parents=True, exist_ok=True) - output_dir_for_processor = str(output_path_obj) - except Exception as e: - log.error(f"Cannot create or access output directory '{output_path_obj}': {e}", exc_info=True) - sys.exit(1) - - # --- Load Configuration --- - try: - config = Configuration(args.preset) # Pass preset name from args - log.info(f"Configuration loaded for preset: {args.preset}") - except ConfigurationError as e: - log.error(f"Error loading configuration for preset '{args.preset}': {e}") - sys.exit(1) - except Exception as e: - log.exception(f"Unexpected error loading configuration: {e}") - sys.exit(1) - - # --- Initialize Processing Engine --- - try: - engine = ProcessingEngine(config) - log.info("ProcessingEngine initialized for CLI mode.") - except Exception as e: - 
log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}") - sys.exit(1) - - # --- Execute Processing (Simplified Sequential for now) --- - # TODO: Re-implement parallel processing using concurrent.futures if needed. - # TODO: CLI mode needs a way to generate SourceRule objects. - # For now, we'll pass a simplified structure or assume engine handles it. - # This part likely needs significant adaptation based on ProcessingEngine.process requirements. - log.warning("CLI processing currently uses simplified sequential execution.") - log.warning("SourceRule generation for CLI mode is basic and may need refinement.") - - processed_count = 0 - skipped_count = 0 # Placeholder - failed_count = 0 - results_list = [] # Placeholder - - for input_path_str in valid_inputs: - log.info(f"--- Processing Input: {Path(input_path_str).name} ---") - try: - # --- Basic SourceRule Creation (Needs Review/Adaptation) --- - # This is a placeholder. The engine likely needs more detailed file info. - # We might need to extract file list here like the GUI does. - input_path_obj = Path(input_path_str) - # Example: Create a rule assuming the input is a single asset - # This won't handle multi-asset archives correctly without more logic. - asset_name = input_path_obj.stem # Basic assumption - # File list extraction would be needed here for proper FileRule creation - # file_list = _extract_file_list(input_path_str) # Need to define/import this helper - # file_rules = [FileRule(file_path=f) for f in file_list] if file_list else [] - # asset_rule = AssetRule(asset_name=asset_name, files=file_rules) - # rule = SourceRule(input_path=input_path_str, assets=[asset_rule], supplier_identifier=config.settings.get('supplier_identifier')) # Access from config object - # --- End Placeholder --- - - # --- TEMPORARY: Call engine process with just config and path --- - # This assumes engine.process can handle this or needs adaptation. 
- # If engine.process strictly requires a SourceRule, this will fail. - # result = engine.process(config=config, input_path=input_path_obj, overwrite=args.overwrite) - # --- END TEMPORARY --- - - # --- Attempt with Placeholder SourceRule (More likely signature) --- - # This still requires file list extraction and rule creation logic - log.error("CLI Processing Logic Incomplete: SourceRule creation and engine call need implementation.") - # Example (requires file list extraction and rule building): - # rule = build_basic_source_rule(input_path_str, config) # Hypothetical function - # if rule: - # engine.process(rule) # Assuming process takes one rule - # processed_count += 1 # Basic success tracking - # else: - # log.warning(f"Could not create basic rule for {input_path_str}, skipping.") - # failed_count += 1 - # --- End Placeholder --- - raise NotImplementedError("CLI processing logic for SourceRule creation and engine call is not fully implemented.") - - - except NotImplementedError as e: - log.error(f"Stopping CLI run due to incomplete implementation: {e}") - failed_count += 1 - break # Stop processing further items - except Exception as e: - log.exception(f"Error processing input '{Path(input_path_str).name}': {e}") - failed_count += 1 - results_list.append((input_path_str, "failed", str(e))) # Placeholder result - - # --- Report Summary --- - duration = time.time() - start_time - log.info("=" * 40) - log.info("CLI Processing Summary") - log.info(f" Duration: {duration:.2f} seconds") - log.info(f" Inputs Attempted: {len(valid_inputs)}") - log.info(f" Successfully Processed: {processed_count}") - log.info(f" Skipped: {skipped_count}") - log.info(f" Failed: {failed_count}") - - exit_code = 0 - if failed_count > 0: - log.warning("Failures occurred.") - # Log specific errors if results_list was populated - for input_path, status, err_msg in results_list: - if status == "failed": - log.warning(f" - {Path(input_path).name}: {err_msg}") - exit_code = 1 # Exit with 
error code if failures occurred - - # --- Blender Script Execution (Optional - Copied from old main()) --- - # This section might need review based on current config/engine - run_blender = False # Placeholder, add logic if needed - if run_blender: - # ... (Blender execution logic from old main() would go here) ... - log.warning("Blender script execution from CLI not yet re-implemented.") - pass - - # --- Final Exit --- - log.info("Asset Processor Script Finished (CLI Mode).") - sys.exit(exit_code) +# def run_cli(args): # Accept parsed args +# """Uses parsed arguments, sets up logging, runs processing, and reports summary for CLI mode.""" +# # parser = setup_arg_parser() # No longer needed +# # args = parser.parse_args() # Args are passed in +# +# # --- Validate required CLI arguments --- +# if not args.input_paths: +# log.error("CLI Error: Input path(s) are required for CLI mode.") +# sys.exit(1) +# if not args.preset: +# log.error("CLI Error: Preset (-p/--preset) is required for CLI mode.") +# sys.exit(1) +# # --- End Validation --- +# +# # Logging setup is already done outside this function in the __main__ block +# +# start_time = time.time() +# log.info("Asset Processor Script Started (CLI Mode)") +# +# # --- Validate Input Paths --- +# valid_inputs = [] +# for p_str in args.input_paths: +# p = Path(p_str) +# if p.exists(): +# suffix = p.suffix.lower() +# # TODO: Add support for other archive types if needed (.rar, .7z) +# if p.is_dir() or (p.is_file() and suffix == '.zip'): +# valid_inputs.append(p_str) # Store the original string path +# else: +# log.warning(f"Input is not a directory or a supported archive type (.zip), skipping: {p_str}") +# else: +# log.warning(f"Input path not found, skipping: {p_str}") +# +# if not valid_inputs: +# log.error("No valid input paths found. 
Exiting.") +# sys.exit(1) # Exit with error code +# +# # --- Determine Output Directory --- +# output_dir_str = args.output_dir # Get value from args (might be None) +# if not output_dir_str: +# log.debug("Output directory not specified via -o, reading default from app_settings.json via load_base_config().") +# try: +# base_config = load_base_config() +# output_dir_str = base_config.get('OUTPUT_BASE_DIR') +# if not output_dir_str: +# log.error("Output directory not specified with -o and 'OUTPUT_BASE_DIR' not found or empty in app_settings.json. Exiting.") +# sys.exit(1) +# log.info(f"Using default output directory from app_settings.json: {output_dir_str}") +# except ConfigurationError as e: +# log.error(f"Error reading base configuration for OUTPUT_BASE_DIR: {e}") +# sys.exit(1) +# except Exception as e: +# log.exception(f"Unexpected error reading base configuration for OUTPUT_BASE_DIR: {e}") +# sys.exit(1) +# +# # --- Resolve Output Path --- +# output_path_obj = Path(output_dir_str).resolve() # Resolve to absolute path +# +# # --- Validate and Setup Output Directory --- +# try: +# log.info(f"Ensuring output directory exists: {output_path_obj}") +# output_path_obj.mkdir(parents=True, exist_ok=True) +# output_dir_for_processor = str(output_path_obj) +# except Exception as e: +# log.error(f"Cannot create or access output directory '{output_path_obj}': {e}", exc_info=True) +# sys.exit(1) +# +# # --- Load Configuration --- +# try: +# config = Configuration(args.preset) # Pass preset name from args +# log.info(f"Configuration loaded for preset: {args.preset}") +# except ConfigurationError as e: +# log.error(f"Error loading configuration for preset '{args.preset}': {e}") +# sys.exit(1) +# except Exception as e: +# log.exception(f"Unexpected error loading configuration: {e}") +# sys.exit(1) +# +# # --- Initialize Processing Engine --- +# try: +# engine = ProcessingEngine(config) +# log.info("ProcessingEngine initialized for CLI mode.") +# except Exception as e: +# 
log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}") +# sys.exit(1) +# +# # --- Execute Processing (Simplified Sequential for now) --- +# # TODO: Re-implement parallel processing using concurrent.futures if needed. +# # TODO: CLI mode needs a way to generate SourceRule objects. +# # For now, we'll pass a simplified structure or assume engine handles it. +# # This part likely needs significant adaptation based on ProcessingEngine.process requirements. +# log.warning("CLI processing currently uses simplified sequential execution.") +# log.warning("SourceRule generation for CLI mode is basic and may need refinement.") +# +# processed_count = 0 +# skipped_count = 0 # Placeholder +# failed_count = 0 +# results_list = [] # Placeholder +# +# for input_path_str in valid_inputs: +# log.info(f"--- Processing Input: {Path(input_path_str).name} ---") +# try: +# # --- Basic SourceRule Creation (Needs Review/Adaptation) --- +# # This is a placeholder. The engine likely needs more detailed file info. +# # We might need to extract file list here like the GUI does. +# input_path_obj = Path(input_path_str) +# # Example: Create a rule assuming the input is a single asset +# # This won't handle multi-asset archives correctly without more logic. +# asset_name = input_path_obj.stem # Basic assumption +# # File list extraction would be needed here for proper FileRule creation +# # file_list = _extract_file_list(input_path_str) # Need to define/import this helper +# # file_rules = [FileRule(file_path=f) for f in file_list] if file_list else [] +# # asset_rule = AssetRule(asset_name=asset_name, files=file_rules) +# # rule = SourceRule(input_path=input_path_str, assets=[asset_rule], supplier_identifier=config.settings.get('supplier_identifier')) # Access from config object +# # --- End Placeholder --- +# +# # --- TEMPORARY: Call engine process with just config and path --- +# # This assumes engine.process can handle this or needs adaptation. 
+# # If engine.process strictly requires a SourceRule, this will fail. +# # result = engine.process(config=config, input_path=input_path_obj, overwrite=args.overwrite) +# # --- END TEMPORARY --- +# +# # --- Attempt with Placeholder SourceRule (More likely signature) --- +# # This still requires file list extraction and rule creation logic +# log.error("CLI Processing Logic Incomplete: SourceRule creation and engine call need implementation.") +# # Example (requires file list extraction and rule building): +# # rule = build_basic_source_rule(input_path_str, config) # Hypothetical function +# # if rule: +# # engine.process(rule) # Assuming process takes one rule +# # processed_count += 1 # Basic success tracking +# # else: +# # log.warning(f"Could not create basic rule for {input_path_str}, skipping.") +# # failed_count += 1 +# # --- End Placeholder --- +# raise NotImplementedError("CLI processing logic for SourceRule creation and engine call is not fully implemented.") +# +# +# except NotImplementedError as e: +# log.error(f"Stopping CLI run due to incomplete implementation: {e}") +# failed_count += 1 +# break # Stop processing further items +# except Exception as e: +# log.exception(f"Error processing input '{Path(input_path_str).name}': {e}") +# failed_count += 1 +# results_list.append((input_path_str, "failed", str(e))) # Placeholder result +# +# # --- Report Summary --- +# duration = time.time() - start_time +# log.info("=" * 40) +# log.info("CLI Processing Summary") +# log.info(f" Duration: {duration:.2f} seconds") +# log.info(f" Inputs Attempted: {len(valid_inputs)}") +# log.info(f" Successfully Processed: {processed_count}") +# log.info(f" Skipped: {skipped_count}") +# log.info(f" Failed: {failed_count}") +# +# exit_code = 0 +# if failed_count > 0: +# log.warning("Failures occurred.") +# # Log specific errors if results_list was populated +# for input_path, status, err_msg in results_list: +# if status == "failed": +# log.warning(f" - {Path(input_path).name}: 
{err_msg}") +# exit_code = 1 # Exit with error code if failures occurred +# +# # --- Blender Script Execution (Optional - Copied from old main()) --- +# # This section might need review based on current config/engine +# run_blender = False # Placeholder, add logic if needed +# if run_blender: +# # ... (Blender execution logic from old main() would go here) ... +# log.warning("Blender script execution from CLI not yet re-implemented.") +# pass +# +# # --- Final Exit --- +# log.info("Asset Processor Script Finished (CLI Mode).") +# sys.exit(exit_code) if __name__ == "__main__": diff --git a/monitor.py b/monitor.py index 56d10d7..4a6dce2 100644 --- a/monitor.py +++ b/monitor.py @@ -6,18 +6,33 @@ import time import logging import re import shutil +import tempfile # For potential temporary workspace if needed directly from pathlib import Path +from concurrent.futures import ThreadPoolExecutor from watchdog.observers.polling import PollingObserver as Observer # Use polling for better compatibility from watchdog.events import FileSystemEventHandler, FileCreatedEvent # --- Import from local modules --- +# Assuming standard project structure +from configuration import load_config, ConfigurationError # Assuming load_config is here +from processing_engine import ProcessingEngine, ProcessingError # Assuming ProcessingError exists +from rule_structure import SourceRule # Assuming SourceRule is here +# Assuming workspace utils exist - adjust path if necessary try: - # Assuming main.py is in the same directory - from main import run_processing, setup_logging, ConfigurationError, AssetProcessingError -except ImportError as e: - print(f"ERROR: Failed to import required functions/classes from main.py: {e}") - print("Ensure main.py is in the same directory as monitor.py.") - sys.exit(1) + from utils.workspace_utils import prepare_processing_workspace, WorkspaceError +except ImportError: + log = logging.getLogger(__name__) # Need logger early for this message + log.warning("Could not 
import workspace_utils. Workspace preparation/cleanup might fail.") + # Define dummy functions/exceptions if import fails to avoid NameErrors later, + # but log prominently. + def prepare_processing_workspace(archive_path: Path) -> Path: + log.error("prepare_processing_workspace is not available!") + # Create a dummy temp dir to allow code flow, but it won't be the real one + return Path(tempfile.mkdtemp(prefix="dummy_workspace_")) + class WorkspaceError(Exception): pass + +from utils.prediction_utils import generate_source_rule_from_archive, PredictionError + # --- Configuration --- # Read from environment variables with defaults @@ -33,13 +48,14 @@ DEFAULT_WORKERS = max(1, os.cpu_count() // 2 if os.cpu_count() else 1) NUM_WORKERS = int(os.environ.get('NUM_WORKERS', str(DEFAULT_WORKERS))) # --- Logging Setup --- +# Configure logging (ensure logger is available before potential import errors) log_level = getattr(logging, LOG_LEVEL_STR, logging.INFO) -# Use the setup_logging from main.py but configure the level directly -# We don't have a 'verbose' flag here, so call basicConfig directly log_format = '%(asctime)s [%(levelname)-8s] %(name)s: %(message)s' date_format = '%Y-%m-%d %H:%M:%S' logging.basicConfig(level=log_level, format=log_format, datefmt=date_format, handlers=[logging.StreamHandler(sys.stdout)]) -log = logging.getLogger("monitor") +log = logging.getLogger("monitor") # Define logger after basicConfig + +# Log configuration values after logger is set up log.info(f"Logging level set to: {logging.getLevelName(log_level)}") log.info(f"Monitoring Input Directory: {INPUT_DIR}") log.info(f"Output Directory: {OUTPUT_DIR}") @@ -51,18 +67,8 @@ log.info(f"Max Workers: {NUM_WORKERS}") # --- Preset Validation --- -PRESET_DIR = Path(__file__).parent / "Presets" -PRESET_FILENAME_REGEX = re.compile(r"^\[?([a-zA-Z0-9_-]+)\]?_.*\.(zip|rar|7z)$", re.IGNORECASE) - -def validate_preset(preset_name: str) -> bool: - """Checks if the preset JSON file exists.""" - if not 
preset_name: - return False - preset_file = PRESET_DIR / f"{preset_name}.json" - exists = preset_file.is_file() - if not exists: - log.warning(f"Preset file not found: {preset_file}") - return exists +# --- Constants --- +SUPPORTED_SUFFIXES = ['.zip', '.rar', '.7z'] # --- Watchdog Event Handler --- class ZipHandler(FileSystemEventHandler): @@ -77,10 +83,13 @@ class ZipHandler(FileSystemEventHandler): self.output_dir.mkdir(parents=True, exist_ok=True) self.processed_dir.mkdir(parents=True, exist_ok=True) self.error_dir.mkdir(parents=True, exist_ok=True) - log.info("Handler initialized, target directories ensured.") + + # Initialize ThreadPoolExecutor + self.executor = ThreadPoolExecutor(max_workers=NUM_WORKERS) + log.info(f"Handler initialized, target directories ensured. ThreadPoolExecutor started with {NUM_WORKERS} workers.") def on_created(self, event: FileCreatedEvent): - """Called when a file or directory is created.""" + """Called when a file or directory is created. Submits task to executor.""" if event.is_directory: return @@ -88,87 +97,39 @@ class ZipHandler(FileSystemEventHandler): log.debug(f"File creation event detected: {src_path}") # Check if the file has a supported archive extension - supported_suffixes = ['.zip', '.rar', '.7z'] - if src_path.suffix.lower() not in supported_suffixes: + if src_path.suffix.lower() not in SUPPORTED_SUFFIXES: log.debug(f"Ignoring file with unsupported extension: {src_path.name}") return - log.info(f"Detected new ZIP file: {src_path.name}. Waiting {PROCESS_DELAY}s before processing...") - time.sleep(PROCESS_DELAY) + log.info(f"Detected new archive: {src_path.name}. 
Waiting {PROCESS_DELAY}s before queueing...") + time.sleep(PROCESS_DELAY) # Wait for file write to complete - # Re-check if file still exists (might have been temporary) + # Re-check if file still exists (might have been temporary or moved quickly) if not src_path.exists(): log.warning(f"File disappeared after delay: {src_path.name}") return - log.info(f"Processing file: {src_path.name}") + log.info(f"Queueing processing task for: {src_path.name}") + # Submit the processing task to the thread pool + # Pass necessary context like directories + self.executor.submit( + _process_archive_task, + archive_path=src_path, + output_dir=self.output_dir, + processed_dir=self.processed_dir, + error_dir=self.error_dir + ) - # --- Extract Preset Name --- - match = PRESET_FILENAME_REGEX.match(src_path.name) - if not match: - log.warning(f"Filename '{src_path.name}' does not match expected format '[preset]_filename.zip'. Ignoring.") - # Optionally move to an 'ignored' or 'error' directory? For now, leave it. - return + def shutdown(self): + """Shuts down the thread pool executor.""" + log.info("Shutting down thread pool executor...") + self.executor.shutdown(wait=True) + log.info("Executor shut down.") - preset_name = match.group(1) - log.info(f"Extracted preset name: '{preset_name}' from {src_path.name}") - - # --- Validate Preset --- - if not validate_preset(preset_name): - log.error(f"Preset '{preset_name}' is not valid (missing {PRESET_DIR / f'{preset_name}.json'}). Ignoring file {src_path.name}.") - # Move to error dir if preset is invalid? Let's do that. 
- self.move_file(src_path, self.error_dir, "invalid_preset") - return - - # --- Run Processing --- - try: - log.info(f"Starting asset processing for '{src_path.name}' using preset '{preset_name}'...") - # run_processing expects a list of inputs - results = run_processing( - valid_inputs=[str(src_path)], - preset_name=preset_name, - output_dir_for_processor=str(self.output_dir), # Pass absolute output path - overwrite=False, # Default to no overwrite for monitored files? Or make configurable? Let's default to False. - num_workers=NUM_WORKERS - ) - - # --- Handle Results --- - # Check overall status based on counts - processed = results.get("processed", 0) - skipped = results.get("skipped", 0) - failed = results.get("failed", 0) - pool_error = results.get("pool_error") - - if pool_error: - log.error(f"Processing pool error for {src_path.name}: {pool_error}") - self.move_file(src_path, self.error_dir, "pool_error") - elif failed > 0: - log.error(f"Processing failed for {src_path.name}. Check worker logs for details.") - # Log specific errors if available in results_list - for res_path, status, err_msg in results.get("results_list", []): - if status == "failed": - log.error(f" - Failure reason: {err_msg}") - self.move_file(src_path, self.error_dir, "processing_failed") - elif processed > 0: - log.info(f"Successfully processed {src_path.name}.") - self.move_file(src_path, self.processed_dir, "processed") - elif skipped > 0: - log.info(f"Processing skipped for {src_path.name} (likely already exists).") - self.move_file(src_path, self.processed_dir, "skipped") - else: - # Should not happen if input was valid zip - log.warning(f"Processing finished for {src_path.name} with unexpected status (0 processed, 0 skipped, 0 failed). 
Moving to error dir.") - self.move_file(src_path, self.error_dir, "unknown_status") - - except (ConfigurationError, AssetProcessingError) as e: - log.error(f"Asset processing error for {src_path.name}: {e}", exc_info=True) - self.move_file(src_path, self.error_dir, "processing_exception") - except Exception as e: - log.exception(f"Unexpected error during processing trigger for {src_path.name}: {e}") - self.move_file(src_path, self.error_dir, "monitor_exception") - - - def move_file(self, src: Path, dest_dir: Path, reason: str): + # move_file remains largely the same, but called from _process_archive_task now + # We make it static or move it outside the class if _process_archive_task is outside + @staticmethod + def move_file(src: Path, dest_dir: Path, reason: str): """Safely moves a file, handling potential name collisions.""" if not src.exists(): log.warning(f"Source file {src} does not exist, cannot move for reason: {reason}.") @@ -190,6 +151,95 @@ class ZipHandler(FileSystemEventHandler): log.exception(f"Failed to move file {src.name} to {dest_dir}: {e}") +# --- Processing Task Function --- +def _process_archive_task(archive_path: Path, output_dir: Path, processed_dir: Path, error_dir: Path): + """ + Task executed by the ThreadPoolExecutor to process a single archive file. + """ + log.info(f"[Task:{archive_path.name}] Starting processing.") + temp_workspace_path: Optional[Path] = None + config = None + source_rule = None + move_reason = "unknown_error" # Default reason if early exit + + try: + # --- a. Load Configuration --- + log.debug(f"[Task:{archive_path.name}] Loading configuration...") + # Assuming load_config() loads the main app config (e.g., app_settings.json) + # and potentially merges preset defaults or paths. Adjust if needed. 
+ config = load_config() # Might need path argument depending on implementation + if not config: + raise ConfigurationError("Failed to load application configuration.") + log.debug(f"[Task:{archive_path.name}] Configuration loaded.") + + # --- b. Generate Prediction (SourceRule) --- + log.debug(f"[Task:{archive_path.name}] Generating source rule prediction...") + # This function now handles preset extraction and validation internally + source_rule = generate_source_rule_from_archive(archive_path, config) + log.info(f"[Task:{archive_path.name}] SourceRule generated successfully.") + + # --- c. Prepare Workspace --- + log.debug(f"[Task:{archive_path.name}] Preparing processing workspace...") + # This utility should handle extraction and return the temp dir path + temp_workspace_path = prepare_processing_workspace(archive_path) + log.info(f"[Task:{archive_path.name}] Workspace prepared at: {temp_workspace_path}") + + # --- d. Run Processing Engine --- + log.debug(f"[Task:{archive_path.name}] Initializing Processing Engine...") + # Pass necessary parts of the config to the engine + engine = ProcessingEngine(config=config, output_base_dir=output_dir) + log.info(f"[Task:{archive_path.name}] Running Processing Engine...") + # The engine uses the source_rule to guide processing on the workspace files + engine.run(workspace_path=temp_workspace_path, source_rule=source_rule) + log.info(f"[Task:{archive_path.name}] Processing Engine finished successfully.") + move_reason = "processed" # Set success reason + + # --- e. Handle Results & Move File (Implicit success if no exception) --- + # If engine.run completes without exception, assume success for now. + # More granular results could be returned by engine.run if needed. + # Moving is handled outside the main try block based on move_reason + + # --- f. 
Blender Integration (Placeholder) --- + # TODO: Add call to utils.blender_utils.run_blender_script if needed later + # if config.get('blender', {}).get('run_script_after_processing'): + # log.info(f"[Task:{archive_path.name}] Running Blender script (placeholder)...") + # # blender_utils.run_blender_script(output_dir / source_rule.name, config) + + + except FileNotFoundError as e: + log.error(f"[Task:{archive_path.name}] Prerequisite file not found: {e}") + move_reason = "file_not_found" + except (ConfigurationError, PredictionError, WorkspaceError, ProcessingError) as e: + log.error(f"[Task:{archive_path.name}] Processing failed: {e}", exc_info=True) + move_reason = f"{type(e).__name__.lower()}" # e.g., "predictionerror" + except Exception as e: + log.exception(f"[Task:{archive_path.name}] An unexpected error occurred during processing: {e}") + move_reason = "unexpected_exception" + + finally: + # --- Move Original Archive --- + log.debug(f"[Task:{archive_path.name}] Moving original archive based on outcome: {move_reason}") + dest_dir = processed_dir if move_reason == "processed" else error_dir + try: + # Use the static method from the handler class + ZipHandler.move_file(archive_path, dest_dir, move_reason) + except Exception as move_err: + log.exception(f"[Task:{archive_path.name}] CRITICAL: Failed to move archive file {archive_path} after processing: {move_err}") + + # --- g. 
Cleanup Workspace --- + if temp_workspace_path and temp_workspace_path.exists(): + log.debug(f"[Task:{archive_path.name}] Cleaning up workspace: {temp_workspace_path}") + try: + shutil.rmtree(temp_workspace_path) + log.info(f"[Task:{archive_path.name}] Workspace cleaned up successfully.") + except OSError as e: + log.error(f"[Task:{archive_path.name}] Error removing temporary workspace {temp_workspace_path}: {e}", exc_info=True) + elif temp_workspace_path: + log.warning(f"[Task:{archive_path.name}] Temporary workspace path recorded but not found for cleanup: {temp_workspace_path}") + + log.info(f"[Task:{archive_path.name}] Processing task finished with status: {move_reason}") + + # --- Main Monitor Loop --- if __name__ == "__main__": # Ensure input directory exists @@ -211,11 +261,13 @@ if __name__ == "__main__": # Keep the main thread alive, observer runs in background thread time.sleep(1) except KeyboardInterrupt: - log.info("Keyboard interrupt received, stopping monitor...") + log.info("Keyboard interrupt received, stopping monitor and executor...") observer.stop() + event_handler.shutdown() # Gracefully shutdown the executor except Exception as e: log.exception(f"An unexpected error occurred in the main loop: {e}") observer.stop() + event_handler.shutdown() # Ensure shutdown on other exceptions too observer.join() log.info("Monitor stopped.") \ No newline at end of file diff --git a/processing_engine.py b/processing_engine.py index d491a20..49b9988 100644 --- a/processing_engine.py +++ b/processing_engine.py @@ -430,12 +430,6 @@ class ProcessingEngine: self._cleanup_workspace() - def _setup_workspace(self): - """Creates a temporary directory for processing.""" - # This is now handled within the process method to ensure it's created per run. - # Kept as a placeholder if needed later, but currently unused. 
- pass - def _cleanup_workspace(self): """Removes the temporary workspace directory if it exists.""" if self.temp_dir and self.temp_dir.exists(): diff --git a/utils/prediction_utils.py b/utils/prediction_utils.py new file mode 100644 index 0000000..ff9779a --- /dev/null +++ b/utils/prediction_utils.py @@ -0,0 +1,197 @@ +# utils/prediction_utils.py + +import logging +import re +from pathlib import Path +from typing import Optional, Dict, Any + +# Assuming these imports based on project structure and task description +from rule_structure import SourceRule, RuleSet, MapRule, AssetRule +from configuration import load_preset # Assuming preset loading is handled here or similar +# If RuleBasedPredictionHandler exists and is the intended mechanism: +# from gui.rule_based_prediction_handler import RuleBasedPredictionHandler +# Or, if we need to replicate its core logic: +from utils.structure_analyzer import analyze_archive_structure # Hypothetical utility + +log = logging.getLogger(__name__) + +# Regex to extract preset name (similar to monitor.py) +# Matches "[PresetName]_anything.zip/rar/7z" +PRESET_FILENAME_REGEX = re.compile(r"^\[?([a-zA-Z0-9_-]+)\]?_.*\.(zip|rar|7z)$", re.IGNORECASE) + +class PredictionError(Exception): + """Custom exception for prediction failures.""" + pass + +def generate_source_rule_from_archive(archive_path: Path, config: Dict[str, Any]) -> SourceRule: + """ + Generates a SourceRule hierarchy based on rules defined in a preset, + determined by the archive filename. + + Args: + archive_path: Path to the input archive file. + config: The loaded application configuration dictionary, expected + to contain preset information or a way to load it. + + Returns: + The generated SourceRule hierarchy. + + Raises: + PredictionError: If the preset cannot be determined, loaded, or + if rule generation fails. + FileNotFoundError: If the archive_path does not exist. 
+ """ + if not archive_path.is_file(): + raise FileNotFoundError(f"Archive file not found: {archive_path}") + + log.debug(f"Generating SourceRule for archive: {archive_path.name}") + + # --- 1. Extract Preset Name --- + match = PRESET_FILENAME_REGEX.match(archive_path.name) + if not match: + raise PredictionError(f"Filename '{archive_path.name}' does not match expected format '[preset]_filename.ext'. Cannot determine preset.") + + preset_name = match.group(1) + log.info(f"Extracted preset name: '{preset_name}' from {archive_path.name}") + + # --- 2. Load Preset Rules --- + # Option A: Presets are pre-loaded in config (e.g., under 'presets' key) + # preset_rules_dict = config.get('presets', {}).get(preset_name) + # Option B: Load preset dynamically using a utility + try: + # Assuming load_preset takes the name and maybe the base config/path + # Adjust based on the actual signature of load_preset + preset_config = load_preset(preset_name) # This might need config path or dict + if not preset_config: + raise PredictionError(f"Preset '{preset_name}' configuration is empty or invalid.") + # Assuming the preset config directly contains the RuleSet structure + # or needs parsing into RuleSet. Let's assume it needs parsing. + # This part is highly dependent on how presets are stored and loaded. + # For now, let's assume preset_config IS the RuleSet dictionary. 
+ if not isinstance(preset_config.get('rules'), dict): # Basic validation + raise PredictionError(f"Preset '{preset_name}' does not contain a valid 'rules' dictionary.") + rule_set_dict = preset_config['rules'] + # We need to deserialize this dict into RuleSet object + # Assuming RuleSet has a class method or similar for this + rule_set = RuleSet.from_dict(rule_set_dict) # Placeholder for actual deserialization + + except FileNotFoundError: + raise PredictionError(f"Preset file for '{preset_name}' not found.") + except Exception as e: + log.exception(f"Failed to load or parse preset '{preset_name}': {e}") + raise PredictionError(f"Failed to load or parse preset '{preset_name}': {e}") + + if not rule_set: + raise PredictionError(f"Failed to obtain RuleSet for preset '{preset_name}'.") + + log.debug(f"Successfully loaded RuleSet for preset: {preset_name}") + + # --- 3. Generate SourceRule (Simplified Rule-Based Approach) --- + # This simulates what a RuleBasedPredictionHandler might do, but without + # needing the actual extracted files for *this* step. The rules themselves + # define the expected structure. The ProcessingEngine will later use this + # rule against the actual extracted files. + + # Create the root SourceRule based on the archive name and the loaded RuleSet + # The actual structure (AssetRules, MapRules) comes directly from the RuleSet. + # We might need to adapt the archive name slightly (e.g., remove preset prefix) + # for the root node name, depending on desired output structure. + root_name = archive_path.stem # Or further processing if needed + source_rule = SourceRule(name=root_name, rule_set=rule_set) + + # Potentially add logic here if basic archive structure analysis *is* needed + # for rule generation (e.g., using utils.structure_analyzer if it exists) + # analyze_archive_structure(archive_path, source_rule) # Example + + log.info(f"Generated initial SourceRule for '{archive_path.name}' based on preset '{preset_name}'.") + + # --- 4. 
Return SourceRule --- + # No temporary workspace needed/created in this function based on current plan. + # Cleanup is not required here. + return source_rule + +# Example Usage (Conceptual - requires actual config/presets) +if __name__ == '__main__': + logging.basicConfig(level=logging.DEBUG) + log.info("Testing prediction_utils...") + + # Create dummy files/config for testing + dummy_archive = Path("./[TestPreset]_MyAsset.zip") + dummy_archive.touch() + + # Need a dummy preset file `Presets/TestPreset.json` + preset_dir = Path(__file__).parent.parent / "Presets" + preset_dir.mkdir(exist_ok=True) + dummy_preset_path = preset_dir / "TestPreset.json" + dummy_preset_content = """ + { + "name": "TestPreset", + "description": "A dummy preset for testing", + "rules": { + "map_rules": [ + {"pattern": ".*albedo.*", "map_type": "Albedo", "color_space": "sRGB"}, + {"pattern": ".*normal.*", "map_type": "Normal", "color_space": "Non-Color"} + ], + "asset_rules": [ + {"pattern": ".*", "material_name": "{asset_name}"} + ] + }, + "settings": {} + } + """ + # Need RuleSet.from_dict implementation for this to work + # try: + # with open(dummy_preset_path, 'w') as f: + # f.write(dummy_preset_content) + # log.info(f"Created dummy preset: {dummy_preset_path}") + + # # Dummy config - structure depends on actual implementation + # dummy_config = { + # 'paths': {'presets': str(preset_dir)}, + # # 'presets': { 'TestPreset': json.loads(dummy_preset_content) } # Alt if pre-loaded + # } + + # # Mock load_preset if it's complex + # original_load_preset = load_preset + # def mock_load_preset(name): + # if name == "TestPreset": + # import json + # return json.loads(dummy_preset_content) + # else: + # raise FileNotFoundError + # load_preset = mock_load_preset # Monkey patch + + # # Mock RuleSet.from_dict + # original_from_dict = RuleSet.from_dict + # def mock_from_dict(data): + # # Basic mock - replace with actual logic + # mock_rule_set = RuleSet() + # mock_rule_set.map_rules = [MapRule(**mr) 
for mr in data.get('map_rules', [])] + # mock_rule_set.asset_rules = [AssetRule(**ar) for ar in data.get('asset_rules', [])] + # return mock_rule_set + # RuleSet.from_dict = mock_from_dict # Monkey patch + + + # try: + # generated_rule = generate_source_rule_from_archive(dummy_archive, dummy_config) + # log.info(f"Successfully generated SourceRule: {generated_rule.name}") + # log.info(f" RuleSet Map Rules: {len(generated_rule.rule_set.map_rules)}") + # log.info(f" RuleSet Asset Rules: {len(generated_rule.rule_set.asset_rules)}") + # # Add more detailed checks if needed + # except (PredictionError, FileNotFoundError) as e: + # log.error(f"Test failed: {e}") + # except Exception as e: + # log.exception("Unexpected error during test") + + # finally: + # # Clean up dummy files + # if dummy_archive.exists(): + # dummy_archive.unlink() + # if dummy_preset_path.exists(): + # dummy_preset_path.unlink() + # # Restore mocked functions + # load_preset = original_load_preset + # RuleSet.from_dict = original_from_dict + # log.info("Test cleanup complete.") + + log.warning("Note: Main execution block is commented out as it requires specific implementations of load_preset and RuleSet.from_dict.") \ No newline at end of file diff --git a/utils/workspace_utils.py b/utils/workspace_utils.py new file mode 100644 index 0000000..9f656d7 --- /dev/null +++ b/utils/workspace_utils.py @@ -0,0 +1,87 @@ +# utils/workspace_utils.py + +import tempfile +import shutil +import zipfile +import logging +from pathlib import Path +from typing import Union + +# Get a logger for this module +log = logging.getLogger(__name__) + +# Define supported archive extensions (add more as needed, e.g., '.rar', '.7z') +# Requires additional libraries like patoolib for non-zip formats. +SUPPORTED_ARCHIVES = {'.zip'} + +def prepare_processing_workspace(input_path_str: Union[str, Path]) -> Path: + """ + Prepares a temporary workspace for processing an asset source. 
+ + Handles copying directory contents or extracting supported archives + into a unique temporary directory. + + Args: + input_path_str: The path (as a string or Path object) to the input + directory or archive file. + + Returns: + The Path object representing the created temporary workspace directory. + The caller is responsible for cleaning up this directory. + + Raises: + FileNotFoundError: If the input_path does not exist. + ValueError: If the input_path is not a directory or a supported archive type. + zipfile.BadZipFile: If a zip file is corrupted. + OSError: If there are issues creating the temp directory or copying files. + """ + input_path = Path(input_path_str) + log.info(f"Preparing workspace for input: {input_path}") + + if not input_path.exists(): + raise FileNotFoundError(f"Input path does not exist: {input_path}") + + # Create a secure temporary directory + try: + temp_workspace_dir = tempfile.mkdtemp(prefix="asset_proc_") + prepared_workspace_path = Path(temp_workspace_dir) + log.info(f"Created temporary workspace: {prepared_workspace_path}") + except OSError as e: + log.error(f"Failed to create temporary directory: {e}") + raise # Re-raise the exception + + try: + # Check if input is directory or a supported archive file + if input_path.is_dir(): + log.info(f"Input is a directory, copying contents to workspace: {input_path}") + # Copy directory contents into the temp workspace + shutil.copytree(input_path, prepared_workspace_path, dirs_exist_ok=True) + elif input_path.is_file() and input_path.suffix.lower() in SUPPORTED_ARCHIVES: + log.info(f"Input is a supported archive ({input_path.suffix}), extracting to workspace: {input_path}") + if input_path.suffix.lower() == '.zip': + with zipfile.ZipFile(input_path, 'r') as zip_ref: + zip_ref.extractall(prepared_workspace_path) + # Add elif blocks here for other archive types (e.g., using patoolib) + # elif input_path.suffix.lower() in ['.rar', '.7z']: + # import patoolib + # 
patoolib.extract_archive(str(input_path), outdir=str(prepared_workspace_path)) + else: + # This case should ideally not be reached if SUPPORTED_ARCHIVES is correct + raise ValueError(f"Archive type {input_path.suffix} marked as supported but no extraction logic defined.") + else: + # Handle unsupported input types + raise ValueError(f"Unsupported input type: {input_path}. Must be a directory or a supported archive ({', '.join(SUPPORTED_ARCHIVES)}).") + + log.debug(f"Workspace preparation successful for: {input_path}") + return prepared_workspace_path + + except (FileNotFoundError, ValueError, zipfile.BadZipFile, OSError, ImportError) as e: + # Clean up the created temp directory if preparation fails mid-way + log.error(f"Error during workspace preparation for {input_path}: {e}. Cleaning up workspace.") + if prepared_workspace_path.exists(): + try: + shutil.rmtree(prepared_workspace_path) + log.info(f"Cleaned up failed workspace: {prepared_workspace_path}") + except OSError as cleanup_error: + log.error(f"Failed to cleanup workspace {prepared_workspace_path} after error: {cleanup_error}") + raise # Re-raise the original exception \ No newline at end of file