Compare commits: d473ddd7f4 ... Stable (28 commits)
| SHA1 |
|---|
| 588766ad0a |
| fe844a2714 |
| 3927f8e6c0 |
| ca92c72070 |
| 85e94a3d0d |
| ce1d8c770c |
| dfe6500141 |
| 58eb10b7dc |
| 87673507d8 |
| 344ae078a8 |
| dec5d7d27f |
| 383e904e1a |
| 6e7daf260a |
| 1cd81cb87a |
| f800bb25a9 |
| 35a7221f57 |
| 0de4db1826 |
| b441174076 |
| c2ad299ce2 |
| 528d9be47f |
| 81d8404576 |
| ab4db1b8bd |
| 06552216d5 |
| 4ffb2ff78c |
| 5bf53f036c |
| beb8640085 |
| deeb1595fd |
| 12cf557dd7 |
**.gitignore** (vendored): 4 changed lines
@@ -30,6 +30,6 @@ Thumbs.db
gui/__pycache__
__pycache__

- Testfiles
- Testfiles/
+
+ Testfiles/TestOutputs
Testfiles_
**.roo/mcp.json** (new file): 45 lines
@@ -0,0 +1,45 @@
{
  "mcpServers": {
    "conport": {
      "command": "C:\\Users\\theis\\context-portal\\.venv\\Scripts\\python.exe",
      "args": [
        "C:\\Users\\theis\\context-portal\\src\\context_portal_mcp\\main.py",
        "--mode",
        "stdio",
        "--workspace_id",
        "${workspaceFolder}"
      ],
      "alwaysAllow": [
        "get_product_context",
        "update_product_context",
        "get_active_context",
        "update_active_context",
        "log_decision",
        "get_decisions",
        "search_decisions_fts",
        "log_progress",
        "get_progress",
        "update_progress",
        "delete_progress_by_id",
        "log_system_pattern",
        "get_system_patterns",
        "log_custom_data",
        "get_custom_data",
        "delete_custom_data",
        "search_project_glossary_fts",
        "export_conport_to_markdown",
        "import_markdown_to_conport",
        "link_conport_items",
        "search_custom_data_value_fts",
        "get_linked_items",
        "batch_log_items",
        "get_item_history",
        "delete_decision_by_id",
        "delete_system_pattern_by_id",
        "get_conport_schema",
        "get_recent_activity_summary",
        "semantic_search_conport"
      ]
    }
  }
}
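The file above registers a single MCP server named `conport` and whitelists the listed tools. As a minimal, hypothetical sketch of how a host could turn that entry into a launch command (the `${workspaceFolder}` substitution is an assumption about the host's behaviour, not something defined in this file):

```python
import json
import subprocess
from pathlib import Path

def launch_conport(workspace: Path, mcp_config: Path = Path(".roo/mcp.json")) -> subprocess.Popen:
    """Illustrative only: start the configured 'conport' MCP server over stdio."""
    config = json.loads(mcp_config.read_text(encoding="utf-8"))
    server = config["mcpServers"]["conport"]
    # Mimic the host's expansion of the ${workspaceFolder} placeholder.
    args = [arg.replace("${workspaceFolder}", str(workspace)) for arg in server["args"]]
    return subprocess.Popen([server["command"], *args],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
```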
**.vscode/settings.json** (vendored): 2 changed lines
@@ -8,6 +8,6 @@
    ".vscode": true,
    ".vs": true,
    ".lh": true,
-   "__pycache__": true,
+   "__pycache__": true
  }
}
**AUTOTEST_GUI_PLAN.md** (new file): 112 lines
@@ -0,0 +1,112 @@
# Plan for Autotest GUI Mode Implementation

**I. Objective:**
Create an `autotest.py` script that can launch the Asset Processor GUI headlessly, load a predefined asset (`.zip`), select a predefined preset, verify the predicted rule structure against an expected JSON, trigger processing to a predefined output directory, check the output, and analyze logs for errors or specific messages. This serves as a sanity check for core GUI-driven workflows.

**II. `TestFiles` Directory:**
A new directory named `TestFiles` will be created in the project root (`c:/Users/Theis/Assetprocessor/Asset-Frameworker/TestFiles/`). This directory will house:
* Sample asset `.zip` files for testing (e.g., `TestFiles/SampleAsset1.zip`).
* Expected rule structure JSON files (e.g., `TestFiles/SampleAsset1_PresetX_expected_rules.json`).
* A subdirectory for test outputs (e.g., `TestFiles/TestOutputs/`).

**III. `autotest.py` Script:**

1. **Location:** `c:/Users/Theis/Assetprocessor/Asset-Frameworker/autotest.py` (or `scripts/autotest.py`).
2. **Command-Line Arguments (with defaults pointing to `TestFiles/`):**
   * `--zipfile`: Path to the test asset. Default: `TestFiles/default_test_asset.zip`.
   * `--preset`: Name of the preset. Default: `DefaultTestPreset`.
   * `--expectedrules`: Path to expected rules JSON. Default: `TestFiles/default_test_asset_rules.json`.
   * `--outputdir`: Path for processing output. Default: `TestFiles/TestOutputs/DefaultTestOutput`.
   * `--search` (optional): Log search term. Default: `None`.
   * `--additional-lines` (optional): Context lines for log search. Default: `0`.
3. **Core Structure:**
   * Imports necessary modules from the main application and PySide6.
   * Adds project root to `sys.path` for imports.
   * `AutoTester` class:
     * **`__init__(self, app_instance: App)`:**
       * Stores `app_instance` and `main_window`.
       * Initializes `QEventLoop`.
       * Connects `app_instance.all_tasks_finished` to `self._on_all_tasks_finished`.
       * Loads expected rules from the `--expectedrules` file.
     * **`run_test(self)`:** Orchestrates the test steps sequentially:
       1. Load ZIP (`main_window.add_input_paths()`).
       2. Select Preset (`main_window.preset_editor_widget.editor_preset_list.setCurrentItem()`).
       3. Await Prediction (using `QTimer` to poll `main_window._pending_predictions`, manage with `QEventLoop`).
       4. Retrieve & Compare Rulelist:
          * Get actual rules: `main_window.unified_model.get_all_source_rules()`.
          * Convert actual rules to comparable dict (`_convert_rules_to_comparable()`).
          * Compare with loaded expected rules (`_compare_rules()`). If mismatch, log and fail.
       5. Start Processing (emit `main_window.start_backend_processing` with rules and output settings).
       6. Await Processing (use `QEventLoop` waiting for `_on_all_tasks_finished`).
       7. Check Output Path (verify existence of output dir, list contents, basic sanity checks like non-emptiness or presence of key asset folders).
       8. Retrieve & Analyze Logs (`main_window.log_console.log_console_output.toPlainText()`, filter by `--search`, check for tracebacks).
       9. Report result and call `cleanup_and_exit()`.
     * **`_check_prediction_status(self)`:** Slot for prediction polling timer.
     * **`_on_all_tasks_finished(self, processed_count, skipped_count, failed_count)`:** Slot for `App.all_tasks_finished` signal.
     * **`_convert_rules_to_comparable(self, source_rules_list: List[SourceRule]) -> dict`:** Converts `SourceRule` objects to the JSON structure defined below.
     * **`_compare_rules(self, actual_rules_data: dict, expected_rules_data: dict) -> bool`:** Implements Option 1 comparison logic (see the sketch after section V):
       * Errors if an expected field is missing or its value mismatches.
       * Logs (but doesn't error on) fields present in actual but not in expected.
     * **`_process_and_display_logs(self, logs_text: str)`:** Handles log filtering/display.
     * **`cleanup_and_exit(self, success=True)`:** Quits `QCoreApplication` and `sys.exit()`.
   * `main()` function:
     * Parses CLI arguments.
     * Initializes `QApplication`.
     * Instantiates `main.App()` (does *not* show the GUI).
     * Instantiates `AutoTester(app_instance)`.
     * Uses `QTimer.singleShot(0, tester.run_test)` to start the test.
     * Runs `q_app.exec()`.

**IV. `expected_rules.json` Structure (Revised):**
Located in `TestFiles/`. Example: `TestFiles/SampleAsset1_PresetX_expected_rules.json`.
```json
{
  "source_rules": [
    {
      "input_path": "SampleAsset1.zip",
      "supplier_identifier": "ExpectedSupplier",
      "preset_name": "PresetX",
      "assets": [
        {
          "asset_name": "AssetNameFromPrediction",
          "asset_type": "Prop",
          "files": [
            {
              "file_path": "relative/path/to/file1.png",
              "item_type": "MAP_COL",
              "target_asset_name_override": null
            }
          ]
        }
      ]
    }
  ]
}
```

**V. Mermaid Diagram of Autotest Flow:**
```mermaid
graph TD
    A["Start autotest.py with CLI Args (defaults to TestFiles/)"] --> B{"Setup Args & Logging"};
    B --> C["Init QApplication & main.App (GUI Headless)"];
    C --> D["Instantiate AutoTester(app_instance)"];
    D --> E["QTimer.singleShot -> AutoTester.run_test()"];

    subgraph run_test ["AutoTester.run_test()"]
    E --> F["Load Expected Rules from --expectedrules JSON"];
    F --> G["Load ZIP (--zipfile) via main_window.add_input_paths()"];
    G --> H["Select Preset (--preset) via main_window.preset_editor_widget"];
    H --> I["Await Prediction (Poll main_window._pending_predictions via QTimer & QEventLoop)"];
    I -- Prediction Done --> J["Get Actual Rules from main_window.unified_model"];
    J --> K["Convert Actual Rules to Comparable JSON Structure"];
    K --> L{"Compare Actual vs Expected Rules (Option 1 Logic)"};
    L -- Match --> M["Start Processing (Emit main_window.start_backend_processing with --outputdir)"];
    L -- Mismatch --> ZFAIL["Log Mismatch & Call cleanup_and_exit(False)"];
    M --> N["Await Processing (QEventLoop for App.all_tasks_finished signal)"];
    N -- Processing Done --> O["Check Output Dir (--outputdir): Exists? Not Empty? Key Asset Folders?"];
    O --> P["Retrieve & Analyze Logs (Search, Tracebacks)"];
    P --> Q["Log Test Success & Call cleanup_and_exit(True)"];
    end

    ZFAIL --> ZEND["AutoTester.cleanup_and_exit() -> QCoreApplication.quit() & sys.exit()"];
    Q --> ZEND;
```
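To make the "Option 1" comparison from section III concrete, the following is a hedged, self-contained sketch of that logic. The function name and traversal details are illustrative; the plan only specifies the behaviour (fail on missing or mismatched expected fields, log extra actual fields):

```python
import logging

logger = logging.getLogger("autotest")

def compare_rules(actual: dict, expected: dict, path: str = "") -> bool:
    """Option 1 logic: every expected field must exist and match; extra actual fields are only logged."""
    ok = True
    for key, expected_value in expected.items():
        location = f"{path}.{key}" if path else key
        if key not in actual:
            logger.error("Missing expected field: %s", location)
            ok = False
        elif isinstance(expected_value, dict) and isinstance(actual[key], dict):
            ok = compare_rules(actual[key], expected_value, location) and ok
        elif isinstance(expected_value, list) and isinstance(actual[key], list):
            if len(actual[key]) != len(expected_value):
                logger.error("Length mismatch at %s: %d != %d", location, len(actual[key]), len(expected_value))
                ok = False
            else:
                for i, (a_item, e_item) in enumerate(zip(actual[key], expected_value)):
                    if isinstance(e_item, dict):
                        ok = compare_rules(a_item, e_item, f"{location}[{i}]") and ok
                    elif a_item != e_item:
                        logger.error("Mismatch at %s[%d]: %r != %r", location, i, a_item, e_item)
                        ok = False
        elif actual[key] != expected_value:
            logger.error("Mismatch at %s: %r != %r", location, actual[key], expected_value)
            ok = False
    for key in actual.keys() - expected.keys():
        extra = f"{path}.{key}" if path else key
        logger.info("Field present in actual but not in expected (ignored): %s", extra)
    return ok
```

Applied to the `expected_rules.json` structure in section IV, this would recurse through `source_rules`, `assets`, and `files` and report the path of the first differing field.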
@@ -12,9 +12,9 @@ This documentation strictly excludes details on environment setup, dependency in

## Architecture and Codebase Summary

- For developers interested in contributing, the tool's architecture centers on a **Core Processing Engine** (`processing_engine.py`) executing a pipeline based on a **Hierarchical Rule System** (`rule_structure.py`) and a **Configuration System** (`configuration.py` loading `config/app_settings.json` and `Presets/*.json`). The **Graphical User Interface** (`gui/`) has been significantly refactored: `MainWindow` (`main_window.py`) acts as a coordinator, delegating tasks to specialized widgets (`MainPanelWidget`, `PresetEditorWidget`, `LogConsoleWidget`) and background handlers (`RuleBasedPredictionHandler`, `LLMPredictionHandler`, `LLMInteractionHandler`, `AssetRestructureHandler`). The **Directory Monitor** (`monitor.py`) now processes archives asynchronously using a thread pool and utility functions (`utils/prediction_utils.py`, `utils/workspace_utils.py`). The **Command-Line Interface** entry point (`main.py`) primarily launches the GUI, with core CLI functionality currently non-operational. Optional **Blender Integration** (`blenderscripts/`) remains. A new `utils/` directory houses shared helper functions.
+ For developers interested in contributing, the tool's architecture centers on a **Core Processing Engine** (`processing_engine.py`) which initializes and runs a **Pipeline Orchestrator** (`processing/pipeline/orchestrator.py::PipelineOrchestrator`). This orchestrator executes a defined sequence of **Processing Stages** (located in `processing/pipeline/stages/`) based on a **Hierarchical Rule System** (`rule_structure.py`) and a **Configuration System** (`configuration.py` loading `config/app_settings.json` and `Presets/*.json`). The **Graphical User Interface** (`gui/`) has been significantly refactored: `MainWindow` (`main_window.py`) acts as a coordinator, delegating tasks to specialized widgets (`MainPanelWidget`, `PresetEditorWidget`, `LogConsoleWidget`) and background handlers (`RuleBasedPredictionHandler`, `LLMPredictionHandler`, `LLMInteractionHandler`, `AssetRestructureHandler`). The **Directory Monitor** (`monitor.py`) now processes archives asynchronously using a thread pool and utility functions (`utils/prediction_utils.py`, `utils/workspace_utils.py`). The **Command-Line Interface** entry point (`main.py`) primarily launches the GUI, with core CLI functionality currently non-operational. Optional **Blender Integration** (`blenderscripts/`) remains. A new `utils/` directory houses shared helper functions.

- The codebase reflects this structure. The `gui/` directory contains the refactored UI components, `utils/` holds shared utilities, `Presets/` contains JSON presets, and `blenderscripts/` holds Blender scripts. Core logic resides in `processing_engine.py`, `configuration.py`, `rule_structure.py`, `monitor.py`, and `main.py`. The processing pipeline, executed by `processing_engine.py`, relies entirely on the input `SourceRule` and static configuration for steps like map processing, channel merging, and metadata generation.
+ The codebase reflects this structure. The `gui/` directory contains the refactored UI components, `utils/` holds shared utilities, `processing/pipeline/` contains the orchestrator and individual processing stages, `Presets/` contains JSON presets, and `blenderscripts/` holds Blender scripts. Core logic resides in `processing_engine.py`, `processing/pipeline/orchestrator.py`, `configuration.py`, `rule_structure.py`, `monitor.py`, and `main.py`. The processing pipeline, initiated by `processing_engine.py` and executed by the `PipelineOrchestrator`, relies entirely on the input `SourceRule` and static configuration. Each stage in the pipeline operates on an `AssetProcessingContext` object (`processing/pipeline/asset_context.py`) to perform specific tasks like map processing, channel merging, and metadata generation.

## Table of Contents

@@ -16,6 +16,7 @@ This document outlines the key features of the Asset Processor Tool.
* Saves maps in appropriate formats (JPG, PNG, EXR) based on complex rules involving map type (`FORCE_LOSSLESS_MAP_TYPES`), resolution (`RESOLUTION_THRESHOLD_FOR_JPG`), bit depth, and source format.
* Calculates basic image statistics (Min/Max/Mean) for a reference resolution.
* Calculates and stores the relative aspect ratio change string in metadata (e.g., `EVEN`, `X150`, `Y125`).
+ * **Low-Resolution Fallback:** If enabled (`ENABLE_LOW_RESOLUTION_FALLBACK`), automatically saves an additional "LOWRES" variant of source images if their largest dimension is below a configurable threshold (`LOW_RESOLUTION_THRESHOLD`). This "LOWRES" variant uses the original image dimensions and is saved in addition to any standard resolution outputs.
* **Channel Merging:** Combines channels from different maps into packed textures (e.g., NRMRGH) based on preset rules (`MAP_MERGE_RULES` in `config.py`).
* **Metadata Generation:** Creates a `metadata.json` file for each asset containing details about maps, category, archetype, aspect ratio change, processing settings, etc.
* **Output Organization:** Creates a clean, structured output directory (`<output_base>/<supplier>/<asset_name>/`).
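As a rough illustration of the format rule in the first bullet above, a hedged sketch follows. Only `FORCE_LOSSLESS_MAP_TYPES` and `RESOLUTION_THRESHOLD_FOR_JPG` are taken from this page; the precedence shown, and the assumption that large 8-bit maps fall back to JPG, are illustrative rather than the tool's actual rule set:

```python
def choose_output_format(map_type: str, width: int, height: int, bit_depth: int,
                         force_lossless_map_types: set,
                         resolution_threshold_for_jpg: int) -> str:
    """Hypothetical sketch of a JPG/PNG/EXR decision based on map type, resolution and bit depth."""
    if bit_depth > 8:
        # High-bit-depth data (e.g. 16/32-bit displacement) needs a lossless, high-precision container.
        return "EXR"
    if map_type in force_lossless_map_types:
        # Some map types must never be saved lossy.
        return "PNG"
    if max(width, height) >= resolution_threshold_for_jpg:
        # Assumed behaviour: large 8-bit maps drop to JPG to keep library size manageable.
        return "JPG"
    return "PNG"
```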
@@ -13,9 +13,21 @@ The `app_settings.json` file is structured into several key sections, including:
* `ASSET_TYPE_DEFINITIONS`: Defines known asset types (like Surface, Model, Decal) and their properties.
* `MAP_MERGE_RULES`: Defines how multiple input maps can be merged into a single output map (e.g., combining Normal and Roughness into one).

+ ### Low-Resolution Fallback Settings
+
+ These settings control the generation of low-resolution "fallback" variants for source images:
+
+ * `ENABLE_LOW_RESOLUTION_FALLBACK` (boolean, default: `true`):
+   * If `true`, the tool will generate an additional "LOWRES" variant for source images whose largest dimension is smaller than the `LOW_RESOLUTION_THRESHOLD`.
+   * This "LOWRES" variant uses the original dimensions of the source image and is saved in addition to any other standard resolution outputs (e.g., 1K, PREVIEW).
+   * If `false`, this feature is disabled.
+ * `LOW_RESOLUTION_THRESHOLD` (integer, default: `512`):
+   * Defines the pixel dimension (for the largest side of an image) below which the "LOWRES" fallback variant will be generated (if enabled).
+   * For example, if set to `512`, any source image smaller than 512x512 (e.g., 256x512, 128x128) will have a "LOWRES" variant created.

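A minimal sketch of how these two settings interact, assuming the check is made against a source image's largest dimension (the helper name is hypothetical):

```python
def should_save_lowres_variant(width: int, height: int, settings: dict) -> bool:
    """Return True when an extra 'LOWRES' variant (at original dimensions) should be written."""
    if not settings.get("ENABLE_LOW_RESOLUTION_FALLBACK", True):
        return False
    threshold = settings.get("LOW_RESOLUTION_THRESHOLD", 512)
    # With the default of 512, a 256x512 or 128x128 source qualifies; a 1024x512 source does not.
    return max(width, height) < threshold
```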
### LLM Predictor Settings

- For users who wish to utilize the experimental LLM Predictor feature, the following settings are available in `config/app_settings.json`:
+ For users who wish to utilize the experimental LLM Predictor feature, the following settings are available in `config/llm_settings.json`:

* `llm_endpoint_url`: The URL of the LLM API endpoint. For local LLMs like LM Studio or Ollama, this will typically be `http://localhost:<port>/v1`. Consult your LLM server documentation for the exact endpoint.
* `llm_api_key`: The API key required to access the LLM endpoint. Some local LLM servers may not require a key, in which case this can be left empty.
@@ -23,15 +35,39 @@ For users who wish to utilize the experimental LLM Predictor feature, the follow
* `llm_temperature`: Controls the randomness of the LLM's output. Lower values (e.g., 0.1-0.5) make the output more deterministic and focused, while higher values (e.g., 0.6-1.0) make it more creative and varied. For prediction tasks, lower temperatures are generally recommended.
* `llm_request_timeout`: The maximum time (in seconds) to wait for a response from the LLM API. Adjust this based on the performance of your LLM server and the complexity of the requests.

- Note that the `llm_predictor_prompt` and `llm_predictor_examples` settings are also present in `app_settings.json`. These define the instructions and examples provided to the LLM for prediction. While they can be viewed here, they are primarily intended for developer reference and tuning the LLM's behavior, and most users will not need to modify them.
+ Note that the `llm_predictor_prompt` and `llm_predictor_examples` settings are also present in `config/llm_settings.json`. These define the instructions and examples provided to the LLM for prediction. While they can be viewed here, they are primarily intended for developer reference and tuning the LLM's behavior, and most users will not need to modify them directly via the file. These settings are editable via the LLM Editor panel in the main GUI when the LLM interpretation mode is selected.

- ## GUI Configuration Editor
+ ## Application Preferences (`config/app_settings.json` overrides)

- You can modify the `app_settings.json` file using the built-in GUI editor. Access it via the **Edit** -> **Preferences...** menu.
+ You can modify user-overridable application settings using the built-in GUI editor. These settings are loaded from `config/app_settings.json` and saved as overrides in `config/user_settings.json`. Access it via the **Edit** -> **Preferences...** menu.

- This editor provides a tabbed interface (e.g., "General", "Output & Naming") to view and change the core application settings defined in `app_settings.json`. Settings in the editor directly correspond to the structure and values within the JSON file. Note that any changes made through the GUI editor require an application restart to take effect.
+ This editor provides a tabbed interface to view and change various application behaviors. The tabs include:
+ * **General:** Basic settings like output base directory and temporary file prefix.
+ * **Output & Naming:** Settings controlling output directory and filename patterns, and how variants are handled.
+ * **Image Processing:** Settings related to image resolution definitions, compression levels, and format choices.
+ * **Map Merging:** Configuration for how multiple input maps are combined into single output maps.
+ * **Postprocess Scripts:** Paths to default Blender files for post-processing.

- *(Ideally, a screenshot of the GUI Configuration Editor would be included here.)*
+ Note that this editor focuses on user-specific overrides of core application settings. **Asset Type Definitions, File Type Definitions, and Supplier Settings are managed in a separate Definitions Editor.**

+ Any changes made through the Preferences editor require an application restart to take effect.

+ *(Ideally, a screenshot of the Application Preferences editor would be included here.)*

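The override mechanism described above can be pictured roughly as follows. The file names come from this page, but the shallow-merge behaviour is an assumption for illustration rather than a description of the actual `Configuration` code:

```python
import json
from pathlib import Path

def load_effective_settings(config_dir: Path = Path("config")) -> dict:
    """Load app_settings.json, then apply user_settings.json on top as per-key overrides."""
    settings = json.loads((config_dir / "app_settings.json").read_text(encoding="utf-8"))
    user_file = config_dir / "user_settings.json"
    if user_file.exists():
        # Keys present in user_settings.json win over the shipped defaults.
        settings.update(json.loads(user_file.read_text(encoding="utf-8")))
    return settings
```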
+ ## Definitions Editor (`config/asset_type_definitions.json`, `config/file_type_definitions.json`, `config/suppliers.json`)

+ Core application definitions that are separate from general user preferences are managed in the dedicated Definitions Editor. This includes defining known asset types, file types, and configuring settings specific to different suppliers. Access it via the **Edit** -> **Edit Definitions...** menu.

+ The editor is organized into three tabs:
+ * **Asset Type Definitions:** Define the different categories of assets (e.g., Surface, Model, Decal). For each asset type, you can configure its description, a color for UI representation, and example usage strings.
+ * **File Type Definitions:** Define the specific types of files the tool recognizes (e.g., MAP_COL, MAP_NRM, MODEL). For each file type, you can configure its description, a color, example keywords/patterns, a standard type alias, bit depth handling rules, whether it's grayscale, and an optional keybind for quick assignment in the GUI.
+ * **Supplier Settings:** Configure settings that are specific to assets originating from different suppliers. Currently, this includes the "Normal Map Type" (OpenGL or DirectX) used for normal maps from that supplier.

+ Each tab presents a list of the defined items on the left (Asset Types, File Types, or Suppliers). Selecting an item in the list displays its configurable details on the right. Buttons are provided to add new definitions or remove existing ones.

+ Changes made in the Definitions Editor are saved directly to their respective configuration files (`config/asset_type_definitions.json`, `config/file_type_definitions.json`, and `config/suppliers.json`). Some changes may require an application restart to take full effect in processing logic.

+ *(Ideally, screenshots of the Definitions Editor tabs would be included here.)*

## Preset Files (`presets/*.json`)

@@ -12,7 +12,10 @@ python -m gui.main_window

## Interface Overview

- * **Menu Bar:** The "Edit" menu contains the "Preferences..." option to open the GUI Configuration Editor. The "View" menu allows you to toggle the visibility of the Log Console and the Detailed File Preview.
+ * **Menu Bar:** The "Edit" menu contains options to configure application settings and definitions:
+   * **Preferences...:** Opens the Application Preferences editor for user-overridable settings (saved to `config/user_settings.json`).
+   * **Edit Definitions...:** Opens the Definitions Editor for managing Asset Type Definitions, File Type Definitions, and Supplier Settings (saved to their respective files).
+   The "View" menu allows you to toggle the visibility of the Log Console and the Detailed File Preview.
* **Preset Editor Panel (Left):**
  * **Optional Log Console:** Displays application logs (toggle via View menu).
  * **Preset List:** Create, delete, load, edit, and save presets. On startup, the "-- Select a Preset --" item is explicitly selected. You must select a specific preset from this list to load it into the editor below, enable the detailed file preview, and enable the "Start Processing" button.
@@ -2,7 +2,7 @@

This document describes the directory structure and contents of the processed assets generated by the Asset Processor Tool.

- Processed assets are saved to a location determined by two global settings defined in `config/app_settings.json`:
+ Processed assets are saved to a location determined by two global settings, `OUTPUT_DIRECTORY_PATTERN` and `OUTPUT_FILENAME_PATTERN`, defined in `config/app_settings.json`. These settings can be overridden by the user via `config/user_settings.json`.

* `OUTPUT_DIRECTORY_PATTERN`: Defines the directory structure *within* the Base Output Directory.
* `OUTPUT_FILENAME_PATTERN`: Defines the naming convention for individual files *within* the directory created by `OUTPUT_DIRECTORY_PATTERN`.
@@ -23,7 +23,7 @@ The following tokens can be used in both `OUTPUT_DIRECTORY_PATTERN` and `OUTPUT_
* `[Time]`: Current time (`HHMMSS`).
* `[Sha5]`: The first 5 characters of the SHA-256 hash of the original input source file (e.g., the source zip archive).
* `[ApplicationPath]`: Absolute path to the application directory.
- * `[maptype]`: The standardized map type identifier (e.g., `COL` for Color/Albedo, `NRM` for Normal, `RGH` for Roughness). This is derived from the `standard_type` defined in the application's `FILE_TYPE_DEFINITIONS` (see `config/app_settings.json`) and may include a variant suffix if applicable. (Primarily for filename pattern)
+ * `[maptype]`: The standardized map type identifier (e.g., `COL` for Color/Albedo, `NRM` for Normal, `RGH` for Roughness). This is derived from the `standard_type` defined in the application's `FILE_TYPE_DEFINITIONS` (managed in `config/file_type_definitions.json` via the Definitions Editor) and may include a variant suffix if applicable. (Primarily for filename pattern)
* `[dimensions]`: Pixel dimensions (e.g., `2048x2048`).
* `[bitdepth]`: Output bit depth (e.g., `8bit`, `16bit`).
* `[category]`: Asset category determined by preset rules.
@@ -51,13 +51,14 @@ The final output path is constructed by combining the Base Output Directory (set
* `OUTPUT_FILENAME_PATTERN`: `[maptype].[ext]`
* Resulting Path for a Normal map: `Output/Texture/Wood/WoodFloor001/Normal.exr`

- The `<output_base_directory>` (the root folder where processing output starts) is configured separately via the GUI (**Edit** -> **Preferences...** -> **Output & Naming** tab -> **Base Output Directory**) or the `--output` CLI argument. The `OUTPUT_DIRECTORY_PATTERN` defines the structure *within* this base directory, and `OUTPUT_FILENAME_PATTERN` defines the filenames within that structure.
+ The `<output_base_directory>` (the root folder where processing output starts) is configured separately via the GUI (**Edit** -> **Preferences...** -> **General** tab -> **Output Base Directory**) or the `--output` CLI argument. The `OUTPUT_DIRECTORY_PATTERN` defines the structure *within* this base directory, and `OUTPUT_FILENAME_PATTERN` defines the filenames within that structure.
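A hedged sketch of how such `[token]` patterns can be expanded; the token names are the documented ones, while the helper itself is illustrative and not the tool's implementation:

```python
def expand_pattern(pattern: str, tokens: dict) -> str:
    """Replace [token] placeholders such as [maptype] or [dimensions] with their values."""
    result = pattern
    for name, value in tokens.items():
        result = result.replace(f"[{name}]", str(value))
    return result

# Example with the filename pattern shown above:
# expand_pattern("[maptype].[ext]", {"maptype": "NRM", "ext": "exr"}) -> "NRM.exr"
```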

## Contents of Each Asset Directory

Each asset directory contains the following:

* Processed texture maps (e.g., `WoodFloor_Albedo_4k.png`, `MetalPanel_Normal_2k.exr`). The exact filenames depend on the `OUTPUT_FILENAME_PATTERN`. These are the resized, format-converted, and bit-depth adjusted texture files.
+ * **LOWRES Variants:** If the "Low-Resolution Fallback" feature is enabled and a source image's dimensions are below the configured threshold, an additional variant with "LOWRES" as its resolution token (e.g., `MyTexture_COL_LOWRES.png`) will be saved. This variant uses the original dimensions of the source image.
* Merged texture maps (e.g., `WoodFloor_Combined_4k.png`). The exact filenames depend on the `OUTPUT_FILENAME_PATTERN`. These are maps created by combining channels from different source maps based on the configured merge rules.
* Model files (if present in the source asset).
* `metadata.json`: A JSON file containing detailed information about the asset and the processing that was performed. This includes details about the maps (resolutions, formats, bit depths, and for roughness maps, a `derived_from_gloss_filename: true` flag if it was inverted from an original gloss map), merged map details, calculated image statistics, aspect ratio change information, asset category and archetype, the source preset used, and a list of ignored source files. This file is intended for use by downstream tools or scripts (like the Blender integration scripts).
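For downstream scripts, reading `metadata.json` might look roughly like the sketch below. Only the `derived_from_gloss_filename` key is taken from this page; the surrounding structure (a `maps` list) is assumed for illustration:

```python
import json
from pathlib import Path

def load_asset_metadata(asset_dir: Path) -> dict:
    """Read the per-asset metadata.json written next to the processed maps."""
    return json.loads((asset_dir / "metadata.json").read_text(encoding="utf-8"))

# Hypothetical usage: list roughness maps that were inverted from gloss sources.
# for map_info in load_asset_metadata(Path("Output/Supplier/Asset001")).get("maps", []):
#     if map_info.get("derived_from_gloss_filename"):
#         print("inverted from gloss:", map_info)
```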
**Documentation/01_User_Guide/11_Usage_Autotest.md** (new file): 83 lines
@@ -0,0 +1,83 @@
# User Guide: Usage - Automated GUI Testing (`autotest.py`)

This document explains how to use the `autotest.py` script for automated sanity checks of the Asset Processor Tool's GUI-driven workflow.

## Overview

The `autotest.py` script provides a way to run predefined test scenarios headlessly (without displaying the GUI). It simulates the core user actions: loading an asset, selecting a preset, allowing rules to be predicted, processing the asset, and then checks the results against expectations. This is primarily intended as a developer tool for regression testing and ensuring core functionality remains stable.

## Running the Autotest Script

From the project root directory, you can run the script using Python:

```bash
python autotest.py [OPTIONS]
```

### Command-Line Options

The script accepts several command-line arguments to configure the test run. If not provided, they use predefined default values.

* `--zipfile PATH_TO_ZIP`:
  * Specifies the path to the input asset `.zip` file to be used for the test.
  * Default: `TestFiles/BoucleChunky001.zip`
* `--preset PRESET_NAME`:
  * Specifies the name of the preset to be selected and used for rule prediction and processing.
  * Default: `Dinesen`
* `--expectedrules PATH_TO_JSON`:
  * Specifies the path to a JSON file containing the expected rule structure that should be generated after the preset is applied to the input asset.
  * Default: `TestFiles/test-BoucleChunky001.json`
* `--outputdir PATH_TO_DIR`:
  * Specifies the directory where the processed assets will be written.
  * Default: `TestFiles/TestOutputs/DefaultTestOutput`
* `--search "SEARCH_TERM"` (optional):
  * A string to search for within the application logs generated during the test run. If found, matching log lines (with context) will be highlighted.
  * Default: None
* `--additional-lines NUM_LINES` (optional):
  * When using `--search`, this specifies how many lines of context before and after each matching log line should be displayed.
  * Default: `0`

**Example Usage:**

```bash
# Run with default test files and settings
python autotest.py

# Run with specific test files and search for a log message
python autotest.py --zipfile TestFiles/MySpecificAsset.zip --preset MyPreset --expectedrules TestFiles/MySpecificAsset_rules.json --outputdir TestFiles/TestOutputs/MySpecificOutput --search "Processing complete for asset"
```

## `TestFiles` Directory

The autotest script relies on a directory named `TestFiles` located in the project root. This directory should contain:

* **Test Asset `.zip` files:** The actual asset archives used as input for tests (e.g., `default_test_asset.zip`, `MySpecificAsset.zip`).
* **Expected Rules `.json` files:** JSON files defining the expected rule structure for a given asset and preset combination (e.g., `default_test_asset_rules.json`, `MySpecificAsset_rules.json`). The structure of this file is detailed in the main autotest plan (`AUTOTEST_GUI_PLAN.md`).
* **`TestOutputs/` subdirectory:** This is the default parent directory where the autotest script will create specific output folders for each test run (e.g., `TestFiles/TestOutputs/DefaultTestOutput/`).

## Test Workflow

When executed, `autotest.py` performs the following steps:

1. **Initialization:** Parses command-line arguments and initializes the main application components headlessly.
2. **Load Expected Rules:** Loads the `expected_rules.json` file.
3. **Load Asset:** Loads the specified `.zip` file into the application.
4. **Select Preset:** Selects the specified preset. This triggers the internal rule prediction process.
5. **Await Prediction:** Waits for the rule prediction to complete.
6. **Compare Rules:** Retrieves the predicted rules from the application and compares them against the loaded expected rules. If there's a mismatch, the test typically fails at this point.
7. **Start Processing:** If the rules match, it initiates the asset processing pipeline, directing output to the specified output directory.
8. **Await Processing:** Waits for all backend processing tasks to complete.
9. **Check Output:** Verifies the existence of the output directory and lists its contents. Basic checks ensure some output was generated.
10. **Analyze Logs:** Retrieves logs from the application. If a search term was provided, it filters and displays relevant log portions. It also checks for Python tracebacks, which usually indicate a failure.
11. **Report Result:** Prints a summary of the test outcome (success or failure) and exits with an appropriate status code (0 for success, 1 for failure).

## Interpreting Results

* **Console Output:** The script will log its progress and the results of each step to the console.
* **Log Analysis:** Pay attention to the log output, especially if a `--search` term was used or if any tracebacks are reported.
* **Exit Code:**
  * `0`: Test completed successfully.
  * `1`: Test failed at some point (e.g., rule mismatch, processing error, traceback found).
* **Output Directory:** Inspect the contents of the specified output directory to manually verify the processed assets if needed.

This automated test helps ensure the stability of the core processing logic when driven by GUI-equivalent actions.
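Because the script reports success purely through its exit code, it can be wrapped from CI or from another Python script. The wrapper below is only a hedged illustration and is not part of the repository:

```python
import subprocess
import sys

def run_autotest() -> bool:
    """Run autotest.py with its defaults and interpret the documented exit codes (0 ok, 1 failed)."""
    result = subprocess.run([sys.executable, "autotest.py"], capture_output=True, text=True)
    if result.returncode != 0:
        # Show the tail of the output so rule mismatches or tracebacks are visible in CI logs.
        print("Autotest failed:\n", result.stdout[-2000:], result.stderr[-2000:])
        return False
    return True

if __name__ == "__main__":
    sys.exit(0 if run_autotest() else 1)
```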
@@ -6,17 +6,19 @@ This document provides a high-level overview of the Asset Processor Tool's archi

The Asset Processor Tool is designed to process 3D asset source files into a standardized library format. Its high-level architecture consists of:

- 1. **Core Processing Engine (`processing_engine.py`):** The primary component responsible for executing the asset processing pipeline for a single input asset based on a provided `SourceRule` object and static configuration. The previous `asset_processor.py` has been removed.
- 2. **Prediction System:** Responsible for analyzing input files and generating the initial `SourceRule` hierarchy with predicted values. This system utilizes a base handler (`gui/base_prediction_handler.py::BasePredictionHandler`) with specific implementations:
+ 1. **Core Processing Initiation (`processing_engine.py`):** The `ProcessingEngine` class acts as the entry point for an asset processing task. It initializes and runs a `PipelineOrchestrator`.
+ 2. **Pipeline Orchestration (`processing/pipeline/orchestrator.py`):** The `PipelineOrchestrator` manages a sequence of discrete processing stages. It creates an `AssetProcessingContext` for each asset and passes this context through each stage.
+ 3. **Processing Stages (`processing/pipeline/stages/`):** Individual modules, each responsible for a specific task in the pipeline (e.g., filtering files, processing maps, merging channels, organizing output). They operate on the `AssetProcessingContext`.
+ 4. **Prediction System:** Responsible for analyzing input files and generating the initial `SourceRule` hierarchy with predicted values. This system utilizes a base handler (`gui/base_prediction_handler.py::BasePredictionHandler`) with specific implementations:
   * **Rule-Based Predictor (`gui/prediction_handler.py::RuleBasedPredictionHandler`):** Uses predefined rules from presets to classify files and determine initial processing parameters.
   * **LLM Predictor (`gui/llm_prediction_handler.py::LLMPredictionHandler`):** An experimental alternative that uses a Large Language Model (LLM) to interpret file contents and context to predict processing parameters.
- 3. **Configuration System (`Configuration`):** Handles loading core settings (including centralized type definitions and LLM-specific configuration) and merging them with supplier-specific rules defined in JSON presets and the persistent `config/suppliers.json` file.
- 4. **Multiple Interfaces:** Provides different ways to interact with the tool:
+ 5. **Configuration System (`Configuration`):** Handles loading core settings (including centralized type definitions and LLM-specific configuration) and merging them with supplier-specific rules defined in JSON presets and the persistent `config/suppliers.json` file.
+ 6. **Multiple Interfaces:** Provides different ways to interact with the tool:
   * Graphical User Interface (GUI)
   * Command-Line Interface (CLI) - *Note: The primary CLI execution logic (`run_cli` in `main.py`) is currently non-functional/commented out post-refactoring.*
   * Directory Monitor for automated processing.
- The GUI acts as the primary source of truth for processing rules, coordinating the generation and management of the `SourceRule` hierarchy before sending it to the processing engine. It accumulates prediction results from multiple input sources before updating the view. The Monitor interface can also generate `SourceRule` objects (using `utils/prediction_utils.py`) to bypass the GUI for automated workflows.
+ The GUI acts as the primary source of truth for processing rules, coordinating the generation and management of the `SourceRule` hierarchy before sending it to the `ProcessingEngine`. It accumulates prediction results from multiple input sources before updating the view. The Monitor interface can also generate `SourceRule` objects (using `utils/prediction_utils.py`) to bypass the GUI for automated workflows.
- 5. **Optional Integration:** Includes scripts (`blenderscripts/`) for integrating with Blender. Logic for executing these scripts was intended to be centralized in `utils/blender_utils.py`, but this utility has not yet been implemented.
+ 7. **Optional Integration:** Includes scripts (`blenderscripts/`) for integrating with Blender. Logic for executing these scripts was intended to be centralized in `utils/blender_utils.py`, but this utility has not yet been implemented.

## Hierarchical Rule System

@@ -26,14 +28,14 @@ A key addition to the architecture is the **Hierarchical Rule System**, which pr
* **AssetRule:** Represents rules applied to a specific asset within a source (a source can contain multiple assets).
* **FileRule:** Represents rules applied to individual files within an asset.

- This hierarchy allows for fine-grained control over processing parameters. The GUI's prediction logic generates this hierarchy with initial predicted values for overridable fields based on presets and file analysis. The processing engine then operates *solely* on the explicit values provided in this `SourceRule` object and static configuration, without internal prediction or fallback logic.
+ This hierarchy allows for fine-grained control over processing parameters. The GUI's prediction logic generates this hierarchy with initial predicted values for overridable fields based on presets and file analysis. The `ProcessingEngine` (via the `PipelineOrchestrator` and its stages) then operates *solely* on the explicit values provided in this `SourceRule` object and static configuration, without internal prediction or fallback logic.

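A rough sketch of the rule hierarchy as dataclasses, using only field names that appear elsewhere on this page (in the expected-rules JSON structure); the real definitions live in `rule_structure.py` and almost certainly carry more fields:

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class FileRule:
    file_path: str
    item_type: str                                   # e.g. "MAP_COL"
    target_asset_name_override: Optional[str] = None

@dataclass
class AssetRule:
    asset_name: str
    asset_type: str                                  # e.g. "Prop" or "Surface"
    files: List[FileRule] = field(default_factory=list)

@dataclass
class SourceRule:
    input_path: str
    supplier_identifier: str
    preset_name: str
    assets: List[AssetRule] = field(default_factory=list)
```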
## Core Components

* `config/app_settings.json`: Defines core, global settings, constants, and centralized definitions for allowed asset and file types (`ASSET_TYPE_DEFINITIONS`, `FILE_TYPE_DEFINITIONS`), including metadata like colors and descriptions. This replaces the old `config.py` file.
* `config/suppliers.json`: A persistent JSON file storing known supplier names for GUI auto-completion.
* `Presets/*.json`: Supplier-specific JSON files defining rules for file interpretation and initial prediction.
- * `configuration.py` (`Configuration` class): Loads `config/app_settings.json` settings and merges them with a selected preset, pre-compiling regex patterns for efficiency. This static configuration is used by the processing engine.
+ * `configuration.py` (`Configuration` class): Loads `config/app_settings.json` settings and merges them with a selected preset, pre-compiling regex patterns for efficiency. This static configuration is used by the processing pipeline.
* `rule_structure.py`: Defines the `SourceRule`, `AssetRule`, and `FileRule` dataclasses used to represent the hierarchical processing rules.
* `gui/`: Directory containing modules for the Graphical User Interface (GUI), built with PySide6. The `MainWindow` (`main_window.py`) acts as a coordinator, orchestrating interactions between various components. Key GUI components include:
  * `main_panel_widget.py::MainPanelWidget`: Contains the primary controls for loading sources, selecting presets, viewing/editing rules, and initiating processing.
|
|||||||
* `prediction_handler.py::RuleBasedPredictionHandler`: Generates the initial `SourceRule` hierarchy based on presets and file analysis. Inherits from `BasePredictionHandler`.
|
* `prediction_handler.py::RuleBasedPredictionHandler`: Generates the initial `SourceRule` hierarchy based on presets and file analysis. Inherits from `BasePredictionHandler`.
|
||||||
* `llm_prediction_handler.py::LLMPredictionHandler`: Experimental predictor using an LLM. Inherits from `BasePredictionHandler`.
|
* `llm_prediction_handler.py::LLMPredictionHandler`: Experimental predictor using an LLM. Inherits from `BasePredictionHandler`.
|
||||||
* `llm_interaction_handler.py::LLMInteractionHandler`: Manages communication with the LLM service for the LLM predictor.
|
* `llm_interaction_handler.py::LLMInteractionHandler`: Manages communication with the LLM service for the LLM predictor.
|
||||||
* `processing_engine.py` (`ProcessingEngine` class): The core component that executes the processing pipeline for a single `SourceRule` object using the static `Configuration`. A new instance is created per task for state isolation.
|
* `processing_engine.py` (`ProcessingEngine` class): The entry-point class that initializes and runs the `PipelineOrchestrator` for a given `SourceRule` and `Configuration`.
|
||||||
|
* `processing/pipeline/orchestrator.py` (`PipelineOrchestrator` class): Manages the sequence of processing stages, creating and passing an `AssetProcessingContext` through them.
|
||||||
|
* `processing/pipeline/asset_context.py` (`AssetProcessingContext` class): A dataclass holding all data and state for the processing of a single asset, passed between stages.
|
||||||
|
* `processing/pipeline/stages/`: Directory containing individual processing stage modules, each handling a specific part of the pipeline (e.g., `IndividualMapProcessingStage`, `MapMergingStage`).
|
||||||
* `main.py`: The main entry point for the application. Primarily launches the GUI. Contains commented-out/non-functional CLI logic (`run_cli`).
|
* `main.py`: The main entry point for the application. Primarily launches the GUI. Contains commented-out/non-functional CLI logic (`run_cli`).
|
||||||
* `monitor.py`: Implements the directory monitoring feature using `watchdog`. It now processes archives asynchronously using a `ThreadPoolExecutor`, leveraging `utils.prediction_utils.py` for rule generation and `utils.workspace_utils.py` for workspace management before invoking the `ProcessingEngine`.
|
* `monitor.py`: Implements the directory monitoring feature using `watchdog`. It now processes archives asynchronously using a `ThreadPoolExecutor`, leveraging `utils.prediction_utils.py` for rule generation and `utils.workspace_utils.py` for workspace management before invoking the `ProcessingEngine`.
|
||||||
* `blenderscripts/`: Contains Python scripts designed to be executed *within* Blender for post-processing tasks.
|
* `blenderscripts/`: Contains Python scripts designed to be executed *within* Blender for post-processing tasks.
|
||||||
@@ -56,19 +61,21 @@ This hierarchy allows for fine-grained control over processing parameters. The G
|
|||||||
* `prediction_utils.py`: Contains functions like `generate_source_rule_from_archive` used by the monitor for rule-based prediction.
|
* `prediction_utils.py`: Contains functions like `generate_source_rule_from_archive` used by the monitor for rule-based prediction.
|
||||||
* `blender_utils.py`: (Intended location for Blender script execution logic, currently not implemented).
|
* `blender_utils.py`: (Intended location for Blender script execution logic, currently not implemented).
|
||||||
|
|
||||||
## Processing Pipeline (Simplified)
|
## Processing Pipeline (Simplified Overview)
|
||||||
|
|
||||||
The primary processing engine (`processing_engine.py`) executes a series of steps for each asset based on the provided `SourceRule` object and static configuration:
|
The asset processing pipeline, initiated by `processing_engine.py` and managed by `PipelineOrchestrator`, executes a series of stages for each asset defined in the `SourceRule`. An `AssetProcessingContext` object carries data between stages. The typical sequence is:
|
||||||
|
|
||||||
1. Extraction of input to a temporary workspace (using `utils.workspace_utils.py`).
|
1. **Supplier Determination**: Identify the effective supplier.
|
||||||
2. Classification of files (map, model, extra, ignored, unrecognised) based *only* on the provided `SourceRule` object (classification/prediction happens *before* the engine is called).
|
2. **Asset Skip Logic**: Check if the asset should be skipped.
|
||||||
3. Determination of base metadata (asset name, category, archetype).
|
3. **Metadata Initialization**: Set up initial asset metadata.
|
||||||
4. Skip check if output exists and overwrite is not forced.
|
4. **File Rule Filtering**: Determine which files to process.
|
||||||
5. Processing of maps (resize, format/bit depth conversion, inversion, stats calculation).
|
5. **Pre-Map Processing**:
|
||||||
6. Merging of channels based on rules.
|
* Gloss-to-Roughness Conversion.
|
||||||
7. Generation of `metadata.json` file.
|
* Alpha Channel Extraction.
|
||||||
8. Organization of processed files into the final output structure.
|
* Normal Map Green Channel Inversion.
|
||||||
9. Cleanup of the temporary workspace.
|
6. **Individual Map Processing**: Handle individual maps (scaling, variants, stats, naming).
|
||||||
10. (Optional) Execution of Blender scripts (currently triggered directly, intended to use `utils.blender_utils.py`).
|
7. **Map Merging**: Combine channels from different maps.
|
||||||
|
8. **Metadata Finalization & Save**: Generate and save `metadata.json` (temporarily).
|
||||||
|
9. **Output Organization**: Copy all processed files to final output locations.
|
||||||
|
|
||||||
This architecture allows for a modular design, separating configuration, rule generation/management (GUI, Monitor utilities), and core processing execution. The `SourceRule` object serves as a clear data contract between the rule generation layer and the processing engine. Parallel processing (in Monitor) and background threads (in GUI) are utilized for efficiency and responsiveness.
|
External steps like workspace preparation/cleanup and optional Blender script execution bracket this core pipeline. This architecture allows for a modular design, separating configuration, rule generation/management, and core processing execution.
|
||||||
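To make the staged design concrete, here is a heavily simplified, hypothetical sketch of an orchestrator passing a context through stages; class and method names are illustrative, and the real implementations live in `processing/pipeline/`:

```python
from dataclasses import dataclass, field
from typing import List, Optional, Protocol

@dataclass
class AssetProcessingContext:
    # Illustrative subset of the state the real context carries between stages.
    asset_rule: object
    configuration: object
    workspace_path: str
    output_base_path: str
    effective_supplier: Optional[str] = None
    asset_metadata: dict = field(default_factory=dict)
    processed_maps_details: dict = field(default_factory=dict)
    skipped: bool = False

class ProcessingStage(Protocol):
    def run(self, context: AssetProcessingContext) -> None: ...

class PipelineOrchestrator:
    def __init__(self, stages: List[ProcessingStage]):
        self.stages = stages

    def process_asset(self, context: AssetProcessingContext) -> str:
        """Run each stage in order; a stage may mark the asset as skipped or raise to fail it."""
        for stage in self.stages:
            try:
                stage.run(context)
            except Exception as exc:
                # Record the failure and stop processing this asset.
                context.asset_metadata["error"] = str(exc)
                return "failed"
            if context.skipped:
                return "skipped"
        return "processed"
```

The point of the sketch is the data flow: stages never talk to each other directly, they only read and mutate the shared context, which is what lets the pipeline be reordered or extended without touching the engine entry point.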
@@ -2,17 +2,65 @@

This document describes the major classes and modules that form the core of the Asset Processor Tool.

## Core Processing Architecture

The asset processing pipeline has been refactored into a staged architecture, managed by an orchestrator.

### `ProcessingEngine` (`processing_engine.py`)

The `ProcessingEngine` class serves as the primary entry point for initiating an asset processing task. Its main responsibilities are:

* Initializing a `PipelineOrchestrator` instance.
* Providing the `PipelineOrchestrator` with the global `Configuration` object and a predefined list of processing stages.
* Invoking the orchestrator's `process_source_rule()` method with the input `SourceRule`, workspace path, output path, and other processing parameters.
* Managing a top-level temporary directory for the engine's operations if needed, though individual stages might also use sub-temporary directories via the `AssetProcessingContext`.

It no longer contains the detailed logic for each processing step (such as map manipulation or merging) directly; instead, it delegates these tasks to the orchestrator and its stages.

### `PipelineOrchestrator` (`processing/pipeline/orchestrator.py`)

The `PipelineOrchestrator` class is responsible for managing the execution of the asset processing pipeline. Its key functions include (see the sketch after this list):

* Receiving a `SourceRule` object, `Configuration`, and a list of `ProcessingStage` objects.
* For each `AssetRule` within the `SourceRule`:
  * Creating an `AssetProcessingContext` instance.
  * Sequentially executing each registered `ProcessingStage`, passing the `AssetProcessingContext` to each stage.
* Handling exceptions that occur within stages and managing the overall status of asset processing (processed, skipped, failed).
* Managing a temporary directory for the duration of a `SourceRule` processing run, which is made available to stages via the `AssetProcessingContext`.

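To make the orchestration flow concrete, here is a minimal sketch of the loop described above. It is illustrative only: apart from the class names `PipelineOrchestrator`, `ProcessingStage`, and `AssetProcessingContext`, the attribute names (such as `source_rule.assets` and `asset_rule.name`) and the return value are assumptions, not the actual API in `processing/pipeline/orchestrator.py`.

```python
# Minimal sketch of the orchestration loop; attribute names are assumptions.
from dataclasses import dataclass, field
from typing import Any, Dict, List


@dataclass
class AssetProcessingContext:
    asset_rule: Any
    config: Any
    status_flags: Dict[str, Any] = field(default_factory=dict)


class ProcessingStage:
    def execute(self, context: AssetProcessingContext) -> None:
        raise NotImplementedError


class PipelineOrchestrator:
    def __init__(self, config: Any, stages: List[ProcessingStage]) -> None:
        self.config = config
        self.stages = stages

    def process_source_rule(self, source_rule: Any) -> Dict[str, str]:
        results: Dict[str, str] = {}
        for asset_rule in source_rule.assets:            # one context per AssetRule
            context = AssetProcessingContext(asset_rule=asset_rule, config=self.config)
            try:
                for stage in self.stages:                # stages run sequentially
                    stage.execute(context)
                    if context.status_flags.get("skip_asset"):
                        break
            except Exception:                            # a failing stage marks the asset as failed
                context.status_flags["asset_failed"] = True
            if context.status_flags.get("asset_failed"):
                results[asset_rule.name] = "failed"
            elif context.status_flags.get("skip_asset"):
                results[asset_rule.name] = "skipped"
            else:
                results[asset_rule.name] = "processed"
        return results
```
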
### `AssetProcessingContext` (`processing/pipeline/asset_context.py`)

The `AssetProcessingContext` is a dataclass that acts as a stateful container for all data related to the processing of a single `AssetRule`. An instance of this context is created by the `PipelineOrchestrator` for each asset and is passed through each processing stage. Key information it holds includes:

* The input `SourceRule` and the current `AssetRule`.
* Paths: `workspace_path`, `engine_temp_dir`, `output_base_path`.
* The `Configuration` object.
* `effective_supplier`: Determined by an early stage.
* `asset_metadata`: A dictionary to accumulate metadata about the asset.
* `processed_maps_details`: Stores details about individually processed maps (paths, dimensions, etc.).
* `merged_maps_details`: Stores details about merged maps.
* `files_to_process`: A list of `FileRule` objects to be processed for the current asset.
* `loaded_data_cache`: For caching loaded image data within an asset's processing.
* `status_flags`: For signaling conditions like `skip_asset` or `asset_failed`.
* `incrementing_value`, `sha5_value`: Optional values for path generation.

Each stage reads from and writes to this context, allowing data and state to flow through the pipeline. A minimal sketch of the context is shown below.

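In this sketch, the field names follow the list above; the types, defaults, and any additional fields in the real `processing/pipeline/asset_context.py` are assumptions.

```python
# Sketch of the context dataclass; exact types and defaults in the project may differ.
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional


@dataclass
class AssetProcessingContext:
    source_rule: Any                       # the input SourceRule
    asset_rule: Any                        # the AssetRule currently being processed
    workspace_path: Path
    engine_temp_dir: Path
    output_base_path: Path
    config: Any                            # the merged Configuration object
    effective_supplier: Optional[str] = None
    asset_metadata: Dict[str, Any] = field(default_factory=dict)
    processed_maps_details: Dict[str, Any] = field(default_factory=dict)
    merged_maps_details: Dict[str, Any] = field(default_factory=dict)
    files_to_process: List[Any] = field(default_factory=list)    # FileRule objects
    loaded_data_cache: Dict[str, Any] = field(default_factory=dict)
    status_flags: Dict[str, Any] = field(default_factory=dict)   # e.g. skip_asset, asset_failed
    incrementing_value: Optional[int] = None
    sha5_value: Optional[str] = None
```
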
### `Processing Stages` (`processing/pipeline/stages/`)

The actual processing logic is broken down into a series of discrete stages, each inheriting from `ProcessingStage` (`processing/pipeline/stages/base_stage.py`). Each stage implements an `execute(context: AssetProcessingContext)` method; a sketch of this pattern follows the list below. Key stages include (in typical execution order):

* **`SupplierDeterminationStage`**: Determines the effective supplier.
* **`AssetSkipLogicStage`**: Checks if the asset processing should be skipped.
* **`MetadataInitializationStage`**: Initializes basic asset metadata.
* **`FileRuleFilterStage`**: Filters `FileRule`s to decide which files to process.
* **`GlossToRoughConversionStage`**: Handles gloss-to-roughness map inversion.
* **`AlphaExtractionToMaskStage`**: Extracts alpha channels to create masks.
* **`NormalMapGreenChannelStage`**: Inverts normal map green channels if required.
* **`IndividualMapProcessingStage`**: Processes individual maps (POT scaling, resolution variants, color conversion, stats, aspect ratio, filename conventions).
* **`MapMergingStage`**: Merges map channels based on rules.
* **`MetadataFinalizationAndSaveStage`**: Collects all metadata and saves `metadata.json` to a temporary location.
* **`OutputOrganizationStage`**: Copies all processed files and metadata to the final output directory structure.

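The following sketch shows the stage pattern. The base-class name and the `execute(context)` signature come from the description above; the example body is a hypothetical illustration, not the real `AssetSkipLogicStage` logic.

```python
# Stage pattern sketch; the example stage body is hypothetical.
class ProcessingStage:
    """Base class for all pipeline stages (processing/pipeline/stages/base_stage.py)."""

    def execute(self, context) -> None:
        raise NotImplementedError


class ExampleSkipStage(ProcessingStage):
    """Hypothetical stage: skip the asset when its output already exists."""

    def execute(self, context) -> None:
        metadata_file = context.output_base_path / "metadata.json"
        if metadata_file.exists() and not getattr(context.config, "overwrite", False):
            context.status_flags["skip_asset"] = True
            context.status_flags["skip_reason"] = "output exists and overwrite is disabled"
```
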
## `Rule Structure` (`rule_structure.py`)

@@ -22,19 +70,19 @@

This module defines the data structures used to represent the hierarchical processing rules:

* `AssetRule`: A dataclass representing rules applied at the asset level. It contains nested `FileRule` objects.
* `FileRule`: A dataclass representing rules applied at the file level.

These classes hold specific rule parameters (e.g., `supplier_identifier`, `asset_type`, `asset_type_override`, `item_type`, `item_type_override`, `target_asset_name_override`, `resolution_override`, `channel_merge_instructions`). Attributes like `asset_type` and `item_type_override` now use string types, which are validated against centralized lists in `config/app_settings.json`. These structures support serialization (Pickle, JSON) so they can be passed between different parts of the application, including across process boundaries. The `PipelineOrchestrator` and its stages rely heavily on the information within these rule objects, passed via the `AssetProcessingContext`.

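A rough sketch of this hierarchy is shown below; the field selection mirrors the parameters named above, but the actual dataclasses in `rule_structure.py` contain more fields and different defaults.

```python
# Sketch of the SourceRule -> AssetRule -> FileRule hierarchy; illustrative only.
from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class FileRule:
    file_path: str
    item_type: str                               # key from FILE_TYPE_DEFINITIONS, e.g. "MAP_COL"
    item_type_override: Optional[str] = None


@dataclass
class AssetRule:
    asset_name: str
    asset_type: str                              # key from ASSET_TYPE_DEFINITIONS
    asset_type_override: Optional[str] = None
    target_asset_name_override: Optional[str] = None
    resolution_override: Optional[str] = None
    channel_merge_instructions: Dict[str, str] = field(default_factory=dict)
    files: List[FileRule] = field(default_factory=list)


@dataclass
class SourceRule:
    supplier_identifier: str
    assets: List[AssetRule] = field(default_factory=list)
```
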
## `Configuration` (`configuration.py`)

The `Configuration` class manages the tool's settings. It is responsible for:

* Loading the core default settings defined in `config/app_settings.json` (e.g., `FILE_TYPE_DEFINITIONS`, `ASSET_TYPE_DEFINITIONS`, `image_resolutions`, `map_merge_rules`, `output_filename_pattern`).
* Loading the supplier-specific rules from a selected preset JSON file (`Presets/*.json`).
* Merging the core settings and preset rules into a single, unified configuration object.
* Validating the loaded configuration to ensure required settings are present.
* Pre-compiling regular expression patterns defined in the preset for efficient file classification by the prediction handlers.

An instance of the `Configuration` class is typically created once per application run (or per processing batch) and passed to the `ProcessingEngine`, which then makes it available to the `PipelineOrchestrator` and subsequently to each stage via the `AssetProcessingContext`.

## GUI Components (`gui/`)

@@ -191,10 +239,10 @@

The `monitor.py` script implements the directory monitoring feature. It:

* Loads the necessary `Configuration`.
* Calls `utils.prediction_utils.generate_source_rule_from_archive` to get the `SourceRule`.
* Calls `utils.workspace_utils.prepare_processing_workspace` to set up the workspace.
* Instantiates and runs the `ProcessingEngine` (which in turn uses the `PipelineOrchestrator`).
* Handles moving the source archive to 'processed' or 'error' directories.
* Cleans up the workspace.

## Summary

These key components, along with the refactored GUI structure and new utility modules, work together to provide the tool's functionality. The architecture emphasizes separation of concerns (configuration, rule generation, processing, UI), utilizes background processing for responsiveness (GUI prediction, Monitor tasks), and relies on the `SourceRule` object as the central data structure passed between different stages of the workflow. The processing core is now a staged pipeline managed by the `PipelineOrchestrator`, enhancing modularity and maintainability.

@@ -2,43 +2,144 @@
|
|||||||
|
|
||||||
This document provides technical details about the configuration system and the structure of preset files for developers working on the Asset Processor Tool.
|
This document provides technical details about the configuration system and the structure of preset files for developers working on the Asset Processor Tool.
|
||||||
|
|
||||||
## Configuration Flow
|
## Configuration System Overview
|
||||||
|
|
||||||
The tool utilizes a two-tiered configuration system managed by the `configuration.py` module:
|
The tool's configuration is managed by the `configuration.py` module and loaded from several JSON files, providing a layered approach for defaults, user overrides, definitions, and source-specific presets.
|
||||||
|
|
||||||
1. **Application Settings (`config/app_settings.json`):** This JSON file defines the core global default settings, constants, and rules that apply generally across different asset sources (e.g., the global `OUTPUT_DIRECTORY_PATTERN` and `OUTPUT_FILENAME_PATTERN`, standard image resolutions, map merge rules, output format rules, Blender paths, `FILE_TYPE_DEFINITIONS`, `ASSET_TYPE_DEFINITIONS`). See the [User Guide: Output Structure](../01_User_Guide/09_Output_Structure.md#available-tokens) for a list of available tokens for these patterns.
|
### Configuration Files
|
||||||
1. **Application Settings (`config/app_settings.json`):** This JSON file defines the core global default settings, constants, and rules that apply generally across different asset sources (e.g., the global `OUTPUT_DIRECTORY_PATTERN` and `OUTPUT_FILENAME_PATTERN`, standard image resolutions, map merge rules, output format rules, Blender paths, temporary directory prefix, initial scaling mode, merge dimension mismatch strategy). See the [User Guide: Output Structure](../01_User_Guide/09_Output_Structure.md#available-tokens) for a list of available tokens for these patterns.
    * *Note:* `ASSET_TYPE_DEFINITIONS` and `FILE_TYPE_DEFINITIONS` are no longer stored here; they have been moved to dedicated files.
    * It also includes settings for new features like the "Low-Resolution Fallback":
        * `ENABLE_LOW_RESOLUTION_FALLBACK` (boolean): Enables or disables the generation of "LOWRES" variants for small source images. Defaults to `true`.
        * `LOW_RESOLUTION_THRESHOLD` (integer): The pixel dimension threshold (largest side) below which a "LOWRES" variant is created if the feature is enabled. Defaults to `512`.

2. **User Settings (`config/user_settings.json`):** This optional JSON file allows users to override specific settings defined in `config/app_settings.json`. If this file exists, its values for corresponding keys take precedence over the base application settings. This file is primarily managed through the GUI's Application Preferences Editor.

3. **Asset Type Definitions (`config/asset_type_definitions.json`):** This dedicated JSON file contains the definitions for different asset types (e.g., Surface, Model, Decal), including their descriptions, colors for UI representation, and example usage strings.

4. **File Type Definitions (`config/file_type_definitions.json`):** This dedicated JSON file contains the definitions for different file types (specifically texture maps and models), including descriptions, colors for UI representation, examples of keywords/patterns, a standard alias (`standard_type`), bit depth handling rules (`bit_depth_rule`), a grayscale flag (`is_grayscale`), and an optional GUI keybind (`keybind`).
    * **`keybind` Property:** Each file type object within `FILE_TYPE_DEFINITIONS` can optionally include a `keybind` property. This property accepts a single-character string (e.g., `"C"`, `"R"`) representing the keyboard key. In the GUI, this key (typically combined with `Ctrl`) is used as a shortcut to set or toggle the corresponding file type for selected items in the Preview Table.

      *Example:*

      ```json
      "MAP_COL": {
          "description": "Color/Albedo Map",
          "color": "#ffaa00",
          "examples": ["_col.", "_basecolor.", "albedo", "diffuse"],
          "standard_type": "COL",
          "bit_depth_rule": "force_8bit",
          "is_grayscale": false,
          "keybind": "C"
      },
      ```

      Note: The `bit_depth_rule` property in `FILE_TYPE_DEFINITIONS` is the primary source for determining bit depth handling for a given map type.

5. **Supplier Settings (`config/suppliers.json`):** This JSON file stores settings specific to different asset suppliers. It is now structured as a dictionary where keys are supplier names and values are objects containing supplier-specific configurations.
    * **Structure:**

      ```json
      {
          "SupplierName1": {
              "setting_key1": "value",
              "setting_key2": "value"
          },
          "SupplierName2": {
              "setting_key1": "value"
          }
      }
      ```

    * **`normal_map_type` Property:** A key setting within each supplier's object is `normal_map_type`, specifying whether normal maps from this supplier use "OpenGL" or "DirectX" conventions.

      *Example:*

      ```json
      {
          "Poliigon": {
              "normal_map_type": "DirectX"
          },
          "Dimensiva": {
              "normal_map_type": "OpenGL"
          }
      }
      ```

6. **LLM Settings (`config/llm_settings.json`):** This JSON file contains settings specifically related to the LLM predictor, such as the API endpoint, model name, prompt template, and examples. These settings are managed through the GUI using the `LLMEditorWidget`.

7. **Preset Files (`Presets/*.json`):** These JSON files define source-specific rules and overrides. They contain patterns to interpret filenames, classify map types, handle variants, define naming conventions, and specify other source-specific behaviors. Preset settings override values from `app_settings.json` and `user_settings.json` where applicable.

### Configuration Loading and Access

The `configuration.py` module contains the `Configuration` class and standalone functions for loading and saving settings.

* **`Configuration` Class:** This is the primary class used by the processing engine and other core components. When initialized with a `preset_name`, it loads settings in the following order, with later files overriding earlier ones for shared keys (a sketch of this layered load appears at the end of this section):
  1. `config/app_settings.json` (Base Defaults)
  2. `config/user_settings.json` (User Overrides - if it exists)
  3. `config/asset_type_definitions.json` (Asset Type Definitions)
  4. `config/file_type_definitions.json` (File Type Definitions)
  5. `config/llm_settings.json` (LLM Settings)
  6. `Presets/{preset_name}.json` (Preset Overrides)

  The loaded settings are merged into internal dictionaries, and most are accessible via instance properties (e.g., `config.output_base_dir`, `config.llm_endpoint_url`, `config.get_asset_type_definitions()`). Regex patterns defined in the merged configuration are pre-compiled for performance.

* **`load_base_config()` function:** This standalone function is primarily used by the GUI for initial setup and for displaying default/user-overridden settings before a specific preset is selected. It loads and merges the following files:
  1. `config/app_settings.json`
  2. `config/user_settings.json` (if it exists)
  3. `config/asset_type_definitions.json`
  4. `config/file_type_definitions.json`

  It returns a single dictionary containing the combined settings and definitions.

* **Saving Functions:**
  * `save_base_config(settings_dict)`: Saves the provided dictionary to `config/app_settings.json`. (Now used less frequently for user-driven saves.)
  * `save_user_config(settings_dict)`: Saves the provided dictionary to `config/user_settings.json`. Used by `ConfigEditorDialog`.
  * `save_llm_config(settings_dict)`: Saves the provided dictionary to `config/llm_settings.json`. Used by `LLMEditorWidget`.

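The layered load order described above can be pictured with this simplified sketch. It performs a plain top-level dictionary merge; the real `Configuration` class also validates settings and pre-compiles regex patterns, and the helper name here is illustrative.

```python
# Simplified sketch of the layered configuration load; top-level keys from later
# files override earlier ones. Illustrative, not the real Configuration class.
import json
from pathlib import Path

CONFIG_DIR = Path("config")


def _load_json(path: Path) -> dict:
    return json.loads(path.read_text(encoding="utf-8")) if path.exists() else {}


def load_effective_config(preset_name: str) -> dict:
    layers = [
        CONFIG_DIR / "app_settings.json",             # base defaults
        CONFIG_DIR / "user_settings.json",            # user overrides (optional)
        CONFIG_DIR / "asset_type_definitions.json",
        CONFIG_DIR / "file_type_definitions.json",
        CONFIG_DIR / "llm_settings.json",
        Path("Presets") / f"{preset_name}.json",      # preset overrides win last
    ]
    merged: dict = {}
    for layer in layers:
        merged.update(_load_json(layer))              # later layers override shared keys
    return merged
```
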
## Supplier Management (`config/suppliers.json`)

A file, `config/suppliers.json`, is used to store a persistent list of known supplier names. This file is a simple JSON array of strings.

* **Purpose:** Provides a list of suggestions for the "Supplier" field in the GUI's Unified View, enabling auto-completion.
* **Management:** The GUI's `SupplierSearchDelegate` is responsible for loading this list on startup, adding new, unique supplier names entered by the user, and saving the updated list back to the file.

## GUI Configuration Editors

The GUI provides dedicated editors for modifying configuration files:

* **`ConfigEditorDialog` (`gui/config_editor_dialog.py`):** Edits user-configurable application settings.
* **`LLMEditorWidget` (`gui/llm_editor_widget.py`):** Edits the LLM-specific settings.

### `ConfigEditorDialog` (`gui/config_editor_dialog.py`)

The GUI includes a dedicated editor for modifying user-configurable settings, implemented in `gui/config_editor_dialog.py`.

* **Purpose:** Provides a user-friendly interface for viewing the effective application settings (defaults + user overrides + definitions) and editing the user-specific overrides.
* **Implementation:** The dialog loads the effective settings using `load_base_config()` and presents the relevant settings in a tabbed layout ("General", "Output & Naming", etc.). When saving, it performs a **granular save**: it loads the current content of `config/user_settings.json`, identifies only the settings changed by the user during the current dialog session (by comparing against the initial state), updates only those specific values in the loaded `user_settings.json` content, and saves the modified content back to `config/user_settings.json` using `save_user_config()`. This preserves any other settings in `user_settings.json` that were not touched (see the sketch below). The dialog displays definitions from `asset_type_definitions.json` and `file_type_definitions.json` but does not save changes to these files.
* **Limitations:** Currently, editing complex fields like `IMAGE_RESOLUTIONS` or the full details of `MAP_MERGE_RULES` via the UI is not fully supported for saving to `user_settings.json`.

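The granular-save behaviour used by both editors follows a simple pattern, sketched here with hypothetical function and variable names (the real dialogs work against their own widgets and use `save_user_config()` / `save_llm_config()` for the final write):

```python
# Sketch of the granular-save pattern: only keys the user actually changed in the
# current session are written back; everything else in the file is preserved.
import json
from pathlib import Path


def granular_save(settings_path: Path, initial: dict, current: dict) -> None:
    changed = {key: value for key, value in current.items() if initial.get(key) != value}
    if not changed:
        return                                         # nothing to write
    on_disk = {}
    if settings_path.exists():
        on_disk = json.loads(settings_path.read_text(encoding="utf-8"))
    on_disk.update(changed)                            # touch only the edited keys
    settings_path.write_text(json.dumps(on_disk, indent=4), encoding="utf-8")


# e.g. granular_save(Path("config/user_settings.json"), initial_values, edited_values)
```
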
### `LLMEditorWidget` (`gui/llm_editor_widget.py`)

* **Purpose:** Provides a user-friendly interface for viewing and editing the LLM settings defined in `config/llm_settings.json`.
* **Implementation:** Uses tabs for "Prompt Settings" and "API Settings", allowing the user to edit the prompt, manage examples, and configure API details. When saving, it also performs a **granular save**: it loads the current content of `config/llm_settings.json`, identifies only the settings changed by the user in the current session, updates only those values, and saves the modified content back to `config/llm_settings.json` using `configuration.save_llm_config()`.

## Preset File Structure (`Presets/*.json`)

Preset files are the primary way to adapt the tool to new asset sources. Developers should use `Presets/_template.json` as a starting point. Key fields include:

* `supplier_name`: The name of the asset source (e.g., `"Poliigon"`). Used for output directory naming.
* `map_type_mapping`: A list of dictionaries, each mapping source filename patterns/keywords to a specific file type (see the sketch after this list). The `target_type` for this mapping **must** be a key from the `FILE_TYPE_DEFINITIONS` now located in `config/file_type_definitions.json`.
  * `target_type`: The specific file type key from `FILE_TYPE_DEFINITIONS` (e.g., `"MAP_COL"`, `"MAP_NORM_GL"`, `"MAP_RGH"`). This replaces previous alias-based systems. Common aliases like "COL" or "NRM" are now derived from the `standard_type` property within `FILE_TYPE_DEFINITIONS` but are not used directly for `target_type`.
  * `keywords`: A list of filename patterns (regex or fnmatch-style wildcards) used to identify this map type. The order of keywords within this list, and the order of dictionaries in the `map_type_mapping` list, determines the priority for assigning variant suffixes (`-1`, `-2`, etc.) when multiple files match the same `target_type`.
* `bit_depth_variants`: A dictionary mapping standard map types (e.g., `"NRM"`) to a pattern identifying its high bit-depth variant (e.g., `"*_NRM16*.tif"`). Files matching these patterns are prioritized over their standard counterparts.
* `map_bit_depth_rules`: Defines how to handle the bit depth of source maps. Can specify a default behavior (`"respect"` or `"force_8bit"`) and overrides for specific map types.
* `model_patterns`: A list of regex patterns to identify model files (e.g., `".*\\.fbx"`, `".*\\.obj"`).
* `move_to_extra_patterns`: A list of regex patterns for files that should be moved directly to the `Extra/` output subdirectory without further processing.
* `source_naming_convention`: Rules for extracting the base asset name and potentially the archetype from source filenames or directory structures (e.g., using separators and indices).
* `asset_category_rules`: Keywords or patterns used to determine the asset category (e.g., identifying `"Decal"` based on keywords).
* `archetype_rules`: Keywords or patterns used to determine the asset archetype (e.g., identifying `"Wood"` or `"Metal"`).

Careful definition of these patterns and rules, especially the regex in `map_type_mapping`, `bit_depth_variants`, `model_patterns`, and `move_to_extra_patterns`, is essential for correct asset processing.

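For illustration, a minimal, hypothetical preset fragment using these fields might look like the following; the supplier name and keyword patterns are invented and are not taken from a real preset, so consult `Presets/_template.json` for the authoritative structure:

```json
{
    "supplier_name": "ExampleSupplier",
    "map_type_mapping": [
        {
            "target_type": "MAP_COL",
            "keywords": ["*_albedo.*", "*_basecolor.*"]
        },
        {
            "target_type": "MAP_NORM_GL",
            "keywords": ["*_normal.*", "*_nrm.*"]
        }
    ],
    "model_patterns": [".*\\.fbx", ".*\\.obj"],
    "move_to_extra_patterns": [".*preview.*\\.jpg"]
}
```
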
**Note on Data Passing:** As mentioned in the Architecture documentation, major changes to the data passing mechanisms between the GUI, Main (CLI orchestration), and `AssetProcessor` modules are currently being planned. The descriptions of how configuration data is handled and passed within this document reflect the current state and will require review and updates once the plan for these changes is finalized.

@@ -1,72 +1,115 @@

# Developer Guide: Processing Pipeline

This document details the step-by-step technical process executed by the asset processing pipeline, which is initiated by the [`ProcessingEngine`](processing_engine.py:73) class (`processing_engine.py`) and orchestrated by the [`PipelineOrchestrator`](processing/pipeline/orchestrator.py:36) (`processing/pipeline/orchestrator.py`).

The [`ProcessingEngine.process()`](processing_engine.py:131) method serves as the main entry point. It initializes a [`PipelineOrchestrator`](processing/pipeline/orchestrator.py:36) instance, providing it with the application's [`Configuration`](configuration.py:68) object and predefined lists of pre-item and post-item processing stages. The [`PipelineOrchestrator.process_source_rule()`](processing/pipeline/orchestrator.py:95) method then manages the execution of these stages for each asset defined in the input [`SourceRule`](rule_structure.py:40).

A crucial component in this architecture is the [`AssetProcessingContext`](processing/pipeline/asset_context.py:86) (`processing/pipeline/asset_context.py`). An instance of this dataclass is created for each [`AssetRule`](rule_structure.py:22) being processed. It acts as a stateful container, carrying all relevant data (source files, rules, configuration, intermediate results, metadata) and is passed sequentially through each stage. Each stage can read from and write to the context, allowing data to flow and be modified throughout the pipeline.

The pipeline execution for each asset follows this general flow:

1. **Pre-Item Stages:** A sequence of stages executed once per asset before the core item processing loop. These stages typically perform initial setup, filtering, and asset-level transformations.
2. **Core Item Processing Loop:** The [`PipelineOrchestrator`](processing/pipeline/orchestrator.py:36) iterates through a list of "processing items" (individual files or merge tasks) prepared by a dedicated stage. For each item, a sequence of core processing stages is executed.
3. **Post-Item Stages:** A sequence of stages executed once per asset after the core item processing loop is complete. These stages handle final tasks like organizing output files and saving metadata.

## Pipeline Stages

The stages are executed in the following order for each asset:

### Pre-Item Stages

These stages are executed sequentially once for each asset before the core item processing loop begins.

1. **[`SupplierDeterminationStage`](processing/pipeline/stages/supplier_determination.py:6)** (`processing/pipeline/stages/supplier_determination.py`):
   * **Responsibility**: Determines the effective supplier for the asset based on the [`SourceRule`](rule_structure.py:40)'s `supplier_override`, `supplier_identifier`, and validation against configured suppliers.
   * **Context Interaction**: Sets `context.effective_supplier` and may set a `supplier_error` flag in `context.status_flags`.

2. **[`AssetSkipLogicStage`](processing/pipeline/stages/asset_skip_logic.py:5)** (`processing/pipeline/stages/asset_skip_logic.py`):
   * **Responsibility**: Checks if the entire asset should be skipped based on conditions like a missing/invalid supplier, a "SKIP" status in asset metadata, or if the asset is already processed and overwrite is disabled.
   * **Context Interaction**: Sets the `skip_asset` flag and `skip_reason` in `context.status_flags` if the asset should be skipped.

3. **[`MetadataInitializationStage`](processing/pipeline/stages/metadata_initialization.py:81)** (`processing/pipeline/stages/metadata_initialization.py`):
   * **Responsibility**: Initializes the `context.asset_metadata` dictionary with base information derived from the [`AssetRule`](rule_structure.py:22), [`SourceRule`](rule_structure.py:40), and [`Configuration`](configuration.py:68). This includes asset name, IDs, source/output paths, timestamps, and initial status.
   * **Context Interaction**: Populates `context.asset_metadata`. Initializes `context.processed_maps_details` and `context.merged_maps_details` as empty dictionaries (these are used internally by subsequent stages but are not directly part of the final `metadata.json` in their original form).

4. **[`FileRuleFilterStage`](processing/pipeline/stages/file_rule_filter.py:10)** (`processing/pipeline/stages/file_rule_filter.py`):
   * **Responsibility**: Filters the [`FileRule`](rule_structure.py:5) objects associated with the asset to determine which individual files should be considered for processing. It identifies and excludes files matching "FILE_IGNORE" rules based on their `item_type`.
   * **Context Interaction**: Populates `context.files_to_process` with the list of [`FileRule`](rule_structure.py:5) objects that are not ignored.

5. **[`GlossToRoughConversionStage`](processing/pipeline/stages/gloss_to_rough_conversion.py:15)** (`processing/pipeline/stages/gloss_to_rough_conversion.py`):
   * **Responsibility**: Identifies processed maps in `context.processed_maps_details` whose `internal_map_type` starts with "MAP_GLOSS". If found, it loads the temporary image data, inverts it using the shared utility function [`apply_common_map_transformations`](processing/utils/image_processing_utils.py), saves a new temporary roughness map ("MAP_ROUGH"), and updates the corresponding details in `context.processed_maps_details` (setting `internal_map_type` to "MAP_ROUGH") and the relevant [`FileRule`](rule_structure.py:5) in `context.files_to_process` (setting `item_type` to "MAP_ROUGH").
   * **Context Interaction**: Reads from and updates `context.processed_maps_details` (specifically `internal_map_type` and `temp_processed_file`) and `context.files_to_process` (specifically `item_type`).

6. **[`AlphaExtractionToMaskStage`](processing/pipeline/stages/alpha_extraction_to_mask.py:16)** (`processing/pipeline/stages/alpha_extraction_to_mask.py`):
   * **Responsibility**: If no mask map is explicitly defined for the asset (as a [`FileRule`](rule_structure.py:5) with `item_type="MAP_MASK"`), this stage searches `context.processed_maps_details` for a suitable source map (e.g., a "MAP_COL" with an alpha channel, based on its `internal_map_type`). If found, it extracts the alpha channel, saves it as a new temporary mask map, and adds a new [`FileRule`](rule_structure.py:5) (with `item_type="MAP_MASK"`) and corresponding details (with `internal_map_type="MAP_MASK"`) to the context.
   * **Context Interaction**: Reads from `context.processed_maps_details`, adds a new [`FileRule`](rule_structure.py:5) to `context.files_to_process`, and adds a new entry to `context.processed_maps_details` (setting `internal_map_type`).

7. **[`NormalMapGreenChannelStage`](processing/pipeline/stages/normal_map_green_channel.py:14)** (`processing/pipeline/stages/normal_map_green_channel.py`):
   * **Responsibility**: Identifies processed normal maps in `context.processed_maps_details` (those with an `internal_map_type` starting with "MAP_NRM"). If the global `invert_normal_map_green_channel_globally` configuration is true, it loads the temporary image data, inverts the green channel using the shared utility function [`apply_common_map_transformations`](processing/utils/image_processing_utils.py), saves a new temporary modified normal map, and updates the `temp_processed_file` path in `context.processed_maps_details`. A sketch of both this inversion and the gloss-to-roughness inversion follows the stage list.
   * **Context Interaction**: Reads from and updates `context.processed_maps_details` (specifically `temp_processed_file` and `notes`).

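Both inversions reduce to simple per-channel arithmetic. The sketch below uses NumPy arrays as a stand-in for the loaded image data; the real [`apply_common_map_transformations`](processing/utils/image_processing_utils.py) utility may expose a different signature and handle more cases (bit depths, masks, colour spaces).

```python
# Sketch of the two transformations named above; not the project's actual helper.
import numpy as np


def invert_gloss_to_roughness(gloss: np.ndarray) -> np.ndarray:
    """Roughness is the inverse of glossiness: rough = max_value - gloss."""
    max_value = np.iinfo(gloss.dtype).max if np.issubdtype(gloss.dtype, np.integer) else 1.0
    return (max_value - gloss).astype(gloss.dtype)


def invert_normal_green_channel(normal_rgb: np.ndarray) -> np.ndarray:
    """Flip the green (Y) channel, e.g. to convert between OpenGL and DirectX conventions."""
    out = normal_rgb.copy()
    max_value = np.iinfo(out.dtype).max if np.issubdtype(out.dtype, np.integer) else 1.0
    out[..., 1] = max_value - out[..., 1]
    return out
```
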
### Core Item Processing Loop

The [`PipelineOrchestrator`](processing/pipeline/orchestrator.py:36) iterates through the `context.processing_items` list (populated by the [`PrepareProcessingItemsStage`](processing/pipeline/stages/prepare_processing_items.py:10)). Each `item` in this list is now either a [`ProcessingItem`](rule_structure.py:0) (representing a specific variant of a source map, e.g., Color at 1K, or Color at LOWRES) or a [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16).

1. **[`PrepareProcessingItemsStage`](processing/pipeline/stages/prepare_processing_items.py:10)** (`processing/pipeline/stages/prepare_processing_items.py`):
   * **Responsibility**: (Executed once before the loop) This stage is now responsible for "exploding" each relevant [`FileRule`](rule_structure.py:5) into one or more [`ProcessingItem`](rule_structure.py:0) objects (see the sketch after this item).
     * For each [`FileRule`](rule_structure.py:5) that represents an image map:
       * It loads the source image data and determines its original dimensions and bit depth.
       * It creates standard [`ProcessingItem`](rule_structure.py:0)s for each required output resolution (e.g., "1K", "PREVIEW"), populating them with a copy of the source image data and the respective `resolution_key`.
       * If the "Low-Resolution Fallback" feature is enabled (`ENABLE_LOW_RESOLUTION_FALLBACK` in config) and the source image's largest dimension is below `LOW_RESOLUTION_THRESHOLD`, it creates an additional [`ProcessingItem`](rule_structure.py:0) with `resolution_key="LOWRES"`, using the original image data and dimensions.
     * It also adds [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16)s derived from global `map_merge_rules`.
   * **Context Interaction**: Reads `context.files_to_process` and `context.config_obj`. Populates `context.processing_items` with a list of [`ProcessingItem`](rule_structure.py:0) and [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16) objects. Initializes `context.intermediate_results`.

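A simplified sketch of this "explosion", including the Low-Resolution Fallback check, is shown below. The `ProcessingItem` fields and configuration keys follow the description above, but the helper function itself and its exact inputs are assumptions.

```python
# Sketch of exploding one FileRule's image into per-resolution ProcessingItems.
from dataclasses import dataclass
from typing import List, Tuple

import numpy as np


@dataclass
class ProcessingItem:
    map_type_identifier: str
    resolution_key: str
    image_data: np.ndarray
    current_dimensions: Tuple[int, int]


def explode_file_rule(map_type: str, image: np.ndarray, config: dict) -> List[ProcessingItem]:
    height, width = image.shape[:2]
    items = [
        ProcessingItem(map_type, res_key, image.copy(), (width, height))
        for res_key in config["IMAGE_RESOLUTIONS"]        # e.g. {"1K": 1024, "PREVIEW": 256}
    ]
    # Low-Resolution Fallback: add a LOWRES item for small source images.
    if (config.get("ENABLE_LOW_RESOLUTION_FALLBACK", True)
            and max(width, height) < config.get("LOW_RESOLUTION_THRESHOLD", 512)):
        items.append(ProcessingItem(map_type, "LOWRES", image, (width, height)))
    return items
```
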
For each `item` in `context.processing_items`:

2. **Transformations (Implicit or via a dedicated stage - formerly `RegularMapProcessorStage` logic):**
   * **Responsibility**: If the `item` is a [`ProcessingItem`](rule_structure.py:0), its `image_data` (loaded by `PrepareProcessingItemsStage`) may need transformations (Gloss-to-Rough, Normal Green Invert). This logic, previously in `RegularMapProcessorStage`, might be integrated into `PrepareProcessingItemsStage` before `ProcessingItem` creation, or handled by a new dedicated transformation stage that operates on `ProcessingItem.image_data`. The `item.map_type_identifier` would be updated if a transformation such as Gloss-to-Rough occurs.
   * **Context Interaction**: Modifies `item.image_data` and `item.map_type_identifier` within the [`ProcessingItem`](rule_structure.py:0) object.

3. **[`MergedTaskProcessorStage`](processing/pipeline/stages/merged_task_processor.py:68)** (`processing/pipeline/stages/merged_task_processor.py`):
   * **Responsibility**: (Executed if `item` is a [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16)) Same as before: validates inputs, loads source map data (likely from `ProcessingItem`s in `context.processing_items` or a cache populated from them), applies transformations, merges channels, and returns [`ProcessedMergedMapData`](processing/pipeline/asset_context.py:35).
   * **Context Interaction**: Reads the [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16) and potentially `context.processing_items` (or a cache derived from it) for input image data. Returns [`ProcessedMergedMapData`](processing/pipeline/asset_context.py:35).

4. **[`InitialScalingStage`](processing/pipeline/stages/initial_scaling.py:14)** (`processing/pipeline/stages/initial_scaling.py`):
   * **Responsibility**: (Executed per item; a sketch of POT scaling appears after this list)
     * If `item` is a [`ProcessingItem`](rule_structure.py:0): Takes `item.image_data`, `item.current_dimensions`, and `item.resolution_key` as input. If `item.resolution_key` is "LOWRES", POT scaling is skipped. Otherwise, applies POT scaling if configured.
     * If `item` is from a `MergeTaskDefinition` (i.e., `processed_data` from `MergedTaskProcessorStage`): Applies POT scaling as before.
   * **Context Interaction**: Takes [`InitialScalingInput`](processing/pipeline/asset_context.py:46) (now including `resolution_key`). Returns [`InitialScalingOutput`](processing/pipeline/asset_context.py:54) (also including `resolution_key`), which updates `context.intermediate_results`. The `current_image_data` and `current_dimensions` for saving are taken from this output.

5. **[`SaveVariantsStage`](processing/pipeline/stages/save_variants.py:15)** (`processing/pipeline/stages/save_variants.py`):
   * **Responsibility**: (Executed per item) Saves the (potentially scaled) `current_image_data`.
   * **Context Interaction**:
     * Takes [`SaveVariantsInput`](processing/pipeline/asset_context.py:61).
     * `internal_map_type` is set from `item.map_type_identifier` (for a `ProcessingItem`) or `processed_data.output_map_type` (for merged items).
     * `output_filename_pattern_tokens['resolution']` is set to the `resolution_key` obtained from `scaled_data_output.resolution_key` (which originates from `item.resolution_key` for `ProcessingItem`s, or is `None` for merged items that get all standard resolutions).
     * The `image_resolutions` argument for `SaveVariantsInput`:
       * If `resolution_key == "LOWRES"`: Set to `{"LOWRES": width_of_lowres_data}`.
       * If `resolution_key` is a standard key (e.g., "1K"): Set to `{resolution_key: configured_dimension}`.
       * For merged items (where the `resolution_key` from scaling is likely `None`): Set to the full `config.image_resolutions` map to generate all applicable standard sizes.
     * Returns [`SaveVariantsOutput`](processing/pipeline/asset_context.py:79). The orchestrator stores the returned details in `context.processed_maps_details`.

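POT scaling itself is a small calculation. The sketch below assumes dimensions are rounded down to the nearest power of two; the rounding direction is an assumption about the scaling policy, not a documented rule.

```python
# Illustrative power-of-two helper; rounding direction is an assumption.
def nearest_lower_power_of_two(value: int) -> int:
    power = 1
    while power * 2 <= value:
        power *= 2
    return power


def pot_dimensions(width: int, height: int) -> tuple:
    """Return POT target dimensions; items with resolution_key == "LOWRES" skip this step."""
    return nearest_lower_power_of_two(width), nearest_lower_power_of_two(height)


# Example: pot_dimensions(1500, 750) -> (1024, 512)
```
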
### Post-Item Stages

These stages are executed sequentially once for each asset after the core item processing loop has finished for all items.

1. **[`OutputOrganizationStage`](processing/pipeline/stages/output_organization.py:14)** (`processing/pipeline/stages/output_organization.py`):
   * **Responsibility**: Determines the final output paths for all processed maps (including variants) and extra files based on configured patterns. It copies the temporary files generated by the core stages to these final destinations, creating directories as needed and respecting overwrite settings.
   * **Context Interaction**: Reads from `context.processed_maps_details`, `context.files_to_process` (for 'EXTRA' files), `context.output_base_path`, and [`Configuration`](configuration.py:68). Updates entries in `context.processed_maps_details` with organization status. Populates `context.asset_metadata['maps']` with the final map structure (a hypothetical example follows this list):
     * The `maps` object is a dictionary where keys are standard map types (e.g., "COL", "REFL").
     * Each entry contains a `variant_paths` dictionary, where keys are resolution strings (e.g., "8K", "4K") and values are the filenames of the map variants (relative to the asset's output directory).

   It also populates `context.asset_metadata['final_output_files']` with a list of absolute paths to all generated files (this list itself is not saved in the final `metadata.json`).

2. **[`MetadataFinalizationAndSaveStage`](processing/pipeline/stages/metadata_finalization_save.py:14)** (`processing/pipeline/stages/metadata_finalization_save.py`):
   * **Responsibility**: Finalizes `context.asset_metadata` (setting the final status based on flags). It determines the save path for the metadata file based on configuration and patterns, serializes `context.asset_metadata` (which now contains the structured `maps` data from `OutputOrganizationStage`) to JSON, and saves the `metadata.json` file.
   * **Context Interaction**: Reads from `context.asset_metadata` (including the `maps` structure), `context.output_base_path`, and [`Configuration`](configuration.py:68). Before saving, it explicitly removes the `final_output_files` key from `context.asset_metadata`. The `processing_end_time` is also no longer added. The `metadata.json` file is written, and `context.asset_metadata` is updated with its final path and status. The older `processed_maps_details` and `merged_maps_details` from the context are not directly included in the JSON.

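As an illustration, a hypothetical fragment of the resulting `maps` structure inside `metadata.json` could look like this; the map types, resolution keys, and filenames are invented examples:

```json
"maps": {
    "COL": {
        "variant_paths": {
            "4K": "ExampleAsset_COL_4K.png",
            "1K": "ExampleAsset_COL_1K.png",
            "PREVIEW": "ExampleAsset_COL_PREVIEW.jpg"
        }
    },
    "NRM": {
        "variant_paths": {
            "4K": "ExampleAsset_NRM_4K.png",
            "1K": "ExampleAsset_NRM_1K.png"
        }
    }
}
```
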
## External Steps

Certain steps are integral to the overall asset processing workflow but are handled outside the [`PipelineOrchestrator`](processing/pipeline/orchestrator.py:36)'s direct execution loop:

* **Workspace Preparation and Cleanup**: Handled by the code that invokes [`ProcessingEngine.process()`](processing_engine.py:131) (e.g., `main.ProcessingTask`, `monitor._process_archive_task`), typically involving extracting archives and setting up temporary directories. The engine itself manages a sub-temporary directory (`engine_temp_dir`) for intermediate processing files.
* **Prediction and Rule Generation**: Performed before the [`ProcessingEngine`](processing_engine.py:73) is called. This involves analyzing source files and generating the [`SourceRule`](rule_structure.py:40) object with its nested [`AssetRule`](rule_structure.py:22)s and [`FileRule`](rule_structure.py:5)s, often involving prediction logic (potentially using LLMs).
* **Optional Blender Script Execution**: Can be triggered externally after successful processing to perform tasks like material setup in Blender using the generated output files and metadata.

This staged pipeline provides a modular and extensible architecture for asset processing, with clear separation of concerns for each step. The [`AssetProcessingContext`](processing/pipeline/asset_context.py:86) ensures that data flows consistently between these stages.

@@ -10,13 +10,13 @@

The GUI is built using `PySide6`, which provides Python bindings for the Qt framework.

The `MainWindow` class acts as the central **coordinator** for the GUI application. It is responsible for:
|
The `MainWindow` class acts as the central **coordinator** for the GUI application. It is responsible for:
|
||||||
|
|
||||||
* Setting up the main application window structure and menu bar.
|
* Setting up the main application window structure and menu bar, including actions to launch configuration and definition editors.
|
||||||
* **Layout:** Arranging the main GUI components using a `QSplitter`.
|
* **Layout:** Arranging the main GUI components using a `QSplitter`.
|
||||||
* **Left Pane:** Contains the preset selection controls (from `PresetEditorWidget`) permanently displayed at the top. Below this, a `QStackedWidget` switches between the preset JSON editor (also from `PresetEditorWidget`) and the `LLMEditorWidget`.
|
* **Left Pane:** Contains the preset selection controls (from `PresetEditorWidget`) permanently displayed at the top. Below this, a `QStackedWidget` switches between the preset JSON editor (also from `PresetEditorWidget`) and the `LLMEditorWidget`.
|
||||||
* **Right Pane:** Contains the `MainPanelWidget`.
|
* **Right Pane:** Contains the `MainPanelWidget`.
|
||||||
* Instantiating and managing the major GUI widgets:
|
* Instantiating and managing the major GUI widgets:
|
||||||
* `PresetEditorWidget` (`gui/preset_editor_widget.py`): Provides the preset selector and the JSON editor parts.
|
* `PresetEditorWidget` (`gui/preset_editor_widget.py`): Provides the preset selector and the JSON editor parts.
|
||||||
* `LLMEditorWidget` (`gui/llm_editor_widget.py`): Provides the editor for LLM settings.
|
* `LLMEditorWidget` (`gui/llm_editor_widget.py`): Provides the editor for LLM settings (from `config/llm_settings.json`).
|
||||||
* `MainPanelWidget` (`gui/main_panel_widget.py`): Contains the rule hierarchy view and processing controls.
|
* `MainPanelWidget` (`gui/main_panel_widget.py`): Contains the rule hierarchy view and processing controls.
|
||||||
* `LogConsoleWidget` (`gui/log_console_widget.py`): Displays application logs.
|
* `LogConsoleWidget` (`gui/log_console_widget.py`): Displays application logs.
|
||||||
* Instantiating key models and handlers:
|
* Instantiating key models and handlers:
|
||||||
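The splitter/stacked-widget arrangement described above can be sketched roughly as follows. This is a minimal stand-in using placeholder widgets, not `MainWindow`'s actual construction code; only the overall layout (left pane with selector plus `QStackedWidget`, right pane in a `QSplitter`) is taken from the description.

```python
from PySide6.QtCore import Qt
from PySide6.QtWidgets import (QApplication, QComboBox, QMainWindow, QPlainTextEdit,
                               QSplitter, QStackedWidget, QVBoxLayout, QWidget)

class MainWindowSketch(QMainWindow):
    def __init__(self):
        super().__init__()
        splitter = QSplitter(Qt.Horizontal, self)

        # Left pane: preset selector on top, stacked editors below.
        left_pane = QWidget()
        left_layout = QVBoxLayout(left_pane)
        self.preset_selector = QComboBox()          # stands in for the preset selection controls
        self.editor_stack = QStackedWidget()
        self.editor_stack.addWidget(QPlainTextEdit("preset JSON editor placeholder"))
        self.editor_stack.addWidget(QPlainTextEdit("LLM settings editor placeholder"))
        left_layout.addWidget(self.preset_selector)
        left_layout.addWidget(self.editor_stack)

        splitter.addWidget(left_pane)
        splitter.addWidget(QPlainTextEdit("MainPanelWidget placeholder"))  # right pane
        self.setCentralWidget(splitter)

if __name__ == "__main__":
    app = QApplication([])
    window = MainWindowSketch()
    window.show()
    app.exec()
```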
@@ -198,13 +198,24 @@ The `LogConsoleWidget` displays logs captured by a custom `QtLogHandler` from Py

The GUI provides a "Cancel" button. Cancellation logic for the actual processing is now likely handled within the `main.ProcessingTask` or the code that manages it, as the `ProcessingHandler` has been removed. The GUI button would signal this external task manager.

-## GUI Configuration Editor (`gui/config_editor_dialog.py`)
+## Application Preferences Editor (`gui/config_editor_dialog.py`)

-A dedicated dialog for editing `config/app_settings.json`.
+A dedicated dialog for editing user-overridable application settings. It loads base settings from `config/app_settings.json` and saves user overrides to `config/user_settings.json`.

-* **Functionality:** Loads `config/app_settings.json`, presents in tabs, allows editing basic fields, definitions tables (with color editing), and merge rules list/detail.
-* **Limitations:** Editing complex fields like `IMAGE_RESOLUTIONS` or full `MAP_MERGE_RULES` details might still be limited.
-* **Integration:** Launched by `MainWindow` ("Edit" -> "Preferences...").
-* **Persistence:** Saves changes to `config/app_settings.json`. Requires application restart for changes to affect processing logic loaded by the `Configuration` class.
+* **Functionality:** Provides a tabbed interface to edit various application settings, including general paths, output/naming patterns, image processing options (like resolutions and compression), and map merging rules. It no longer includes editors for Asset Type or File Type Definitions.
+* **Integration:** Launched by `MainWindow` via the "Edit" -> "Preferences..." menu.
+* **Persistence:** Saves changes to `config/user_settings.json`. Changes require an application restart to take effect in processing logic.

The refactored GUI separates concerns into distinct widgets and handlers, coordinated by the `MainWindow`. Background tasks use `QThreadPool` and `QRunnable`. The `UnifiedViewModel` focuses on data presentation and simple edits, delegating complex restructuring to the `AssetRestructureHandler`.

+## Definitions Editor (`gui/definitions_editor_dialog.py`)
+
+A new dedicated dialog for managing core application definitions that are separate from general user preferences.
+
+* **Purpose:** Provides a structured UI for editing Asset Type Definitions, File Type Definitions, and Supplier Settings.
+* **Structure:** Uses a `QTabWidget` with three tabs:
+* **Asset Type Definitions:** Manages definitions from `config/asset_type_definitions.json`. Presents a list of asset types and allows editing their description, color, and examples.
+* **File Type Definitions:** Manages definitions from `config/file_type_definitions.json`. Presents a list of file types and allows editing their description, color, examples, standard type, bit depth rule, grayscale status, and keybind.
+* **Supplier Settings:** Manages settings from `config/suppliers.json`. Presents a list of suppliers and allows editing supplier-specific settings (e.g., Normal Map Type).
+* **Integration:** Launched by `MainWindow` via the "Edit" -> "Edit Definitions..." menu.
+* **Persistence:** Saves changes directly to the respective configuration files (`config/asset_type_definitions.json`, `config/file_type_definitions.json`, `config/suppliers.json`). Some changes may require an application restart.
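To illustrate the persistence model of the Definitions Editor, the sketch below loads one of the definition files and writes edits straight back to it. The helper names (`load_definitions`, `save_definitions`) and the flat key/value layout are assumptions for illustration; the real dialog's widgets and data structures differ.

```python
import json
from pathlib import Path

CONFIG_DIR = Path("config")  # assumed project-relative config directory

def load_definitions(filename: str) -> dict:
    """Read one of the definition files, e.g. 'file_type_definitions.json'."""
    with (CONFIG_DIR / filename).open("r", encoding="utf-8") as f:
        return json.load(f)

def save_definitions(filename: str, definitions: dict) -> None:
    """Write the edited definitions straight back to the same file."""
    with (CONFIG_DIR / filename).open("w", encoding="utf-8") as f:
        json.dump(definitions, f, indent=4)

# Example: flip a hypothetical flag on one entry and persist it.
defs = load_definitions("file_type_definitions.json")
defs.setdefault("MAP_AO", {})["grayscale"] = True
save_definitions("file_type_definitions.json", defs)
```

Because these files are written directly rather than via `config/user_settings.json`, the restart note above applies: the `Configuration` class only picks the changes up on the next launch.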
@@ -56,7 +56,7 @@
         ]
     },
     {
-        "target_type": "MAP_ROUGH",
+        "target_type": "MAP_GLOSS",
         "keywords": [
             "GLOSS"
         ]
@@ -25,102 +25,10 @@
         "*.pdf",
         "*.url",
         "*.htm*",
-        "*_Fabric.*"
+        "*_Fabric.*",
+        "*_Albedo*"
     ],
-    "map_type_mapping": [
-        { "target_type": "MAP_COL", "keywords": ["COLOR*", "COL", "DIFFUSE", "DIF", "ALBEDO"] },
-        { "target_type": "MAP_NRM", "keywords": ["NORMAL*", "NORM*", "NRM*"] },
-        { "target_type": "MAP_ROUGH", "keywords": ["ROUGHNESS", "ROUGH"] },
-        { "target_type": "MAP_ROUGH", "keywords": ["GLOSS"], "is_gloss_source": true },
-        { "target_type": "MAP_AO", "keywords": ["AMBIENTOCCLUSION", "AO"] },
-        { "target_type": "MAP_DISP", "keywords": ["DISPLACEMENT", "DISP", "HEIGHT", "BUMP"] },
-        { "target_type": "MAP_REFL", "keywords": ["REFLECTION", "REFL", "SPECULAR", "SPEC"] },
-        { "target_type": "MAP_SSS", "keywords": ["SSS", "SUBSURFACE*"] },
-        { "target_type": "MAP_FUZZ", "keywords": ["FUZZ"] },
-        { "target_type": "MAP_IDMAP", "keywords": ["IDMAP"] },
-        { "target_type": "MAP_MASK", "keywords": ["OPAC*", "TRANSP*", "MASK*", "ALPHA*"] },
-        { "target_type": "MAP_METAL", "keywords": ["METAL*", "METALLIC"] }
-    ],
+    "map_type_mapping": [],
     "asset_category_rules": {
         "model_patterns": [
             "*.fbx",
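The removed `map_type_mapping` entries pair a `target_type` with filename keywords. As a rough illustration of how such keyword rules could be applied (this is not the tool's actual prediction logic, only a sketch over the same JSON shape), a classifier might tokenize the filename and match each keyword, treating trailing `*` as a wildcard:

```python
from fnmatch import fnmatch
from typing import Optional

# Hypothetical subset of the mapping shown above, in the same shape as the preset JSON.
MAP_TYPE_MAPPING = [
    {"target_type": "MAP_COL", "keywords": ["COLOR*", "COL", "DIFFUSE", "DIF", "ALBEDO"]},
    {"target_type": "MAP_ROUGH", "keywords": ["GLOSS"], "is_gloss_source": True},
    {"target_type": "MAP_NRM", "keywords": ["NORMAL*", "NORM*", "NRM*"]},
]

def classify_map(filename: str) -> Optional[str]:
    """Return the first target_type whose keywords match a token of the filename."""
    tokens = filename.upper().rsplit(".", 1)[0].split("_")
    for rule in MAP_TYPE_MAPPING:
        for keyword in rule["keywords"]:
            # Keywords may carry trailing wildcards (e.g. "NORMAL*"), so fnmatch is used.
            if any(fnmatch(token, keyword) for token in tokens):
                return rule["target_type"]
    return None

print(classify_map("BoucleChunky001_NRM_1K_METALNESS.png"))  # -> "MAP_NRM"
```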
107
ProjectNotes/ConfigurationRefactoringPlan.md
Normal file
@@ -0,0 +1,107 @@
# Configuration System Refactoring Plan

This document outlines the plan for refactoring the configuration system of the Asset Processor Tool.

## Overall Goals

1. **Decouple Definitions:** Separate `ASSET_TYPE_DEFINITIONS` and `FILE_TYPE_DEFINITIONS` from the main `config/app_settings.json` into dedicated files.
2. **Introduce User Overrides:** Allow users to override base settings via a new `config/user_settings.json` file.
3. **Improve GUI Saving:** (Lower Priority) Make GUI configuration saving more targeted to avoid overwriting unrelated settings when saving changes from `ConfigEditorDialog` or `LLMEditorWidget`.

## Proposed Plan Phases

**Phase 1: Decouple Definitions**

1. **Create New Definition Files:**
    * Create `config/asset_type_definitions.json`.
    * Create `config/file_type_definitions.json`.
2. **Migrate Content:**
    * Move `ASSET_TYPE_DEFINITIONS` object from `config/app_settings.json` to `config/asset_type_definitions.json`.
    * Move `FILE_TYPE_DEFINITIONS` object from `config/app_settings.json` to `config/file_type_definitions.json`.
3. **Update `configuration.py`:**
    * Add constants for new definition file paths.
    * Modify `Configuration` class to load these new files.
    * Update property methods (e.g., `get_asset_type_definitions`, `get_file_type_definitions_with_examples`) to use data from the new definition dictionaries.
    * Adjust validation (`_validate_configs`) as needed.
4. **Update GUI & `load_base_config()`:**
    * Modify `load_base_config()` to load and return a combined dictionary including `app_settings.json` and the two new definition files.
    * Update GUI components relying on `load_base_config()` to ensure they receive the necessary definition data.

**Phase 2: Implement User Overrides**

1. **Define `user_settings.json`:**
    * Establish `config/user_settings.json` for user-specific overrides, mirroring parts of `app_settings.json`.
2. **Update `configuration.py` Loading:**
    * In `Configuration.__init__`, load `app_settings.json`, then definition files, then attempt to load and deep merge `user_settings.json` (user settings override base); a minimal sketch of such a merge is shown after this phase.
    * Load presets *after* the base+user merge (presets override combined base+user).
    * Modify `load_base_config()` to also load and merge `user_settings.json` after `app_settings.json`.
3. **Update GUI Editors:**
    * Modify `ConfigEditorDialog` to load the effective settings (base+user) but save changes *only* to `config/user_settings.json`.
    * `LLMEditorWidget` continues targeting `llm_settings.json`.
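Phase 2 hinges on deep merging the user overrides into the base settings. Below is a minimal sketch, assuming plain nested dictionaries and "later sources win"; the function names are illustrative, not the actual helpers in `configuration.py`.

```python
import json
from pathlib import Path
from typing import Any, Dict

def deep_merge(base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
    """Return a new dict where override values win; nested dicts are merged recursively."""
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

def load_json_if_exists(path: Path) -> Dict[str, Any]:
    return json.loads(path.read_text(encoding="utf-8")) if path.exists() else {}

config_dir = Path("config")
base = load_json_if_exists(config_dir / "app_settings.json")
user = load_json_if_exists(config_dir / "user_settings.json")
effective = deep_merge(base, user)  # presets would be merged on top of this afterwards
```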
**Phase 3: Granular GUI Saving (Lower Priority)**

1. **Refactor Saving Logic:**
    * In `ConfigEditorDialog` and `LLMEditorWidget`:
        * Load the current target file (`user_settings.json` or `llm_settings.json`).
        * Identify specific setting(s) changed by the user in the GUI session.
        * Update only those specific key(s) in the loaded dictionary.
        * Write the entire modified dictionary back to the target file, preserving untouched settings.

## Proposed File Structure & Loading Flow

```mermaid
graph LR
    subgraph Config Files
        A[config/asset_type_definitions.json]
        B[config/file_type_definitions.json]
        C[config/app_settings.json (Base Defaults)]
        D[config/user_settings.json (User Overrides)]
        E[config/llm_settings.json]
        F[config/suppliers.json]
        G[Presets/*.json]
    end

    subgraph Code
        H[configuration.py]
        I[GUI]
        J[Processing Engine / Pipeline]
        K[LLM Handlers]
    end

    subgraph Loading Flow (Configuration Class)
        L(Load Asset Types) --> H
        M(Load File Types) --> H
        N(Load Base Settings) --> P(Merge Base + User)
        O(Load User Settings) --> P
        P --> R(Merge Preset Overrides)
        Q(Load LLM Settings) --> H
        R --> T(Final Config Object)
        G -- Load Preset --> R
        H -- Contains --> T
    end

    subgraph Loading Flow (GUI - load_base_config)
        L2(Load Asset Types) --> U(Return Merged Defaults + Defs)
        M2(Load File Types) --> U
        N2(Load Base Settings) --> V(Merge Base + User)
        O2(Load User Settings) --> V
        V --> U
        I -- Calls --> U
    end

    T -- Used by --> J
    T -- Used by --> K

    I -- Edits --> D
    I -- Edits --> E
    I -- Manages --> F

    style A fill:#f9f,stroke:#333,stroke-width:2px
    style B fill:#f9f,stroke:#333,stroke-width:2px
    style C fill:#ccf,stroke:#333,stroke-width:2px
    style D fill:#9cf,stroke:#333,stroke-width:2px
    style E fill:#ccf,stroke:#333,stroke-width:2px
    style F fill:#9cf,stroke:#333,stroke-width:2px
    style G fill:#ffc,stroke:#333,stroke-width:2px
```
62
ProjectNotes/issue_definitions_editor_list_selection.md
Normal file
@@ -0,0 +1,62 @@
# Issue: List item selection not working in Definitions Editor

**Date:** 2025-05-13

**Affected File:** [`gui/definitions_editor_dialog.py`](gui/definitions_editor_dialog.py)

**Problem Description:**
User mouse clicks on items within the `QListWidget` instances (for Asset Types, File Types, and Suppliers) in the Definitions Editor dialog do not trigger item selection or the `currentItemChanged` signal. The first item is selected by default and its details are displayed correctly. Programmatic selection of items (e.g., via a diagnostic button) *does* correctly trigger the `currentItemChanged` signal and updates the UI detail views. The issue is specific to user-initiated mouse clicks for selection after the initial load.

**Debugging Steps Taken & Findings:**

1. **Initial Analysis:**
    * Reviewed GUI internals documentation ([`Documentation/02_Developer_Guide/06_GUI_Internals.md`](Documentation/02_Developer_Guide/06_GUI_Internals.md)) and [`gui/definitions_editor_dialog.py`](gui/definitions_editor_dialog.py) source code.
    * Confirmed signal connections (`currentItemChanged` to display slots) are made.
2. **Logging in Display Slots (`_display_*_details`):**
    * Added logging to display slots. Confirmed they are called for the initial (default) item selection.
    * No further calls to these slots occur on user clicks, indicating `currentItemChanged` is not firing.
3. **Color Swatch Palette Role:**
    * Investigated and corrected `QPalette.ColorRole` for color swatches (reverted from `Background` to `Window`). This fixed an `AttributeError` but did not resolve the selection issue.
4. **Robust Error Handling in Display Slots:**
    * Wrapped display slot logic in `try...finally` blocks with detailed logging. Confirmed slots complete without error for initial selection and signals for detail widgets are reconnected.
5. **Diagnostic Lambda for `currentItemChanged`:**
    * Added a lambda logger to `currentItemChanged` alongside the main display slot.
    * Confirmed both lambda and display slot fire for initial programmatic selection.
    * Neither fires for subsequent user clicks. This proved the `QListWidget` itself was not emitting the signal.
6. **Explicit `setEnabled` and `setSelectionMode` on `QListWidget`:**
    * Explicitly set these properties. No change in behavior.
7. **Explicit `setEnabled` and `setFocusPolicy(Qt.ClickFocus)` on `tab_page` (parent of `QListWidget` layout):**
    * This change **allowed programmatic selection via a diagnostic button to correctly fire `currentItemChanged` and update the UI**.
    * However, user mouse clicks still did not work and did not fire the signal.
8. **Event Filter Investigation:**
    * **Filter on `QListWidget`:** Did NOT receive mouse press/release events from user clicks.
    * **Filter on `tab_page` (parent of `QListWidget`'s layout):** Did NOT receive mouse press/release events.
    * **Filter on `self.tab_widget` (QTabWidget):** DID receive mouse press/release events.
    * Modified `self.tab_widget`'s event filter to return `False` for events over the current page, attempting to ensure propagation.
    * **Result:** With the modified `tab_widget` filter, an event filter re-added to `asset_type_list_widget` *did* start receiving mouse press/release events. **However, `asset_type_list_widget` still did not emit `currentItemChanged` from these user clicks.**
9. **`DebugListWidget` (Subclassing `QListWidget`):**
    * Created `DebugListWidget` overriding `mousePressEvent` with logging.
    * Used `DebugListWidget` for `asset_type_list_widget`.
    * **Initial user report indicated that `DebugListWidget.mousePressEvent` logs were NOT appearing for user clicks.** This means that even with the `QTabWidget` event filter attempting to propagate events, and the `asset_type_list_widget`'s filter (from step 8) confirming it received them, the `mousePressEvent` of the `QListWidget` itself was not being triggered by those propagated events. This is the current mystery.

**Current Status:**

- Programmatic selection works and fires signals.
- User clicks are received by an event filter on `asset_type_list_widget` (after `QTabWidget` filter modification) but do not result in `mousePressEvent` being called on the `QListWidget` (or `DebugListWidget`) itself, and thus no `currentItemChanged` signal is emitted.
- The issue seems to be a very low-level event processing problem specifically for user mouse clicks within the `QListWidget` instances when they are children of the `QTabWidget` pages, even when events appear to reach the list widget via an event filter.

**Next Steps (When Resuming):**

1. Re-verify the logs from the `DebugListWidget.mousePressEvent` test. If it's truly not being called despite its event filter seeing events, this is extremely unusual. (A minimal sketch of such a subclass follows this note.)
2. Simplify the `_create_tab_pane` method drastically for one tab:
    * Remove the right-hand pane.
    * Add the `DebugListWidget` directly to the `tab_page`'s layout without the intermediate `left_pane_layout`.
3. Consider if any styles applied to `QListWidget` or its parents via stylesheets could be interfering with hit testing or event processing (unlikely for this specific symptom, but possible).
4. Explore alternative ways to populate/manage the `QListWidget` or its items if a subtle corruption is occurring.
5. If all else fails, consider replacing the `QListWidget` with a `QListView` and a `QStringListModel` as a more fundamental change to see if the issue is specific to `QListWidget` in this context.
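For reference, a `DebugListWidget` of the kind described in step 9 might look like the following minimal sketch. The logging format and the use of `event.position()` (the Qt 6 / PySide6 accessor) are assumptions; the actual class in the dialog may differ.

```python
import logging
from PySide6.QtWidgets import QListWidget

logger = logging.getLogger(__name__)

class DebugListWidget(QListWidget):
    """QListWidget subclass that logs mouse presses before delegating to the default handler."""

    def mousePressEvent(self, event):
        logger.debug("DebugListWidget.mousePressEvent at %s, button=%s",
                     event.position().toPoint(), event.button())
        super().mousePressEvent(event)  # keep normal selection behaviour
```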
BIN
TestFiles/BoucleChunky001.zip
Normal file
Binary file not shown.
57
TestFiles/Test-BoucleChunky001.json
Normal file
@@ -0,0 +1,57 @@
{
    "source_rules": [
        {
            "input_path": "BoucleChunky001.zip",
            "supplier_identifier": "Dinesen",
            "preset_name": null,
            "assets": [
                {
                    "asset_name": "BoucleChunky001",
                    "asset_type": "Surface",
                    "files": [
                        { "file_path": "BoucleChunky001_AO_1K_METALNESS.png", "item_type": "MAP_AO", "target_asset_name_override": "BoucleChunky001" },
                        { "file_path": "BoucleChunky001_COL_1K_METALNESS.png", "item_type": "MAP_COL", "target_asset_name_override": "BoucleChunky001" },
                        { "file_path": "BoucleChunky001_DISP16_1K_METALNESS.png", "item_type": "MAP_DISP", "target_asset_name_override": "BoucleChunky001" },
                        { "file_path": "BoucleChunky001_DISP_1K_METALNESS.png", "item_type": "EXTRA", "target_asset_name_override": "BoucleChunky001" },
                        { "file_path": "BoucleChunky001_Fabric.png", "item_type": "EXTRA", "target_asset_name_override": "BoucleChunky001" },
                        { "file_path": "BoucleChunky001_METALNESS_1K_METALNESS.png", "item_type": "MAP_METAL", "target_asset_name_override": "BoucleChunky001" },
                        { "file_path": "BoucleChunky001_NRM_1K_METALNESS.png", "item_type": "MAP_NRM", "target_asset_name_override": "BoucleChunky001" },
                        { "file_path": "BoucleChunky001_ROUGHNESS_1K_METALNESS.png", "item_type": "MAP_ROUGH", "target_asset_name_override": "BoucleChunky001" }
                    ]
                }
            ]
        }
    ]
}
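This expected-rules file mirrors the SourceRule → AssetRule → FileRule hierarchy that `autotest.py` converts the live prediction results into before comparing. The dataclass shapes below are only an illustration of that structure, limited to the keys the comparison actually uses; they are not the real definitions in `rule_structure.py`.

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class FileRuleShape:
    file_path: str
    item_type: str
    target_asset_name_override: Optional[str] = None

@dataclass
class AssetRuleShape:
    asset_name: str
    asset_type: str
    files: List[FileRuleShape] = field(default_factory=list)

@dataclass
class SourceRuleShape:
    input_path: str
    supplier_identifier: Optional[str] = None
    preset_name: Optional[str] = None
    assets: List[AssetRuleShape] = field(default_factory=list)
```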
855
autotest.py
Normal file
@@ -0,0 +1,855 @@
|
|||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
import logging.handlers
|
||||||
|
import time
|
||||||
|
import json
|
||||||
|
import shutil # Import shutil for directory operations
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Dict, Any
|
||||||
|
|
||||||
|
from PySide6.QtCore import QCoreApplication, QTimer, Slot, QEventLoop, QObject, Signal
|
||||||
|
from PySide6.QtWidgets import QApplication, QListWidgetItem
|
||||||
|
|
||||||
|
# Add project root to sys.path
|
||||||
|
project_root = Path(__file__).resolve().parent
|
||||||
|
if str(project_root) not in sys.path:
|
||||||
|
sys.path.insert(0, str(project_root))
|
||||||
|
|
||||||
|
try:
|
||||||
|
from main import App
|
||||||
|
from gui.main_window import MainWindow
|
||||||
|
from rule_structure import SourceRule # Assuming SourceRule is in rule_structure.py
|
||||||
|
except ImportError as e:
|
||||||
|
print(f"Error importing project modules: {e}")
|
||||||
|
print(f"Ensure that the script is run from the project root or that the project root is in PYTHONPATH.")
|
||||||
|
print(f"Current sys.path: {sys.path}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Global variable for the memory log handler
|
||||||
|
autotest_memory_handler = None
|
||||||
|
|
||||||
|
# Custom Log Filter for Concise Output
|
||||||
|
class InfoSummaryFilter(logging.Filter):
|
||||||
|
# Keywords that identify INFO messages to *allow* for concise output
|
||||||
|
SUMMARY_KEYWORDS_PRECISE = [
|
||||||
|
"Test run completed",
|
||||||
|
"Test succeeded",
|
||||||
|
"Test failed",
|
||||||
|
"Rule comparison successful",
|
||||||
|
"Rule comparison failed",
|
||||||
|
"ProcessingEngine finished. Summary:",
|
||||||
|
"Autotest Context:",
|
||||||
|
"Parsed CLI arguments:",
|
||||||
|
"Prediction completed successfully.",
|
||||||
|
"Processing completed.",
|
||||||
|
"Signal 'all_tasks_finished' received",
|
||||||
|
"final status:", # To catch "Asset '...' final status:"
|
||||||
|
"User settings file not found:",
|
||||||
|
"MainPanelWidget: Default output directory set to:",
|
||||||
|
# Search related (as per original filter)
|
||||||
|
"Searching logs for term",
|
||||||
|
"Search term ",
|
||||||
|
"Found ",
|
||||||
|
"No tracebacks found in the logs.",
|
||||||
|
"--- End Log Analysis ---",
|
||||||
|
"Log analysis completed.",
|
||||||
|
]
|
||||||
|
# Patterns for case-insensitive rejection
|
||||||
|
REJECT_PATTERNS_LOWER = [
|
||||||
|
# Original debug prefixes (ensure these are still relevant or merge if needed)
|
||||||
|
"debug:", "orchestrator_trace:", "configuration_debug:", "app_debug:", "output_org_debug:",
|
||||||
|
# Iterative / Per-item / Per-file details / Intermediate steps
|
||||||
|
": item ", # Catches "Asset '...', Item X/Y"
|
||||||
|
"item successfully processed and saved",
|
||||||
|
", file '", # Catches "Asset '...', File '...'"
|
||||||
|
": processing regular map",
|
||||||
|
": found source file:",
|
||||||
|
": determined source bit depth:",
|
||||||
|
"successfully processed regular map",
|
||||||
|
"successfully created mergetaskdefinition",
|
||||||
|
": preparing processing items",
|
||||||
|
": finished preparing items. found",
|
||||||
|
": starting core item processing loop",
|
||||||
|
", task '",
|
||||||
|
": processing merge task",
|
||||||
|
"loaded from context:",
|
||||||
|
"using dimensions from first loaded input",
|
||||||
|
"successfully merged inputs into image",
|
||||||
|
"successfully processed merge task",
|
||||||
|
"mergedtaskprocessorstage result",
|
||||||
|
"calling savevariantsstage",
|
||||||
|
"savevariantsstage result",
|
||||||
|
"adding final details to context",
|
||||||
|
": finished core item processing loop",
|
||||||
|
": copied variant",
|
||||||
|
": copied extra file",
|
||||||
|
": successfully organized",
|
||||||
|
": output organization complete.",
|
||||||
|
": metadata saved to",
|
||||||
|
"worker thread: starting processing for rule:",
|
||||||
|
"preparing workspace for input:",
|
||||||
|
"input is a supported archive",
|
||||||
|
"calling processingengine.process with rule",
|
||||||
|
"calculated sha5 for",
|
||||||
|
"calculated next incrementing value for",
|
||||||
|
"verify: processingengine.process called",
|
||||||
|
": effective supplier set to",
|
||||||
|
": metadata initialized.",
|
||||||
|
"path",
|
||||||
|
"\\asset_processor",
|
||||||
|
": file rules queued for processing",
|
||||||
|
"successfully loaded base application settings",
|
||||||
|
"successfully loaded and merged asset_type_definitions",
|
||||||
|
"successfully loaded and merged file_type_definitions",
|
||||||
|
"starting rule-based prediction for:",
|
||||||
|
"rule-based prediction finished successfully for",
|
||||||
|
"finished rule-based prediction run for",
|
||||||
|
"updating model with rule-based results for source:",
|
||||||
|
"debug task ",
|
||||||
|
"worker thread: finished processing for rule:",
|
||||||
|
"task finished signal received for",
|
||||||
|
# Autotest step markers (not global summaries)
|
||||||
|
]
|
||||||
|
|
||||||
|
def filter(self, record):
|
||||||
|
# Allow CRITICAL, ERROR, WARNING unconditionally
|
||||||
|
if record.levelno >= logging.WARNING:
|
||||||
|
return True
|
||||||
|
|
||||||
|
if record.levelno == logging.INFO:
|
||||||
|
msg = record.getMessage()
|
||||||
|
msg_lower = msg.lower() # For case-insensitive pattern rejection
|
||||||
|
|
||||||
|
# 1. Explicitly REJECT if message contains verbose patterns (case-insensitive)
|
||||||
|
for pattern in self.REJECT_PATTERNS_LOWER: # Use the new list
|
||||||
|
if pattern in msg_lower:
|
||||||
|
return False # Reject
|
||||||
|
|
||||||
|
# 2. Then, if not rejected, ALLOW only if message contains precise summary keywords
|
||||||
|
for keyword in self.SUMMARY_KEYWORDS_PRECISE: # Use the new list
|
||||||
|
if keyword in msg: # Original message for case-sensitive summary keywords if needed
|
||||||
|
return True # Allow
|
||||||
|
|
||||||
|
# 3. Reject all other INFO messages that don't match precise summary keywords
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Reject levels below INFO (e.g., DEBUG) by default for this handler
|
||||||
|
return False
|
||||||
|
|
||||||
|
# --- Root Logger Configuration for Concise Console Output ---
|
||||||
|
def setup_autotest_logging():
|
||||||
|
"""
|
||||||
|
Configures the root logger for concise console output for autotest.py.
|
||||||
|
This ensures that only essential summary information, warnings, and errors
|
||||||
|
are displayed on the console by default.
|
||||||
|
"""
|
||||||
|
root_logger = logging.getLogger()
|
||||||
|
|
||||||
|
# 1. Remove all existing handlers from the root logger.
|
||||||
|
# This prevents interference from other logging configurations.
|
||||||
|
for handler in root_logger.handlers[:]:
|
||||||
|
root_logger.removeHandler(handler)
|
||||||
|
handler.close() # Close handler before removing
|
||||||
|
|
||||||
|
# 2. Set the root logger's level to DEBUG to capture everything for the memory handler.
|
||||||
|
# The console handler will still filter down to INFO/selected.
|
||||||
|
root_logger.setLevel(logging.DEBUG) # Changed from INFO to DEBUG
|
||||||
|
|
||||||
|
# 3. Create a new StreamHandler for sys.stdout (for concise console output).
|
||||||
|
console_handler = logging.StreamHandler(sys.stdout)
|
||||||
|
|
||||||
|
# 4. Set this console handler's level to INFO.
|
||||||
|
# The filter will then decide which INFO messages to display on console.
|
||||||
|
console_handler.setLevel(logging.INFO)
|
||||||
|
|
||||||
|
# 5. Apply the enhanced InfoSummaryFilter to the console handler.
|
||||||
|
info_filter = InfoSummaryFilter()
|
||||||
|
console_handler.addFilter(info_filter)
|
||||||
|
|
||||||
|
# 6. Set a concise formatter for the console handler.
|
||||||
|
formatter = logging.Formatter('[%(levelname)s] %(message)s')
|
||||||
|
console_handler.setFormatter(formatter)
|
||||||
|
|
||||||
|
# 7. Add this newly configured console handler to the root_logger.
|
||||||
|
root_logger.addHandler(console_handler)
|
||||||
|
|
||||||
|
# 8. Setup the MemoryHandler
|
||||||
|
global autotest_memory_handler # Declare usage of global
|
||||||
|
autotest_memory_handler = logging.handlers.MemoryHandler(
|
||||||
|
capacity=20000, # Increased capacity
|
||||||
|
flushLevel=logging.CRITICAL + 1, # Prevent automatic flushing
|
||||||
|
target=None # Does not flush to another handler
|
||||||
|
)
|
||||||
|
autotest_memory_handler.setLevel(logging.DEBUG) # Capture all logs from DEBUG up
|
||||||
|
# Not adding a formatter here, will format in _process_and_display_logs
|
||||||
|
|
||||||
|
# 9. Add the memory handler to the root logger.
|
||||||
|
root_logger.addHandler(autotest_memory_handler)
|
||||||
|
|
||||||
|
# Call the setup function early in the script's execution.
|
||||||
|
setup_autotest_logging()
|
||||||
|
|
||||||
|
# Logger for autotest.py's own messages.
|
||||||
|
# Messages from this logger will propagate to the root logger and be filtered
|
||||||
|
# by the console_handler configured above.
|
||||||
|
# Setting its level to DEBUG allows autotest.py to generate DEBUG messages,
|
||||||
|
# which won't appear on the concise console (due to handler's INFO level)
|
||||||
|
# but can be captured by other handlers (e.g., the GUI's log console).
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
logger.setLevel(logging.DEBUG) # Ensure autotest.py can generate DEBUGs for other handlers
|
||||||
|
|
||||||
|
# Note: The GUI's log console (e.g., self.main_window.log_console.log_console_output)
|
||||||
|
# is assumed to capture all logs (including DEBUG) from various modules.
|
||||||
|
# The _process_and_display_logs function then uses these comprehensive logs for the --search feature.
|
||||||
|
# This root logger setup primarily makes autotest.py's direct console output concise,
|
||||||
|
# ensuring that only filtered, high-level information appears on stdout by default.
|
||||||
|
# --- End of Root Logger Configuration ---
|
||||||
|
|
||||||
|
# --- Argument Parsing ---
|
||||||
|
def parse_arguments():
|
||||||
|
"""Parses command-line arguments for the autotest script."""
|
||||||
|
parser = argparse.ArgumentParser(description="Automated test script for Asset Processor GUI.")
|
||||||
|
parser.add_argument(
|
||||||
|
"--zipfile",
|
||||||
|
type=Path,
|
||||||
|
default=project_root / "TestFiles" / "BoucleChunky001.zip",
|
||||||
|
help="Path to the test asset ZIP file. Default: TestFiles/BoucleChunky001.zip"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--preset",
|
||||||
|
type=str,
|
||||||
|
default="Dinesen", # This should match a preset name in the application
|
||||||
|
help="Name of the preset to use. Default: Dinesen"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--expectedrules",
|
||||||
|
type=Path,
|
||||||
|
default=project_root / "TestFiles" / "Test-BoucleChunky001.json",
|
||||||
|
help="Path to the JSON file with expected rules. Default: TestFiles/Test-BoucleChunky001.json"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--outputdir",
|
||||||
|
type=Path,
|
||||||
|
default=project_root / "TestFiles" / "TestOutputs" / "BoucleChunkyOutput",
|
||||||
|
help="Path for processing output. Default: TestFiles/TestOutputs/BoucleChunkyOutput"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--search",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Optional log search term. Default: None"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--additional-lines",
|
||||||
|
type=int,
|
||||||
|
default=0,
|
||||||
|
help="Context lines for log search. Default: 0"
|
||||||
|
)
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
class AutoTester(QObject):
|
||||||
|
"""
|
||||||
|
Handles the automated testing process for the Asset Processor GUI.
|
||||||
|
"""
|
||||||
|
# Define signals if needed, e.g., for specific test events
|
||||||
|
# test_step_completed = Signal(str)
|
||||||
|
|
||||||
|
def __init__(self, app_instance: App, cli_args: argparse.Namespace):
|
||||||
|
super().__init__()
|
||||||
|
self.app_instance: App = app_instance
|
||||||
|
self.main_window: MainWindow = app_instance.main_window
|
||||||
|
self.cli_args: argparse.Namespace = cli_args
|
||||||
|
self.event_loop = QEventLoop(self)
|
||||||
|
self.prediction_poll_timer = QTimer(self)
|
||||||
|
self.expected_rules_data: Dict[str, Any] = {}
|
||||||
|
self.test_step: str = "INIT" # Possible values: INIT, LOADING_ZIP, SELECTING_PRESET, AWAITING_PREDICTION, PREDICTION_COMPLETE, COMPARING_RULES, STARTING_PROCESSING, AWAITING_PROCESSING, PROCESSING_COMPLETE, CHECKING_OUTPUT, ANALYZING_LOGS, DONE
|
||||||
|
|
||||||
|
if not self.main_window:
|
||||||
|
logger.error("MainWindow instance not found in App. Cannot proceed.")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Connect signals
|
||||||
|
if hasattr(self.app_instance, 'all_tasks_finished') and isinstance(self.app_instance.all_tasks_finished, Signal):
|
||||||
|
self.app_instance.all_tasks_finished.connect(self._on_all_tasks_finished)
|
||||||
|
else:
|
||||||
|
logger.warning("App instance does not have 'all_tasks_finished' signal or it's not a Signal. Processing completion might not be detected.")
|
||||||
|
|
||||||
|
self._load_expected_rules()
|
||||||
|
|
||||||
|
def _load_expected_rules(self) -> None:
|
||||||
|
"""Loads the expected rules from the JSON file specified by cli_args."""
|
||||||
|
self.test_step = "LOADING_EXPECTED_RULES"
|
||||||
|
logger.debug(f"Loading expected rules from: {self.cli_args.expectedrules}")
|
||||||
|
try:
|
||||||
|
with open(self.cli_args.expectedrules, 'r') as f:
|
||||||
|
self.expected_rules_data = json.load(f)
|
||||||
|
logger.debug("Expected rules loaded successfully.")
|
||||||
|
except FileNotFoundError:
|
||||||
|
logger.error(f"Expected rules file not found: {self.cli_args.expectedrules}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
except json.JSONDecodeError as e:
|
||||||
|
logger.error(f"Error decoding expected rules JSON: {e}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"An unexpected error occurred while loading expected rules: {e}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
|
||||||
|
def run_test(self) -> None:
|
||||||
|
"""Orchestrates the test steps."""
|
||||||
|
logger.info("Starting test run...")
|
||||||
|
|
||||||
|
if not self.expected_rules_data: # Ensure rules were loaded
|
||||||
|
logger.error("Expected rules not loaded. Aborting test.")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Add a specific summary log for essential context
|
||||||
|
logger.info(f"Autotest Context: Input='{self.cli_args.zipfile.name}', Preset='{self.cli_args.preset}', Output='{self.cli_args.outputdir}'")
|
||||||
|
|
||||||
|
# Step 1: Load ZIP
|
||||||
|
self.test_step = "LOADING_ZIP"
|
||||||
|
logger.info(f"Step 1: Loading ZIP file: {self.cli_args.zipfile}") # KEEP INFO - Passes filter
|
||||||
|
if not self.cli_args.zipfile.exists():
|
||||||
|
logger.error(f"ZIP file not found: {self.cli_args.zipfile}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
# Assuming add_input_paths can take a list of strings or Path objects
|
||||||
|
self.main_window.add_input_paths([str(self.cli_args.zipfile)])
|
||||||
|
logger.debug("ZIP file loading initiated.")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error during ZIP file loading: {e}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Step 2: Select Preset
|
||||||
|
self.test_step = "SELECTING_PRESET"
|
||||||
|
logger.info(f"Step 2: Selecting preset: {self.cli_args.preset}") # KEEP INFO - Passes filter
|
||||||
|
preset_found = False
|
||||||
|
preset_list_widget = self.main_window.preset_editor_widget.editor_preset_list
|
||||||
|
for i in range(preset_list_widget.count()):
|
||||||
|
item = preset_list_widget.item(i)
|
||||||
|
if item and item.text() == self.cli_args.preset:
|
||||||
|
preset_list_widget.setCurrentItem(item)
|
||||||
|
logger.debug(f"Preset '{self.cli_args.preset}' selected.")
|
||||||
|
preset_found = True
|
||||||
|
break
|
||||||
|
if not preset_found:
|
||||||
|
logger.error(f"Preset '{self.cli_args.preset}' not found in the list.")
|
||||||
|
available_presets = [preset_list_widget.item(i).text() for i in range(preset_list_widget.count())]
|
||||||
|
logger.debug(f"Available presets: {available_presets}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Step 3: Await Prediction Completion
|
||||||
|
self.test_step = "AWAITING_PREDICTION"
|
||||||
|
logger.debug("Step 3: Awaiting prediction completion...")
|
||||||
|
self.prediction_poll_timer.timeout.connect(self._check_prediction_status)
|
||||||
|
self.prediction_poll_timer.start(500) # Poll every 500ms
|
||||||
|
|
||||||
|
# Use a QTimer to allow event loop to process while waiting for this step
|
||||||
|
# This ensures that the _check_prediction_status can be called.
|
||||||
|
# We will exit this event_loop from _check_prediction_status when prediction is done.
|
||||||
|
logger.debug("Starting event loop for prediction...")
|
||||||
|
self.event_loop.exec() # This loop is quit by _check_prediction_status
|
||||||
|
self.prediction_poll_timer.stop()
|
||||||
|
logger.debug("Event loop for prediction finished.")
|
||||||
|
|
||||||
|
|
||||||
|
if self.test_step != "PREDICTION_COMPLETE":
|
||||||
|
logger.error(f"Prediction did not complete as expected. Current step: {self.test_step}")
|
||||||
|
# Check if there were any pending predictions that never cleared
|
||||||
|
if hasattr(self.main_window, '_pending_predictions'):
|
||||||
|
logger.error(f"Pending predictions at timeout: {self.main_window._pending_predictions}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
logger.info("Prediction completed successfully.") # KEEP INFO - Passes filter
|
||||||
|
|
||||||
|
# Step 4: Retrieve & Compare Rulelist
|
||||||
|
self.test_step = "COMPARING_RULES"
|
||||||
|
logger.info("Step 4: Retrieving and Comparing Rules...") # KEEP INFO - Passes filter
|
||||||
|
actual_source_rules_list: List[SourceRule] = self.main_window.unified_model.get_all_source_rules()
|
||||||
|
actual_rules_obj = actual_source_rules_list # Keep the SourceRule list for processing
|
||||||
|
|
||||||
|
comparable_actual_rules = self._convert_rules_to_comparable(actual_source_rules_list)
|
||||||
|
|
||||||
|
if not self._compare_rules(comparable_actual_rules, self.expected_rules_data):
|
||||||
|
logger.error("Rule comparison failed. See logs for details.")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
logger.info("Rule comparison successful.") # KEEP INFO - Passes filter
|
||||||
|
|
||||||
|
# Step 5: Start Processing
|
||||||
|
self.test_step = "START_PROCESSING"
|
||||||
|
logger.info("Step 5: Starting Processing...") # KEEP INFO - Passes filter
|
||||||
|
processing_settings = {
|
||||||
|
"output_dir": str(self.cli_args.outputdir), # Ensure it's a string for JSON/config
|
||||||
|
"overwrite": True,
|
||||||
|
"workers": 1,
|
||||||
|
"blender_enabled": False # Basic test, no Blender
|
||||||
|
}
|
||||||
|
try:
|
||||||
|
Path(self.cli_args.outputdir).mkdir(parents=True, exist_ok=True)
|
||||||
|
logger.debug(f"Ensured output directory exists: {self.cli_args.outputdir}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Could not create output directory {self.cli_args.outputdir}: {e}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
|
||||||
|
if hasattr(self.main_window, 'start_backend_processing') and isinstance(self.main_window.start_backend_processing, Signal):
|
||||||
|
logger.debug(f"Emitting start_backend_processing with rules count: {len(actual_rules_obj)} and settings: {processing_settings}")
|
||||||
|
self.main_window.start_backend_processing.emit(actual_rules_obj, processing_settings)
|
||||||
|
else:
|
||||||
|
logger.error("'start_backend_processing' signal not found on MainWindow. Cannot start processing.")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Step 6: Await Processing Completion
|
||||||
|
self.test_step = "AWAIT_PROCESSING"
|
||||||
|
logger.debug("Step 6: Awaiting processing completion...")
|
||||||
|
self.event_loop.exec() # This loop is quit by _on_all_tasks_finished
|
||||||
|
|
||||||
|
if self.test_step != "PROCESSING_COMPLETE":
|
||||||
|
logger.error(f"Processing did not complete as expected. Current step: {self.test_step}")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
logger.info("Processing completed.") # KEEP INFO - Passes filter
|
||||||
|
|
||||||
|
# Step 7: Check Output Path
|
||||||
|
self.test_step = "CHECK_OUTPUT"
|
||||||
|
logger.info(f"Step 7: Checking output path: {self.cli_args.outputdir}") # KEEP INFO - Passes filter
|
||||||
|
output_path = Path(self.cli_args.outputdir)
|
||||||
|
if not output_path.exists() or not output_path.is_dir():
|
||||||
|
logger.error(f"Output directory {output_path} does not exist or is not a directory.")
|
||||||
|
self.cleanup_and_exit(success=False)
|
||||||
|
return
|
||||||
|
|
||||||
|
output_items = list(output_path.iterdir())
|
||||||
|
if not output_items:
|
||||||
|
logger.warning(f"Output directory {output_path} is empty. This might be a test failure depending on the case.")
|
||||||
|
# For a more specific check, one might iterate through actual_rules_obj
|
||||||
|
# and verify if subdirectories matching asset_name exist.
|
||||||
|
# e.g. for asset_rule in source_rule.assets:
|
||||||
|
# expected_asset_dir = output_path / asset_rule.asset_name
|
||||||
|
# if not expected_asset_dir.is_dir(): logger.error(...)
|
||||||
|
else:
|
||||||
|
logger.debug(f"Found {len(output_items)} item(s) in output directory:")
|
||||||
|
for item in output_items:
|
||||||
|
logger.debug(f" - {item.name} ({'dir' if item.is_dir() else 'file'})")
|
||||||
|
logger.info("Output path check completed.") # KEEP INFO - Passes filter
|
||||||
|
|
||||||
|
# Step 8: Retrieve & Analyze Logs
|
||||||
|
self.test_step = "CHECK_LOGS"
|
||||||
|
logger.debug("Step 8: Retrieving and Analyzing Logs...")
|
||||||
|
all_logs_text = ""
|
||||||
|
if self.main_window.log_console and self.main_window.log_console.log_console_output:
|
||||||
|
all_logs_text = self.main_window.log_console.log_console_output.toPlainText()
|
||||||
|
else:
|
||||||
|
logger.warning("Log console or output widget not found. Cannot retrieve logs.")
|
||||||
|
|
||||||
|
self._process_and_display_logs(all_logs_text)
|
||||||
|
logger.info("Log analysis completed.")
|
||||||
|
|
||||||
|
# Final Step
|
||||||
|
logger.info("Test run completed successfully.") # KEEP INFO - Passes filter
|
||||||
|
self.cleanup_and_exit(success=True)
|
||||||
|
|
||||||
|
@Slot()
|
||||||
|
def _check_prediction_status(self) -> None:
|
||||||
|
"""Polls the main window for pending predictions."""
|
||||||
|
# logger.debug(f"Checking prediction status. Pending: {self.main_window._pending_predictions if hasattr(self.main_window, '_pending_predictions') else 'N/A'}")
|
||||||
|
if hasattr(self.main_window, '_pending_predictions'):
|
||||||
|
if not self.main_window._pending_predictions: # Assuming _pending_predictions is a list/dict that's empty when done
|
||||||
|
logger.debug("No pending predictions. Prediction assumed complete.")
|
||||||
|
self.test_step = "PREDICTION_COMPLETE"
|
||||||
|
if self.event_loop.isRunning():
|
||||||
|
self.event_loop.quit()
|
||||||
|
# else:
|
||||||
|
# logger.debug(f"Still awaiting predictions: {len(self.main_window._pending_predictions)} remaining.")
|
||||||
|
else:
|
||||||
|
logger.warning("'_pending_predictions' attribute not found on MainWindow. Cannot check prediction status automatically.")
|
||||||
|
# As a fallback, if the attribute is missing, we might assume prediction is instant or needs manual check.
|
||||||
|
# For now, let's assume it means it's done if the attribute is missing, but this is risky.
|
||||||
|
# A better approach would be to have a clear signal from MainWindow when predictions are done.
|
||||||
|
self.test_step = "PREDICTION_COMPLETE" # Risky assumption
|
||||||
|
if self.event_loop.isRunning():
|
||||||
|
self.event_loop.quit()
|
||||||
|
|
||||||
|
|
||||||
|
@Slot(int, int, int)
|
||||||
|
def _on_all_tasks_finished(self, processed_count: int, skipped_count: int, failed_count: int) -> None:
|
||||||
|
"""Slot for App.all_tasks_finished signal."""
|
||||||
|
logger.info(f"Signal 'all_tasks_finished' received: Processed={processed_count}, Skipped={skipped_count}, Failed={failed_count}") # KEEP INFO - Passes filter
|
||||||
|
|
||||||
|
if self.test_step == "AWAIT_PROCESSING":
|
||||||
|
logger.debug("Processing completion signal received.") # Covered by the summary log above
|
||||||
|
if failed_count > 0:
|
||||||
|
logger.error(f"Processing finished with {failed_count} failed task(s).")
|
||||||
|
# Even if tasks failed, the test might pass based on output checks.
|
||||||
|
# The error is logged for information.
|
||||||
|
self.test_step = "PROCESSING_COMPLETE"
|
||||||
|
if self.event_loop.isRunning():
|
||||||
|
self.event_loop.quit()
|
||||||
|
else:
|
||||||
|
logger.warning(f"Signal 'all_tasks_finished' received at an unexpected test step: '{self.test_step}'. Counts: P={processed_count}, S={skipped_count}, F={failed_count}")
|
||||||
|
|
||||||
|
|
||||||
|
def _convert_rules_to_comparable(self, source_rules_list: List[SourceRule]) -> Dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Converts a list of SourceRule objects to a dictionary structure
|
||||||
|
suitable for comparison with the expected_rules.json.
|
||||||
|
"""
|
||||||
|
logger.debug(f"Converting {len(source_rules_list)} SourceRule objects to comparable dictionary...")
|
||||||
|
comparable_sources_list = []
|
||||||
|
for source_rule_obj in source_rules_list:
|
||||||
|
comparable_asset_list = []
|
||||||
|
# source_rule_obj.assets is List[AssetRule]
|
||||||
|
for asset_rule_obj in source_rule_obj.assets:
|
||||||
|
comparable_file_list = []
|
||||||
|
# asset_rule_obj.files is List[FileRule]
|
||||||
|
for file_rule_obj in asset_rule_obj.files:
|
||||||
|
comparable_file_list.append({
|
||||||
|
"file_path": file_rule_obj.file_path,
|
||||||
|
"item_type": file_rule_obj.item_type,
|
||||||
|
"target_asset_name_override": file_rule_obj.target_asset_name_override
|
||||||
|
})
|
||||||
|
comparable_asset_list.append({
|
||||||
|
"asset_name": asset_rule_obj.asset_name,
|
||||||
|
"asset_type": asset_rule_obj.asset_type,
|
||||||
|
"files": comparable_file_list
|
||||||
|
})
|
||||||
|
comparable_sources_list.append({
|
||||||
|
"input_path": Path(source_rule_obj.input_path).name, # Use only the filename
|
||||||
|
"supplier_identifier": source_rule_obj.supplier_identifier,
|
||||||
|
"preset_name": source_rule_obj.preset_name,
|
||||||
|
"assets": comparable_asset_list
|
||||||
|
})
|
||||||
|
logger.debug("Conversion to comparable dictionary finished.")
|
||||||
|
return {"source_rules": comparable_sources_list}
|
||||||
|
|
||||||
|
def _compare_rule_item(self, actual_item: Dict[str, Any], expected_item: Dict[str, Any], item_type_name: str, parent_context: str = "") -> bool:
|
||||||
|
"""
|
||||||
|
Recursively compares an individual actual rule item dictionary with an expected rule item dictionary.
|
||||||
|
Logs differences and returns True if they match, False otherwise.
|
||||||
|
"""
|
||||||
|
item_match = True
|
||||||
|
|
||||||
|
identifier = ""
|
||||||
|
if item_type_name == "SourceRule":
|
||||||
|
identifier = expected_item.get('input_path', f'UnknownSource_at_{parent_context}')
|
||||||
|
elif item_type_name == "AssetRule":
|
||||||
|
identifier = expected_item.get('asset_name', f'UnknownAsset_at_{parent_context}')
|
||||||
|
elif item_type_name == "FileRule":
|
||||||
|
identifier = expected_item.get('file_path', f'UnknownFile_at_{parent_context}')
|
||||||
|
|
||||||
|
current_context = f"{parent_context}/{identifier}" if parent_context else identifier
|
||||||
|
|
||||||
|
# Log Extra Fields: Iterate through keys in actual_item.
|
||||||
|
# If a key is in actual_item but not in expected_item (and is not a list container like "assets" or "files"),
|
||||||
|
# log this as an informational message.
|
||||||
|
for key in actual_item.keys():
|
||||||
|
if key not in expected_item and key not in ["assets", "files"]:
|
||||||
|
logger.debug(f"Field '{key}' present in actual {item_type_name} ({current_context}) but not specified in expected. Value: '{actual_item[key]}'")
|
||||||
|
|
||||||
|
# Check Expected Fields: Iterate through keys in expected_item.
|
||||||
|
for key, expected_value in expected_item.items():
|
||||||
|
if key not in actual_item:
|
||||||
|
logger.error(f"Missing expected field '{key}' in actual {item_type_name} ({current_context}).")
|
||||||
|
item_match = False
|
||||||
|
continue # Continue to check other fields in the expected_item
|
||||||
|
|
||||||
|
actual_value = actual_item[key]
|
||||||
|
|
||||||
|
if key == "assets": # List of AssetRule dictionaries
|
||||||
|
if not self._compare_list_of_rules(actual_value, expected_value, "AssetRule", current_context, "asset_name"):
|
||||||
|
item_match = False
|
||||||
|
elif key == "files": # List of FileRule dictionaries
|
||||||
|
if not self._compare_list_of_rules(actual_value, expected_value, "FileRule", current_context, "file_path"):
|
||||||
|
item_match = False
|
||||||
|
else: # Regular field comparison
|
||||||
|
if actual_value != expected_value:
|
||||||
|
# Handle None vs "None" string for preset_name specifically if it's a common issue
|
||||||
|
if key == "preset_name" and actual_value is None and expected_value == "None":
|
||||||
|
logger.debug(f"Field '{key}' in {item_type_name} ({current_context}): Actual is None, Expected is string \"None\". Treating as match for now.")
|
||||||
|
elif key == "target_asset_name_override" and actual_value is not None and expected_value is None:
|
||||||
|
# If actual has a value (e.g. parent asset name) and expected is null/None,
|
||||||
|
# this is a mismatch according to strict comparison.
|
||||||
|
# For a more lenient check, this logic could be adjusted here.
|
||||||
|
# Current strict comparison will flag this as error, which is what the logs show.
|
||||||
|
logger.error(f"Value mismatch for field '{key}' in {item_type_name} ({current_context}): Actual='{actual_value}', Expected='{expected_value}'.")
|
||||||
|
item_match = False
|
||||||
|
else:
|
||||||
|
logger.error(f"Value mismatch for field '{key}' in {item_type_name} ({current_context}): Actual='{actual_value}', Expected='{expected_value}'.")
|
||||||
|
item_match = False
|
||||||
|
|
||||||
|
return item_match
|
||||||
|
|
||||||
|
def _compare_list_of_rules(self, actual_list: List[Dict[str, Any]], expected_list: List[Dict[str, Any]], item_type_name: str, parent_context: str, item_key_field: str) -> bool:
|
||||||
|
"""
|
||||||
|
Compares a list of actual rule items against a list of expected rule items.
|
||||||
|
Items are matched by a key field (e.g., 'asset_name' or 'file_path').
|
||||||
|
Order independent for matching, but logs count mismatches.
|
||||||
|
"""
|
||||||
|
list_match = True
|
||||||
|
if not isinstance(actual_list, list) or not isinstance(expected_list, list):
|
||||||
|
logger.error(f"Type mismatch for list of {item_type_name}s in {parent_context}. Expected lists.")
|
||||||
|
return False
|
||||||
|
|
||||||
|
if len(actual_list) != len(expected_list):
|
||||||
|
logger.error(f"Mismatch in number of {item_type_name}s for {parent_context}. Actual: {len(actual_list)}, Expected: {len(expected_list)}.")
|
||||||
|
list_match = False # Count mismatch is an error
|
||||||
|
# If counts differ, we still try to match what we can to provide more detailed feedback,
|
||||||
|
# but the overall list_match will remain False.
|
||||||
|
|
||||||
|
        actual_items_map = {item.get(item_key_field): item for item in actual_list if item.get(item_key_field) is not None}

        # Keep track of expected items that found a match to identify missing ones more easily
        matched_expected_keys = set()

        for expected_item in expected_list:
            expected_key_value = expected_item.get(item_key_field)
            if expected_key_value is None:
                logger.error(f"Expected {item_type_name} in {parent_context} is missing key field '{item_key_field}'. Cannot compare this item: {expected_item}")
                list_match = False  # This specific expected item cannot be processed
                continue

            actual_item = actual_items_map.get(expected_key_value)
            if actual_item:
                matched_expected_keys.add(expected_key_value)
                if not self._compare_rule_item(actual_item, expected_item, item_type_name, parent_context):
                    list_match = False  # Individual item comparison failed
            else:
                logger.error(f"Expected {item_type_name} with {item_key_field} '{expected_key_value}' not found in actual items for {parent_context}.")
                list_match = False

        # Identify actual items that were not matched by any expected item.
        # This is useful if len(actual_list) >= len(expected_list) but some actual items are "extra".
        for actual_key_value, actual_item_data in actual_items_map.items():
            if actual_key_value not in matched_expected_keys:
                logger.debug(f"Extra actual {item_type_name} with {item_key_field} '{actual_key_value}' found in {parent_context} (not in expected list or already matched).")
                if len(actual_list) != len(expected_list):  # If counts already flagged a mismatch, this is just detail
                    pass
                else:  # Counts matched, but content didn't align perfectly by key
                    list_match = False

        return list_match

    def _compare_rules(self, actual_rules_data: Dict[str, Any], expected_rules_data: Dict[str, Any]) -> bool:
        """
        Compares the actual rule data (converted from live SourceRule objects)
        with the expected rule data (loaded from JSON).
        """
        logger.debug("Comparing actual rules with expected rules...")

        actual_source_rules = actual_rules_data.get("source_rules", []) if actual_rules_data else []
        expected_source_rules = expected_rules_data.get("source_rules", []) if expected_rules_data else []

        if not isinstance(actual_source_rules, list):
            logger.error(f"Actual 'source_rules' is not a list. Found type: {type(actual_source_rules)}. Comparison aborted.")
            return False  # Cannot compare if actual data is malformed
        if not isinstance(expected_source_rules, list):
            logger.error(f"Expected 'source_rules' is not a list. Found type: {type(expected_source_rules)}. Test configuration error. Comparison aborted.")
            return False  # Test setup error

        if not expected_source_rules and not actual_source_rules:
            logger.debug("Both expected and actual source rules lists are empty. Considered a match.")
            return True

        if len(actual_source_rules) != len(expected_source_rules):
            logger.error(f"Mismatch in the number of source rules. Actual: {len(actual_source_rules)}, Expected: {len(expected_source_rules)}.")
            # Optionally, log more details about which list is longer/shorter or identifiers if available
            return False

        overall_match_status = True
        for i in range(len(expected_source_rules)):
            actual_sr = actual_source_rules[i]
            expected_sr = expected_source_rules[i]

            # For context, use input_path or an index
            source_rule_context = expected_sr.get('input_path', f"SourceRule_index_{i}")

            if not self._compare_rule_item(actual_sr, expected_sr, "SourceRule", parent_context=source_rule_context):
                overall_match_status = False
                # Continue checking other source rules to log all discrepancies

        if overall_match_status:
            logger.debug("All rules match the expected criteria.")  # Covered by "Rule comparison successful" summary
        else:
            logger.warning("One or more rules did not match the expected criteria. See logs above for details.")

        return overall_match_status

    def _process_and_display_logs(self, logs_text: str) -> None:  # logs_text is no longer the primary source for search
        """
        Processes and displays logs, potentially filtering them if --search is used.
        Also checks for tracebacks.
        Sources logs from the in-memory handler for search and detailed analysis.
        """
        logger.debug("--- Log Analysis ---")
        global autotest_memory_handler  # Access the global handler
        log_records = []
        if autotest_memory_handler and autotest_memory_handler.buffer:
            log_records = autotest_memory_handler.buffer

        formatted_log_lines = []
        # Define a consistent formatter, similar to what might be expected or useful for search.
        # The default asctime format includes milliseconds.
        record_formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s')

        for record in log_records:
            formatted_log_lines.append(record_formatter.format(record))

        lines_for_search_and_traceback = formatted_log_lines

        if not lines_for_search_and_traceback:
            logger.warning("No log records found in memory handler. No analysis to perform.")
            # Still check the console logs_text for tracebacks if it exists, as a fallback,
            # or if some critical errors didn't make it to the memory handler (unlikely with DEBUG level)
            if logs_text:
                logger.debug("Checking provided logs_text (from console) for tracebacks as a fallback.")
                console_lines = logs_text.splitlines()
                traceback_found_console = False
                for i, line in enumerate(console_lines):
                    if line.strip().startswith("Traceback (most recent call last):"):
                        logger.error(f"!!! TRACEBACK DETECTED in console logs_text around line {i+1} !!!")
                        traceback_found_console = True
                if traceback_found_console:
                    logger.warning("A traceback was found in the console logs_text.")
                else:
                    logger.info("No tracebacks found in the console logs_text either.")
            logger.info("--- End Log Analysis ---")
            return

        traceback_found = False

        if self.cli_args.search:
            logger.info(f"Searching {len(lines_for_search_and_traceback)} in-memory log lines for term '{self.cli_args.search}' with {self.cli_args.additional_lines} context lines.")
            matched_line_indices = [i for i, line in enumerate(lines_for_search_and_traceback) if self.cli_args.search in line]

            if not matched_line_indices:
                logger.info(f"Search term '{self.cli_args.search}' not found in in-memory logs.")
            else:
                logger.info(f"Found {len(matched_line_indices)} match(es) for '{self.cli_args.search}' in in-memory logs:")
                collected_lines_to_print = set()
                for match_idx in matched_line_indices:
                    start_idx = max(0, match_idx - self.cli_args.additional_lines)
                    end_idx = min(len(lines_for_search_and_traceback), match_idx + self.cli_args.additional_lines + 1)
                    for i in range(start_idx, end_idx):
                        # Use i directly as index for lines_for_search_and_traceback; the line number is for display
                        collected_lines_to_print.add(f"L{i+1:05d}: {lines_for_search_and_traceback[i]}")

                print("--- Filtered Log Output (from Memory Handler) ---")
                for line_to_print in sorted(list(collected_lines_to_print)):
                    print(line_to_print)
                print("--- End Filtered Log Output ---")
        # Removed: else block that showed last N lines by default (as per original instruction for this section)

        # Traceback check (on lines_for_search_and_traceback)
        for i, line in enumerate(lines_for_search_and_traceback):
            if line.strip().startswith("Traceback (most recent call last):") or "Traceback (most recent call last):" in line:  # More robust check
                logger.error(f"!!! TRACEBACK DETECTED in in-memory logs around line index {i} !!!")
                logger.error(f"Line content: {line}")
                traceback_found = True

        if traceback_found:
            logger.warning("A traceback was found in the in-memory logs. This usually indicates a significant issue.")
        else:
            logger.info("No tracebacks found in the in-memory logs.")  # This refers to the comprehensive memory logs

        logger.info("--- End Log Analysis ---")

    def cleanup_and_exit(self, success: bool = True) -> None:
        """Cleans up and exits the application."""
        global autotest_memory_handler
        if autotest_memory_handler:
            logger.debug("Clearing memory log handler buffer and removing handler.")
            autotest_memory_handler.buffer = []  # Clear buffer
            logging.getLogger().removeHandler(autotest_memory_handler)  # Remove handler
            autotest_memory_handler.close()  # MemoryHandler close is a no-op but good practice
            autotest_memory_handler = None

        logger.info(f"Test {'succeeded' if success else 'failed'}. Cleaning up and exiting...")  # KEEP INFO - Passes filter
        q_app = QCoreApplication.instance()
        if q_app:
            q_app.quit()
        sys.exit(0 if success else 1)


# --- Main Execution ---
def main():
    """Main function to run the autotest script."""
    cli_args = parse_arguments()
    # Logger is configured above; this will now use the new filtered setup
    logger.info(f"Parsed CLI arguments: {cli_args}")  # KEEP INFO - Passes filter

    # Clean and ensure output directory exists
    output_dir_path = Path(cli_args.outputdir)
    logger.debug(f"Preparing output directory: {output_dir_path}")
    try:
        if output_dir_path.exists():
            logger.debug(f"Output directory {output_dir_path} exists. Cleaning its contents...")
            for item in output_dir_path.iterdir():
                if item.is_dir():
                    shutil.rmtree(item)
                    logger.debug(f"Removed directory: {item}")
                else:
                    item.unlink()
                    logger.debug(f"Removed file: {item}")
            logger.debug(f"Contents of {output_dir_path} cleaned.")
        else:
            logger.debug(f"Output directory {output_dir_path} does not exist. Creating it.")

        output_dir_path.mkdir(parents=True, exist_ok=True)  # Ensure it exists after cleaning / if it didn't exist
        logger.debug(f"Output directory {output_dir_path} is ready.")

    except Exception as e:
        logger.error(f"Could not prepare output directory {output_dir_path}: {e}", exc_info=True)
        sys.exit(1)

    # Initialize QApplication.
    # Use QCoreApplication if no GUI elements are directly interacted with by the test logic itself,
    # but QApplication is needed if MainWindow or its widgets are constructed and used.
    # Since MainWindow is instantiated by App, QApplication is appropriate.
    q_app = QApplication.instance()
    if not q_app:
        q_app = QApplication(sys.argv)
    if not q_app:  # Still no app
        logger.error("Failed to initialize QApplication.")
        sys.exit(1)

    logger.debug("Initializing main.App()...")
    try:
        # Instantiate main.App() - this should create MainWindow but not show it by default,
        # if App is designed to not show the GUI unless app.main_window.show() is called.
        app_instance = App()
    except Exception as e:
        logger.error(f"Failed to initialize main.App: {e}", exc_info=True)
        sys.exit(1)

    if not app_instance.main_window:
        logger.error("main.App initialized, but main_window is None. Cannot proceed with test.")
        sys.exit(1)

    logger.debug("Initializing AutoTester...")
    try:
        tester = AutoTester(app_instance, cli_args)
    except Exception as e:
        logger.error(f"Failed to initialize AutoTester: {e}", exc_info=True)
        sys.exit(1)

    # Use QTimer.singleShot to start the test after the Qt event loop has started.
    # This ensures that the Qt environment is fully set up.
    logger.debug("Scheduling test run...")
    QTimer.singleShot(0, tester.run_test)

    logger.debug("Starting Qt application event loop...")
    exit_code = q_app.exec()
    logger.debug(f"Qt application event loop finished with exit code: {exit_code}")
    sys.exit(exit_code)


if __name__ == "__main__":
    main()
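
A rough standalone illustration of the key-based list comparison used above, with hypothetical data; the real method additionally recurses into nested rule items via _compare_rule_item, which is not reproduced here:

# Minimal sketch (assumed data shapes, not the project's actual rule model)
def compare_by_key(actual_list, expected_list, key="file_path"):
    actual_map = {item.get(key): item for item in actual_list if item.get(key) is not None}
    ok = len(actual_list) == len(expected_list)
    for expected in expected_list:
        actual = actual_map.get(expected.get(key))
        if actual is None or actual != expected:
            ok = False  # missing item, or same key with a different payload
    return ok

expected = [{"file_path": "wood_col.png", "map_type": "MAP_COL"}]
actual = [{"file_path": "wood_col.png", "map_type": "MAP_NRM"}]
assert compare_by_key(actual, expected) is False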
@@ -1,252 +1,11 @@
 {
-  "ASSET_TYPE_DEFINITIONS": {
-    "Surface": {
-      "description": "A single Standard PBR material set for a surface.",
-      "color": "#1f3e5d",
-      "examples": [
-        "Set: Wood01_COL + Wood01_NRM + WOOD01_ROUGH",
-        "Set: Dif_Concrete + Normal_Concrete + Refl_Concrete"
-      ]
-    },
-    "Model": {
-      "description": "A set that contains models, can include PBR textureset",
-      "color": "#b67300",
-      "examples": [
-        "Single = Chair.fbx",
-        "Set = Plant02.fbx + Plant02_col + Plant02_SSS"
-      ]
-    },
-    "Decal": {
-      "description": "A alphamasked textureset",
-      "color": "#68ac68",
-      "examples": [
-        "Set = DecalGraffiti01_Col + DecalGraffiti01_Alpha",
-        "Single = DecalLeakStain03"
-      ]
-    },
-    "Atlas": {
-      "description": "A texture, name usually hints that it's an atlas",
-      "color": "#955b8b",
-      "examples": [
-        "Set = FoliageAtlas01_col + FoliageAtlas01_nrm"
-      ]
-    },
-    "UtilityMap": {
-      "description": "A useful image-asset consisting of only a single texture. Therefor each Utilitymap can only contain a single item.",
-      "color": "#706b87",
-      "examples": [
-        "Single = imperfection.png",
-        "Single = smudges.png",
-        "Single = scratches.tif"
-      ]
-    }
-  },
-  "FILE_TYPE_DEFINITIONS": {
-    "MAP_COL": {
-      "description": "Color/Albedo Map",
-      "color": "#ffaa00",
-      "examples": [
-        "_col.",
-        "_basecolor.",
-        "albedo",
-        "diffuse"
-      ],
-      "standard_type": "COL",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": false,
-      "keybind": "C"
-    },
-    "MAP_NRM": {
-      "description": "Normal Map",
-      "color": "#cca2f1",
-      "examples": [
-        "_nrm.",
-        "_normal."
-      ],
-      "standard_type": "NRM",
-      "bit_depth_rule": "respect",
-      "is_grayscale": false,
-      "keybind": "N"
-    },
-    "MAP_METAL": {
-      "description": "Metalness Map",
-      "color": "#dcf4f2",
-      "examples": [
-        "_metal.",
-        "_met."
-      ],
-      "standard_type": "METAL",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": true,
-      "keybind": "M"
-    },
-    "MAP_ROUGH": {
-      "description": "Roughness Map",
-      "color": "#bfd6bf",
-      "examples": [
-        "_rough.",
-        "_rgh.",
-        "_gloss"
-      ],
-      "standard_type": "ROUGH",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": true,
-      "keybind": "R"
-    },
-    "MAP_GLOSS": {
-      "description": "Glossiness Map",
-      "color": "#d6bfd6",
-      "examples": [
-        "_gloss.",
-        "_gls."
-      ],
-      "standard_type": "GLOSS",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": true,
-      "keybind": "R"
-    },
-    "MAP_AO": {
-      "description": "Ambient Occlusion Map",
-      "color": "#e3c7c7",
-      "examples": [
-        "_ao.",
-        "_ambientocclusion."
-      ],
-      "standard_type": "AO",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": true
-    },
-    "MAP_DISP": {
-      "description": "Displacement/Height Map",
-      "color": "#c6ddd5",
-      "examples": [
-        "_disp.",
-        "_height."
-      ],
-      "standard_type": "DISP",
-      "bit_depth_rule": "respect",
-      "is_grayscale": true,
-      "keybind": "D"
-    },
-    "MAP_REFL": {
-      "description": "Reflection/Specular Map",
-      "color": "#c2c2b9",
-      "examples": [
-        "_refl.",
-        "_specular."
-      ],
-      "standard_type": "REFL",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": true,
-      "keybind": "M"
-    },
-    "MAP_SSS": {
-      "description": "Subsurface Scattering Map",
-      "color": "#a0d394",
-      "examples": [
-        "_sss.",
-        "_subsurface."
-      ],
-      "standard_type": "SSS",
-      "bit_depth_rule": "respect",
-      "is_grayscale": true
-    },
-    "MAP_FUZZ": {
-      "description": "Fuzz/Sheen Map",
-      "color": "#a2d1da",
-      "examples": [
-        "_fuzz.",
-        "_sheen."
-      ],
-      "standard_type": "FUZZ",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": true
-    },
-    "MAP_IDMAP": {
-      "description": "ID Map (for masking)",
-      "color": "#ca8fb4",
-      "examples": [
-        "_id.",
-        "_matid."
-      ],
-      "standard_type": "IDMAP",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": false
-    },
-    "MAP_MASK": {
-      "description": "Generic Mask Map",
-      "color": "#c6e2bf",
-      "examples": [
-        "_mask."
-      ],
-      "standard_type": "MASK",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": true
-    },
-    "MAP_IMPERFECTION": {
-      "description": "Imperfection Map (scratches, dust)",
-      "color": "#e6d1a6",
-      "examples": [
-        "_imp.",
-        "_imperfection.",
-        "splatter",
-        "scratches",
-        "smudges",
-        "hairs",
-        "fingerprints"
-      ],
-      "standard_type": "IMPERFECTION",
-      "bit_depth_rule": "force_8bit",
-      "is_grayscale": true
-    },
-    "MODEL": {
-      "description": "3D Model File",
-      "color": "#3db2bd",
-      "examples": [
-        ".fbx",
-        ".obj"
-      ],
-      "standard_type": "",
-      "bit_depth_rule": "",
-      "is_grayscale": false
-    },
-    "EXTRA": {
-      "description": "asset previews or metadata",
-      "color": "#8c8c8c",
-      "examples": [
-        ".txt",
-        ".zip",
-        "preview.",
-        "_flat.",
-        "_sphere.",
-        "_Cube.",
-        "thumb"
-      ],
-      "standard_type": "",
-      "bit_depth_rule": "",
-      "is_grayscale": false,
-      "keybind": "E"
-    },
-    "FILE_IGNORE": {
-      "description": "File to be ignored",
-      "color": "#673d35",
-      "examples": [
-        "Thumbs.db",
-        ".DS_Store"
-      ],
-      "standard_type": "",
-      "bit_depth_rule": "",
-      "is_grayscale": false,
-      "keybind": "X"
-    }
-  },
   "TARGET_FILENAME_PATTERN": "{base_name}_{map_type}_{resolution}.{ext}",
   "RESPECT_VARIANT_MAP_TYPES": [
     "COL"
   ],
   "EXTRA_FILES_SUBDIR": "Extra",
   "OUTPUT_BASE_DIR": "../Asset_Processor_Output_Tests",
-  "OUTPUT_DIRECTORY_PATTERN": "[supplier]/[sha5]_[assetname]",
+  "OUTPUT_DIRECTORY_PATTERN": "[supplier]_[assetname]",
   "OUTPUT_FILENAME_PATTERN": "[assetname]_[maptype]_[resolution].[ext]",
   "METADATA_FILENAME": "metadata.json",
   "DEFAULT_NODEGROUP_BLEND_PATH": "G:/02 Content/10-19 Content/19 Catalogs/19.01 Blender Asset Catalogue/_CustomLibraries/Nodes-Linked/PBRSET-Nodes-Testing.blend",
@@ -259,7 +18,8 @@
     "8K": 8192,
     "4K": 4096,
     "2K": 2048,
-    "1K": 1024
+    "1K": 1024,
+    "PREVIEW": 128
   },
   "ASPECT_RATIO_DECIMALS": 2,
   "OUTPUT_FORMAT_16BIT_PRIMARY": "exr",
@@ -267,11 +27,11 @@
   "OUTPUT_FORMAT_8BIT": "png",
   "MAP_MERGE_RULES": [
     {
-      "output_map_type": "NRMRGH",
+      "output_map_type": "MAP_NRMRGH",
       "inputs": {
-        "R": "NRM",
-        "G": "NRM",
-        "B": "ROUGH"
+        "R": "MAP_NRM",
+        "G": "MAP_NRM",
+        "B": "MAP_ROUGH"
       },
       "defaults": {
         "R": 0.5,
@@ -283,5 +43,13 @@
   ],
   "CALCULATE_STATS_RESOLUTION": "1K",
   "DEFAULT_ASSET_CATEGORY": "Surface",
-  "TEMP_DIR_PREFIX": "_PROCESS_ASSET_"
+  "TEMP_DIR_PREFIX": "_PROCESS_ASSET_",
+  "INITIAL_SCALING_MODE": "POT_DOWNSCALE",
+  "MERGE_DIMENSION_MISMATCH_STRATEGY": "USE_LARGEST",
+  "ENABLE_LOW_RESOLUTION_FALLBACK": true,
+  "LOW_RESOLUTION_THRESHOLD": 512,
+  "general_settings": {
+    "invert_normal_map_green_channel_globally": false,
+    "app_version": "Pre-Alpha"
+  }
 }
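
For context on the renamed merge rule above: the channel inputs now reference the file-type keys (MAP_NRM, MAP_ROUGH) instead of bare standard types. A minimal sketch of what such a channel pack could look like, assuming equally sized NumPy arrays; dimension mismatches are governed by MERGE_DIMENSION_MISMATCH_STRATEGY in the real pipeline and are not handled here:

import numpy as np

def pack_nrmrgh(normal_rgb: np.ndarray, rough_gray: np.ndarray) -> np.ndarray:
    """Sketch: pack normal X/Y into R/G and roughness into B (assumes matching dimensions)."""
    out = np.empty((*rough_gray.shape, 3), dtype=normal_rgb.dtype)
    out[..., 0] = normal_rgb[..., 0]  # R <- MAP_NRM red channel
    out[..., 1] = normal_rgb[..., 1]  # G <- MAP_NRM green channel
    out[..., 2] = rough_gray          # B <- MAP_ROUGH
    return out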
44 config/asset_type_definitions.json Normal file
@@ -0,0 +1,44 @@
{
  "ASSET_TYPE_DEFINITIONS": {
    "Surface": {
      "color": "#1f3e5d",
      "description": "A single Standard PBR material set for a surface.",
      "examples": [
        "Set: Wood01_COL + Wood01_NRM + WOOD01_ROUGH",
        "Set: Dif_Concrete + Normal_Concrete + Refl_Concrete"
      ]
    },
    "Model": {
      "color": "#b67300",
      "description": "A set that contains models, can include PBR textureset",
      "examples": [
        "Single = Chair.fbx",
        "Set = Plant02.fbx + Plant02_col + Plant02_SSS"
      ]
    },
    "Decal": {
      "color": "#68ac68",
      "description": "A alphamasked textureset",
      "examples": [
        "Set = DecalGraffiti01_Col + DecalGraffiti01_Alpha",
        "Single = DecalLeakStain03"
      ]
    },
    "Atlas": {
      "color": "#955b8b",
      "description": "A texture, name usually hints that it's an atlas",
      "examples": [
        "Set = FoliageAtlas01_col + FoliageAtlas01_nrm"
      ]
    },
    "UtilityMap": {
      "color": "#706b87",
      "description": "A useful image-asset consisting of only a single texture. Therefor each Utilitymap can only contain a single item.",
      "examples": [
        "Single = imperfection.png",
        "Single = smudges.png",
        "Single = scratches.tif"
      ]
    }
  }
}
208 config/file_type_definitions.json Normal file
@@ -0,0 +1,208 @@
{
  "FILE_TYPE_DEFINITIONS": {
    "MAP_COL": {
      "bit_depth_rule": "force_8bit",
      "color": "#ffaa00",
      "description": "Color/Albedo Map",
      "examples": [
        "_col.",
        "_basecolor.",
        "albedo",
        "diffuse"
      ],
      "is_grayscale": false,
      "keybind": "C",
      "standard_type": "COL"
    },
    "MAP_NRM": {
      "bit_depth_rule": "respect",
      "color": "#cca2f1",
      "description": "Normal Map",
      "examples": [
        "_nrm.",
        "_normal."
      ],
      "is_grayscale": false,
      "keybind": "N",
      "standard_type": "NRM"
    },
    "MAP_METAL": {
      "bit_depth_rule": "force_8bit",
      "color": "#dcf4f2",
      "description": "Metalness Map",
      "examples": [
        "_metal.",
        "_met."
      ],
      "is_grayscale": true,
      "keybind": "M",
      "standard_type": "METAL"
    },
    "MAP_ROUGH": {
      "bit_depth_rule": "force_8bit",
      "color": "#bfd6bf",
      "description": "Roughness Map",
      "examples": [
        "_rough.",
        "_rgh.",
        "_gloss"
      ],
      "is_grayscale": true,
      "keybind": "R",
      "standard_type": "ROUGH"
    },
    "MAP_GLOSS": {
      "bit_depth_rule": "force_8bit",
      "color": "#d6bfd6",
      "description": "Glossiness Map",
      "examples": [
        "_gloss.",
        "_gls."
      ],
      "is_grayscale": true,
      "keybind": "R",
      "standard_type": "GLOSS"
    },
    "MAP_AO": {
      "bit_depth_rule": "force_8bit",
      "color": "#e3c7c7",
      "description": "Ambient Occlusion Map",
      "examples": [
        "_ao.",
        "_ambientocclusion."
      ],
      "is_grayscale": true,
      "keybind": "",
      "standard_type": "AO"
    },
    "MAP_DISP": {
      "bit_depth_rule": "respect",
      "color": "#c6ddd5",
      "description": "Displacement/Height Map",
      "examples": [
        "_disp.",
        "_height."
      ],
      "is_grayscale": true,
      "keybind": "D",
      "standard_type": "DISP"
    },
    "MAP_REFL": {
      "bit_depth_rule": "force_8bit",
      "color": "#c2c2b9",
      "description": "Reflection/Specular Map",
      "examples": [
        "_refl.",
        "_specular."
      ],
      "is_grayscale": true,
      "keybind": "M",
      "standard_type": "REFL"
    },
    "MAP_SSS": {
      "bit_depth_rule": "respect",
      "color": "#a0d394",
      "description": "Subsurface Scattering Map",
      "examples": [
        "_sss.",
        "_subsurface."
      ],
      "is_grayscale": true,
      "keybind": "",
      "standard_type": "SSS"
    },
    "MAP_FUZZ": {
      "bit_depth_rule": "force_8bit",
      "color": "#a2d1da",
      "description": "Fuzz/Sheen Map",
      "examples": [
        "_fuzz.",
        "_sheen."
      ],
      "is_grayscale": true,
      "keybind": "",
      "standard_type": "FUZZ"
    },
    "MAP_IDMAP": {
      "bit_depth_rule": "force_8bit",
      "color": "#ca8fb4",
      "description": "ID Map (for masking)",
      "examples": [
        "_id.",
        "_matid."
      ],
      "is_grayscale": false,
      "keybind": "",
      "standard_type": "IDMAP"
    },
    "MAP_MASK": {
      "bit_depth_rule": "force_8bit",
      "color": "#c6e2bf",
      "description": "Generic Mask Map",
      "examples": [
        "_mask."
      ],
      "is_grayscale": true,
      "keybind": "",
      "standard_type": "MASK"
    },
    "MAP_IMPERFECTION": {
      "bit_depth_rule": "force_8bit",
      "color": "#e6d1a6",
      "description": "Imperfection Map (scratches, dust)",
      "examples": [
        "_imp.",
        "_imperfection.",
        "splatter",
        "scratches",
        "smudges",
        "hairs",
        "fingerprints"
      ],
      "is_grayscale": true,
      "keybind": "",
      "standard_type": "IMPERFECTION"
    },
    "MODEL": {
      "bit_depth_rule": "",
      "color": "#3db2bd",
      "description": "3D Model File",
      "examples": [
        ".fbx",
        ".obj"
      ],
      "is_grayscale": false,
      "keybind": "",
      "standard_type": ""
    },
    "EXTRA": {
      "bit_depth_rule": "",
      "color": "#8c8c8c",
      "description": "asset previews or metadata",
      "examples": [
        ".txt",
        ".zip",
        "preview.",
        "_flat.",
        "_sphere.",
        "_Cube.",
        "thumb"
      ],
      "is_grayscale": false,
      "keybind": "E",
      "standard_type": "EXTRA"
    },
    "FILE_IGNORE": {
      "bit_depth_rule": "",
      "color": "#673d35",
      "description": "File to be ignored",
      "examples": [
        "Thumbs.db",
        ".DS_Store"
      ],
      "is_grayscale": false,
      "keybind": "X",
      "standard_type": ""
    }
  }
}
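
The `examples` lists above read as filename hints; a rough sketch of how a filename might be matched against them follows, with made-up dictionary contents. The actual classifier works through preset map_type_mapping rules and _fnmatch_to_regex, so this is only an approximation of the idea:

from typing import Optional

FILE_TYPE_HINTS = {
    "MAP_COL": ["_col.", "_basecolor.", "albedo", "diffuse"],
    "MAP_NRM": ["_nrm.", "_normal."],
    "FILE_IGNORE": ["Thumbs.db", ".DS_Store"],
}

def guess_file_type(filename: str) -> Optional[str]:
    """Return the first file type whose hint substring appears in the filename."""
    name = filename.lower()
    for file_type, hints in FILE_TYPE_HINTS.items():
        if any(hint.lower() in name for hint in hints):
            return file_type
    return None

print(guess_file_type("Wood01_col.png"))  # MAP_COL
print(guess_file_type("Thumbs.db"))       # FILE_IGNORE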
@@ -3,256 +3,256 @@
 {
   "input": "MessyTextures/Concrete_Damage_Set/concrete_col.png\nMessyTextures/Concrete_Damage_Set/concrete_N.png\nMessyTextures/Concrete_Damage_Set/concrete_rough.jpg\nMessyTextures/Concrete_Damage_Set/height_map_concrete.tif\nMessyTextures/Concrete_Damage_Set/Thumbs.db\nMessyTextures/Fabric_Pattern/pattern_01_diffuse.tga\nMessyTextures/Fabric_Pattern/pattern_01_ao.png\nMessyTextures/Fabric_Pattern/pattern_01_normal.png\nMessyTextures/Fabric_Pattern/notes.txt\nMessyTextures/Fabric_Pattern/variant_blue_diffuse.tga\nMessyTextures/Fabric_Pattern/fabric_flat.jpg",
   "output": {
     "individual_file_analysis": [
       {
         "relative_file_path": "MessyTextures/Concrete_Damage_Set/concrete_col.png",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Concrete_Damage_Set"
       },
       {
         "relative_file_path": "MessyTextures/Concrete_Damage_Set/concrete_N.png",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "Concrete_Damage_Set"
       },
       {
         "relative_file_path": "MessyTextures/Concrete_Damage_Set/concrete_rough.jpg",
         "classified_file_type": "MAP_ROUGH",
         "proposed_asset_group_name": "Concrete_Damage_Set"
       },
       {
         "relative_file_path": "MessyTextures/Concrete_Damage_Set/height_map_concrete.tif",
         "classified_file_type": "MAP_DISP",
         "proposed_asset_group_name": "Concrete_Damage_Set"
       },
       {
         "relative_file_path": "MessyTextures/Concrete_Damage_Set/Thumbs.db",
         "classified_file_type": "FILE_IGNORE",
         "proposed_asset_group_name": null
       },
       {
         "relative_file_path": "MessyTextures/Fabric_Pattern/pattern_01_diffuse.tga",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Fabric_Pattern_01"
       },
       {
         "relative_file_path": "MessyTextures/Fabric_Pattern/pattern_01_ao.png",
         "classified_file_type": "MAP_AO",
         "proposed_asset_group_name": "Fabric_Pattern_01"
       },
       {
         "relative_file_path": "MessyTextures/Fabric_Pattern/pattern_01_normal.png",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "Fabric_Pattern_01"
       },
       {
         "relative_file_path": "MessyTextures/Fabric_Pattern/notes.txt",
         "classified_file_type": "EXTRA",
         "proposed_asset_group_name": "Fabric_Pattern_01"
       },
       {
         "relative_file_path": "MessyTextures/Fabric_Pattern/variant_blue_diffuse.tga",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Fabric_Pattern_01"
       },
       {
         "relative_file_path": "MessyTextures/Fabric_Pattern/fabric_flat.jpg",
         "classified_file_type": "EXTRA",
         "proposed_asset_group_name": "Fabric_Pattern_01"
+      }
+    ],
+    "asset_group_classifications": {
+      "Concrete_Damage_Set": "Surface",
+      "Fabric_Pattern_01": "Surface"
     }
-    ],
-    "asset_group_classifications": {
-      "Concrete_Damage_Set": "Surface",
-      "Fabric_Pattern_01": "Surface"
-    }
   }
 },
 {
   "input": "SciFi_Drone/Drone_Model.fbx\nSciFi_Drone/Textures/Drone_BaseColor.png\nSciFi_Drone/Textures/Drone_Metallic.png\nSciFi_Drone/Textures/Drone_Roughness.png\nSciFi_Drone/Textures/Drone_Normal.png\nSciFi_Drone/Textures/Drone_Emissive.jpg\nSciFi_Drone/ReferenceImages/concept.jpg",
   "output": {
     "individual_file_analysis": [
       {
         "relative_file_path": "SciFi_Drone/Drone_Model.fbx",
         "classified_file_type": "MODEL",
         "proposed_asset_group_name": "SciFi_Drone"
       },
       {
         "relative_file_path": "SciFi_Drone/Textures/Drone_BaseColor.png",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "SciFi_Drone"
       },
       {
         "relative_file_path": "SciFi_Drone/Textures/Drone_Metallic.png",
         "classified_file_type": "MAP_METAL",
         "proposed_asset_group_name": "SciFi_Drone"
       },
       {
         "relative_file_path": "SciFi_Drone/Textures/Drone_Roughness.png",
         "classified_file_type": "MAP_ROUGH",
         "proposed_asset_group_name": "SciFi_Drone"
       },
       {
         "relative_file_path": "SciFi_Drone/Textures/Drone_Normal.png",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "SciFi_Drone"
       },
       {
         "relative_file_path": "SciFi_Drone/Textures/Drone_Emissive.jpg",
         "classified_file_type": "EXTRA",
         "proposed_asset_group_name": "SciFi_Drone"
       },
       {
         "relative_file_path": "SciFi_Drone/ReferenceImages/concept.jpg",
         "classified_file_type": "EXTRA",
         "proposed_asset_group_name": "SciFi_Drone"
+      }
+    ],
+    "asset_group_classifications": {
+      "SciFi_Drone": "Model"
     }
-    ],
-    "asset_group_classifications": {
-      "SciFi_Drone": "Model"
-    }
   }
 },
 {
   "input": "21_hairs_deposits.tif\n22_hairs_fabric.tif\n23_hairs_fibres.tif\n24_hairs_fibres.tif\n25_bonus_isolatedFingerprints.tif\n26_bonus_isolatedPalmprint.tif\n27_metal_aluminum.tif\n28_metal_castIron.tif\n29_scratcehes_deposits_shapes.tif\n30_scratches_deposits.tif",
   "output": {
     "individual_file_analysis": [
       {
         "relative_file_path": "21_hairs_deposits.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Hairs_Deposits_21"
       },
       {
         "relative_file_path": "22_hairs_fabric.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Hairs_Fabric_22"
       },
       {
         "relative_file_path": "23_hairs_fibres.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Hairs_Fibres_23"
       },
       {
         "relative_file_path": "24_hairs_fibres.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Hairs_Fibres_24"
       },
       {
         "relative_file_path": "25_bonus_isolatedFingerprints.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Bonus_IsolatedFingerprints_25"
       },
       {
         "relative_file_path": "26_bonus_isolatedPalmprint.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Bonus_IsolatedPalmprint_26"
       },
       {
         "relative_file_path": "27_metal_aluminum.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Metal_Aluminum_27"
       },
       {
         "relative_file_path": "28_metal_castIron.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Metal_CastIron_28"
       },
       {
         "relative_file_path": "29_scratcehes_deposits_shapes.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Scratches_Deposits_Shapes_29"
       },
       {
         "relative_file_path": "30_scratches_deposits.tif",
         "classified_file_type": "MAP_IMPERFECTION",
         "proposed_asset_group_name": "Scratches_Deposits_30"
+      }
+    ],
+    "asset_group_classifications": {
+      "Hairs_Deposits_21": "UtilityMap",
+      "Hairs_Fabric_22": "UtilityMap",
+      "Hairs_Fibres_23": "UtilityMap",
+      "Hairs_Fibres_24": "UtilityMap",
+      "Bonus_IsolatedFingerprints_25": "UtilityMap",
+      "Bonus_IsolatedPalmprint_26": "UtilityMap",
+      "Metal_Aluminum_27": "UtilityMap",
+      "Metal_CastIron_28": "UtilityMap",
+      "Scratches_Deposits_Shapes_29": "UtilityMap",
+      "Scratches_Deposits_30": "UtilityMap"
     }
-    ],
-    "asset_group_classifications": {
-      "Hairs_Deposits_21": "UtilityMap",
-      "Hairs_Fabric_22": "UtilityMap",
-      "Hairs_Fibres_23": "UtilityMap",
-      "Hairs_Fibres_24": "UtilityMap",
-      "Bonus_IsolatedFingerprints_25": "UtilityMap",
-      "Bonus_IsolatedPalmprint_26": "UtilityMap",
-      "Metal_Aluminum_27": "UtilityMap",
-      "Metal_CastIron_28": "UtilityMap",
-      "Scratches_Deposits_Shapes_29": "UtilityMap",
-      "Scratches_Deposits_30": "UtilityMap"
-    }
   }
 },
 {
   "input": "Part1/TextureSupply_Boards001_A_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_A_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_B_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_B_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_C_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_C_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_D_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_D_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_E_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_E_28x300cm-Normal.jpg\nPart1/TextureSupply_Boards001_F_28x300cm-Albedo.jpg\nPart1/TextureSupply_Boards001_F_28x300cm-Normal.jpg",
   "output": {
     "individual_file_analysis": [
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_A_28x300cm-Albedo.jpg",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Boards001_A"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_A_28x300cm-Normal.jpg",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "Boards001_A"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_B_28x300cm-Albedo.jpg",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Boards001_B"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_B_28x300cm-Normal.jpg",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "Boards001_B"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_C_28x300cm-Albedo.jpg",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Boards001_C"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_C_28x300cm-Normal.jpg",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "Boards001_C"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_D_28x300cm-Albedo.jpg",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Boards001_D"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_D_28x300cm-Normal.jpg",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "Boards001_D"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_E_28x300cm-Albedo.jpg",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Boards001_E"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_E_28x300cm-Normal.jpg",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "Boards001_E"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_F_28x300cm-Albedo.jpg",
         "classified_file_type": "MAP_COL",
         "proposed_asset_group_name": "Boards001_F"
       },
       {
         "relative_file_path": "Part1/TextureSupply_Boards001_F_28x300cm-Normal.jpg",
         "classified_file_type": "MAP_NRM",
         "proposed_asset_group_name": "Boards001_F"
+      }
+    ],
+    "asset_group_classifications": {
+      "Boards001_A": "Surface",
+      "Boards001_B": "Surface",
+      "Boards001_C": "Surface",
+      "Boards001_D": "Surface",
+      "Boards001_E": "Surface",
+      "Boards001_F": "Surface"
     }
-    ],
-    "asset_group_classifications": {
-      "Boards001_A": "Surface",
-      "Boards001_B": "Surface",
-      "Boards001_C": "Surface",
-      "Boards001_D": "Surface",
-      "Boards001_E": "Surface",
-      "Boards001_F": "Surface"
-    }
   }
 }
 ],
@@ -1,5 +1,11 @@
-[
-  "Dimensiva",
-  "Dinesen",
-  "Poliigon"
-]
+{
+  "Dimensiva": {
+    "normal_map_type": "OpenGL"
+  },
+  "Dinesen": {
+    "normal_map_type": "OpenGL"
+  },
+  "Poliigon": {
+    "normal_map_type": "OpenGL"
+  }
+}
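
The supplier list above becomes a mapping with per-supplier metadata. A hedged sketch of how a consumer might read the new shape; the path and the fallback default are illustrative, not taken from the repository:

import json
from pathlib import Path

def get_supplier_normal_map_type(suppliers_path: Path, supplier: str, default: str = "OpenGL") -> str:
    """Sketch: look up a supplier's normal-map convention from the dict-shaped suppliers config."""
    data = json.loads(suppliers_path.read_text(encoding="utf-8"))
    return data.get(supplier, {}).get("normal_map_type", default)

# e.g. get_supplier_normal_map_type(Path("config/suppliers.json"), "Poliigon") -> "OpenGL"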
436 configuration.py
@@ -3,12 +3,18 @@ import os
 from pathlib import Path
 import logging
 import re
+import collections.abc
+from typing import Optional
 
 log = logging.getLogger(__name__)
 
 BASE_DIR = Path(__file__).parent
 APP_SETTINGS_PATH = BASE_DIR / "config" / "app_settings.json"
 LLM_SETTINGS_PATH = BASE_DIR / "config" / "llm_settings.json"
+ASSET_TYPE_DEFINITIONS_PATH = BASE_DIR / "config" / "asset_type_definitions.json"
+FILE_TYPE_DEFINITIONS_PATH = BASE_DIR / "config" / "file_type_definitions.json"
+USER_SETTINGS_PATH = BASE_DIR / "config" / "user_settings.json"
+SUPPLIERS_CONFIG_PATH = BASE_DIR / "config" / "suppliers.json"
 PRESETS_DIR = BASE_DIR / "Presets"
 
 class ConfigurationError(Exception):
@@ -64,6 +70,25 @@ def _fnmatch_to_regex(pattern: str) -> str:
     # For filename matching, we usually want to find the pattern, not match the whole string.
     return res
 
+def _deep_merge_dicts(base_dict: dict, override_dict: dict) -> dict:
+    """
+    Recursively merges override_dict into base_dict.
+    If a key exists in both and both values are dicts, it recursively merges them.
+    Otherwise, the value from override_dict takes precedence.
+    Modifies base_dict in place and returns it.
+    """
+    for key, value in override_dict.items():
+        if isinstance(value, collections.abc.Mapping):
+            node = base_dict.get(key)  # Use .get() to avoid creating empty dicts if not needed for override
+            if isinstance(node, collections.abc.Mapping):
+                _deep_merge_dicts(node, value)  # node is base_dict[key], modified in place
+            else:
+                # If base_dict[key] is not a dict or doesn't exist, override it
+                base_dict[key] = value
+        else:
+            base_dict[key] = value
+    return base_dict
+
 
 class Configuration:
     """
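
A quick illustration of the merge semantics added above, with made-up values: nested dictionaries are merged key by key, while scalar values from the override win and the base dictionary is modified in place.

base = {"IMAGE_RESOLUTIONS": {"4K": 4096, "1K": 1024}, "OUTPUT_FORMAT_8BIT": "png"}
override = {"IMAGE_RESOLUTIONS": {"1K": 2048}, "OUTPUT_FORMAT_8BIT": "tif"}
merged = _deep_merge_dicts(base, override)
# merged == {"IMAGE_RESOLUTIONS": {"4K": 4096, "1K": 2048}, "OUTPUT_FORMAT_8BIT": "tif"}
# note: base is modified in place and returned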
@@ -71,7 +96,7 @@ class Configuration:
     """
     def __init__(self, preset_name: str):
         """
-        Loads core config and the specified preset file.
+        Loads core config, user overrides, and the specified preset file.
 
         Args:
             preset_name: The name of the preset (without .json extension).
@@ -81,9 +106,32 @@ class Configuration:
         """
         log.debug(f"Initializing Configuration with preset: '{preset_name}'")
         self.preset_name = preset_name
+
+        # 1. Load core settings
         self._core_settings: dict = self._load_core_config()
+
+        # 2. Load asset type definitions
+        self._asset_type_definitions: dict = self._load_asset_type_definitions()
+
+        # 3. Load file type definitions
+        self._file_type_definitions: dict = self._load_file_type_definitions()
+
+        # 4. Load user settings
+        user_settings_overrides: dict = self._load_user_settings()
+
+        # 5. Deep merge user settings onto core settings
+        if user_settings_overrides:
+            log.info("Applying user setting overrides to core settings.")
+            # _deep_merge_dicts modifies self._core_settings in place
+            _deep_merge_dicts(self._core_settings, user_settings_overrides)
+
+        # 6. Load LLM settings
         self._llm_settings: dict = self._load_llm_config()
+
+        # 7. Load preset settings (conceptually overrides combined base + user for shared keys)
        self._preset_settings: dict = self._load_preset(preset_name)
+
+        # 8. Validate and compile (after all base/user/preset settings are established)
         self._validate_configs()
         self._compile_regex_patterns()
         log.info(f"Configuration loaded successfully using preset: '{self.preset_name}'")
@@ -215,9 +263,79 @@ class Configuration:
         except Exception as e:
             raise ConfigurationError(f"Failed to read preset file {preset_file}: {e}")
 
+    def _load_asset_type_definitions(self) -> dict:
+        """Loads asset type definitions from the asset_type_definitions.json file."""
+        log.debug(f"Loading asset type definitions from: {ASSET_TYPE_DEFINITIONS_PATH}")
+        if not ASSET_TYPE_DEFINITIONS_PATH.is_file():
+            raise ConfigurationError(f"Asset type definitions file not found: {ASSET_TYPE_DEFINITIONS_PATH}")
+        try:
+            with open(ASSET_TYPE_DEFINITIONS_PATH, 'r', encoding='utf-8') as f:
+                data = json.load(f)
+            if "ASSET_TYPE_DEFINITIONS" not in data:
+                raise ConfigurationError(f"Key 'ASSET_TYPE_DEFINITIONS' not found in {ASSET_TYPE_DEFINITIONS_PATH}")
+            settings = data["ASSET_TYPE_DEFINITIONS"]
+            if not isinstance(settings, dict):
+                raise ConfigurationError(f"'ASSET_TYPE_DEFINITIONS' in {ASSET_TYPE_DEFINITIONS_PATH} must be a dictionary.")
+            log.debug(f"Asset type definitions loaded successfully.")
+            return settings
+        except json.JSONDecodeError as e:
+            raise ConfigurationError(f"Failed to parse asset type definitions file {ASSET_TYPE_DEFINITIONS_PATH}: Invalid JSON - {e}")
+        except Exception as e:
+            raise ConfigurationError(f"Failed to read asset type definitions file {ASSET_TYPE_DEFINITIONS_PATH}: {e}")
+
+    def _load_file_type_definitions(self) -> dict:
+        """Loads file type definitions from the file_type_definitions.json file."""
+        log.debug(f"Loading file type definitions from: {FILE_TYPE_DEFINITIONS_PATH}")
+        if not FILE_TYPE_DEFINITIONS_PATH.is_file():
+            raise ConfigurationError(f"File type definitions file not found: {FILE_TYPE_DEFINITIONS_PATH}")
+        try:
+            with open(FILE_TYPE_DEFINITIONS_PATH, 'r', encoding='utf-8') as f:
+                data = json.load(f)
+            if "FILE_TYPE_DEFINITIONS" not in data:
+                raise ConfigurationError(f"Key 'FILE_TYPE_DEFINITIONS' not found in {FILE_TYPE_DEFINITIONS_PATH}")
+            settings = data["FILE_TYPE_DEFINITIONS"]
+            if not isinstance(settings, dict):
+                raise ConfigurationError(f"'FILE_TYPE_DEFINITIONS' in {FILE_TYPE_DEFINITIONS_PATH} must be a dictionary.")
+            log.debug(f"File type definitions loaded successfully.")
+            return settings
+        except json.JSONDecodeError as e:
+            raise ConfigurationError(f"Failed to parse file type definitions file {FILE_TYPE_DEFINITIONS_PATH}: Invalid JSON - {e}")
+        except Exception as e:
+            raise ConfigurationError(f"Failed to read file type definitions file {FILE_TYPE_DEFINITIONS_PATH}: {e}")
+
+    def _load_user_settings(self) -> dict:
+        """Loads user override settings from config/user_settings.json."""
+        log.debug(f"Attempting to load user settings from: {USER_SETTINGS_PATH}")
+        if not USER_SETTINGS_PATH.is_file():
+            log.info(f"User settings file not found: {USER_SETTINGS_PATH}. Proceeding without user overrides.")
+            return {}
+        try:
+            with open(USER_SETTINGS_PATH, 'r', encoding='utf-8') as f:
+                settings = json.load(f)
+            log.info(f"User settings loaded successfully from {USER_SETTINGS_PATH}.")
+            return settings
+        except json.JSONDecodeError as e:
+            log.warning(f"Failed to parse user settings file {USER_SETTINGS_PATH}: Invalid JSON - {e}. Using empty user settings.")
+            return {}
+        except Exception as e:
+            log.warning(f"Failed to read user settings file {USER_SETTINGS_PATH}: {e}. Using empty user settings.")
+            return {}
+
     def _validate_configs(self):
         """Performs basic validation checks on loaded settings."""
         log.debug("Validating loaded configurations...")
+
+        # Validate new definition files first
+        if not isinstance(self._asset_type_definitions, dict):
+            raise ConfigurationError("Asset type definitions were not loaded correctly or are not a dictionary.")
+        if not self._asset_type_definitions:  # Check if empty
+            raise ConfigurationError("Asset type definitions are empty.")
+
+        if not isinstance(self._file_type_definitions, dict):
+            raise ConfigurationError("File type definitions were not loaded correctly or are not a dictionary.")
+        if not self._file_type_definitions:  # Check if empty
+            raise ConfigurationError("File type definitions are empty.")
+
         # Preset validation
         required_preset_keys = [
             "preset_name", "supplier_name", "source_naming", "map_type_mapping",
@@ -235,8 +353,8 @@ class Configuration:
                 raise ConfigurationError(f"Preset '{self.preset_name}': Rule at index {index} in 'map_type_mapping' must be a dictionary.")
             if 'target_type' not in rule or not isinstance(rule['target_type'], str):
                 raise ConfigurationError(f"Preset '{self.preset_name}': Rule at index {index} in 'map_type_mapping' is missing 'target_type' string.")
 
-            valid_file_type_keys = self._core_settings.get('FILE_TYPE_DEFINITIONS', {}).keys()
+            valid_file_type_keys = self._file_type_definitions.keys()
             if rule['target_type'] not in valid_file_type_keys:
                 raise ConfigurationError(
                     f"Preset '{self.preset_name}': Rule at index {index} in 'map_type_mapping' "
@@ -261,7 +379,7 @@ class Configuration:
             raise ConfigurationError("Core config 'IMAGE_RESOLUTIONS' must be a dictionary.")
|
||||||
|
|
||||||
# Validate DEFAULT_ASSET_CATEGORY
|
# Validate DEFAULT_ASSET_CATEGORY
|
||||||
valid_asset_type_keys = self._core_settings.get('ASSET_TYPE_DEFINITIONS', {}).keys()
|
valid_asset_type_keys = self._asset_type_definitions.keys()
|
||||||
default_asset_category_value = self._core_settings.get('DEFAULT_ASSET_CATEGORY')
|
default_asset_category_value = self._core_settings.get('DEFAULT_ASSET_CATEGORY')
|
||||||
if not default_asset_category_value:
|
if not default_asset_category_value:
|
||||||
raise ConfigurationError("Core config 'DEFAULT_ASSET_CATEGORY' is missing.")
|
raise ConfigurationError("Core config 'DEFAULT_ASSET_CATEGORY' is missing.")
|
||||||
@@ -379,10 +497,33 @@ class Configuration:
|
|||||||
"""Gets the configured JPG quality level."""
|
"""Gets the configured JPG quality level."""
|
||||||
return self._core_settings.get('JPG_QUALITY', 95)
|
return self._core_settings.get('JPG_QUALITY', 95)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def invert_normal_green_globally(self) -> bool:
|
||||||
|
"""Gets the global setting for inverting the green channel of normal maps."""
|
||||||
|
# Default to False if the setting is missing in the core config
|
||||||
|
return self._core_settings.get('invert_normal_map_green_channel_globally', False)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def overwrite_existing(self) -> bool:
|
||||||
|
"""Gets the setting for overwriting existing files from core settings."""
|
||||||
|
return self._core_settings.get('overwrite_existing', False)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def png_compression_level(self) -> int:
|
||||||
|
"""Gets the PNG compression level from core settings."""
|
||||||
|
return self._core_settings.get('PNG_COMPRESSION', 6) # Default to 6 if not found
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def resolution_threshold_for_jpg(self) -> int:
|
def resolution_threshold_for_jpg(self) -> int:
|
||||||
"""Gets the pixel dimension threshold for using JPG for 8-bit images."""
|
"""Gets the pixel dimension threshold for using JPG for 8-bit images."""
|
||||||
return self._core_settings.get('RESOLUTION_THRESHOLD_FOR_JPG', 4096)
|
value = self._core_settings.get('RESOLUTION_THRESHOLD_FOR_JPG', 4096)
|
||||||
|
log.info(f"CONFIGURATION_DEBUG: resolution_threshold_for_jpg property returning: {value} (type: {type(value)})")
|
||||||
|
# Ensure it's an int, as downstream might expect it.
|
||||||
|
# The .get() default is an int, but if the JSON had null or a string, it might be different.
|
||||||
|
if not isinstance(value, int):
|
||||||
|
log.warning(f"CONFIGURATION_DEBUG: RESOLUTION_THRESHOLD_FOR_JPG was not an int, got {type(value)}. Defaulting to 4096.")
|
||||||
|
return 4096
|
||||||
|
return value
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def respect_variant_map_types(self) -> list:
|
def respect_variant_map_types(self) -> list:
|
||||||
@@ -400,11 +541,11 @@ class Configuration:
|
|||||||
Gets the bit depth rule ('respect', 'force_8bit', 'force_16bit') for a given map type identifier.
|
Gets the bit depth rule ('respect', 'force_8bit', 'force_16bit') for a given map type identifier.
|
||||||
The map_type_input can be an FTD key (e.g., "MAP_COL") or a suffixed FTD key (e.g., "MAP_COL-1").
|
The map_type_input can be an FTD key (e.g., "MAP_COL") or a suffixed FTD key (e.g., "MAP_COL-1").
|
||||||
"""
|
"""
|
||||||
if not self._core_settings or 'FILE_TYPE_DEFINITIONS' not in self._core_settings:
|
if not self._file_type_definitions: # Check if the attribute exists and is not empty
|
||||||
log.warning("FILE_TYPE_DEFINITIONS not found in core settings. Cannot determine bit depth rule.")
|
log.warning("File type definitions not loaded. Cannot determine bit depth rule.")
|
||||||
return "respect"
|
return "respect"
|
||||||
|
|
||||||
file_type_definitions = self._core_settings['FILE_TYPE_DEFINITIONS']
|
file_type_definitions = self._file_type_definitions
|
||||||
|
|
||||||
# 1. Try direct match with map_type_input as FTD key
|
# 1. Try direct match with map_type_input as FTD key
|
||||||
definition = file_type_definitions.get(map_type_input)
|
definition = file_type_definitions.get(map_type_input)
|
||||||
@@ -450,8 +591,8 @@ class Configuration:
|
|||||||
from FILE_TYPE_DEFINITIONS.
|
from FILE_TYPE_DEFINITIONS.
|
||||||
"""
|
"""
|
||||||
aliases = set()
|
aliases = set()
|
||||||
file_type_definitions = self._core_settings.get('FILE_TYPE_DEFINITIONS', {})
|
# _file_type_definitions is guaranteed to be a dict by the loader
|
||||||
for _key, definition in file_type_definitions.items():
|
for _key, definition in self._file_type_definitions.items():
|
||||||
if isinstance(definition, dict):
|
if isinstance(definition, dict):
|
||||||
standard_type = definition.get('standard_type')
|
standard_type = definition.get('standard_type')
|
||||||
if standard_type and isinstance(standard_type, str) and standard_type.strip():
|
if standard_type and isinstance(standard_type, str) and standard_type.strip():
|
||||||
@@ -459,16 +600,16 @@ class Configuration:
|
|||||||
return sorted(list(aliases))
|
return sorted(list(aliases))
|
||||||
|
|
||||||
def get_asset_type_definitions(self) -> dict:
|
def get_asset_type_definitions(self) -> dict:
|
||||||
"""Returns the ASSET_TYPE_DEFINITIONS dictionary from core settings."""
|
"""Returns the _asset_type_definitions dictionary."""
|
||||||
return self._core_settings.get('ASSET_TYPE_DEFINITIONS', {})
|
return self._asset_type_definitions
|
||||||
|
|
||||||
def get_asset_type_keys(self) -> list:
|
def get_asset_type_keys(self) -> list:
|
||||||
"""Returns a list of valid asset type keys from core settings."""
|
"""Returns a list of valid asset type keys from core settings."""
|
||||||
return list(self.get_asset_type_definitions().keys())
|
return list(self.get_asset_type_definitions().keys())
|
||||||
|
|
||||||
def get_file_type_definitions_with_examples(self) -> dict:
|
def get_file_type_definitions_with_examples(self) -> dict:
|
||||||
"""Returns the FILE_TYPE_DEFINITIONS dictionary (including descriptions and examples) from core settings."""
|
"""Returns the _file_type_definitions dictionary (including descriptions and examples)."""
|
||||||
return self._core_settings.get('FILE_TYPE_DEFINITIONS', {})
|
return self._file_type_definitions
|
||||||
|
|
||||||
def get_file_type_keys(self) -> list:
|
def get_file_type_keys(self) -> list:
|
||||||
"""Returns a list of valid file type keys from core settings."""
|
"""Returns a list of valid file type keys from core settings."""
|
||||||
@@ -509,9 +650,27 @@ class Configuration:
|
|||||||
"""Returns the LLM request timeout in seconds from LLM settings."""
|
"""Returns the LLM request timeout in seconds from LLM settings."""
|
||||||
return self._llm_settings.get('llm_request_timeout', 120)
|
return self._llm_settings.get('llm_request_timeout', 120)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def app_version(self) -> Optional[str]:
|
||||||
|
"""Returns the application version from general_settings."""
|
||||||
|
gs = self._core_settings.get('general_settings')
|
||||||
|
if isinstance(gs, dict):
|
||||||
|
return gs.get('app_version')
|
||||||
|
return None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def enable_low_resolution_fallback(self) -> bool:
|
||||||
|
"""Gets the setting for enabling low-resolution fallback."""
|
||||||
|
return self._core_settings.get('ENABLE_LOW_RESOLUTION_FALLBACK', True)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def low_resolution_threshold(self) -> int:
|
||||||
|
"""Gets the pixel dimension threshold for low-resolution fallback."""
|
||||||
|
return self._core_settings.get('LOW_RESOLUTION_THRESHOLD', 512)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def FILE_TYPE_DEFINITIONS(self) -> dict:
|
def FILE_TYPE_DEFINITIONS(self) -> dict:
|
||||||
return self._core_settings.get('FILE_TYPE_DEFINITIONS', {})
|
return self._file_type_definitions
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def keybind_config(self) -> dict[str, list[str]]:
|
def keybind_config(self) -> dict[str, list[str]]:
|
||||||
@@ -521,8 +680,8 @@ class Configuration:
|
|||||||
Example: {'C': ['MAP_COL'], 'R': ['MAP_ROUGH', 'MAP_GLOSS']}
|
Example: {'C': ['MAP_COL'], 'R': ['MAP_ROUGH', 'MAP_GLOSS']}
|
||||||
"""
|
"""
|
||||||
keybinds = {}
|
keybinds = {}
|
||||||
file_type_defs = self._core_settings.get('FILE_TYPE_DEFINITIONS', {})
|
# _file_type_definitions is guaranteed to be a dict by the loader
|
||||||
for ftd_key, ftd_value in file_type_defs.items():
|
for ftd_key, ftd_value in self._file_type_definitions.items():
|
||||||
if isinstance(ftd_value, dict) and 'keybind' in ftd_value:
|
if isinstance(ftd_value, dict) and 'keybind' in ftd_value:
|
||||||
key = ftd_value['keybind']
|
key = ftd_value['keybind']
|
||||||
if key not in keybinds:
|
if key not in keybinds:
|
||||||
@@ -538,25 +697,92 @@ class Configuration:

 def load_base_config() -> dict:
     """
-    Loads only the base configuration from app_settings.json.
-    Does not load presets or perform merging/validation.
+    Loads base configuration by merging app_settings.json, user_settings.json (if exists),
+    asset_type_definitions.json, and file_type_definitions.json.
+    Does not load presets or perform full validation beyond basic file loading.
+    Returns a dictionary containing the merged settings. If app_settings.json
+    fails to load, an empty dictionary is returned. If other files
+    fail, errors are logged, and the function proceeds with what has been loaded.
     """
+    base_settings = {}
+
+    # 1. Load app_settings.json (critical)
     if not APP_SETTINGS_PATH.is_file():
-        log.error(f"Base configuration file not found: {APP_SETTINGS_PATH}")
-        # Return empty dict or raise a specific error if preferred
-        # For now, return empty dict to allow GUI to potentially start with defaults
+        log.error(f"Critical: Base application settings file not found: {APP_SETTINGS_PATH}. Returning empty configuration.")
         return {}
     try:
         with open(APP_SETTINGS_PATH, 'r', encoding='utf-8') as f:
-            settings = json.load(f)
-        return settings
+            base_settings = json.load(f)
+        log.info(f"Successfully loaded base application settings from: {APP_SETTINGS_PATH}")
     except json.JSONDecodeError as e:
-        log.error(f"Failed to parse base configuration file {APP_SETTINGS_PATH}: Invalid JSON - {e}")
+        log.error(f"Critical: Failed to parse base application settings file {APP_SETTINGS_PATH}: Invalid JSON - {e}. Returning empty configuration.")
         return {}
     except Exception as e:
-        log.error(f"Failed to read base configuration file {APP_SETTINGS_PATH}: {e}")
+        log.error(f"Critical: Failed to read base application settings file {APP_SETTINGS_PATH}: {e}. Returning empty configuration.")
         return {}

+    # 2. Attempt to load user_settings.json
+    user_settings_overrides = {}
+    if USER_SETTINGS_PATH.is_file():
+        try:
+            with open(USER_SETTINGS_PATH, 'r', encoding='utf-8') as f:
+                user_settings_overrides = json.load(f)
+            log.info(f"User settings loaded successfully for base_config from {USER_SETTINGS_PATH}.")
+        except json.JSONDecodeError as e:
+            log.warning(f"Failed to parse user settings file {USER_SETTINGS_PATH} for base_config: Invalid JSON - {e}. Proceeding without these user overrides.")
+        except Exception as e:
+            log.warning(f"Failed to read user settings file {USER_SETTINGS_PATH} for base_config: {e}. Proceeding without these user overrides.")
+
+    # 3. Deep merge user settings onto base_settings
+    if user_settings_overrides:
+        log.info("Applying user setting overrides to base_settings in load_base_config.")
+        # _deep_merge_dicts modifies base_settings in place
+        _deep_merge_dicts(base_settings, user_settings_overrides)
+
+    # 4. Load asset_type_definitions.json (non-critical, merge if successful)
+    if not ASSET_TYPE_DEFINITIONS_PATH.is_file():
+        log.error(f"Asset type definitions file not found: {ASSET_TYPE_DEFINITIONS_PATH}. Proceeding without it.")
+    else:
+        try:
+            with open(ASSET_TYPE_DEFINITIONS_PATH, 'r', encoding='utf-8') as f:
+                asset_defs_data = json.load(f)
+            if "ASSET_TYPE_DEFINITIONS" in asset_defs_data:
+                if isinstance(asset_defs_data["ASSET_TYPE_DEFINITIONS"], dict):
+                    # Merge into base_settings, which might already contain user overrides
+                    base_settings['ASSET_TYPE_DEFINITIONS'] = asset_defs_data["ASSET_TYPE_DEFINITIONS"]
+                    log.info(f"Successfully loaded and merged ASSET_TYPE_DEFINITIONS from: {ASSET_TYPE_DEFINITIONS_PATH}")
+                else:
+                    log.error(f"Value under 'ASSET_TYPE_DEFINITIONS' in {ASSET_TYPE_DEFINITIONS_PATH} is not a dictionary. Skipping merge.")
+            else:
+                log.error(f"Key 'ASSET_TYPE_DEFINITIONS' not found in {ASSET_TYPE_DEFINITIONS_PATH}. Skipping merge.")
+        except json.JSONDecodeError as e:
+            log.error(f"Failed to parse asset type definitions file {ASSET_TYPE_DEFINITIONS_PATH}: Invalid JSON - {e}. Skipping merge.")
+        except Exception as e:
+            log.error(f"Failed to read asset type definitions file {ASSET_TYPE_DEFINITIONS_PATH}: {e}. Skipping merge.")
+
+    # 5. Load file_type_definitions.json (non-critical, merge if successful)
+    if not FILE_TYPE_DEFINITIONS_PATH.is_file():
+        log.error(f"File type definitions file not found: {FILE_TYPE_DEFINITIONS_PATH}. Proceeding without it.")
+    else:
+        try:
+            with open(FILE_TYPE_DEFINITIONS_PATH, 'r', encoding='utf-8') as f:
+                file_defs_data = json.load(f)
+            if "FILE_TYPE_DEFINITIONS" in file_defs_data:
+                if isinstance(file_defs_data["FILE_TYPE_DEFINITIONS"], dict):
+                    # Merge into base_settings
+                    base_settings['FILE_TYPE_DEFINITIONS'] = file_defs_data["FILE_TYPE_DEFINITIONS"]
+                    log.info(f"Successfully loaded and merged FILE_TYPE_DEFINITIONS from: {FILE_TYPE_DEFINITIONS_PATH}")
+                else:
+                    log.error(f"Value under 'FILE_TYPE_DEFINITIONS' in {FILE_TYPE_DEFINITIONS_PATH} is not a dictionary. Skipping merge.")
+            else:
+                log.error(f"Key 'FILE_TYPE_DEFINITIONS' not found in {FILE_TYPE_DEFINITIONS_PATH}. Skipping merge.")
+        except json.JSONDecodeError as e:
+            log.error(f"Failed to parse file type definitions file {FILE_TYPE_DEFINITIONS_PATH}: Invalid JSON - {e}. Skipping merge.")
+        except Exception as e:
+            log.error(f"Failed to read file type definitions file {FILE_TYPE_DEFINITIONS_PATH}: {e}. Skipping merge.")
+
+    return base_settings
+
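The `_deep_merge_dicts` helper called above is not part of the hunks shown here; the following is only a minimal sketch of what such a recursive in-place merge typically looks like, so its exact signature and semantics in `configuration.py` are assumptions.

```python
def _deep_merge_dicts(base: dict, overrides: dict) -> dict:
    """Recursively merge `overrides` into `base`, modifying `base` in place (sketch)."""
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            _deep_merge_dicts(base[key], value)  # merge nested dictionaries key by key
        else:
            base[key] = value  # scalars, lists, and new keys simply override
    return base
```

With this behavior, a user override such as `{"general_settings": {"app_version": "1.1"}}` only replaces the nested key it names and leaves the rest of `general_settings` from `app_settings.json` intact.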
 def save_llm_config(settings_dict: dict):
     """
     Saves the provided LLM settings dictionary to llm_settings.json.
@@ -571,6 +797,18 @@ def save_llm_config(settings_dict: dict):
         log.error(f"Failed to save LLM configuration file {LLM_SETTINGS_PATH}: {e}")
         # Re-raise as ConfigurationError to signal failure upstream
         raise ConfigurationError(f"Failed to save LLM configuration: {e}")
+
+def save_user_config(settings_dict: dict):
+    """Saves the provided settings dictionary to user_settings.json."""
+    log.debug(f"Saving user config to: {USER_SETTINGS_PATH}")
+    try:
+        # Ensure parent directory exists (though 'config/' should always exist)
+        USER_SETTINGS_PATH.parent.mkdir(parents=True, exist_ok=True)
+        with open(USER_SETTINGS_PATH, 'w', encoding='utf-8') as f:
+            json.dump(settings_dict, f, indent=4)
+        log.info(f"User config saved successfully to {USER_SETTINGS_PATH}")
+    except Exception as e:
+        log.error(f"Failed to save user configuration file {USER_SETTINGS_PATH}: {e}")
+        raise ConfigurationError(f"Failed to save user configuration: {e}")
+
 def save_base_config(settings_dict: dict):
     """
     Saves the provided settings dictionary to app_settings.json.
@@ -583,3 +821,149 @@ def save_base_config(settings_dict: dict):
     except Exception as e:
         log.error(f"Failed to save base configuration file {APP_SETTINGS_PATH}: {e}")
         raise ConfigurationError(f"Failed to save configuration: {e}")
+
+def load_asset_definitions() -> dict:
+    """
+    Reads config/asset_type_definitions.json.
+    Returns the dictionary under the "ASSET_TYPE_DEFINITIONS" key.
+    Handles file not found or JSON errors gracefully (e.g., return empty dict, log error).
+    """
+    log.debug(f"Loading asset type definitions from: {ASSET_TYPE_DEFINITIONS_PATH}")
+    if not ASSET_TYPE_DEFINITIONS_PATH.is_file():
+        log.error(f"Asset type definitions file not found: {ASSET_TYPE_DEFINITIONS_PATH}")
+        return {}
+    try:
+        with open(ASSET_TYPE_DEFINITIONS_PATH, 'r', encoding='utf-8') as f:
+            data = json.load(f)
+        if "ASSET_TYPE_DEFINITIONS" not in data:
+            log.error(f"Key 'ASSET_TYPE_DEFINITIONS' not found in {ASSET_TYPE_DEFINITIONS_PATH}")
+            return {}
+        settings = data["ASSET_TYPE_DEFINITIONS"]
+        if not isinstance(settings, dict):
+            log.error(f"'ASSET_TYPE_DEFINITIONS' in {ASSET_TYPE_DEFINITIONS_PATH} must be a dictionary.")
+            return {}
+        log.debug(f"Asset type definitions loaded successfully.")
+        return settings
+    except json.JSONDecodeError as e:
+        log.error(f"Failed to parse asset type definitions file {ASSET_TYPE_DEFINITIONS_PATH}: Invalid JSON - {e}")
+        return {}
+    except Exception as e:
+        log.error(f"Failed to read asset type definitions file {ASSET_TYPE_DEFINITIONS_PATH}: {e}")
+        return {}
+
+def save_asset_definitions(data: dict):
+    """
+    Takes a dictionary (representing the content for the "ASSET_TYPE_DEFINITIONS" key).
+    Writes it to config/asset_type_definitions.json under the root key "ASSET_TYPE_DEFINITIONS".
+    Handles potential I/O errors.
+    """
+    log.debug(f"Saving asset type definitions to: {ASSET_TYPE_DEFINITIONS_PATH}")
+    try:
+        with open(ASSET_TYPE_DEFINITIONS_PATH, 'w', encoding='utf-8') as f:
+            json.dump({"ASSET_TYPE_DEFINITIONS": data}, f, indent=4)
+        log.info(f"Asset type definitions saved successfully to {ASSET_TYPE_DEFINITIONS_PATH}")
+    except Exception as e:
+        log.error(f"Failed to save asset type definitions file {ASSET_TYPE_DEFINITIONS_PATH}: {e}")
+        raise ConfigurationError(f"Failed to save asset type definitions: {e}")
+
+def load_file_type_definitions() -> dict:
+    """
+    Reads config/file_type_definitions.json.
+    Returns the dictionary under the "FILE_TYPE_DEFINITIONS" key.
+    Handles errors gracefully.
+    """
+    log.debug(f"Loading file type definitions from: {FILE_TYPE_DEFINITIONS_PATH}")
+    if not FILE_TYPE_DEFINITIONS_PATH.is_file():
+        log.error(f"File type definitions file not found: {FILE_TYPE_DEFINITIONS_PATH}")
+        return {}
+    try:
+        with open(FILE_TYPE_DEFINITIONS_PATH, 'r', encoding='utf-8') as f:
+            data = json.load(f)
+        if "FILE_TYPE_DEFINITIONS" not in data:
+            log.error(f"Key 'FILE_TYPE_DEFINITIONS' not found in {FILE_TYPE_DEFINITIONS_PATH}")
+            return {}
+        settings = data["FILE_TYPE_DEFINITIONS"]
+        if not isinstance(settings, dict):
+            log.error(f"'FILE_TYPE_DEFINITIONS' in {FILE_TYPE_DEFINITIONS_PATH} must be a dictionary.")
+            return {}
+        log.debug(f"File type definitions loaded successfully.")
+        return settings
+    except json.JSONDecodeError as e:
+        log.error(f"Failed to parse file type definitions file {FILE_TYPE_DEFINITIONS_PATH}: Invalid JSON - {e}")
+        return {}
+    except Exception as e:
+        log.error(f"Failed to read file type definitions file {FILE_TYPE_DEFINITIONS_PATH}: {e}")
+        return {}
+
+def save_file_type_definitions(data: dict):
+    """
+    Takes a dictionary (representing content for "FILE_TYPE_DEFINITIONS" key).
+    Writes it to config/file_type_definitions.json under the root key "FILE_TYPE_DEFINITIONS".
+    Handles errors.
+    """
+    log.debug(f"Saving file type definitions to: {FILE_TYPE_DEFINITIONS_PATH}")
+    try:
+        with open(FILE_TYPE_DEFINITIONS_PATH, 'w', encoding='utf-8') as f:
+            json.dump({"FILE_TYPE_DEFINITIONS": data}, f, indent=4)
+        log.info(f"File type definitions saved successfully to {FILE_TYPE_DEFINITIONS_PATH}")
+    except Exception as e:
+        log.error(f"Failed to save file type definitions file {FILE_TYPE_DEFINITIONS_PATH}: {e}")
+        raise ConfigurationError(f"Failed to save file type definitions: {e}")
+
+def load_supplier_settings() -> dict:
+    """
+    Reads config/suppliers.json.
+    Returns the entire dictionary.
+    Handles file not found (return empty dict) or JSON errors.
+    If the loaded data is a list (old format), convert it in memory to the new
+    dictionary format, defaulting normal_map_type to "OpenGL" for each supplier.
+    """
+    log.debug(f"Loading supplier settings from: {SUPPLIERS_CONFIG_PATH}")
+    if not SUPPLIERS_CONFIG_PATH.is_file():
+        log.warning(f"Supplier settings file not found: {SUPPLIERS_CONFIG_PATH}. Returning empty dict.")
+        return {}
+    try:
+        with open(SUPPLIERS_CONFIG_PATH, 'r', encoding='utf-8') as f:
+            data = json.load(f)
+
+        if isinstance(data, list):
+            log.warning(f"Supplier settings in {SUPPLIERS_CONFIG_PATH} is in the old list format. Converting to new dictionary format.")
+            new_data = {}
+            for supplier_name in data:
+                if isinstance(supplier_name, str):
+                    new_data[supplier_name] = {"normal_map_type": "OpenGL"}
+                else:
+                    log.warning(f"Skipping non-string item '{supplier_name}' during old format conversion of supplier settings.")
+            log.info(f"Supplier settings converted to new format: {new_data}")
+            return new_data
+
+        if not isinstance(data, dict):
+            log.error(f"Supplier settings in {SUPPLIERS_CONFIG_PATH} must be a dictionary. Found {type(data)}. Returning empty dict.")
+            return {}
+
+        log.debug(f"Supplier settings loaded successfully.")
+        return data
+    except json.JSONDecodeError as e:
+        log.error(f"Failed to parse supplier settings file {SUPPLIERS_CONFIG_PATH}: Invalid JSON - {e}. Returning empty dict.")
+        return {}
+    except Exception as e:
+        log.error(f"Failed to read supplier settings file {SUPPLIERS_CONFIG_PATH}: {e}. Returning empty dict.")
+        return {}
+
+def save_supplier_settings(data: dict):
+    """
+    Takes a dictionary (in the new format).
+    Writes it directly to config/suppliers.json.
+    Handles errors.
+    """
+    log.debug(f"Saving supplier settings to: {SUPPLIERS_CONFIG_PATH}")
+    if not isinstance(data, dict):
+        log.error(f"Data for save_supplier_settings must be a dictionary. Got {type(data)}.")
+        raise ConfigurationError(f"Invalid data type for saving supplier settings: {type(data)}")
+    try:
+        with open(SUPPLIERS_CONFIG_PATH, 'w', encoding='utf-8') as f:
+            json.dump(data, f, indent=2)  # Using indent=2 as per the example for suppliers.json
+        log.info(f"Supplier settings saved successfully to {SUPPLIERS_CONFIG_PATH}")
+    except Exception as e:
+        log.error(f"Failed to save supplier settings file {SUPPLIERS_CONFIG_PATH}: {e}")
+        raise ConfigurationError(f"Failed to save supplier settings: {e}")
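For illustration, the old-to-new `suppliers.json` conversion performed by `load_supplier_settings()` above amounts to the following; the concrete supplier names here are made up for the example.

```python
# Legacy format: a plain list of supplier names
old_format = ["SupplierA", "SupplierB"]

# New format: a dictionary keyed by supplier name, defaulting normal_map_type to "OpenGL"
new_format = {name: {"normal_map_type": "OpenGL"} for name in old_format}
# -> {"SupplierA": {"normal_map_type": "OpenGL"}, "SupplierB": {"normal_map_type": "OpenGL"}}
```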
Binary files not shown.
BIN  context_portal/conport_vector_data/chroma.sqlite3  (new file; binary file not shown)
BIN  context_portal/context.db  (new file; binary file not shown)
137  documentation/definitions_editor_plan.md  (new file)
@@ -0,0 +1,137 @@
# Plan for New Definitions Editor UI

## 1. Overview

This document outlines the plan to create a new, dedicated UI for managing "Asset Type Definitions", "File Type Definitions", and "Supplier Settings". This editor will provide a more structured and user-friendly way to manage these core application configurations, which are currently stored in separate JSON files.

## 2. General Design Principles

* **Dedicated Dialog:** The editor will be a new `QDialog` (e.g., `DefinitionsEditorDialog`).
* **Access Point:** Launched from the `MainWindow` menu bar (e.g., under a "Definitions" menu or "Edit" -> "Edit Definitions...").
* **Tabbed Interface:** The dialog will use a `QTabWidget` to separate the management of different definition types.
* **List/Details View:** Each tab will generally follow a two-pane layout (a skeleton sketch follows this list):
    * **Left Pane:** A `QListWidget` displaying the primary keys or names of the definitions (e.g., asset type names, file type IDs, supplier names). Includes "Add" and "Remove" buttons for managing these primary entries.
    * **Right Pane:** A details area (e.g., `QGroupBox` with a `QFormLayout`) that shows the specific settings for the item selected in the left-pane list.
* **Data Persistence:** The dialog will load from and save to the respective JSON configuration files:
    * Asset Types: `config/asset_type_definitions.json`
    * File Types: `config/file_type_definitions.json`
    * Supplier Settings: `config/suppliers.json` (This file will be refactored from a simple list to a dictionary of supplier objects).
* **User Experience:** Standard "Save" and "Cancel" buttons, with a check for unsaved changes.
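As a rough illustration of the two-pane layout described above, a minimal PySide6 skeleton might look like the following. Class and helper names are placeholders, not the actual `gui/definitions_editor_dialog.py` implementation.

```python
from PySide6.QtWidgets import (QDialog, QDialogButtonBox, QFormLayout, QGroupBox,
                               QHBoxLayout, QListWidget, QTabWidget, QVBoxLayout, QWidget)

class DefinitionsEditorDialog(QDialog):
    """Sketch of the tabbed, list/details definitions editor."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle("Edit Definitions")
        layout = QVBoxLayout(self)

        self.tabs = QTabWidget()
        self.tabs.addTab(self._make_list_details_tab(), "Asset Types")
        self.tabs.addTab(self._make_list_details_tab(), "File Types")
        self.tabs.addTab(self._make_list_details_tab(), "Suppliers")
        layout.addWidget(self.tabs)

        buttons = QDialogButtonBox(QDialogButtonBox.StandardButton.Save
                                   | QDialogButtonBox.StandardButton.Cancel)
        buttons.accepted.connect(self.accept)   # real code would validate/save first
        buttons.rejected.connect(self.reject)   # real code would check unsaved changes
        layout.addWidget(buttons)

    def _make_list_details_tab(self) -> QWidget:
        # Two-pane layout: key list on the left, details form on the right
        tab = QWidget()
        row = QHBoxLayout(tab)
        row.addWidget(QListWidget(), 1)
        details = QGroupBox("Details")
        QFormLayout(details)  # populated per tab (description, color, examples, ...)
        row.addWidget(details, 2)
        return tab
```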
## 3. Tab-Specific Plans

### 3.1. Asset Type Definitions Tab

* **Manages:** `config/asset_type_definitions.json`
* **UI Sketch:**
    ```mermaid
    graph LR
        subgraph AssetTypeTab [Asset Type Definitions Tab]
            direction LR
            AssetList[QListWidget (Asset Type Keys e.g., "Surface")] --> AssetDetailsGroup{Details for Selected Asset Type};
        end

        subgraph AssetDetailsGroup
            direction TB
            Desc[Description: QTextEdit]
            Color[Color: QPushButton ("Choose Color...") + Color Swatch Display]
            Examples[Examples: QListWidget + Add/Remove Example Buttons]
        end
        AssetActions["Add Asset Type (Prompt for Name)\nRemove Selected Asset Type"] --> AssetList
    ```
* **Details:**
    * **Left Pane:** `QListWidget` for asset type names. "Add Asset Type" (prompts for new key) and "Remove Selected Asset Type" buttons.
    * **Right Pane (Details):**
        * `description`: `QTextEdit`.
        * `color`: `QPushButton` opening `QColorDialog`, with an adjacent `QLabel` to display the color swatch.
        * `examples`: `QListWidget` with "Add Example" (`QInputDialog.getText`) and "Remove Selected Example" buttons.

### 3.2. File Type Definitions Tab

* **Manages:** `config/file_type_definitions.json`
* **UI Sketch:**
    ```mermaid
    graph LR
        subgraph FileTypeTab [File Type Definitions Tab]
            direction LR
            FileList[QListWidget (File Type Keys e.g., "MAP_COL")] --> FileDetailsGroup{Details for Selected File Type};
        end

        subgraph FileDetailsGroup
            direction TB
            DescF[Description: QTextEdit]
            ColorF[Color: QPushButton ("Choose Color...") + Color Swatch Display]
            ExamplesF[Examples: QListWidget + Add/Remove Example Buttons]
            StdType[Standard Type: QLineEdit]
            BitDepth[Bit Depth Rule: QComboBox ("respect", "force_8bit", "force_16bit")]
            IsGrayscale[Is Grayscale: QCheckBox]
            Keybind[Keybind: QLineEdit (1 char)]
        end
        FileActions["Add File Type (Prompt for ID)\nRemove Selected File Type"] --> FileList
    ```
* **Details:**
    * **Left Pane:** `QListWidget` for file type IDs. "Add File Type" (prompts for new key) and "Remove Selected File Type" buttons.
    * **Right Pane (Details):**
        * `description`: `QTextEdit`.
        * `color`: `QPushButton` opening `QColorDialog`, with an adjacent `QLabel` for color swatch.
        * `examples`: `QListWidget` with "Add Example" and "Remove Selected Example" buttons.
        * `standard_type`: `QLineEdit`.
        * `bit_depth_rule`: `QComboBox` (options: "respect", "force_8bit", "force_16bit").
        * `is_grayscale`: `QCheckBox`.
        * `keybind`: `QLineEdit` (validation for single character recommended).

### 3.3. Supplier Settings Tab

* **Manages:** `config/suppliers.json` (This file will be refactored to a dictionary structure, e.g., `{"SupplierName": {"normal_map_type": "OpenGL", ...}}`).
* **UI Sketch:**
    ```mermaid
    graph LR
        subgraph SupplierTab [Supplier Settings Tab]
            direction LR
            SupplierList[QListWidget (Supplier Names)] --> SupplierDetailsGroup{Details for Selected Supplier};
        end

        subgraph SupplierDetailsGroup
            direction TB
            NormalMapType[Normal Map Type: QComboBox ("OpenGL", "DirectX")]
            %% Future supplier-specific settings can be added here
        end
        SupplierActions["Add Supplier (Prompt for Name)\nRemove Selected Supplier"] --> SupplierList
    ```
* **Details:**
    * **Left Pane:** `QListWidget` for supplier names. "Add Supplier" (prompts for new name) and "Remove Selected Supplier" buttons.
    * **Right Pane (Details):**
        * `normal_map_type`: `QComboBox` (options: "OpenGL", "DirectX"). Default for new suppliers: "OpenGL".
        * *(Space for future supplier-specific settings).*
* **Data Handling Note for `config/suppliers.json`:**
    * The editor will load from and save to `config/suppliers.json` using the new dictionary format (supplier name as key, object of settings as value).
    * Initial implementation might require `config/suppliers.json` to be manually updated to this new format if it currently exists as a simple list. Alternatively, the editor could attempt an automatic conversion on first load if the old list format is detected, or prompt the user. For the first pass, assuming the editor works with the new format is simpler.

## 4. Implementation Steps (High-Level)

1. **(Potentially Manual First Step) Refactor `config/suppliers.json`:** If `config/suppliers.json` exists as a list, manually convert it to the new dictionary structure (e.g., `{"SupplierName": {"normal_map_type": "OpenGL"}}`) before starting UI development for this tab, or plan for the editor to handle this conversion.
2. **Create `DefinitionsEditorDialog` Class:** Inherit from `QDialog`.
3. **Implement UI Structure:** Main `QTabWidget`, and for each tab, the two-pane layout with `QListWidget`, `QGroupBox` for details, and relevant input widgets (`QLineEdit`, `QTextEdit`, `QComboBox`, `QCheckBox`, `QPushButton`).
4. **Implement Loading Logic:**
    * For each tab, read data from its corresponding JSON file.
    * Populate the left-pane `QListWidget` with the primary keys/names.
    * Store the full data structure internally (e.g., in dictionaries within the dialog instance).
5. **Implement Display Logic:**
    * When an item is selected in a `QListWidget`, populate the right-pane detail fields with the data for that item.
6. **Implement Editing Logic:**
    * Ensure that changes made in the detail fields (text edits, combobox selections, checkbox states, color choices, list example modifications) update the corresponding internal data structure for the currently selected item.
7. **Implement Add/Remove Functionality:**
    * For each definition type (Asset Type, File Type, Supplier), implement the "Add" and "Remove" buttons.
    * "Add": Prompt for a unique key/name, create a new default entry in the internal data, and add it to the `QListWidget`.
    * "Remove": Remove the selected item from the `QListWidget` and the internal data.
    * For "examples" lists within Asset and File types, implement their "Add Example" and "Remove Selected Example" buttons.
8. **Implement Saving Logic** (a sketch follows this plan):
    * When the main "Save" button is clicked:
        * Write the (potentially modified) Asset Type definitions data structure to `config/asset_type_definitions.json`.
        * Write File Type definitions to `config/file_type_definitions.json`.
        * Write Supplier settings (in the new dictionary format) to `config/suppliers.json`.
    * Consider creating new dedicated save functions in `configuration.py` for each of these files if they don't already exist or if existing ones are not suitable.
9. **Implement Unsaved Changes Check & Cancel Logic.**
10. **Integrate Dialog Launch:** Add a menu action in `MainWindow.py` to open the `DefinitionsEditorDialog`.

This plan provides a comprehensive approach to creating a dedicated editor for these crucial application definitions.
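A hedged sketch of how step 8 might call the module-level save helpers added to `configuration.py` in this change set; the function name, parameter names, and the import path are assumptions made for illustration.

```python
from configuration import (save_asset_definitions, save_file_type_definitions,
                           save_supplier_settings)  # import path assumed

def save_all_definitions(asset_types: dict, file_types: dict, suppliers: dict) -> None:
    """Persist the dialog's three internal data structures to their JSON files."""
    save_asset_definitions(asset_types)        # -> config/asset_type_definitions.json
    save_file_type_definitions(file_types)     # -> config/file_type_definitions.json
    save_supplier_settings(suppliers)          # -> config/suppliers.json (new dict format)
```

Each helper raises `ConfigurationError` on failure, so the dialog's save slot can catch that single exception type and show a message box instead of closing.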
113  documentation/preferences_refactor_plan.md  (new file)
@@ -0,0 +1,113 @@
# Refactoring Plan for Preferences Window (ConfigEditorDialog)

## 1. Overview

This document outlines the plan to refactor the preferences window (`gui/config_editor_dialog.py`). The primary goal is to address issues related to misaligned scope, poor user experience for certain data types, and incomplete interactivity. The refactoring will focus on making the `ConfigEditorDialog` a robust editor for settings in `config/app_settings.json` that are intended to be overridden by the user via `config/user_settings.json`.

## 2. Assessment Summary

* **Misaligned Scope:** The dialog currently includes UI for "Asset Type Definitions" and "File Type Definitions". However, these are managed in separate dedicated JSON files ([`config/asset_type_definitions.json`](config/asset_type_definitions.json) and [`config/file_type_definitions.json`](config/file_type_definitions.json)) and are not saved by this dialog (which targets `config/user_settings.json`).
* **Poor UX for Data Types:**
    * Lists (e.g., `RESPECT_VARIANT_MAP_TYPES`) are edited as comma-separated strings.
    * Dictionary-like structures (e.g., `IMAGE_RESOLUTIONS`) are handled inconsistently (JSON defines as dict, UI attempts list-of-pairs).
    * Editing complex list-of-objects (e.g., `MAP_MERGE_RULES`) is functionally incomplete.
* **Incomplete Interactivity:** Many table-based editors lack "Add/Remove Row" functionality and proper cell delegates for intuitive editing.
* **LLM Settings:** Confirmed to be correctly managed by the separate `LLMEditorWidget` and `config/llm_settings.json`, so they are out of scope for this specific dialog refactor.

## 3. Refactoring Phases and Plan Details

```mermaid
graph TD
    A[Start: Current State] --> B{Phase 1: Correct Scope & Critical UX/Data Fixes};
    B --> C{Phase 2: Enhance MAP_MERGE_RULES Editor};
    C --> D{Phase 3: General UX & Table Interactivity};
    D --> E[End: Refactored Preferences Window];

    subgraph "Phase 1: Correct Scope & Critical UX/Data Fixes"
        B1[Remove Definitions Editing from ConfigEditorDialog]
        B2[Improve List Editing for RESPECT_VARIANT_MAP_TYPES]
        B3[Fix IMAGE_RESOLUTIONS Handling (Dictionary)]
        B4[Handle Simple Nested Settings (e.g., general_settings)]
    end

    subgraph "Phase 2: Enhance MAP_MERGE_RULES Editor"
        C1[Implement Add/Remove for Merge Rules]
        C2[Improve Rule Detail Editing (ComboBoxes, SpinBoxes)]
    end

    subgraph "Phase 3: General UX & Table Interactivity"
        D1[Implement IMAGE_RESOLUTIONS Table Add/Remove Buttons]
        D2[Implement Necessary Table Cell Delegates (e.g., for IMAGE_RESOLUTIONS values)]
        D3[Review/Refine Tab Layout & Widget Grouping]
    end

    B --> B1; B --> B2; B --> B3; B --> B4;
    C --> C1; C --> C2;
    D --> D1; D --> D2; D --> D3;
```

### Phase 1: Correct Scope & Critical UX/Data Fixes (in `gui/config_editor_dialog.py`)

1. **Remove Definitions Editing:**
    * **Action:** In `populate_definitions_tab`, remove the inner `QTabWidget` and the code that creates/populates the "Asset Types" and "File Types" tables.
    * The `DEFAULT_ASSET_CATEGORY` `QComboBox` (for the setting from `app_settings.json`) should remain. Its items should be populated using keys obtained from the `Configuration` class (which loads the actual `ASSET_TYPE_DEFINITIONS` from its dedicated file).
    * **Rationale:** Simplifies the dialog to settings managed via `user_settings.json`. Editing of the full definition files requires dedicated UI (see Future Enhancements note).

2. **Improve `RESPECT_VARIANT_MAP_TYPES` Editing:**
    * **Action:** In `populate_output_naming_tab`, replace the `QLineEdit` for `RESPECT_VARIANT_MAP_TYPES` with a `QListWidget` and "Add"/"Remove" buttons.
    * "Add" button: Use `QInputDialog.getItem` with items populated from `Configuration.get_file_type_keys()` (or similar method accessing loaded `FILE_TYPE_DEFINITIONS`) to allow users to select a valid file type key.
    * "Remove" button: Remove the selected item from the `QListWidget`.
    * Update `save_settings` to read the list of strings from this `QListWidget`.
    * Update `populate_widgets_from_settings` to populate this `QListWidget`.

3. **Fix `IMAGE_RESOLUTIONS` Handling:**
    * **Action:** In `populate_image_processing_tab`:
        * The `QTableWidget` for `IMAGE_RESOLUTIONS` should have two columns: "Name" (string, for the dictionary key) and "Resolution (px)" (integer, for the dictionary value).
        * In `populate_image_resolutions_table`, ensure it correctly populates from the dictionary structure in `self.settings['IMAGE_RESOLUTIONS']` (from `app_settings.json`).
        * In `save_settings`, ensure it correctly reads data from the table and reconstructs the `IMAGE_RESOLUTIONS` dictionary (e.g., `{"4K": 4096, "2K": 2048}`) when saving to `user_settings.json` (a sketch follows this phase).
        * ComboBoxes `CALCULATE_STATS_RESOLUTION` and `RESOLUTION_THRESHOLD_FOR_JPG` should be populated with the *keys* (names like "4K", "2K") from the `IMAGE_RESOLUTIONS` dictionary. `RESOLUTION_THRESHOLD_FOR_JPG` should also include "Never" and "Always" options. The `save_settings` method needs to correctly map these special ComboBox values back to appropriate storable values if necessary (e.g., sentinel numbers or specific strings if the backend configuration expects them for "Never"/"Always").

4. **Handle Simple Nested Settings (e.g., `general_settings`):**
    * **Action:** For `general_settings.invert_normal_map_green_channel_globally` (from `config/app_settings.json`):
        * Add a `QCheckBox` labeled "Invert Normal Map Green Channel Globally" to an appropriate tab (e.g., "Image Processing" or a "General" tab after layout review).
        * Update `populate_widgets_from_settings` to read `self.settings.get('general_settings', {}).get('invert_normal_map_green_channel_globally', False)`.
        * Update `save_settings` to write this value back to `target_file_content.setdefault('general_settings', {})['invert_normal_map_green_channel_globally'] = widget.isChecked()`.
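A hedged sketch of the Phase 1 save path described above. The widget parameters are placeholders for whatever attributes `ConfigEditorDialog` actually uses; the point is rebuilding the `IMAGE_RESOLUTIONS` dictionary from the two-column table and writing the nested `general_settings` override.

```python
from PySide6.QtWidgets import QCheckBox, QListWidget, QTableWidget

def gather_phase1_overrides(resolutions_table: QTableWidget,
                            respect_variant_list: QListWidget,
                            invert_green_checkbox: QCheckBox) -> dict:
    """Collect the Phase 1 settings into the dict that would be written to user_settings.json."""
    overrides: dict = {}

    # Rebuild {"4K": 4096, "2K": 2048, ...} from the "Name" / "Resolution (px)" columns
    resolutions = {}
    for row in range(resolutions_table.rowCount()):
        name_item = resolutions_table.item(row, 0)
        value_item = resolutions_table.item(row, 1)
        if name_item and value_item and name_item.text().strip():
            resolutions[name_item.text().strip()] = int(value_item.text())
    overrides['IMAGE_RESOLUTIONS'] = resolutions

    # RESPECT_VARIANT_MAP_TYPES comes straight from the QListWidget items
    overrides['RESPECT_VARIANT_MAP_TYPES'] = [
        respect_variant_list.item(i).text() for i in range(respect_variant_list.count())
    ]

    # Nested general_settings override
    overrides.setdefault('general_settings', {})[
        'invert_normal_map_green_channel_globally'] = invert_green_checkbox.isChecked()
    return overrides
```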
### Phase 2: Enhance `MAP_MERGE_RULES` Editor (in `gui/config_editor_dialog.py`)

1. **Rule Management:**
    * **Action:** In `populate_map_merging_tab`:
        * Connect the "Add Rule" button:
            * Create a default new rule dictionary (e.g., `{"output_map_type": "NEW_RULE", "inputs": {}, "defaults": {}, "output_bit_depth": "respect_inputs"}`).
            * Add it to the internal list of rules that will be saved (e.g., a copy of `self.settings['MAP_MERGE_RULES']` that gets modified).
            * Add a new `QListWidgetItem` for it and select it to display its details.
        * Connect the "Remove Rule" button:
            * Remove the selected rule from the internal list and the `QListWidget`.
            * Clear the details panel.

2. **Rule Details Panel Improvements (`display_merge_rule_details`):**
    * **`output_map_type`:** Change the `QLineEdit` to a `QComboBox`. Populate its items from `Configuration.get_file_type_keys()`.
    * **`inputs` Table:** The "Input Map Type" column cells should use a `QComboBox` delegate, populated with `Configuration.get_file_type_keys()` plus an empty/None option (see the delegate sketch after this phase).
    * **`defaults` Table:** The "Default Value" column cells should use a `QDoubleSpinBox` delegate (e.g., range 0.0 to 1.0, or 0-255 if appropriate for specific channel types).
    * Ensure changes in these detail editors update the underlying rule data associated with the selected `QListWidgetItem` and the internal list of rules.
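A hedged sketch of the combo-box delegate suggested for the "Input Map Type" column. `Configuration.get_file_type_keys()` is the accessor referenced above; the delegate class name and wiring are illustrative.

```python
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QComboBox, QStyledItemDelegate

class FileTypeKeyDelegate(QStyledItemDelegate):
    """Edits a cell with a combo box of valid file type keys plus an empty option."""

    def __init__(self, file_type_keys, parent=None):
        super().__init__(parent)
        self._keys = [""] + list(file_type_keys)  # empty string means "no input"

    def createEditor(self, parent, option, index):
        combo = QComboBox(parent)
        combo.addItems(self._keys)
        return combo

    def setEditorData(self, editor, index):
        editor.setCurrentText(index.data(Qt.ItemDataRole.EditRole) or "")

    def setModelData(self, editor, model, index):
        model.setData(index, editor.currentText(), Qt.ItemDataRole.EditRole)

# Usage sketch: inputs_table.setItemDelegateForColumn(0, FileTypeKeyDelegate(config.get_file_type_keys(), inputs_table))
```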
### Phase 3: General UX & Table Interactivity (in `gui/config_editor_dialog.py`)

1. **Implement `IMAGE_RESOLUTIONS` Table Add/Remove Buttons:**
    * **Action:** In `populate_image_processing_tab`, connect the "Add Row" and "Remove Row" buttons for the `IMAGE_RESOLUTIONS` table.
    * "Add Row": Prompt for "Name" (string) and "Resolution (px)" (integer).
    * "Remove Row": Remove the selected row from the table and the underlying data.
2. **Implement Necessary Table Cell Delegates:**
    * **Action:** For the `IMAGE_RESOLUTIONS` table, the "Resolution (px)" column should use a `QSpinBox` delegate or a `QLineEdit` with integer validation to ensure correct data input.
3. **Review/Refine Tab Layout & Widget Grouping:**
    * **Action:** After the functional changes, review the overall layout of tabs and the grouping of settings within `gui/config_editor_dialog.py`.
    * Ensure settings from `config/app_settings.json` are logically placed and clearly labeled.
    * Verify widget labels are descriptive and tooltips are helpful where needed.
    * Confirm correct mapping between UI widgets and the keys in `app_settings.json` (e.g., `OUTPUT_FILENAME_PATTERN` vs. `TARGET_FILENAME_PATTERN`).

## 4. Future Enhancements (Out of Scope for this Refactor)

* **Dedicated Editors for Definitions:** As per user feedback, if `ASSET_TYPE_DEFINITIONS` and `FILE_TYPE_DEFINITIONS` require UI-based editing, dedicated dialogs/widgets should be created. These would read from and save to their respective files ([`config/asset_type_definitions.json`](config/asset_type_definitions.json) and [`config/file_type_definitions.json`](config/file_type_definitions.json)) and could adopt a list/details UI similar to the `MAP_MERGE_RULES` editor.
* **Live Updates:** Consider mechanisms for applying some settings without requiring an application restart, if feasible for specific settings.

This plan aims to create a more focused, usable, and correct preferences window.

File diff suppressed because it is too large. (Load Diff)
1288  gui/definitions_editor_dialog.py  (new file; diff suppressed because it is too large. Load Diff)
@@ -126,12 +126,15 @@ class SupplierSearchDelegate(QStyledItemDelegate):
         """Loads the list of known suppliers from the JSON config file."""
         try:
             with open(SUPPLIERS_CONFIG_PATH, 'r') as f:
-                suppliers = json.load(f)
-            if isinstance(suppliers, list):
+                suppliers_data = json.load(f)  # Renamed variable for clarity
+            if isinstance(suppliers_data, list):
                 # Ensure all items are strings
-                return sorted([str(s) for s in suppliers if isinstance(s, str)])
-            else:
-                log.warning(f"'{SUPPLIERS_CONFIG_PATH}' does not contain a valid list. Starting fresh.")
+                return sorted([str(s) for s in suppliers_data if isinstance(s, str)])
+            elif isinstance(suppliers_data, dict):  # ADDED: Handle dictionary case
+                # If it's a dictionary, extract keys as supplier names
+                return sorted([str(key) for key in suppliers_data.keys() if isinstance(key, str)])
+            else:  # MODIFIED: Updated warning message
+                log.warning(f"'{SUPPLIERS_CONFIG_PATH}' does not contain a valid list or dictionary of suppliers. Starting fresh.")
                 return []
         except FileNotFoundError:
             log.info(f"'{SUPPLIERS_CONFIG_PATH}' not found. Starting with an empty supplier list.")
@@ -1,6 +1,7 @@
 # gui/llm_editor_widget.py
 import json
 import logging
+import copy  # Added for deepcopy
 from PySide6.QtWidgets import (
     QWidget, QVBoxLayout, QTabWidget, QPlainTextEdit, QGroupBox,
     QHBoxLayout, QPushButton, QFormLayout, QLineEdit, QDoubleSpinBox,
@@ -24,6 +25,7 @@ class LLMEditorWidget(QWidget):
     def __init__(self, parent=None):
         super().__init__(parent)
         self._unsaved_changes = False
+        self.original_llm_settings = {}  # Initialize original_llm_settings
         self._init_ui()
         self._connect_signals()
         self.save_button.setEnabled(False)  # Initially disabled
@@ -131,6 +133,7 @@ class LLMEditorWidget(QWidget):
         try:
             with open(LLM_CONFIG_PATH, 'r', encoding='utf-8') as f:
                 settings = json.load(f)
+            self.original_llm_settings = copy.deepcopy(settings)  # Store a deep copy

             # Populate Prompt Settings
             self.prompt_editor.setPlainText(settings.get("llm_predictor_prompt", ""))
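The deep copy stored above presumably backs an unsaved-changes check elsewhere in the widget; that comparison is outside the hunks shown, so the following is only an assumed illustration and `_gather_current_settings` is a hypothetical helper name.

```python
def _has_unsaved_changes(self) -> bool:
    # Compare what the UI currently holds against the snapshot taken at load time.
    return self._gather_current_settings() != self.original_llm_settings
```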
@@ -159,9 +162,9 @@ class LLMEditorWidget(QWidget):
             logger.info("LLM settings loaded successfully.")

         except FileNotFoundError:
-            logger.warning(f"LLM settings file not found: {LLM_CONFIG_PATH}. Using defaults and disabling editor.")
+            logger.warning(f"LLM settings file not found: {LLM_CONFIG_PATH}. Using defaults.")
             QMessageBox.warning(self, "Load Error",
-                                f"LLM settings file not found:\n{LLM_CONFIG_PATH}\n\nPlease ensure the file exists. Using default values.")
+                                f"LLM settings file not found:\n{LLM_CONFIG_PATH}\n\nNew settings will be created if you save.")
             # Reset to defaults (optional, or leave fields empty)
             self.prompt_editor.clear()
             self.endpoint_url_edit.clear()
@@ -169,19 +172,21 @@ class LLMEditorWidget(QWidget):
             self.model_name_edit.clear()
             self.temperature_spinbox.setValue(0.7)
             self.timeout_spinbox.setValue(120)
-            # self.setEnabled(False) # Disabling might be too harsh if user wants to create settings
+            self.original_llm_settings = {}  # Start with empty original settings if file not found

         except json.JSONDecodeError as e:
             logger.error(f"Error decoding JSON from {LLM_CONFIG_PATH}: {e}")
             QMessageBox.critical(self, "Load Error",
                                  f"Failed to parse LLM settings file:\n{LLM_CONFIG_PATH}\n\nError: {e}\n\nPlease check the file for syntax errors. Editor will be disabled.")
             self.setEnabled(False)  # Disable editor on critical load error
+            self.original_llm_settings = {}  # Reset original settings on JSON error

         except Exception as e:  # Catch other potential errors during loading/populating
             logger.error(f"An unexpected error occurred loading LLM settings: {e}", exc_info=True)
             QMessageBox.critical(self, "Load Error",
                                  f"An unexpected error occurred while loading settings:\n{e}\n\nEditor will be disabled.")
             self.setEnabled(False)
+            self.original_llm_settings = {}  # Reset original settings on other errors
+
         # Reset unsaved changes flag and disable save button after loading
@@ -201,26 +206,38 @@ class LLMEditorWidget(QWidget):
         """Gather data from UI, save to JSON file, and handle errors."""
         logger.info("Attempting to save LLM settings...")

-        settings_dict = {}
+        # 1.a. Load Current Target File
+        target_file_content = {}
+        try:
+            with open(LLM_CONFIG_PATH, 'r', encoding='utf-8') as f:
+                target_file_content = json.load(f)
+        except FileNotFoundError:
+            logger.info(f"{LLM_CONFIG_PATH} not found. Will create a new one.")
+            target_file_content = {}  # Start with an empty dict if file doesn't exist
+        except json.JSONDecodeError as e:
+            logger.error(f"Error decoding existing {LLM_CONFIG_PATH}: {e}. Starting with an empty config for save.")
+            QMessageBox.warning(self, "Warning",
+                                f"Could not parse existing LLM settings file ({LLM_CONFIG_PATH}).\n"
+                                f"Any pre-existing settings in that file might be overwritten if you save now.\nError: {e}")
+            target_file_content = {}  # Start fresh if current file is corrupt
+
+        # 1.b. Gather current UI settings into current_llm_settings
+        current_llm_settings = {}
         parsed_examples = []
-        has_errors = False
+        has_errors = False  # For example parsing

-        # Gather API Settings
-        settings_dict["llm_endpoint_url"] = self.endpoint_url_edit.text().strip()
-        settings_dict["llm_api_key"] = self.api_key_edit.text() # Keep as is, don't strip
-        settings_dict["llm_model_name"] = self.model_name_edit.text().strip()
-        settings_dict["llm_temperature"] = self.temperature_spinbox.value()
-        settings_dict["llm_request_timeout"] = self.timeout_spinbox.value()
-
-        # Gather Prompt Settings
-        settings_dict["llm_predictor_prompt"] = self.prompt_editor.toPlainText().strip()
-
-        # Gather and Parse Examples
+        current_llm_settings["llm_endpoint_url"] = self.endpoint_url_edit.text().strip()
+        current_llm_settings["llm_api_key"] = self.api_key_edit.text()  # Keep as is
+        current_llm_settings["llm_model_name"] = self.model_name_edit.text().strip()
+        current_llm_settings["llm_temperature"] = self.temperature_spinbox.value()
+        current_llm_settings["llm_request_timeout"] = self.timeout_spinbox.value()
+        current_llm_settings["llm_predictor_prompt"] = self.prompt_editor.toPlainText().strip()
+
         for i in range(self.examples_tab_widget.count()):
             example_editor = self.examples_tab_widget.widget(i)
             if isinstance(example_editor, QTextEdit):
                 example_text = example_editor.toPlainText().strip()
-                if not example_text: # Skip empty examples silently
+                if not example_text:
                     continue
                 try:
                     parsed_example = json.loads(example_text)
@@ -231,40 +248,58 @@ class LLMEditorWidget(QWidget):
|
|||||||
logger.warning(f"Invalid JSON in '{tab_name}': {e}. Skipping example.")
|
logger.warning(f"Invalid JSON in '{tab_name}': {e}. Skipping example.")
|
||||||
QMessageBox.warning(self, "Invalid Example",
|
QMessageBox.warning(self, "Invalid Example",
|
||||||
f"The content in '{tab_name}' is not valid JSON and will not be saved.\n\nError: {e}\n\nPlease correct it or remove the tab.")
|
f"The content in '{tab_name}' is not valid JSON and will not be saved.\n\nError: {e}\n\nPlease correct it or remove the tab.")
|
||||||
# Optionally switch to the tab with the error:
|
|
||||||
# self.examples_tab_widget.setCurrentIndex(i)
|
|
||||||
else:
|
else:
|
||||||
logger.warning(f"Widget at index {i} in examples tab is not a QTextEdit. Skipping.")
|
logger.warning(f"Widget at index {i} in examples tab is not a QTextEdit. Skipping.")
|
||||||
|
|
||||||
|
|
||||||
if has_errors:
|
if has_errors:
|
||||||
logger.warning("LLM settings not saved due to invalid JSON in examples.")
|
logger.warning("LLM settings not saved due to invalid JSON in examples.")
|
||||||
# Keep save button enabled if there were errors, allowing user to fix and retry
|
return
|
||||||
# self.save_button.setEnabled(True)
|
|
||||||
# self._unsaved_changes = True
|
|
||||||
return # Stop saving process
|
|
||||||
|
|
||||||
settings_dict["llm_predictor_examples"] = parsed_examples
|
current_llm_settings["llm_predictor_examples"] = parsed_examples
|
||||||
|
|
||||||
# Save the dictionary to file
|
# 1.c. Identify Changes and Update Target File Content
|
||||||
|
changed_settings_count = 0
|
||||||
|
for key, current_value in current_llm_settings.items():
|
||||||
|
original_value = self.original_llm_settings.get(key)
|
||||||
|
|
||||||
|
# Special handling for lists (e.g., examples) - direct comparison works
|
||||||
|
# For other types, direct comparison also works.
|
||||||
|
# This includes new keys present in current_llm_settings but not in original_llm_settings
|
||||||
|
if key not in self.original_llm_settings or current_value != original_value:
|
||||||
|
target_file_content[key] = current_value
|
||||||
|
logger.debug(f"Setting '{key}' changed or added. Old: '{original_value}', New: '{current_value}'")
|
||||||
|
changed_settings_count +=1
|
||||||
|
|
||||||
|
if changed_settings_count == 0 and self._unsaved_changes:
|
||||||
|
logger.info("Save called, but no actual changes detected compared to original loaded settings.")
|
||||||
|
# If _unsaved_changes was true, it means UI interaction happened,
|
||||||
|
# but values might have been reverted to original.
|
||||||
|
# We still proceed to save target_file_content as it might contain
|
||||||
|
# values from a file that was modified externally since last load.
|
||||||
|
# Or, if the file didn't exist, it will now be created with current UI values.
|
||||||
|
|
||||||
|
# 1.d. Save Updated Content
|
||||||
try:
|
try:
|
||||||
save_llm_config(settings_dict)
|
save_llm_config(target_file_content) # Save the potentially modified target_file_content
|
||||||
QMessageBox.information(self, "Save Successful", f"LLM settings saved to:\n{LLM_CONFIG_PATH}")
|
QMessageBox.information(self, "Save Successful", f"LLM settings saved to:\n{LLM_CONFIG_PATH}")
|
||||||
|
|
||||||
|
# Update original_llm_settings to reflect the newly saved state
|
||||||
|
self.original_llm_settings = copy.deepcopy(target_file_content)
|
||||||
|
|
||||||
self.save_button.setEnabled(False)
|
self.save_button.setEnabled(False)
|
||||||
self._unsaved_changes = False
|
self._unsaved_changes = False
|
||||||
self.settings_saved.emit() # Notify MainWindow or others
|
self.settings_saved.emit()
|
||||||
logger.info("LLM settings saved successfully.")
|
logger.info("LLM settings saved successfully.")
|
||||||
|
|
||||||
except ConfigurationError as e:
|
except ConfigurationError as e:
|
||||||
logger.error(f"Failed to save LLM settings: {e}")
|
logger.error(f"Failed to save LLM settings: {e}")
|
||||||
QMessageBox.critical(self, "Save Error", f"Could not save LLM settings.\n\nError: {e}")
|
QMessageBox.critical(self, "Save Error", f"Could not save LLM settings.\n\nError: {e}")
|
||||||
# Keep save button enabled as save failed
|
self.save_button.setEnabled(True) # Keep save enabled
|
||||||
self.save_button.setEnabled(True)
|
|
||||||
self._unsaved_changes = True
|
self._unsaved_changes = True
|
||||||
except Exception as e: # Catch unexpected errors during save
|
except Exception as e:
|
||||||
logger.error(f"An unexpected error occurred during LLM settings save: {e}", exc_info=True)
|
logger.error(f"An unexpected error occurred during LLM settings save: {e}", exc_info=True)
|
||||||
QMessageBox.critical(self, "Save Error", f"An unexpected error occurred while saving settings:\n{e}")
|
QMessageBox.critical(self, "Save Error", f"An unexpected error occurred while saving settings:\n{e}")
|
||||||
self.save_button.setEnabled(True)
|
self.save_button.setEnabled(True) # Keep save enabled
|
||||||
self._unsaved_changes = True
|
self._unsaved_changes = True
|
||||||
|
|
||||||
# --- Example Management Slots ---
|
# --- Example Management Slots ---
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ from .llm_editor_widget import LLMEditorWidget
|
|||||||
from .log_console_widget import LogConsoleWidget
|
from .log_console_widget import LogConsoleWidget
|
||||||
from .main_panel_widget import MainPanelWidget
|
from .main_panel_widget import MainPanelWidget
|
||||||
|
|
||||||
|
from .definitions_editor_dialog import DefinitionsEditorDialog
|
||||||
# --- Backend Imports for Data Structures ---
|
# --- Backend Imports for Data Structures ---
|
||||||
from rule_structure import SourceRule, AssetRule, FileRule
|
from rule_structure import SourceRule, AssetRule, FileRule
|
||||||
|
|
||||||
@@ -861,6 +862,11 @@ class MainWindow(QMainWindow):
|
|||||||
self.preferences_action = QAction("&Preferences...", self)
|
self.preferences_action = QAction("&Preferences...", self)
|
||||||
self.preferences_action.triggered.connect(self._open_config_editor)
|
self.preferences_action.triggered.connect(self._open_config_editor)
|
||||||
edit_menu.addAction(self.preferences_action)
|
edit_menu.addAction(self.preferences_action)
|
||||||
|
edit_menu.addSeparator()
|
||||||
|
|
||||||
|
self.definitions_editor_action = QAction("Edit Definitions...", self)
|
||||||
|
self.definitions_editor_action.triggered.connect(self._open_definitions_editor)
|
||||||
|
edit_menu.addAction(self.definitions_editor_action)
|
||||||
|
|
||||||
view_menu = self.menu_bar.addMenu("&View")
|
view_menu = self.menu_bar.addMenu("&View")
|
||||||
|
|
||||||
@@ -904,6 +910,17 @@ class MainWindow(QMainWindow):
|
|||||||
log.exception(f"Error opening configuration editor dialog: {e}")
|
log.exception(f"Error opening configuration editor dialog: {e}")
|
||||||
QMessageBox.critical(self, "Error", f"An error occurred while opening the configuration editor:\n{e}")
|
QMessageBox.critical(self, "Error", f"An error occurred while opening the configuration editor:\n{e}")
|
||||||
|
|
||||||
|
@Slot() # PySide6.QtCore.Slot
|
||||||
|
def _open_definitions_editor(self):
|
||||||
|
log.debug("Opening Definitions Editor dialog.")
|
||||||
|
try:
|
||||||
|
# DefinitionsEditorDialog is imported at the top of the file
|
||||||
|
dialog = DefinitionsEditorDialog(self)
|
||||||
|
dialog.exec_() # Use exec_() for modal dialog
|
||||||
|
log.debug("Definitions Editor dialog closed.")
|
||||||
|
except Exception as e:
|
||||||
|
log.exception(f"Error opening Definitions Editor dialog: {e}")
|
||||||
|
QMessageBox.critical(self, "Error", f"An error occurred while opening the Definitions Editor:\n{e}")
|
||||||
|
|
||||||
@Slot(bool)
|
@Slot(bool)
|
||||||
def _toggle_log_console_visibility(self, checked):
|
def _toggle_log_console_visibility(self, checked):
|
||||||
|
|||||||
@@ -20,7 +20,8 @@ script_dir = Path(__file__).parent
|
|||||||
project_root = script_dir.parent
|
project_root = script_dir.parent
|
||||||
PRESETS_DIR = project_root / "Presets"
|
PRESETS_DIR = project_root / "Presets"
|
||||||
TEMPLATE_PATH = PRESETS_DIR / "_template.json"
|
TEMPLATE_PATH = PRESETS_DIR / "_template.json"
|
||||||
APP_SETTINGS_PATH_LOCAL = project_root / "config" / "app_settings.json"
|
APP_SETTINGS_PATH_LOCAL = project_root / "config" / "app_settings.json" # Retain for other settings if used elsewhere
|
||||||
|
FILE_TYPE_DEFINITIONS_PATH = project_root / "config" / "file_type_definitions.json"
|
||||||
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
@@ -63,18 +64,19 @@ class PresetEditorWidget(QWidget):
|
|||||||
"""Loads FILE_TYPE_DEFINITIONS keys from app_settings.json."""
|
"""Loads FILE_TYPE_DEFINITIONS keys from app_settings.json."""
|
||||||
keys = []
|
keys = []
|
||||||
try:
|
try:
|
||||||
if APP_SETTINGS_PATH_LOCAL.is_file():
|
if FILE_TYPE_DEFINITIONS_PATH.is_file():
|
||||||
with open(APP_SETTINGS_PATH_LOCAL, 'r', encoding='utf-8') as f:
|
with open(FILE_TYPE_DEFINITIONS_PATH, 'r', encoding='utf-8') as f:
|
||||||
settings = json.load(f)
|
settings = json.load(f)
|
||||||
|
# The FILE_TYPE_DEFINITIONS key is at the root of file_type_definitions.json
|
||||||
ftd = settings.get("FILE_TYPE_DEFINITIONS", {})
|
ftd = settings.get("FILE_TYPE_DEFINITIONS", {})
|
||||||
keys = list(ftd.keys())
|
keys = list(ftd.keys())
|
||||||
log.debug(f"Successfully loaded {len(keys)} FILE_TYPE_DEFINITIONS keys.")
|
log.debug(f"Successfully loaded {len(keys)} FILE_TYPE_DEFINITIONS keys from {FILE_TYPE_DEFINITIONS_PATH}.")
|
||||||
else:
|
else:
|
||||||
log.error(f"app_settings.json not found at {APP_SETTINGS_PATH_LOCAL} for PresetEditorWidget.")
|
log.error(f"file_type_definitions.json not found at {FILE_TYPE_DEFINITIONS_PATH} for PresetEditorWidget.")
|
||||||
except json.JSONDecodeError as e:
|
except json.JSONDecodeError as e:
|
||||||
log.error(f"Failed to parse app_settings.json in PresetEditorWidget: {e}")
|
log.error(f"Failed to parse file_type_definitions.json in PresetEditorWidget: {e}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log.error(f"Error loading FILE_TYPE_DEFINITIONS keys in PresetEditorWidget: {e}")
|
log.error(f"Error loading FILE_TYPE_DEFINITIONS keys from {FILE_TYPE_DEFINITIONS_PATH} in PresetEditorWidget: {e}")
|
||||||
return keys
|
return keys
|
||||||
|
|
||||||
def _init_ui(self):
|
def _init_ui(self):
|
||||||
|
|||||||
@@ -552,6 +552,13 @@ class UnifiedViewModel(QAbstractItemModel):
|
|||||||
supplier_col_index = self.createIndex(existing_source_row, self.COL_SUPPLIER, existing_source_rule)
|
supplier_col_index = self.createIndex(existing_source_row, self.COL_SUPPLIER, existing_source_rule)
|
||||||
self.dataChanged.emit(supplier_col_index, supplier_col_index, [Qt.DisplayRole, Qt.EditRole])
|
self.dataChanged.emit(supplier_col_index, supplier_col_index, [Qt.DisplayRole, Qt.EditRole])
|
||||||
|
|
||||||
|
# Always update the preset_name from the new_source_rule, as this reflects the latest prediction context
|
||||||
|
if existing_source_rule.preset_name != new_source_rule.preset_name:
|
||||||
|
log.debug(f" Updating preset_name for SourceRule '{source_path}' from '{existing_source_rule.preset_name}' to '{new_source_rule.preset_name}'")
|
||||||
|
existing_source_rule.preset_name = new_source_rule.preset_name
|
||||||
|
# Note: preset_name is not directly displayed in the view, so no dataChanged needed for a specific column,
|
||||||
|
# but if it influenced other display elements, dataChanged would be emitted for those.
|
||||||
|
|
||||||
|
|
||||||
# --- Merge AssetRules ---
|
# --- Merge AssetRules ---
|
||||||
existing_assets_dict = {asset.asset_name: asset for asset in existing_source_rule.assets}
|
existing_assets_dict = {asset.asset_name: asset for asset in existing_source_rule.assets}
|
||||||
|
|||||||
53
main.py
53
main.py
@@ -4,6 +4,7 @@ import time
|
|||||||
import os
|
import os
|
||||||
import logging
|
import logging
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
import re # Added for checking incrementing token
|
||||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||||
import subprocess
|
import subprocess
|
||||||
import shutil
|
import shutil
|
||||||
@@ -21,15 +22,43 @@ from PySide6.QtCore import Qt
|
|||||||
from PySide6.QtWidgets import QApplication
|
from PySide6.QtWidgets import QApplication
|
||||||
|
|
||||||
# --- Backend Imports ---
|
# --- Backend Imports ---
|
||||||
|
# Add current directory to sys.path for direct execution
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
sys.path.append(os.path.dirname(__file__))
|
||||||
|
print(f"DEBUG: sys.path after append: {sys.path}")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
print("DEBUG: Attempting to import Configuration...")
|
||||||
from configuration import Configuration, ConfigurationError
|
from configuration import Configuration, ConfigurationError
|
||||||
|
print("DEBUG: Successfully imported Configuration.")
|
||||||
|
|
||||||
|
print("DEBUG: Attempting to import ProcessingEngine...")
|
||||||
from processing_engine import ProcessingEngine
|
from processing_engine import ProcessingEngine
|
||||||
|
print("DEBUG: Successfully imported ProcessingEngine.")
|
||||||
|
|
||||||
|
print("DEBUG: Attempting to import SourceRule...")
|
||||||
from rule_structure import SourceRule
|
from rule_structure import SourceRule
|
||||||
|
print("DEBUG: Successfully imported SourceRule.")
|
||||||
|
|
||||||
|
print("DEBUG: Attempting to import MainWindow...")
|
||||||
from gui.main_window import MainWindow
|
from gui.main_window import MainWindow
|
||||||
|
print("DEBUG: Successfully imported MainWindow.")
|
||||||
|
|
||||||
|
print("DEBUG: Attempting to import prepare_processing_workspace...")
|
||||||
from utils.workspace_utils import prepare_processing_workspace
|
from utils.workspace_utils import prepare_processing_workspace
|
||||||
|
print("DEBUG: Successfully imported prepare_processing_workspace.")
|
||||||
|
|
||||||
except ImportError as e:
|
except ImportError as e:
|
||||||
script_dir = Path(__file__).parent.resolve()
|
script_dir = Path(__file__).parent.resolve()
|
||||||
|
print(f"ERROR: Cannot import Configuration or rule_structure classes.")
|
||||||
|
print(f"Ensure configuration.py and rule_structure.py are in the same directory or Python path.")
|
||||||
print(f"ERROR: Failed to import necessary classes: {e}")
|
print(f"ERROR: Failed to import necessary classes: {e}")
|
||||||
|
print(f"DEBUG: Exception type: {type(e)}")
|
||||||
|
print(f"DEBUG: Exception args: {e.args}")
|
||||||
|
import traceback
|
||||||
|
print("DEBUG: Full traceback of the ImportError:")
|
||||||
|
traceback.print_exc()
|
||||||
print(f"Ensure 'configuration.py' and 'asset_processor.py' exist in the directory:")
|
print(f"Ensure 'configuration.py' and 'asset_processor.py' exist in the directory:")
|
||||||
print(f" {script_dir}")
|
print(f" {script_dir}")
|
||||||
print("Or that the directory is included in your PYTHONPATH.")
|
print("Or that the directory is included in your PYTHONPATH.")
|
||||||
@@ -210,9 +239,14 @@ class ProcessingTask(QRunnable):
|
|||||||
# output_dir should already be a Path object
|
# output_dir should already be a Path object
|
||||||
pattern = getattr(config, 'output_directory_pattern', None)
|
pattern = getattr(config, 'output_directory_pattern', None)
|
||||||
if pattern:
|
if pattern:
|
||||||
log.debug(f"Calculating next incrementing value for dir: {output_dir} using pattern: {pattern}")
|
# Only call get_next_incrementing_value if the pattern contains an incrementing token
|
||||||
next_increment_str = get_next_incrementing_value(output_dir, pattern)
|
if re.search(r"\[IncrementingValue\]|#+", pattern):
|
||||||
log.info(f"Calculated next incrementing value for {output_dir}: {next_increment_str}")
|
log.debug(f"Incrementing token found in pattern '{pattern}'. Calculating next value for dir: {output_dir}")
|
||||||
|
next_increment_str = get_next_incrementing_value(output_dir, pattern)
|
||||||
|
log.info(f"Calculated next incrementing value for {output_dir}: {next_increment_str}")
|
||||||
|
else:
|
||||||
|
log.debug(f"No incrementing token found in pattern '{pattern}'. Skipping increment calculation.")
|
||||||
|
next_increment_str = None # Or a default like "00" if downstream expects a string, but None is cleaner if handled.
|
||||||
else:
|
else:
|
||||||
log.warning(f"Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration for preset {config.preset_name}")
|
log.warning(f"Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration for preset {config.preset_name}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -373,11 +407,13 @@ class App(QObject):
|
|||||||
|
|
||||||
# --- Get paths needed for ProcessingTask ---
|
# --- Get paths needed for ProcessingTask ---
|
||||||
try:
|
try:
|
||||||
# Access output path via MainPanelWidget
|
# Get output_dir from processing_settings passed from autotest.py
|
||||||
output_base_path_str = self.main_window.main_panel_widget.output_path_edit.text().strip()
|
output_base_path_str = processing_settings.get("output_dir")
|
||||||
|
log.info(f"APP_DEBUG: Received output_dir in processing_settings: {output_base_path_str}")
|
||||||
|
|
||||||
if not output_base_path_str:
|
if not output_base_path_str:
|
||||||
log.error("Cannot queue tasks: Output directory path is empty in the GUI.")
|
log.error("Cannot queue tasks: Output directory path is empty in processing_settings.")
|
||||||
self.main_window.statusBar().showMessage("Error: Output directory cannot be empty.", 5000)
|
# self.main_window.statusBar().showMessage("Error: Output directory cannot be empty.", 5000) # GUI specific
|
||||||
return
|
return
|
||||||
output_base_path = Path(output_base_path_str)
|
output_base_path = Path(output_base_path_str)
|
||||||
# Basic validation - check if it's likely a valid path structure (doesn't guarantee existence/writability here)
|
# Basic validation - check if it's likely a valid path structure (doesn't guarantee existence/writability here)
|
||||||
@@ -449,8 +485,9 @@ class App(QObject):
|
|||||||
engine=task_engine,
|
engine=task_engine,
|
||||||
rule=rule,
|
rule=rule,
|
||||||
workspace_path=workspace_path,
|
workspace_path=workspace_path,
|
||||||
output_base_path=output_base_path
|
output_base_path=output_base_path # This is Path(output_base_path_str)
|
||||||
)
|
)
|
||||||
|
log.info(f"APP_DEBUG: Passing to ProcessingTask: output_base_path = {output_base_path}")
|
||||||
task.signals.finished.connect(self._on_task_finished)
|
task.signals.finished.connect(self._on_task_finished)
|
||||||
log.debug(f"DEBUG: Calling thread_pool.start() for task {i+1}")
|
log.debug(f"DEBUG: Calling thread_pool.start() for task {i+1}")
|
||||||
self.thread_pool.start(task)
|
self.thread_pool.start(task)
|
||||||
|
|||||||
20
monitor.py
20
monitor.py
@@ -195,17 +195,25 @@ def _process_archive_task(archive_path: Path, output_dir: Path, processed_dir: P
|
|||||||
# Assuming config object has 'output_directory_pattern' attribute/key
|
# Assuming config object has 'output_directory_pattern' attribute/key
|
||||||
pattern = getattr(config, 'output_directory_pattern', None) # Use getattr for safety
|
pattern = getattr(config, 'output_directory_pattern', None) # Use getattr for safety
|
||||||
if pattern:
|
if pattern:
|
||||||
log.debug(f"[Task:{archive_path.name}] Calculating next incrementing value for dir: {output_dir} using pattern: {pattern}")
|
if re.search(r"\[IncrementingValue\]|#+", pattern):
|
||||||
next_increment_str = get_next_incrementing_value(output_dir, pattern)
|
log.debug(f"[Task:{archive_path.name}] Incrementing token found in pattern '{pattern}'. Calculating next value for dir: {output_dir}")
|
||||||
log.info(f"[Task:{archive_path.name}] Calculated next incrementing value: {next_increment_str}")
|
next_increment_str = get_next_incrementing_value(output_dir, pattern)
|
||||||
|
log.info(f"[Task:{archive_path.name}] Calculated next incrementing value: {next_increment_str}")
|
||||||
|
else:
|
||||||
|
log.debug(f"[Task:{archive_path.name}] No incrementing token found in pattern '{pattern}'. Skipping increment calculation.")
|
||||||
|
next_increment_str = None
|
||||||
else:
|
else:
|
||||||
# Check if config is a dict as fallback (depends on load_config implementation)
|
# Check if config is a dict as fallback (depends on load_config implementation)
|
||||||
if isinstance(config, dict):
|
if isinstance(config, dict):
|
||||||
pattern = config.get('output_directory_pattern')
|
pattern = config.get('output_directory_pattern')
|
||||||
if pattern:
|
if pattern:
|
||||||
log.debug(f"[Task:{archive_path.name}] Calculating next incrementing value for dir: {output_dir} using pattern (from dict): {pattern}")
|
if re.search(r"\[IncrementingValue\]|#+", pattern):
|
||||||
next_increment_str = get_next_incrementing_value(output_dir, pattern)
|
log.debug(f"[Task:{archive_path.name}] Incrementing token found in pattern '{pattern}' (from dict). Calculating next value for dir: {output_dir}")
|
||||||
log.info(f"[Task:{archive_path.name}] Calculated next incrementing value (from dict): {next_increment_str}")
|
next_increment_str = get_next_incrementing_value(output_dir, pattern)
|
||||||
|
log.info(f"[Task:{archive_path.name}] Calculated next incrementing value (from dict): {next_increment_str}")
|
||||||
|
else:
|
||||||
|
log.debug(f"[Task:{archive_path.name}] No incrementing token found in pattern '{pattern}' (from dict). Skipping increment calculation.")
|
||||||
|
next_increment_str = None
|
||||||
else:
|
else:
|
||||||
log.warning(f"[Task:{archive_path.name}] Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration dictionary.")
|
log.warning(f"[Task:{archive_path.name}] Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration dictionary.")
|
||||||
else:
|
else:
|
||||||
|
|||||||
110
processing/pipeline/asset_context.py
Normal file
110
processing/pipeline/asset_context.py
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
import dataclasses # Added import
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional
|
||||||
|
|
||||||
|
from rule_structure import AssetRule, FileRule, SourceRule
|
||||||
|
from configuration import Configuration
|
||||||
|
|
||||||
|
# Imports needed for new dataclasses
|
||||||
|
import numpy as np
|
||||||
|
from typing import Any, Tuple, Union
|
||||||
|
|
||||||
|
# --- Stage Input/Output Dataclasses ---
|
||||||
|
|
||||||
|
# Item types for PrepareProcessingItemsStage output
|
||||||
|
@dataclass
|
||||||
|
class MergeTaskDefinition:
|
||||||
|
"""Represents a merge task identified by PrepareProcessingItemsStage."""
|
||||||
|
task_data: Dict # The original task data from context.merged_image_tasks
|
||||||
|
task_key: str # e.g., "merged_task_0"
|
||||||
|
|
||||||
|
# Output for RegularMapProcessorStage
|
||||||
|
@dataclass
|
||||||
|
class ProcessedRegularMapData:
|
||||||
|
processed_image_data: np.ndarray
|
||||||
|
final_internal_map_type: str
|
||||||
|
source_file_path: Path
|
||||||
|
original_bit_depth: Optional[int]
|
||||||
|
original_dimensions: Optional[Tuple[int, int]] # (width, height)
|
||||||
|
transformations_applied: List[str]
|
||||||
|
resolution_key: Optional[str] = None # Added field
|
||||||
|
status: str = "Processed"
|
||||||
|
error_message: Optional[str] = None
|
||||||
|
|
||||||
|
# Output for MergedTaskProcessorStage
|
||||||
|
@dataclass
|
||||||
|
class ProcessedMergedMapData:
|
||||||
|
merged_image_data: np.ndarray
|
||||||
|
output_map_type: str # Internal type
|
||||||
|
source_bit_depths: List[int]
|
||||||
|
final_dimensions: Optional[Tuple[int, int]] # (width, height)
|
||||||
|
transformations_applied_to_inputs: Dict[str, List[str]] # Map type -> list of transforms
|
||||||
|
status: str = "Processed"
|
||||||
|
error_message: Optional[str] = None
|
||||||
|
|
||||||
|
# Input for InitialScalingStage
|
||||||
|
@dataclass
|
||||||
|
class InitialScalingInput:
|
||||||
|
image_data: np.ndarray
|
||||||
|
initial_scaling_mode: str # Moved before fields with defaults
|
||||||
|
original_dimensions: Optional[Tuple[int, int]] # (width, height)
|
||||||
|
resolution_key: Optional[str] = None # Added field
|
||||||
|
# Configuration needed
|
||||||
|
|
||||||
|
# Output for InitialScalingStage
|
||||||
|
@dataclass
|
||||||
|
class InitialScalingOutput:
|
||||||
|
scaled_image_data: np.ndarray
|
||||||
|
scaling_applied: bool
|
||||||
|
final_dimensions: Tuple[int, int] # (width, height)
|
||||||
|
resolution_key: Optional[str] = None # Added field
|
||||||
|
|
||||||
|
# Input for SaveVariantsStage
|
||||||
|
@dataclass
|
||||||
|
class SaveVariantsInput:
|
||||||
|
image_data: np.ndarray # Final data (potentially scaled)
|
||||||
|
internal_map_type: str # Final internal type (e.g., MAP_ROUGH, MAP_COL-1)
|
||||||
|
source_bit_depth_info: List[int]
|
||||||
|
# Configuration needed
|
||||||
|
output_filename_pattern_tokens: Dict[str, Any]
|
||||||
|
image_resolutions: List[int]
|
||||||
|
file_type_defs: Dict[str, Dict]
|
||||||
|
output_format_8bit: str
|
||||||
|
output_format_16bit_primary: str
|
||||||
|
output_format_16bit_fallback: str
|
||||||
|
png_compression_level: int
|
||||||
|
jpg_quality: int
|
||||||
|
output_filename_pattern: str
|
||||||
|
resolution_threshold_for_jpg: Optional[int] # Added for JPG conversion
|
||||||
|
|
||||||
|
# Output for SaveVariantsStage
|
||||||
|
@dataclass
|
||||||
|
class SaveVariantsOutput:
|
||||||
|
saved_files_details: List[Dict]
|
||||||
|
status: str = "Processed"
|
||||||
|
error_message: Optional[str] = None
|
||||||
|
|
||||||
|
# Add a field to AssetProcessingContext for the prepared items
|
||||||
|
@dataclass
|
||||||
|
class AssetProcessingContext:
|
||||||
|
source_rule: SourceRule
|
||||||
|
asset_rule: AssetRule
|
||||||
|
workspace_path: Path
|
||||||
|
engine_temp_dir: Path
|
||||||
|
output_base_path: Path
|
||||||
|
effective_supplier: Optional[str]
|
||||||
|
asset_metadata: Dict
|
||||||
|
processed_maps_details: Dict[str, Dict] # Will store final results per item_key
|
||||||
|
merged_maps_details: Dict[str, Dict] # This might become redundant? Keep for now.
|
||||||
|
files_to_process: List[FileRule]
|
||||||
|
loaded_data_cache: Dict
|
||||||
|
config_obj: Configuration
|
||||||
|
status_flags: Dict
|
||||||
|
incrementing_value: Optional[str]
|
||||||
|
sha5_value: Optional[str] # Keep existing fields
|
||||||
|
# New field for prepared items
|
||||||
|
processing_items: Optional[List[Union[FileRule, MergeTaskDefinition]]] = None
|
||||||
|
# Temporary storage during pipeline execution (managed by orchestrator)
|
||||||
|
# Keys could be FileRule object hash/id or MergeTaskDefinition task_key
|
||||||
|
intermediate_results: Optional[Dict[Any, Union[ProcessedRegularMapData, ProcessedMergedMapData, InitialScalingOutput]]] = None
|
||||||
518
processing/pipeline/orchestrator.py
Normal file
518
processing/pipeline/orchestrator.py
Normal file
@@ -0,0 +1,518 @@
|
|||||||
|
# --- Imports ---
|
||||||
|
import logging
|
||||||
|
import shutil
|
||||||
|
import tempfile
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Dict, Optional, Any, Union # Added Any, Union
|
||||||
|
|
||||||
|
import numpy as np # Added numpy
|
||||||
|
|
||||||
|
from configuration import Configuration
|
||||||
|
from rule_structure import SourceRule, AssetRule, FileRule, ProcessingItem # Added ProcessingItem
|
||||||
|
|
||||||
|
# Import new context classes and stages
|
||||||
|
from .asset_context import (
|
||||||
|
AssetProcessingContext,
|
||||||
|
MergeTaskDefinition,
|
||||||
|
ProcessedRegularMapData,
|
||||||
|
ProcessedMergedMapData,
|
||||||
|
InitialScalingInput,
|
||||||
|
InitialScalingOutput,
|
||||||
|
SaveVariantsInput,
|
||||||
|
SaveVariantsOutput,
|
||||||
|
)
|
||||||
|
from .stages.base_stage import ProcessingStage
|
||||||
|
# Import the new stages we created
|
||||||
|
from .stages.prepare_processing_items import PrepareProcessingItemsStage
|
||||||
|
from .stages.regular_map_processor import RegularMapProcessorStage
|
||||||
|
from .stages.merged_task_processor import MergedTaskProcessorStage
|
||||||
|
from .stages.initial_scaling import InitialScalingStage
|
||||||
|
from .stages.save_variants import SaveVariantsStage
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# --- PipelineOrchestrator Class ---
|
||||||
|
|
||||||
|
class PipelineOrchestrator:
|
||||||
|
"""
|
||||||
|
Orchestrates the processing of assets based on source rules and a series of processing stages.
|
||||||
|
Manages the overall flow, including the core item processing sequence.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, config_obj: Configuration,
|
||||||
|
pre_item_stages: List[ProcessingStage],
|
||||||
|
post_item_stages: List[ProcessingStage]):
|
||||||
|
"""
|
||||||
|
Initializes the PipelineOrchestrator.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_obj: The main configuration object.
|
||||||
|
pre_item_stages: Stages to run before the core item processing loop.
|
||||||
|
post_item_stages: Stages to run after the core item processing loop.
|
||||||
|
"""
|
||||||
|
self.config_obj: Configuration = config_obj
|
||||||
|
self.pre_item_stages: List[ProcessingStage] = pre_item_stages
|
||||||
|
self.post_item_stages: List[ProcessingStage] = post_item_stages
|
||||||
|
# Instantiate the core item processing stages internally
|
||||||
|
self._prepare_stage = PrepareProcessingItemsStage()
|
||||||
|
self._regular_processor_stage = RegularMapProcessorStage()
|
||||||
|
self._merged_processor_stage = MergedTaskProcessorStage()
|
||||||
|
self._scaling_stage = InitialScalingStage()
|
||||||
|
self._save_stage = SaveVariantsStage()
|
||||||
|
|
||||||
|
def _execute_specific_stages(
|
||||||
|
self, context: AssetProcessingContext,
|
||||||
|
stages_to_run: List[ProcessingStage],
|
||||||
|
stage_group_name: str,
|
||||||
|
stop_on_skip: bool = True
|
||||||
|
) -> AssetProcessingContext:
|
||||||
|
"""Executes a specific list of stages."""
|
||||||
|
asset_name = context.asset_rule.asset_name if context.asset_rule else "Unknown"
|
||||||
|
log.debug(f"Asset '{asset_name}': Executing {stage_group_name} stages...")
|
||||||
|
for stage in stages_to_run:
|
||||||
|
stage_name = stage.__class__.__name__
|
||||||
|
log.debug(f"Asset '{asset_name}': Executing {stage_group_name} stage: {stage_name}")
|
||||||
|
try:
|
||||||
|
# Check if stage expects context directly or specific input
|
||||||
|
# For now, assume outer stages take context directly
|
||||||
|
# This might need refinement if outer stages also adopt Input/Output pattern
|
||||||
|
context = stage.execute(context)
|
||||||
|
except Exception as e:
|
||||||
|
log.error(f"Asset '{asset_name}': Error during outer stage '{stage_name}': {e}", exc_info=True)
|
||||||
|
context.status_flags["asset_failed"] = True
|
||||||
|
context.status_flags["asset_failed_stage"] = stage_name
|
||||||
|
context.status_flags["asset_failed_reason"] = str(e)
|
||||||
|
# Update overall metadata immediately on outer stage failure
|
||||||
|
context.asset_metadata["status"] = f"Failed: Error in stage {stage_name}"
|
||||||
|
context.asset_metadata["error_message"] = str(e)
|
||||||
|
break # Stop processing outer stages for this asset on error
|
||||||
|
|
||||||
|
if stop_on_skip and context.status_flags.get("skip_asset"):
|
||||||
|
log.info(f"Asset '{asset_name}': Skipped by outer stage '{stage_name}'. Reason: {context.status_flags.get('skip_reason', 'N/A')}")
|
||||||
|
break # Skip remaining outer stages for this asset
|
||||||
|
return context
|
||||||
|
|
||||||
|
def process_source_rule(
|
||||||
|
self,
|
||||||
|
source_rule: SourceRule,
|
||||||
|
workspace_path: Path,
|
||||||
|
output_base_path: Path,
|
||||||
|
overwrite: bool,
|
||||||
|
incrementing_value: Optional[str],
|
||||||
|
sha5_value: Optional[str] # Keep param name consistent for now
|
||||||
|
) -> Dict[str, List[str]]:
|
||||||
|
"""
|
||||||
|
Processes a single source rule, applying pre-processing stages,
|
||||||
|
the core item processing loop (Prepare, Process, Scale, Save),
|
||||||
|
and post-processing stages.
|
||||||
|
"""
|
||||||
|
overall_status: Dict[str, List[str]] = {
|
||||||
|
"processed": [],
|
||||||
|
"skipped": [],
|
||||||
|
"failed": [],
|
||||||
|
}
|
||||||
|
engine_temp_dir_path: Optional[Path] = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
# --- Setup Temporary Directory ---
|
||||||
|
temp_dir_path_str = tempfile.mkdtemp(prefix=self.config_obj.temp_dir_prefix)
|
||||||
|
engine_temp_dir_path = Path(temp_dir_path_str)
|
||||||
|
log.debug(f"PipelineOrchestrator created temporary directory: {engine_temp_dir_path}")
|
||||||
|
|
||||||
|
# --- Process Each Asset Rule ---
|
||||||
|
for asset_rule in source_rule.assets:
|
||||||
|
asset_name = asset_rule.asset_name
|
||||||
|
log.info(f"Orchestrator: Processing asset '{asset_name}'")
|
||||||
|
|
||||||
|
# --- Initialize Asset Context ---
|
||||||
|
context = AssetProcessingContext(
|
||||||
|
source_rule=source_rule,
|
||||||
|
asset_rule=asset_rule,
|
||||||
|
workspace_path=workspace_path,
|
||||||
|
engine_temp_dir=engine_temp_dir_path,
|
||||||
|
output_base_path=output_base_path,
|
||||||
|
effective_supplier=None,
|
||||||
|
asset_metadata={},
|
||||||
|
processed_maps_details={}, # Final results per item
|
||||||
|
merged_maps_details={}, # Keep for potential backward compat or other uses?
|
||||||
|
files_to_process=[], # Populated by FileRuleFilterStage (assumed in outer_stages)
|
||||||
|
loaded_data_cache={},
|
||||||
|
config_obj=self.config_obj,
|
||||||
|
status_flags={"skip_asset": False, "asset_failed": False},
|
||||||
|
incrementing_value=incrementing_value,
|
||||||
|
sha5_value=sha5_value,
|
||||||
|
processing_items=[], # Initialize new fields
|
||||||
|
intermediate_results={}
|
||||||
|
)
|
||||||
|
|
||||||
|
# --- Execute Pre-Item-Processing Outer Stages ---
|
||||||
|
# (e.g., MetadataInit, SupplierDet, FileRuleFilter, GlossToRough, NormalInvert)
|
||||||
|
# Identify which outer stages run before the item loop
|
||||||
|
# This requires knowing the intended order. Assume all run before for now.
|
||||||
|
context = self._execute_specific_stages(context, self.pre_item_stages, "pre-item", stop_on_skip=True)
|
||||||
|
|
||||||
|
# Check if asset should be skipped or failed after pre-processing
|
||||||
|
if context.status_flags.get("asset_failed"):
|
||||||
|
log.error(f"Asset '{asset_name}': Failed during pre-processing stage '{context.status_flags.get('asset_failed_stage', 'Unknown')}'. Skipping item processing.")
|
||||||
|
overall_status["failed"].append(f"{asset_name} (Failed in {context.status_flags.get('asset_failed_stage', 'Pre-Processing')})")
|
||||||
|
continue # Move to the next asset rule
|
||||||
|
|
||||||
|
if context.status_flags.get("skip_asset"):
|
||||||
|
log.info(f"Asset '{asset_name}': Skipped during pre-processing. Skipping item processing.")
|
||||||
|
overall_status["skipped"].append(asset_name)
|
||||||
|
continue # Move to the next asset rule
|
||||||
|
|
||||||
|
# --- Prepare Processing Items ---
|
||||||
|
log.debug(f"Asset '{asset_name}': Preparing processing items...")
|
||||||
|
try:
|
||||||
|
log.info(f"ORCHESTRATOR_TRACE: Asset '{asset_name}': Attempting to call _prepare_stage.execute(). Current context.status_flags: {context.status_flags}")
|
||||||
|
# Prepare stage modifies context directly
|
||||||
|
context = self._prepare_stage.execute(context)
|
||||||
|
log.info(f"ORCHESTRATOR_TRACE: Asset '{asset_name}': Successfully RETURNED from _prepare_stage.execute(). context.processing_items count: {len(context.processing_items) if context.processing_items is not None else 'None'}. context.status_flags: {context.status_flags}")
|
||||||
|
except Exception as e:
|
||||||
|
log.error(f"ORCHESTRATOR_TRACE: Asset '{asset_name}': EXCEPTION during _prepare_stage.execute(): {e}", exc_info=True)
|
||||||
|
context.status_flags["asset_failed"] = True
|
||||||
|
context.status_flags["asset_failed_stage"] = "PrepareProcessingItemsStage"
|
||||||
|
context.status_flags["asset_failed_reason"] = str(e)
|
||||||
|
overall_status["failed"].append(f"{asset_name} (Failed in Prepare Items)")
|
||||||
|
continue # Move to next asset
|
||||||
|
|
||||||
|
if context.status_flags.get('prepare_items_failed'):
|
||||||
|
log.error(f"Asset '{asset_name}': Failed during item preparation. Reason: {context.status_flags.get('prepare_items_failed_reason', 'Unknown')}. Skipping item processing loop.")
|
||||||
|
overall_status["failed"].append(f"{asset_name} (Failed Prepare Items: {context.status_flags.get('prepare_items_failed_reason', 'Unknown')})")
|
||||||
|
continue # Move to next asset
|
||||||
|
|
||||||
|
if not context.processing_items:
|
||||||
|
log.info(f"Asset '{asset_name}': No items to process after preparation stage.")
|
||||||
|
# Status will be determined at the end
|
||||||
|
|
||||||
|
# --- Core Item Processing Loop ---
|
||||||
|
log.info("ORCHESTRATOR: Starting processing items loop for asset '%s'", asset_name) # Corrected indentation and message
|
||||||
|
log.info(f"Asset '{asset_name}': Starting core item processing loop for {len(context.processing_items)} items...")
|
||||||
|
asset_had_item_errors = False
|
||||||
|
for item_index, item in enumerate(context.processing_items):
|
||||||
|
item_key: Any = None # Key for storing results (FileRule object or task_key string)
|
||||||
|
item_log_prefix = f"Asset '{asset_name}', Item {item_index + 1}/{len(context.processing_items)}"
|
||||||
|
processed_data: Optional[Union[ProcessedRegularMapData, ProcessedMergedMapData]] = None
|
||||||
|
scaled_data_output: Optional[InitialScalingOutput] = None # Store output object
|
||||||
|
saved_data: Optional[SaveVariantsOutput] = None
|
||||||
|
item_status = "Failed" # Default item status
|
||||||
|
current_image_data: Optional[np.ndarray] = None # Track current image data ref
|
||||||
|
|
||||||
|
try:
|
||||||
|
# The 'item' is now expected to be a ProcessingItem or MergeTaskDefinition
|
||||||
|
|
||||||
|
if isinstance(item, ProcessingItem):
|
||||||
|
item_key = f"{item.source_file_info_ref}_{item.map_type_identifier}_{item.resolution_key}"
|
||||||
|
item_log_prefix = f"Asset '{asset_name}', ProcItem '{item_key}'"
|
||||||
|
log.info(f"{item_log_prefix}: Starting processing.")
|
||||||
|
|
||||||
|
# Data for ProcessingItem is already loaded by PrepareProcessingItemsStage
|
||||||
|
current_image_data = item.image_data
|
||||||
|
current_dimensions = item.current_dimensions
|
||||||
|
item_resolution_key = item.resolution_key
|
||||||
|
|
||||||
|
# Transformations (like gloss to rough, normal invert) are assumed to be applied
|
||||||
|
# by RegularMapProcessorStage if it's still used, or directly in PrepareProcessingItemsStage
|
||||||
|
# before creating the ProcessingItem, or a new dedicated transformation stage.
|
||||||
|
# For now, assume item.image_data is ready for scaling/saving.
|
||||||
|
|
||||||
|
# Store initial ProcessingItem data as "processed_data" for consistency if RegularMapProcessor is bypassed
|
||||||
|
# This is a simplification; a dedicated transformation stage would be cleaner.
|
||||||
|
# For now, we assume transformations happened before or within PrepareProcessingItemsStage.
|
||||||
|
# The 'processed_data' variable here is more of a placeholder for what would feed into scaling.
|
||||||
|
|
||||||
|
# Create a simple ProcessedRegularMapData-like structure for logging/details if needed,
|
||||||
|
# or adapt the final_details population later.
|
||||||
|
# For now, we'll directly use 'item' fields.
|
||||||
|
|
||||||
|
# 2. Scale (Optional)
|
||||||
|
scaling_mode = getattr(context.config_obj, "INITIAL_SCALING_MODE", "NONE")
|
||||||
|
# Pass the item's resolution_key to InitialScalingInput
|
||||||
|
scale_input = InitialScalingInput(
|
||||||
|
image_data=current_image_data,
|
||||||
|
original_dimensions=current_dimensions,
|
||||||
|
initial_scaling_mode=scaling_mode,
|
||||||
|
resolution_key=item_resolution_key # Pass the key
|
||||||
|
)
|
||||||
|
# Add _source_file_path for logging within InitialScalingStage if available
|
||||||
|
setattr(scale_input, '_source_file_path', item.source_file_info_ref)
|
||||||
|
|
||||||
|
log.debug(f"{item_log_prefix}: Calling InitialScalingStage. Input res_key: {scale_input.resolution_key}")
|
||||||
|
scaled_data_output = self._scaling_stage.execute(scale_input)
|
||||||
|
current_image_data = scaled_data_output.scaled_image_data
|
||||||
|
current_dimensions = scaled_data_output.final_dimensions # Dimensions after scaling
|
||||||
|
# The resolution_key from item is passed through by InitialScalingOutput
|
||||||
|
output_resolution_key = scaled_data_output.resolution_key
|
||||||
|
log.debug(f"{item_log_prefix}: InitialScalingStage output. Scaled: {scaled_data_output.scaling_applied}, New Dims: {current_dimensions}, Output ResKey: {output_resolution_key}")
|
||||||
|
context.intermediate_results[item_key] = scaled_data_output
|
||||||
|
|
||||||
|
|
||||||
|
# 3. Save Variants
|
||||||
|
if current_image_data is None or current_image_data.size == 0:
|
||||||
|
log.warning(f"{item_log_prefix}: Skipping save stage because image data is empty.")
|
||||||
|
context.processed_maps_details[item_key] = {"status": "Skipped", "notes": "No image data to save", "stage": "SaveVariantsStage"}
|
||||||
|
continue
|
||||||
|
|
||||||
|
log.debug(f"{item_log_prefix}: Preparing to save variant with resolution key '{output_resolution_key}'...")
|
||||||
|
|
||||||
|
output_filename_tokens = {
|
||||||
|
'asset_name': asset_name,
|
||||||
|
'output_base_directory': context.engine_temp_dir,
|
||||||
|
'supplier': context.effective_supplier or 'UnknownSupplier',
|
||||||
|
'resolution': output_resolution_key # Use the key from the item/scaling stage
|
||||||
|
}
|
||||||
|
|
||||||
|
# Determine image_resolutions argument for save_image_variants
|
||||||
|
save_specific_resolutions = {}
|
||||||
|
if output_resolution_key == "LOWRES":
|
||||||
|
# For LOWRES, the "resolution value" is its actual dimension.
|
||||||
|
# image_saving_utils needs a dict like {"LOWRES": 64} if current_dim is 64x64
|
||||||
|
# Assuming current_dimensions[0] is width.
|
||||||
|
save_specific_resolutions = {"LOWRES": current_dimensions[0] if current_dimensions else 0}
|
||||||
|
log.debug(f"{item_log_prefix}: Preparing to save LOWRES variant. Dimensions: {current_dimensions}. Save resolutions arg: {save_specific_resolutions}")
|
||||||
|
elif output_resolution_key in context.config_obj.image_resolutions:
|
||||||
|
save_specific_resolutions = {output_resolution_key: context.config_obj.image_resolutions[output_resolution_key]}
|
||||||
|
else:
|
||||||
|
log.warning(f"{item_log_prefix}: Resolution key '{output_resolution_key}' not found in config.image_resolutions and not LOWRES. Saving might fail or use full res.")
|
||||||
|
# Fallback: pass all configured resolutions, image_saving_utils will try to match by size.
|
||||||
|
# This might not be ideal if the key is truly unknown.
|
||||||
|
# Or, more strictly, fail here if key is unknown and not LOWRES.
|
||||||
|
# For now, let image_saving_utils handle it by passing all.
|
||||||
|
save_specific_resolutions = context.config_obj.image_resolutions
|
||||||
|
|
||||||
|
|
||||||
|
save_input = SaveVariantsInput(
|
||||||
|
image_data=current_image_data,
|
||||||
|
internal_map_type=item.map_type_identifier,
|
||||||
|
source_bit_depth_info=[item.bit_depth] if item.bit_depth is not None else [8], # Default to 8 if not set
|
||||||
|
output_filename_pattern_tokens=output_filename_tokens,
|
||||||
|
image_resolutions=save_specific_resolutions, # Pass the specific resolution(s)
|
||||||
|
file_type_defs=getattr(context.config_obj, "FILE_TYPE_DEFINITIONS", {}),
|
||||||
|
output_format_8bit=context.config_obj.get_8bit_output_format(),
|
||||||
|
output_format_16bit_primary=context.config_obj.get_16bit_output_formats()[0],
|
||||||
|
output_format_16bit_fallback=context.config_obj.get_16bit_output_formats()[1],
|
||||||
|
png_compression_level=context.config_obj.png_compression_level,
|
||||||
|
jpg_quality=context.config_obj.jpg_quality,
|
||||||
|
output_filename_pattern=context.config_obj.output_filename_pattern,
|
||||||
|
resolution_threshold_for_jpg=getattr(context.config_obj, "resolution_threshold_for_jpg", None)
|
||||||
|
)
|
||||||
|
saved_data = self._save_stage.execute(save_input)
|
||||||
|
|
||||||
|
if saved_data and saved_data.status.startswith("Processed"):
|
||||||
|
item_status = saved_data.status
|
||||||
|
log.info(f"{item_log_prefix}: Item successfully processed and saved. Status: {item_status}")
|
||||||
|
context.processed_maps_details[item_key] = {
|
||||||
|
"status": item_status,
|
||||||
|
"saved_files_info": saved_data.saved_files_details,
|
||||||
|
"internal_map_type": item.map_type_identifier,
|
||||||
|
"resolution_key": output_resolution_key,
|
||||||
|
"original_dimensions": item.original_dimensions,
|
||||||
|
"final_dimensions": current_dimensions, # Dimensions after scaling
|
||||||
|
"source_file": item.source_file_info_ref,
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
error_msg = saved_data.error_message if saved_data else "Save stage returned None"
|
||||||
|
log.error(f"{item_log_prefix}: Failed during save stage. Error: {error_msg}")
|
||||||
|
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Save Error: {error_msg}", "stage": "SaveVariantsStage"}
|
||||||
|
asset_had_item_errors = True
|
||||||
|
item_status = "Failed"
|
||||||
|
|
||||||
|
elif isinstance(item, MergeTaskDefinition):
|
||||||
|
# --- This part needs similar refactoring for resolution_key if merged outputs can be LOWRES ---
|
||||||
|
# --- For now, assume merged tasks always produce standard resolutions ---
|
||||||
|
item_key = item.task_key
|
||||||
|
item_log_prefix = f"Asset '{asset_name}', MergeTask '{item_key}'"
|
||||||
|
log.info(f"{item_log_prefix}: Processing MergeTask.")
|
||||||
|
|
||||||
|
# 1. Process Merge Task
|
||||||
|
processed_data = self._merged_processor_stage.execute(context, item)
|
||||||
|
if not processed_data or processed_data.status != "Processed":
|
||||||
|
error_msg = processed_data.error_message if processed_data else "Merge processor returned None"
|
||||||
|
log.error(f"{item_log_prefix}: Failed during merge processing. Error: {error_msg}")
|
||||||
|
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Merge Error: {error_msg}", "stage": "MergedTaskProcessorStage"}
|
||||||
|
asset_had_item_errors = True
|
||||||
|
continue
|
||||||
|
|
||||||
|
context.intermediate_results[item_key] = processed_data
|
||||||
|
current_image_data = processed_data.merged_image_data
|
||||||
|
current_dimensions = processed_data.final_dimensions
|
||||||
|
|
||||||
|
# 2. Scale Merged Output (Optional)
|
||||||
|
# Merged tasks typically don't have a single "resolution_key" like LOWRES from source.
|
||||||
|
# They produce an image that then gets downscaled to 1K, PREVIEW etc.
|
||||||
|
# So, resolution_key for InitialScalingInput here would be None or a default.
|
||||||
|
scaling_mode = getattr(context.config_obj, "INITIAL_SCALING_MODE", "NONE")
|
||||||
|
scale_input = InitialScalingInput(
|
||||||
|
image_data=current_image_data,
|
||||||
|
original_dimensions=current_dimensions,
|
||||||
|
initial_scaling_mode=scaling_mode,
|
||||||
|
resolution_key=None # Merged outputs are not "LOWRES" themselves before this scaling
|
||||||
|
)
|
||||||
|
setattr(scale_input, '_source_file_path', f"MergeTask_{item_key}") # For logging
|
||||||
|
|
||||||
|
log.debug(f"{item_log_prefix}: Calling InitialScalingStage for merged data.")
|
||||||
|
scaled_data_output = self._scaling_stage.execute(scale_input)
|
||||||
|
current_image_data = scaled_data_output.scaled_image_data
|
||||||
|
current_dimensions = scaled_data_output.final_dimensions
|
||||||
|
# Merged items don't have a specific output_resolution_key from source,
|
||||||
|
# they will be saved to all applicable resolutions from config.
|
||||||
|
# So scaled_data_output.resolution_key will be None here.
|
||||||
|
context.intermediate_results[item_key] = scaled_data_output
|
||||||
|
|
||||||
|
# 3. Save Merged Variants
|
||||||
|
if current_image_data is None or current_image_data.size == 0:
|
||||||
|
log.warning(f"{item_log_prefix}: Skipping save for merged task, image data is empty.")
|
||||||
|
context.processed_maps_details[item_key] = {"status": "Skipped", "notes": "No merged image data to save", "stage": "SaveVariantsStage"}
|
||||||
|
continue
|
||||||
|
|
||||||
|
output_filename_tokens = {
|
||||||
|
'asset_name': asset_name,
|
||||||
|
'output_base_directory': context.engine_temp_dir,
|
||||||
|
'supplier': context.effective_supplier or 'UnknownSupplier',
|
||||||
|
# 'resolution' token will be filled by image_saving_utils for each variant
|
||||||
|
}
|
||||||
|
|
||||||
|
# For merged tasks, we usually want to generate all standard resolutions.
|
||||||
|
# The `resolution_key` from the item itself is not applicable here for the `resolution` token.
|
||||||
|
# The `image_saving_utils.save_image_variants` will iterate through `context.config_obj.image_resolutions`.
|
||||||
|
save_input = SaveVariantsInput(
|
||||||
|
image_data=current_image_data,
|
||||||
|
internal_map_type=processed_data.output_map_type,
|
||||||
|
source_bit_depth_info=processed_data.source_bit_depths,
|
||||||
|
output_filename_pattern_tokens=output_filename_tokens,
|
||||||
|
image_resolutions=context.config_obj.image_resolutions, # Pass all configured resolutions
|
||||||
|
file_type_defs=getattr(context.config_obj, "FILE_TYPE_DEFINITIONS", {}),
|
||||||
|
output_format_8bit=context.config_obj.get_8bit_output_format(),
|
||||||
|
output_format_16bit_primary=context.config_obj.get_16bit_output_formats()[0],
|
||||||
|
output_format_16bit_fallback=context.config_obj.get_16bit_output_formats()[1],
|
||||||
|
png_compression_level=context.config_obj.png_compression_level,
|
||||||
|
jpg_quality=context.config_obj.jpg_quality,
|
||||||
|
output_filename_pattern=context.config_obj.output_filename_pattern,
|
||||||
|
resolution_threshold_for_jpg=getattr(context.config_obj, "resolution_threshold_for_jpg", None)
|
||||||
|
)
|
||||||
|
saved_data = self._save_stage.execute(save_input)
|
||||||
|
|
||||||
|
if saved_data and saved_data.status.startswith("Processed"):
|
||||||
|
item_status = saved_data.status
|
||||||
|
log.info(f"{item_log_prefix}: Merged task successfully processed and saved. Status: {item_status}")
|
||||||
|
context.processed_maps_details[item_key] = {
|
||||||
|
"status": item_status,
|
||||||
|
"saved_files_info": saved_data.saved_files_details,
|
||||||
|
"internal_map_type": processed_data.output_map_type,
|
||||||
|
"final_dimensions": current_dimensions,
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
error_msg = saved_data.error_message if saved_data else "Save stage for merged task returned None"
|
||||||
|
log.error(f"{item_log_prefix}: Failed during save stage for merged task. Error: {error_msg}")
|
||||||
|
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Save Error (Merged): {error_msg}", "stage": "SaveVariantsStage"}
|
||||||
|
asset_had_item_errors = True
|
||||||
|
item_status = "Failed"
|
||||||
|
else:
|
||||||
|
log.warning(f"{item_log_prefix}: Unknown item type in loop: {type(item)}. Skipping.")
|
||||||
|
# Ensure some key exists to prevent KeyError if item_key was not set
|
||||||
|
unknown_item_key = f"unknown_item_at_index_{item_index}"
|
||||||
|
context.processed_maps_details[unknown_item_key] = {"status": "Skipped", "notes": f"Unknown item type {type(item)}"}
|
||||||
|
asset_had_item_errors = True
|
||||||
|
continue
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log.exception(f"Asset '{asset_name}', Item Loop Index {item_index}: Unhandled exception: {e}")
|
||||||
|
# Ensure details are recorded even on unhandled exception
|
||||||
|
if item_key is not None:
|
||||||
|
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Unhandled Loop Error: {e}", "stage": "OrchestratorLoop"}
|
||||||
|
else:
|
||||||
|
log.error(f"Asset '{asset_name}': Unhandled exception in item loop before item key was set.")
|
||||||
|
asset_had_item_errors = True
|
||||||
|
item_status = "Failed"
|
||||||
|
# Optionally break loop or continue? Continue for now to process other items.
|
||||||
|
|
||||||
|
log.info("ORCHESTRATOR: Finished processing items loop for asset '%s'", asset_name)
|
||||||
|
log.info(f"Asset '{asset_name}': Finished core item processing loop.")
|
||||||
|
|
||||||
|
# --- Execute Post-Item-Processing Outer Stages ---
|
||||||
|
# (e.g., OutputOrganization, MetadataFinalizationSave)
|
||||||
|
# Identify which outer stages run after the item loop
|
||||||
|
# This needs better handling based on stage purpose. Assume none run after for now.
|
||||||
|
if not context.status_flags.get("asset_failed"):
|
||||||
|
log.info("ORCHESTRATOR: Executing post-item-processing outer stages for asset '%s'", asset_name)
|
||||||
|
context = self._execute_specific_stages(context, self.post_item_stages, "post-item", stop_on_skip=False)
|
||||||
|
|
||||||
|
# --- Final Asset Status Determination ---
|
||||||
|
final_asset_status = "Unknown"
|
||||||
|
fail_reason = ""
|
||||||
|
if context.status_flags.get("asset_failed"):
|
||||||
|
final_asset_status = "Failed"
|
||||||
|
fail_reason = f"(Failed in {context.status_flags.get('asset_failed_stage', 'Unknown Stage')}: {context.status_flags.get('asset_failed_reason', 'Unknown Reason')})"
|
||||||
|
elif context.status_flags.get("skip_asset"):
|
||||||
|
final_asset_status = "Skipped"
|
||||||
|
fail_reason = f"(Skipped: {context.status_flags.get('skip_reason', 'Unknown Reason')})"
|
||||||
|
elif asset_had_item_errors:
|
||||||
|
final_asset_status = "Failed"
|
||||||
|
fail_reason = "(One or more items failed)"
|
||||||
|
elif not context.processing_items:
|
||||||
|
# No items prepared, no errors -> consider skipped or processed based on definition?
|
||||||
|
final_asset_status = "Skipped" # Or "Processed (No Items)"
|
||||||
|
fail_reason = "(No items to process)"
|
||||||
|
elif not context.processed_maps_details and context.processing_items:
|
||||||
|
# Items were prepared, but none resulted in processed_maps_details entry
|
||||||
|
final_asset_status = "Skipped" # Or Failed?
|
||||||
|
fail_reason = "(All processing items skipped or failed internally)"
|
||||||
|
elif context.processed_maps_details:
|
||||||
|
# Check if all items in processed_maps_details are actually processed successfully
|
||||||
|
all_processed_ok = all(
|
||||||
|
str(details.get("status", "")).startswith("Processed")
|
||||||
|
for details in context.processed_maps_details.values()
|
||||||
|
)
|
||||||
|
some_processed_ok = any(
|
||||||
|
str(details.get("status", "")).startswith("Processed")
|
||||||
|
for details in context.processed_maps_details.values()
|
||||||
|
)
|
||||||
|
|
||||||
|
if all_processed_ok:
|
||||||
|
final_asset_status = "Processed"
|
||||||
|
elif some_processed_ok:
|
||||||
|
final_asset_status = "Partial" # Introduce a partial status? Or just Failed?
|
||||||
|
fail_reason = "(Some items failed)"
|
||||||
|
final_asset_status = "Failed" # Treat partial as Failed for overall status
|
||||||
|
else: # No items processed successfully
|
||||||
|
final_asset_status = "Failed"
|
||||||
|
fail_reason = "(All items failed)"
|
||||||
|
else:
|
||||||
|
# Should not happen if processing_items existed
|
||||||
|
final_asset_status = "Failed"
|
||||||
|
fail_reason = "(Unknown state after item processing)"
|
||||||
|
|
||||||
|
|
||||||
|
# Update overall status list
|
||||||
|
if final_asset_status == "Processed":
|
||||||
|
overall_status["processed"].append(asset_name)
|
||||||
|
elif final_asset_status == "Skipped":
|
||||||
|
overall_status["skipped"].append(f"{asset_name} {fail_reason}")
|
||||||
|
else: # Failed or Unknown
|
||||||
|
overall_status["failed"].append(f"{asset_name} {fail_reason}")
|
||||||
|
|
||||||
|
log.info(f"Asset '{asset_name}' final status: {final_asset_status} {fail_reason}")
|
||||||
|
# Clean up intermediate results for the asset to save memory
|
||||||
|
context.intermediate_results = {}
|
||||||
|
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log.error(f"PipelineOrchestrator.process_source_rule failed critically: {e}", exc_info=True)
|
||||||
|
# Mark all assets from this source rule that weren't finished as failed
|
||||||
|
processed_or_skipped_or_failed = set(overall_status["processed"]) | \
|
||||||
|
set(name.split(" ")[0] for name in overall_status["skipped"]) | \
|
||||||
|
set(name.split(" ")[0] for name in overall_status["failed"])
|
||||||
|
for asset_rule in source_rule.assets:
|
||||||
|
if asset_rule.asset_name not in processed_or_skipped_or_failed:
|
||||||
|
overall_status["failed"].append(f"{asset_rule.asset_name} (Orchestrator Error: {e})")
|
||||||
|
finally:
|
||||||
|
# --- Cleanup Temporary Directory ---
|
||||||
|
if engine_temp_dir_path and engine_temp_dir_path.exists():
|
||||||
|
try:
|
||||||
|
log.debug(f"PipelineOrchestrator cleaning up temporary directory: {engine_temp_dir_path}")
|
||||||
|
shutil.rmtree(engine_temp_dir_path, ignore_errors=True)
|
||||||
|
except Exception as e:
|
||||||
|
log.error(f"Error cleaning up orchestrator temporary directory {engine_temp_dir_path}: {e}", exc_info=True)
|
||||||
|
|
||||||
|
return overall_status
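For reference, a minimal sketch of how a caller might summarize the overall_status dictionary returned above; the dictionary keys ("processed", "skipped", "failed") follow the loop above, while the report_source_rule_result helper name is purely illustrative.

from typing import Dict, List

def report_source_rule_result(overall_status: Dict[str, List[str]]) -> bool:
    """Hypothetical helper: log a one-line summary and return True only if nothing failed."""
    processed = overall_status.get("processed", [])
    skipped = overall_status.get("skipped", [])
    failed = overall_status.get("failed", [])
    print(f"Assets processed: {len(processed)}, skipped: {len(skipped)}, failed: {len(failed)}")
    for entry in failed:
        # Failed entries carry the asset name plus a "(reason)" suffix, as built in the loop above.
        print(f"  FAILED: {entry}")
    return not failed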
processing/pipeline/stages/alpha_extraction_to_mask.py (new file, 179 lines)
@@ -0,0 +1,179 @@
import logging
|
||||||
|
import uuid
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional, Dict
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from .base_stage import ProcessingStage
|
||||||
|
from ..asset_context import AssetProcessingContext
|
||||||
|
from ...utils import image_processing_utils as ipu
|
||||||
|
from rule_structure import FileRule
|
||||||
|
from utils.path_utils import sanitize_filename
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class AlphaExtractionToMaskStage(ProcessingStage):
|
||||||
|
"""
|
||||||
|
Extracts an alpha channel from a suitable source map (e.g., Albedo, Diffuse)
|
||||||
|
to generate a MASK map if one is not explicitly defined.
|
||||||
|
"""
|
||||||
|
# Use MAP_ prefixed types for internal logic checks
|
||||||
|
SUITABLE_SOURCE_MAP_TYPES = ["MAP_COL", "MAP_ALBEDO", "MAP_BASECOLOR"] # Map types likely to have alpha
|
||||||
|
|
||||||
|
def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Running AlphaExtractionToMaskStage.")
|
||||||
|
|
||||||
|
if context.status_flags.get('skip_asset'):
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Skipping due to 'skip_asset' flag.")
|
||||||
|
return context
|
||||||
|
|
||||||
|
if not context.files_to_process or not context.processed_maps_details:
|
||||||
|
logger.debug(
|
||||||
|
f"Asset '{asset_name_for_log}': Skipping alpha extraction - "
|
||||||
|
f"no files to process or no processed map details."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
# A. Check for Existing MASK Map
|
||||||
|
for file_rule in context.files_to_process:
|
||||||
|
# Assuming file_rule has 'map_type' and 'file_path' (instead of filename_pattern)
|
||||||
|
# Check for existing MASK map using the correct item_type field and MAP_ prefix
|
||||||
|
if file_rule.item_type == "MAP_MASK":
|
||||||
|
file_path_for_log = file_rule.file_path if hasattr(file_rule, 'file_path') else "Unknown file path"
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': MASK map already defined by FileRule "
|
||||||
|
f"for '{file_path_for_log}'. Skipping alpha extraction."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
# B. Find Suitable Source Map with Alpha
|
||||||
|
source_map_details_for_alpha: Optional[Dict] = None
|
||||||
|
source_file_rule_id_for_alpha: Optional[str] = None # This ID comes from processed_maps_details keys
|
||||||
|
|
||||||
|
for file_rule_id, details in context.processed_maps_details.items():
|
||||||
|
# Check for suitable source map using the standardized internal_map_type field
|
||||||
|
internal_map_type = details.get('internal_map_type') # Use the standardized field
|
||||||
|
if details.get('status') == 'Processed' and \
|
||||||
|
internal_map_type in self.SUITABLE_SOURCE_MAP_TYPES:
|
||||||
|
try:
|
||||||
|
temp_path = Path(details['temp_processed_file'])
|
||||||
|
if not temp_path.exists():
|
||||||
|
logger.warning(
|
||||||
|
f"Asset '{asset_name_for_log}': Temp file {temp_path} for map "
|
||||||
|
f"{details['map_type']} (ID: {file_rule_id}) does not exist. Cannot check for alpha."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
image_data = ipu.load_image(temp_path)
|
||||||
|
|
||||||
|
if image_data is not None and image_data.ndim == 3 and image_data.shape[2] == 4:
|
||||||
|
source_map_details_for_alpha = details
|
||||||
|
source_file_rule_id_for_alpha = file_rule_id
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': Found potential source for alpha extraction: "
|
||||||
|
f"{temp_path} (MapType: {details['map_type']})"
|
||||||
|
)
|
||||||
|
break
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(
|
||||||
|
f"Asset '{asset_name_for_log}': Error checking alpha for {details.get('temp_processed_file', 'N/A')}: {e}"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
|
||||||
|
if source_map_details_for_alpha is None or source_file_rule_id_for_alpha is None:
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': No suitable source map with alpha channel found "
|
||||||
|
f"for MASK extraction."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
# C. Extract Alpha Channel
|
||||||
|
source_image_path = Path(source_map_details_for_alpha['temp_processed_file'])
|
||||||
|
full_image_data = ipu.load_image(source_image_path) # Reload to ensure we have the original RGBA
|
||||||
|
|
||||||
|
if full_image_data is None or not (full_image_data.ndim == 3 and full_image_data.shape[2] == 4):
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Failed to reload or verify alpha channel from "
|
||||||
|
f"{source_image_path} for MASK extraction."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
alpha_channel: np.ndarray = full_image_data[:, :, 3] # Extract alpha (0-255)
|
||||||
|
|
||||||
|
# D. Save New Temporary MASK Map
|
||||||
|
if alpha_channel.ndim == 2: # Expected
|
||||||
|
pass
|
||||||
|
elif alpha_channel.ndim == 3 and alpha_channel.shape[2] == 1: # (H, W, 1)
|
||||||
|
alpha_channel = alpha_channel.squeeze(axis=2)
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Extracted alpha channel has unexpected dimensions: "
|
||||||
|
f"{alpha_channel.shape}. Cannot save."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
mask_temp_filename = (
|
||||||
|
f"mask_from_alpha_{sanitize_filename(source_map_details_for_alpha['map_type'])}"
|
||||||
|
f"_{source_file_rule_id_for_alpha}{source_image_path.suffix}"
|
||||||
|
)
|
||||||
|
mask_temp_path = context.engine_temp_dir / mask_temp_filename
|
||||||
|
|
||||||
|
save_success = ipu.save_image(mask_temp_path, alpha_channel)
|
||||||
|
|
||||||
|
if not save_success:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Failed to save extracted alpha mask to {mask_temp_path}."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': Extracted alpha and saved as new MASK map: {mask_temp_path}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# E. Create New FileRule for the MASK and Update Context
|
||||||
|
# FileRule does not have id, active, transform_settings, source_map_ids_for_generation
|
||||||
|
# It has file_path, item_type, item_type_override, etc.
|
||||||
|
new_mask_file_rule = FileRule(
|
||||||
|
file_path=mask_temp_path.name, # Use file_path
|
||||||
|
item_type="MAP_MASK", # This should be the item_type for a mask
|
||||||
|
map_type="MASK" # Explicitly set map_type if FileRule has it, or handle via item_type
|
||||||
|
# Other FileRule fields like item_type_override can be set if needed
|
||||||
|
)
|
||||||
|
# If FileRule needs a unique identifier, it should be handled differently,
|
||||||
|
# perhaps by generating one and storing it in common_metadata or a separate mapping.
|
||||||
|
# For now, we create a simple FileRule.
|
||||||
|
|
||||||
|
context.files_to_process.append(new_mask_file_rule)
|
||||||
|
|
||||||
|
# For processed_maps_details, we need a unique key. Using a new UUID.
|
||||||
|
new_mask_processed_map_key = uuid.uuid4().hex
|
||||||
|
|
||||||
|
original_dims = source_map_details_for_alpha.get('original_dimensions')
|
||||||
|
if original_dims is None and full_image_data is not None: # Fallback if not in details
|
||||||
|
original_dims = (full_image_data.shape[1], full_image_data.shape[0])
|
||||||
|
|
||||||
|
|
||||||
|
context.processed_maps_details[new_mask_processed_map_key] = {
|
||||||
|
'internal_map_type': "MAP_MASK", # Use the standardized MAP_ prefixed field
|
||||||
|
'map_type': "MASK", # Keep standard type for metadata/naming consistency if needed
|
||||||
|
'source_file': str(source_image_path),
|
||||||
|
'temp_processed_file': str(mask_temp_path),
|
||||||
|
'original_dimensions': original_dims,
|
||||||
|
'processed_dimensions': (alpha_channel.shape[1], alpha_channel.shape[0]),
|
||||||
|
'status': 'Processed',
|
||||||
|
'notes': (
|
||||||
|
f"Generated from alpha of {source_map_details_for_alpha.get('internal_map_type', 'unknown type')} " # Use internal_map_type for notes
|
||||||
|
f"(Source Detail ID: {source_file_rule_id_for_alpha})"
|
||||||
|
),
|
||||||
|
# 'file_rule_id': new_mask_file_rule_id_str # FileRule doesn't have an ID to link here directly
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': Added new FileRule for generated MASK "
|
||||||
|
f"and updated processed_maps_details with key '{new_mask_processed_map_key}'."
|
||||||
|
)
|
||||||
|
|
||||||
|
return context
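As a standalone illustration of the core operation of this stage (it does not use the project's ipu helpers and is not the stage's actual implementation), the following sketch pulls the alpha plane out of an RGBA array as a single-channel mask:

import numpy as np

def extract_alpha_as_mask(rgba: np.ndarray) -> np.ndarray:
    """Return the alpha plane of an (H, W, 4) array as an (H, W) mask."""
    if rgba.ndim != 3 or rgba.shape[2] != 4:
        raise ValueError(f"Expected an RGBA image of shape (H, W, 4), got {rgba.shape}")
    return rgba[:, :, 3].copy()

# Example: a 2x2 fully opaque RGBA image yields a 2x2 mask of 255s.
demo = np.zeros((2, 2, 4), dtype=np.uint8)
demo[..., 3] = 255
assert extract_alpha_as_mask(demo).shape == (2, 2)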
processing/pipeline/stages/asset_skip_logic.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import logging
|
||||||
|
from .base_stage import ProcessingStage
|
||||||
|
from ..asset_context import AssetProcessingContext
|
||||||
|
|
||||||
|
class AssetSkipLogicStage(ProcessingStage):
|
||||||
|
"""
|
||||||
|
Processing stage to determine if an asset should be skipped based on various conditions.
|
||||||
|
"""
|
||||||
|
def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
"""
|
||||||
|
Executes the asset skip logic.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
context: The asset processing context.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The updated asset processing context.
|
||||||
|
"""
|
||||||
|
context.status_flags['skip_asset'] = False # Initialize/reset skip flag
|
||||||
|
asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
|
||||||
|
|
||||||
|
# 1. Check for Supplier Error
|
||||||
|
# Assuming 'supplier_error' might be set by a previous stage (e.g., SupplierDeterminationStage)
|
||||||
|
# or if effective_supplier is None after attempts to determine it.
|
||||||
|
if context.effective_supplier is None or context.status_flags.get('supplier_error', False):
|
||||||
|
logging.info(f"Asset '{asset_name_for_log}': Skipping due to missing or invalid supplier.")
|
||||||
|
context.status_flags['skip_asset'] = True
|
||||||
|
context.status_flags['skip_reason'] = "Invalid or missing supplier"
|
||||||
|
return context
|
||||||
|
|
||||||
|
# 2. Check process_status in asset_rule.common_metadata
|
||||||
|
process_status = context.asset_rule.common_metadata.get('process_status')
|
||||||
|
|
||||||
|
if process_status == "SKIP":
|
||||||
|
logging.info(f"Asset '{asset_name_for_log}': Skipping as per common_metadata.process_status 'SKIP'.")
|
||||||
|
context.status_flags['skip_asset'] = True
|
||||||
|
context.status_flags['skip_reason'] = "Process status set to SKIP in common_metadata"
|
||||||
|
return context
|
||||||
|
|
||||||
|
# Assuming context.config_obj.general_settings.overwrite_existing is a valid path.
|
||||||
|
# This might need adjustment if 'general_settings' or 'overwrite_existing' is not found.
|
||||||
|
# For now, we'll assume it's correct based on the original code's intent.
|
||||||
|
if process_status == "PROCESSED" and \
|
||||||
|
hasattr(context.config_obj, 'general_settings') and \
|
||||||
|
not getattr(context.config_obj.general_settings, 'overwrite_existing', True): # Default to True (allow overwrite) if not found
|
||||||
|
logging.info(
|
||||||
|
f"Asset '{asset_name_for_log}': Skipping as it's already 'PROCESSED' (from common_metadata) "
|
||||||
|
f"and overwrite is disabled."
|
||||||
|
)
|
||||||
|
context.status_flags['skip_asset'] = True
|
||||||
|
context.status_flags['skip_reason'] = "Already processed (common_metadata), overwrite disabled"
|
||||||
|
return context
|
||||||
|
|
||||||
|
# If none of the above conditions are met, skip_asset remains False.
|
||||||
|
return context
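The overwrite check above uses a defensive getattr chain with a permissive default. A tiny self-contained sketch of that pattern (the _Config classes below are stand-ins, not the project's real config objects):

class _GeneralSettings:
    overwrite_existing = False

class _Config:
    general_settings = _GeneralSettings()

# Mirrors the lookup above: default to True (allow overwrite) when the attribute is missing.
overwrite = getattr(getattr(_Config(), "general_settings", object()), "overwrite_existing", True)
assert overwrite is False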
processing/pipeline/stages/base_stage.py (new file, 22 lines)
@@ -0,0 +1,22 @@
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
|
from ..asset_context import AssetProcessingContext
|
||||||
|
|
||||||
|
|
||||||
|
class ProcessingStage(ABC):
|
||||||
|
"""
|
||||||
|
Abstract base class for a stage in the asset processing pipeline.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
"""
|
||||||
|
Executes the processing logic of this stage.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
context: The current asset processing context.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The updated asset processing context.
|
||||||
|
"""
|
||||||
|
pass
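To show how this contract is meant to be used, here is a minimal, purely illustrative concrete stage; the import paths are inferred from the file layout in this changeset, and the class itself is not part of the codebase.

from processing.pipeline.asset_context import AssetProcessingContext
from processing.pipeline.stages.base_stage import ProcessingStage

class NoOpStage(ProcessingStage):
    """Illustrative stage: honours the skip flag and otherwise passes the context through."""

    def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
        if context.status_flags.get("skip_asset"):
            return context
        # A real stage would read and update context fields here.
        return context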
processing/pipeline/stages/file_rule_filter.py (new file, 90 lines)
@@ -0,0 +1,90 @@
import logging
|
||||||
|
import fnmatch
|
||||||
|
from typing import List, Set
|
||||||
|
|
||||||
|
from .base_stage import ProcessingStage
|
||||||
|
from ..asset_context import AssetProcessingContext
|
||||||
|
from rule_structure import FileRule
|
||||||
|
|
||||||
|
|
||||||
|
class FileRuleFilterStage(ProcessingStage):
|
||||||
|
"""
|
||||||
|
Determines which FileRules associated with an AssetRule should be processed.
|
||||||
|
Populates context.files_to_process, respecting FILE_IGNORE rules.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
"""
|
||||||
|
Executes the file rule filtering logic.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
context: The AssetProcessingContext for the current asset.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The modified AssetProcessingContext.
|
||||||
|
"""
|
||||||
|
asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
|
||||||
|
if context.status_flags.get('skip_asset'):
|
||||||
|
logging.debug(f"Asset '{asset_name_for_log}': Skipping FileRuleFilterStage due to 'skip_asset' flag.")
|
||||||
|
return context
|
||||||
|
|
||||||
|
context.files_to_process: List[FileRule] = []
|
||||||
|
ignore_patterns: Set[str] = set()
|
||||||
|
|
||||||
|
# Step 1: Collect all FILE_IGNORE patterns
|
||||||
|
if context.asset_rule and context.asset_rule.files:
|
||||||
|
for file_rule in context.asset_rule.files:
|
||||||
|
if file_rule.item_type == "FILE_IGNORE": # Removed 'and file_rule.active'
|
||||||
|
if hasattr(file_rule, 'file_path') and file_rule.file_path:
|
||||||
|
ignore_patterns.add(file_rule.file_path)
|
||||||
|
logging.debug(
|
||||||
|
f"Asset '{asset_name_for_log}': Registering ignore pattern: '{file_rule.file_path}'"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logging.warning(f"Asset '{asset_name_for_log}': FILE_IGNORE rule found without a file_path. Skipping this ignore rule.")
|
||||||
|
else:
|
||||||
|
logging.debug(f"Asset '{asset_name_for_log}': No file rules (context.asset_rule.files) to process or asset_rule is None.")
|
||||||
|
# Still need to return context even if there are no rules
|
||||||
|
logging.info(f"Asset '{asset_name_for_log}': 0 file rules queued for processing after filtering.")
|
||||||
|
return context
|
||||||
|
|
||||||
|
|
||||||
|
# Step 2: Filter and add processable FileRules
|
||||||
|
for file_rule in context.asset_rule.files: # Iterate over .files
|
||||||
|
# Removed 'if not file_rule.active:' check
|
||||||
|
|
||||||
|
if file_rule.item_type == "FILE_IGNORE":
|
||||||
|
# Already processed, skip.
|
||||||
|
continue
|
||||||
|
|
||||||
|
is_ignored = False
|
||||||
|
# Ensure file_rule.file_path exists before using it with fnmatch
|
||||||
|
current_file_path = file_rule.file_path if hasattr(file_rule, 'file_path') else None
|
||||||
|
if not current_file_path:
|
||||||
|
logging.warning(f"Asset '{asset_name_for_log}': FileRule found without a file_path. Skipping this rule for ignore matching.")
|
||||||
|
# Decide if this rule should be added or skipped if it has no path
|
||||||
|
# For now, let's assume it might be an error and not add it if it can't be matched.
|
||||||
|
# If it should be added by default, this logic needs adjustment.
|
||||||
|
continue
|
||||||
|
|
||||||
|
|
||||||
|
for ignore_pat in ignore_patterns:
|
||||||
|
if fnmatch.fnmatch(current_file_path, ignore_pat):
|
||||||
|
is_ignored = True
|
||||||
|
logging.debug(
|
||||||
|
f"Asset '{asset_name_for_log}': Skipping file rule for '{current_file_path}' "
|
||||||
|
f"due to matching ignore pattern '{ignore_pat}'."
|
||||||
|
)
|
||||||
|
break
|
||||||
|
|
||||||
|
if not is_ignored:
|
||||||
|
context.files_to_process.append(file_rule)
|
||||||
|
logging.debug(
|
||||||
|
f"Asset '{asset_name_for_log}': Adding file rule for '{current_file_path}' "
|
||||||
|
f"(type: {file_rule.item_type}) to processing queue."
|
||||||
|
)
|
||||||
|
|
||||||
|
logging.info(
|
||||||
|
f"Asset '{asset_name_for_log}': {len(context.files_to_process)} file rules queued for processing after filtering."
|
||||||
|
)
|
||||||
|
return context
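The ignore handling above is plain fnmatch matching against file_path strings. A reduced, self-contained version of the same filtering idea:

import fnmatch
from typing import Iterable, List

def filter_paths(paths: Iterable[str], ignore_patterns: Iterable[str]) -> List[str]:
    """Keep only the paths that match none of the shell-style ignore patterns."""
    return [p for p in paths if not any(fnmatch.fnmatch(p, pat) for pat in ignore_patterns)]

# Example: '*_preview.png' drops preview renders but keeps the texture maps.
assert filter_paths(["wood_col.png", "wood_preview.png"], ["*_preview.png"]) == ["wood_col.png"]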
processing/pipeline/stages/gloss_to_rough_conversion.py (new file, 195 lines)
@@ -0,0 +1,195 @@
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
import numpy as np
|
||||||
|
from typing import List, Optional  # Optional is needed for the modified_file_rule annotation below
|
||||||
|
import dataclasses
|
||||||
|
|
||||||
|
from .base_stage import ProcessingStage
|
||||||
|
from ..asset_context import AssetProcessingContext
|
||||||
|
from rule_structure import FileRule
|
||||||
|
from ...utils import image_processing_utils as ipu
|
||||||
|
from utils.path_utils import sanitize_filename
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class GlossToRoughConversionStage(ProcessingStage):
|
||||||
|
"""
|
||||||
|
Processing stage to convert glossiness maps to roughness maps.
|
||||||
|
Iterates through FileRules, identifies GLOSS maps, loads their
|
||||||
|
corresponding temporary processed images, inverts them, and saves
|
||||||
|
them as new temporary ROUGHNESS maps. Updates the FileRule and
|
||||||
|
context.processed_maps_details accordingly.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
"""
|
||||||
|
Executes the gloss to roughness conversion logic.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
context: The AssetProcessingContext containing asset and processing details.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The updated AssetProcessingContext.
|
||||||
|
"""
|
||||||
|
asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
|
||||||
|
if context.status_flags.get('skip_asset'):
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Skipping GlossToRoughConversionStage due to skip_asset flag.")
|
||||||
|
return context
|
||||||
|
|
||||||
|
if not context.processed_maps_details: # files_to_process might be empty if only gloss maps existed and all are converted
|
||||||
|
logger.debug(
|
||||||
|
f"Asset '{asset_name_for_log}': processed_maps_details is empty in GlossToRoughConversionStage. Skipping."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
# Start with a copy of the current file rules. We will modify this list.
|
||||||
|
new_files_to_process: List[FileRule] = list(context.files_to_process) if context.files_to_process else []
|
||||||
|
processed_a_gloss_map = False
|
||||||
|
successful_conversion_statuses = ['BasePOTSaved', 'Processed_With_Variants', 'Processed_No_Variants']
|
||||||
|
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Starting Gloss to Roughness Conversion Stage. Examining {len(context.processed_maps_details)} processed map entries.")
|
||||||
|
|
||||||
|
# Iterate using the index (map_key_index) as the key, which is now standard.
|
||||||
|
for map_key_index, map_details in context.processed_maps_details.items():
|
||||||
|
# Use the standardized internal_map_type field
|
||||||
|
internal_map_type = map_details.get('internal_map_type', '')
|
||||||
|
map_status = map_details.get('status')
|
||||||
|
original_temp_path_str = map_details.get('temp_processed_file')
|
||||||
|
# source_file_rule_idx from details should align with map_key_index.
|
||||||
|
# We primarily use map_key_index for accessing FileRule from context.files_to_process.
|
||||||
|
source_file_rule_idx_from_details = map_details.get('source_file_rule_index')
|
||||||
|
processing_tag = map_details.get('processing_tag')
|
||||||
|
|
||||||
|
if map_key_index != source_file_rule_idx_from_details:
|
||||||
|
logger.warning(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index}: Mismatch between map key index and 'source_file_rule_index' ({source_file_rule_idx_from_details}) in details. "
|
||||||
|
f"Using map_key_index ({map_key_index}) for FileRule lookup. This might indicate a data consistency issue from previous stage."
|
||||||
|
)
|
||||||
|
|
||||||
|
if not processing_tag:
|
||||||
|
logger.warning(f"Asset '{asset_name_for_log}', Map Key Index {map_key_index}: 'processing_tag' is missing in map_details. Using a fallback for temp filename. This is unexpected.")
|
||||||
|
processing_tag = f"mki_{map_key_index}_fallback_tag"
|
||||||
|
|
||||||
|
|
||||||
|
# Check if the map is a GLOSS map using the standardized internal_map_type
|
||||||
|
if not internal_map_type.startswith("MAP_GLOSS"):
|
||||||
|
# logger.debug(f"Asset '{asset_name_for_log}', Map Key Index {map_key_index}: Type '{internal_map_type}' is not GLOSS. Skipping.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Identified potential GLOSS map (Type: {internal_map_type}).")
|
||||||
|
|
||||||
|
if map_status not in successful_conversion_statuses:
|
||||||
|
logger.warning(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}) (GLOSS): Status '{map_status}' is not one of {successful_conversion_statuses}. "
|
||||||
|
f"Skipping conversion for this map."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not original_temp_path_str:
|
||||||
|
logger.warning(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}) (GLOSS): 'temp_processed_file' missing in details. "
|
||||||
|
f"Skipping conversion."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
original_temp_path = Path(original_temp_path_str)
|
||||||
|
if not original_temp_path.exists():
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}) (GLOSS): Temporary file {original_temp_path_str} "
|
||||||
|
f"does not exist. Skipping conversion."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Use map_key_index directly to access the FileRule
|
||||||
|
# Ensure map_key_index is a valid index for context.files_to_process
|
||||||
|
if not isinstance(map_key_index, int) or map_key_index < 0 or map_key_index >= len(context.files_to_process):
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}) (GLOSS): Invalid map_key_index ({map_key_index}) for accessing files_to_process (len: {len(context.files_to_process)}). "
|
||||||
|
f"Skipping conversion."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
original_file_rule = context.files_to_process[map_key_index]
|
||||||
|
source_file_path_for_log = original_file_rule.file_path if hasattr(original_file_rule, 'file_path') else "Unknown source path"
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Processing GLOSS map from '{original_temp_path_str}' (Original FileRule path: '{source_file_path_for_log}') for conversion.")
|
||||||
|
|
||||||
|
image_data = ipu.load_image(str(original_temp_path))
|
||||||
|
if image_data is None:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Failed to load image data from {original_temp_path_str}. "
|
||||||
|
f"Skipping conversion."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Perform Inversion
|
||||||
|
inverted_image_data: np.ndarray
|
||||||
|
if np.issubdtype(image_data.dtype, np.floating):
|
||||||
|
inverted_image_data = 1.0 - image_data
|
||||||
|
inverted_image_data = np.clip(inverted_image_data, 0.0, 1.0)
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Inverted float image data.")
|
||||||
|
elif np.issubdtype(image_data.dtype, np.integer):
|
||||||
|
max_val = np.iinfo(image_data.dtype).max
|
||||||
|
inverted_image_data = max_val - image_data
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Inverted integer image data (max_val: {max_val}).")
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Unsupported image data type {image_data.dtype} "
|
||||||
|
f"for GLOSS map. Cannot invert. Skipping conversion."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Save New Temporary (Roughness) Map
|
||||||
|
new_temp_filename = f"rough_from_gloss_{processing_tag}{original_temp_path.suffix}"
|
||||||
|
new_temp_path = context.engine_temp_dir / new_temp_filename
|
||||||
|
|
||||||
|
save_success = ipu.save_image(str(new_temp_path), inverted_image_data)
|
||||||
|
|
||||||
|
if save_success:
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Converted GLOSS map {original_temp_path_str} "
|
||||||
|
f"to ROUGHNESS map {new_temp_path}."
|
||||||
|
)
|
||||||
|
|
||||||
|
update_dict = {'item_type': "MAP_ROUGH", 'item_type_override': "MAP_ROUGH"}
|
||||||
|
|
||||||
|
modified_file_rule: Optional[FileRule] = None
|
||||||
|
if hasattr(original_file_rule, 'model_copy') and callable(original_file_rule.model_copy): # Pydantic
|
||||||
|
modified_file_rule = original_file_rule.model_copy(update=update_dict)
|
||||||
|
elif dataclasses.is_dataclass(original_file_rule): # Dataclass
|
||||||
|
modified_file_rule = dataclasses.replace(original_file_rule, **update_dict)
|
||||||
|
else:
|
||||||
|
logger.error(f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Original FileRule is neither Pydantic nor dataclass. Cannot modify. Skipping update for this rule.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
new_files_to_process[map_key_index] = modified_file_rule # Replace using map_key_index
|
||||||
|
|
||||||
|
# Update context.processed_maps_details for this map_key_index
|
||||||
|
map_details['temp_processed_file'] = str(new_temp_path)
|
||||||
|
map_details['original_map_type_before_conversion'] = internal_map_type # Store the original internal type
|
||||||
|
map_details['internal_map_type'] = "MAP_ROUGH" # Use the standardized MAP_ prefixed field
|
||||||
|
map_details['map_type'] = "Roughness" # Keep standard type for metadata/naming consistency if needed
|
||||||
|
map_details['status'] = "Converted_To_Rough"
|
||||||
|
map_details['notes'] = map_details.get('notes', '') + "; Converted from GLOSS by GlossToRoughConversionStage"
|
||||||
|
if 'base_pot_resolution_name' in map_details:
|
||||||
|
map_details['processed_resolution_name'] = map_details['base_pot_resolution_name']
|
||||||
|
|
||||||
|
processed_a_gloss_map = True
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}', Map Key Index {map_key_index} (Tag: {processing_tag}): Failed to save inverted ROUGHNESS map to {new_temp_path}. "
|
||||||
|
f"Original GLOSS FileRule remains."
|
||||||
|
)
|
||||||
|
|
||||||
|
context.files_to_process = new_files_to_process
|
||||||
|
|
||||||
|
if processed_a_gloss_map:
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': Gloss to Roughness conversion stage finished. Processed one or more maps and updated file list and map details."
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': No gloss maps were converted in GlossToRoughConversionStage. "
|
||||||
|
f"File list for next stage contains original non-gloss maps and any gloss maps that failed or were ineligible for conversion."
|
||||||
|
)
|
||||||
|
|
||||||
|
return context
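The conversion above is the usual gloss-to-roughness inversion (roughness = 1 - gloss). A self-contained sketch of the same dtype-aware inversion, without the stage's file handling or context bookkeeping:

import numpy as np

def invert_gloss(gloss: np.ndarray) -> np.ndarray:
    """Invert a gloss map into roughness, handling float and integer dtypes like the stage above."""
    if np.issubdtype(gloss.dtype, np.floating):
        return np.clip(1.0 - gloss, 0.0, 1.0)
    if np.issubdtype(gloss.dtype, np.integer):
        return np.iinfo(gloss.dtype).max - gloss
    raise TypeError(f"Unsupported dtype for gloss map: {gloss.dtype}")

# An 8-bit gloss value of 200 (quite glossy) becomes a roughness of 55 (quite smooth).
assert invert_gloss(np.array([200], dtype=np.uint8))[0] == 55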
processing/pipeline/stages/initial_scaling.py (new file, 99 lines)
@@ -0,0 +1,99 @@
import logging
|
||||||
|
from typing import Tuple, Optional # Added Optional
|
||||||
|
|
||||||
|
import cv2 # Assuming cv2 is available for interpolation flags
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from .base_stage import ProcessingStage
|
||||||
|
# Import necessary context classes and utils
|
||||||
|
from ..asset_context import InitialScalingInput, InitialScalingOutput
|
||||||
|
# ProcessingItem is no longer created here, so its import can be removed if not used otherwise.
|
||||||
|
# For now, keep rule_structure import if other elements from it might be needed,
|
||||||
|
# but ProcessingItem itself is not directly instantiated by this stage anymore.
|
||||||
|
# from rule_structure import ProcessingItem
|
||||||
|
from ...utils import image_processing_utils as ipu
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class InitialScalingStage(ProcessingStage):
|
||||||
|
"""
|
||||||
|
Applies initial Power-of-Two (POT) downscaling to image data if configured
|
||||||
|
and if the item is not already a 'LOWRES' variant.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def execute(self, input_data: InitialScalingInput) -> InitialScalingOutput:
|
||||||
|
"""
|
||||||
|
Applies POT scaling based on input_data.initial_scaling_mode,
|
||||||
|
unless input_data.resolution_key is 'LOWRES'.
|
||||||
|
Passes through the resolution_key.
|
||||||
|
"""
|
||||||
|
# Safely access source_file_path for logging, if provided by orchestrator via underscore attribute
|
||||||
|
source_file_path = getattr(input_data, '_source_file_path', "UnknownSourcePath")
|
||||||
|
log_prefix = f"InitialScalingStage (Source: {source_file_path}, ResKey: {input_data.resolution_key})"
|
||||||
|
|
||||||
|
log.debug(f"{log_prefix}: Mode '{input_data.initial_scaling_mode}'. Received resolution_key: '{input_data.resolution_key}'")
|
||||||
|
|
||||||
|
image_to_scale = input_data.image_data
|
||||||
|
current_dimensions_wh = input_data.original_dimensions # Dimensions of the image_to_scale
|
||||||
|
scaling_mode = input_data.initial_scaling_mode
|
||||||
|
|
||||||
|
output_resolution_key = input_data.resolution_key # Pass through the resolution key
|
||||||
|
|
||||||
|
if image_to_scale is None or image_to_scale.size == 0:
|
||||||
|
log.warning(f"{log_prefix}: Input image data is None or empty. Skipping POT scaling.")
|
||||||
|
return InitialScalingOutput(
|
||||||
|
scaled_image_data=np.array([]),
|
||||||
|
scaling_applied=False,
|
||||||
|
final_dimensions=(0, 0),
|
||||||
|
resolution_key=output_resolution_key
|
||||||
|
)
|
||||||
|
|
||||||
|
if not current_dimensions_wh:
|
||||||
|
log.warning(f"{log_prefix}: Original dimensions not provided for POT scaling. Using current image shape.")
|
||||||
|
h_pre_pot_scale, w_pre_pot_scale = image_to_scale.shape[:2]
|
||||||
|
else:
|
||||||
|
w_pre_pot_scale, h_pre_pot_scale = current_dimensions_wh
|
||||||
|
|
||||||
|
final_image_data = image_to_scale # Default to original if no scaling happens
|
||||||
|
scaling_applied = False
|
||||||
|
|
||||||
|
# Skip POT scaling if the item is already a LOWRES variant or scaling mode is NONE
|
||||||
|
if output_resolution_key == "LOWRES":
|
||||||
|
log.info(f"{log_prefix}: Item is a 'LOWRES' variant. Skipping POT downscaling.")
|
||||||
|
elif scaling_mode == "NONE":
|
||||||
|
log.info(f"{log_prefix}: Mode is NONE. No POT scaling applied.")
|
||||||
|
elif scaling_mode == "POT_DOWNSCALE":
|
||||||
|
pot_w = ipu.get_nearest_power_of_two_downscale(w_pre_pot_scale)
|
||||||
|
pot_h = ipu.get_nearest_power_of_two_downscale(h_pre_pot_scale)
|
||||||
|
|
||||||
|
if (pot_w, pot_h) != (w_pre_pot_scale, h_pre_pot_scale):
|
||||||
|
log.info(f"{log_prefix}: Applying POT Downscale from ({w_pre_pot_scale},{h_pre_pot_scale}) to ({pot_w},{pot_h}).")
|
||||||
|
resized_img = ipu.resize_image(image_to_scale, pot_w, pot_h, interpolation=cv2.INTER_AREA)
|
||||||
|
if resized_img is not None:
|
||||||
|
final_image_data = resized_img
|
||||||
|
scaling_applied = True
|
||||||
|
log.debug(f"{log_prefix}: POT Downscale applied successfully.")
|
||||||
|
else:
|
||||||
|
log.warning(f"{log_prefix}: POT Downscale resize failed. Using pre-POT-scaled data.")
|
||||||
|
else:
|
||||||
|
log.info(f"{log_prefix}: Image already POT or smaller. No POT scaling needed.")
|
||||||
|
else:
|
||||||
|
log.warning(f"{log_prefix}: Unknown INITIAL_SCALING_MODE '{scaling_mode}'. Defaulting to NONE (no scaling).")
|
||||||
|
|
||||||
|
# Determine final dimensions
|
||||||
|
if final_image_data is not None and final_image_data.size > 0:
|
||||||
|
final_h, final_w = final_image_data.shape[:2]
|
||||||
|
final_dims_wh = (final_w, final_h)
|
||||||
|
else:
|
||||||
|
final_dims_wh = (0,0)
|
||||||
|
if final_image_data is None: # Ensure it's an empty array for consistency if None
|
||||||
|
final_image_data = np.array([])
|
||||||
|
|
||||||
|
return InitialScalingOutput(
|
||||||
|
scaled_image_data=final_image_data,
|
||||||
|
scaling_applied=scaling_applied,
|
||||||
|
final_dimensions=final_dims_wh,
|
||||||
|
resolution_key=output_resolution_key # Pass through the resolution key
|
||||||
|
)
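The rounding itself is delegated to ipu.get_nearest_power_of_two_downscale; a plausible self-contained version of that rounding (an assumption about the helper's behaviour, not its actual implementation) looks like this:

def nearest_power_of_two_downscale(value: int) -> int:
    """Round a positive dimension down to the nearest power of two (e.g. 1500 -> 1024)."""
    if value < 1:
        raise ValueError("Dimension must be positive")
    return 1 << (value.bit_length() - 1)

assert nearest_power_of_two_downscale(1500) == 1024
assert nearest_power_of_two_downscale(1024) == 1024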
processing/pipeline/stages/merged_task_processor.py (new file, 329 lines)
@@ -0,0 +1,329 @@
import logging
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional, Tuple, Dict, Any
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from .base_stage import ProcessingStage
|
||||||
|
# Import necessary context classes and utils
|
||||||
|
from ..asset_context import AssetProcessingContext, MergeTaskDefinition, ProcessedMergedMapData
|
||||||
|
from ...utils import image_processing_utils as ipu
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class MergedTaskProcessorStage(ProcessingStage):
|
||||||
|
"""
|
||||||
|
Processes a single merge task defined in the configuration.
|
||||||
|
Loads inputs, applies transformations to inputs, handles fallbacks/resizing,
|
||||||
|
performs the merge, and returns the merged data.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def _find_input_map_details_in_context(
|
||||||
|
self,
|
||||||
|
required_map_type: str,
|
||||||
|
processed_map_details_context: Dict[str, Dict[str, Any]],
|
||||||
|
log_prefix_for_find: str
|
||||||
|
) -> Optional[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Finds the details of a required input map from the context's processed_maps_details.
|
||||||
|
Prefers an exact match on the required type (e.g. MAP_TYPE-1); for a base type (e.g. MAP_TYPE) with no exact match, falls back to its primary suffixed variant (MAP_TYPE-1).
|
||||||
|
Returns the details dictionary for the found map if it has saved_files_info.
|
||||||
|
"""
|
||||||
|
# Try exact match first (e.g., rule asks for "MAP_NRM-1" or "MAP_NRM" if that's how it was processed)
|
||||||
|
for item_key, details in processed_map_details_context.items():
|
||||||
|
if details.get('internal_map_type') == required_map_type:
|
||||||
|
if details.get('saved_files_info') and isinstance(details['saved_files_info'], list) and len(details['saved_files_info']) > 0:
|
||||||
|
log.debug(f"{log_prefix_for_find}: Found exact match for '{required_map_type}' with key '{item_key}'.")
|
||||||
|
return details
|
||||||
|
log.warning(f"{log_prefix_for_find}: Found exact match for '{required_map_type}' (key '{item_key}') but no saved_files_info.")
|
||||||
|
return None # Found type but no usable files
|
||||||
|
|
||||||
|
# If exact match not found, and required_map_type is a base type (e.g. "MAP_NRM")
|
||||||
|
# try to find the primary suffixed version "MAP_NRM-1" or the base type itself if it was processed without a suffix.
|
||||||
|
if not re.search(r'-\d+$', required_map_type): # if it's a base type like MAP_XXX
|
||||||
|
# Prefer "MAP_XXX-1" as the primary variant if suffixed types exist
|
||||||
|
primary_suffixed_type = f"{required_map_type}-1"
|
||||||
|
for item_key, details in processed_map_details_context.items():
|
||||||
|
if details.get('internal_map_type') == primary_suffixed_type:
|
||||||
|
if details.get('saved_files_info') and isinstance(details['saved_files_info'], list) and len(details['saved_files_info']) > 0:
|
||||||
|
log.debug(f"{log_prefix_for_find}: Found primary suffixed match '{primary_suffixed_type}' for base '{required_map_type}' with key '{item_key}'.")
|
||||||
|
return details
|
||||||
|
log.warning(f"{log_prefix_for_find}: Found primary suffixed match '{primary_suffixed_type}' (key '{item_key}') but no saved_files_info.")
|
||||||
|
return None # Found type but no usable files
|
||||||
|
|
||||||
|
log.debug(f"{log_prefix_for_find}: No suitable match found for '{required_map_type}' via exact or primary suffixed type search.")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def execute(
|
||||||
|
self,
|
||||||
|
context: AssetProcessingContext,
|
||||||
|
merge_task: MergeTaskDefinition # Specific item passed by orchestrator
|
||||||
|
) -> ProcessedMergedMapData:
|
||||||
|
"""
|
||||||
|
Processes the given MergeTaskDefinition item.
|
||||||
|
"""
|
||||||
|
asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
|
||||||
|
task_key = merge_task.task_key
|
||||||
|
task_data = merge_task.task_data
|
||||||
|
log_prefix = f"Asset '{asset_name_for_log}', Task '{task_key}'"
|
||||||
|
log.info(f"{log_prefix}: Processing Merge Task.")
|
||||||
|
|
||||||
|
# Initialize output object with default failure state
|
||||||
|
result = ProcessedMergedMapData(
|
||||||
|
merged_image_data=np.array([]), # Placeholder
|
||||||
|
output_map_type=task_data.get('output_map_type', 'UnknownMergeOutput'),
|
||||||
|
source_bit_depths=[],
|
||||||
|
final_dimensions=None,
|
||||||
|
transformations_applied_to_inputs={},
|
||||||
|
status="Failed",
|
||||||
|
error_message="Initialization error"
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# --- Configuration & Task Data ---
|
||||||
|
config = context.config_obj
|
||||||
|
file_type_definitions = getattr(config, "FILE_TYPE_DEFINITIONS", {})
|
||||||
|
invert_normal_green = config.invert_normal_green_globally
|
||||||
|
merge_dimension_mismatch_strategy = getattr(config, "MERGE_DIMENSION_MISMATCH_STRATEGY", "USE_LARGEST")
|
||||||
|
workspace_path = context.workspace_path # Base for resolving relative input paths
|
||||||
|
|
||||||
|
# input_map_sources_from_task is no longer used for paths. Paths are sourced from context.processed_maps_details.
|
||||||
|
target_dimensions_hw = task_data.get('source_dimensions') # Expected dimensions (h, w) for fallback creation, must be in config.
|
||||||
|
merge_inputs_config = task_data.get('inputs', {}) # e.g., {'R': 'MAP_AO', 'G': 'MAP_ROUGH', ...}
|
||||||
|
merge_defaults = task_data.get('defaults', {}) # e.g., {'R': 255, 'G': 255, ...}
|
||||||
|
merge_channels_order = task_data.get('channel_order', 'RGB') # e.g., 'RGB', 'RGBA'
|
||||||
|
|
||||||
|
# Target dimensions are crucial if fallbacks are needed.
|
||||||
|
# Merge inputs config is essential. Check directly in task_data.
|
||||||
|
inputs_from_task_data = task_data.get('inputs')
|
||||||
|
if not isinstance(inputs_from_task_data, dict) or not inputs_from_task_data:
|
||||||
|
result.error_message = "Merge task data is incomplete (missing or invalid 'inputs' dictionary in task_data)."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result
|
||||||
|
if not target_dimensions_hw and any(merge_defaults.get(ch) is not None for ch in merge_inputs_config.keys()):
|
||||||
|
log.warning(f"{log_prefix}: Merge task has defaults defined, but 'source_dimensions' (target_dimensions_hw) is missing in task_data. Fallback image creation might fail if needed.")
|
||||||
|
# Not returning error yet, as fallbacks might not be triggered.
|
||||||
|
|
||||||
|
loaded_inputs_for_merge: Dict[str, np.ndarray] = {} # Channel char -> image data
|
||||||
|
actual_input_dimensions: List[Tuple[int, int]] = [] # List of (h, w) for loaded files
|
||||||
|
input_source_bit_depths: Dict[str, int] = {} # Channel char -> bit depth
|
||||||
|
all_transform_notes: Dict[str, List[str]] = {} # Channel char -> list of transform notes
|
||||||
|
|
||||||
|
# --- Load, Transform, and Prepare Inputs ---
|
||||||
|
log.debug(f"{log_prefix}: Loading and preparing inputs...")
|
||||||
|
for channel_char, required_map_type_from_rule in merge_inputs_config.items():
|
||||||
|
# Validate that the required input map type starts with "MAP_"
|
||||||
|
if not required_map_type_from_rule.startswith("MAP_"):
|
||||||
|
result.error_message = (
|
||||||
|
f"Invalid input map type '{required_map_type_from_rule}' for channel '{channel_char}'. "
|
||||||
|
f"Input map types for merging must start with 'MAP_'."
|
||||||
|
)
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result # Fail the task if an input type is invalid
|
||||||
|
|
||||||
|
input_image_data: Optional[np.ndarray] = None
|
||||||
|
input_source_desc = f"Fallback for {required_map_type_from_rule}"
|
||||||
|
input_log_prefix = f"{log_prefix}, Input '{required_map_type_from_rule}' (Channel '{channel_char}')"
|
||||||
|
channel_transform_notes: List[str] = []
|
||||||
|
|
||||||
|
# 1. Attempt to load from context.processed_maps_details
|
||||||
|
found_input_map_details = self._find_input_map_details_in_context(
|
||||||
|
required_map_type_from_rule, context.processed_maps_details, input_log_prefix
|
||||||
|
)
|
||||||
|
|
||||||
|
if found_input_map_details:
|
||||||
|
# Assuming the first saved file is the primary one for merging.
|
||||||
|
# This might need refinement if specific variants (resolutions/formats) are required.
|
||||||
|
primary_saved_file_info = found_input_map_details['saved_files_info'][0]
|
||||||
|
input_file_path_str = primary_saved_file_info.get('path')
|
||||||
|
|
||||||
|
if input_file_path_str:
|
||||||
|
input_file_path = Path(input_file_path_str) # Path is absolute from SaveVariantsStage
|
||||||
|
if input_file_path.is_file():
|
||||||
|
try:
|
||||||
|
input_image_data = ipu.load_image(str(input_file_path))
|
||||||
|
if input_image_data is not None:
|
||||||
|
log.info(f"{input_log_prefix}: Loaded from context: {input_file_path}")
|
||||||
|
actual_input_dimensions.append(input_image_data.shape[:2]) # (h, w)
|
||||||
|
input_source_desc = str(input_file_path)
|
||||||
|
# Bit depth from the saved variant info
|
||||||
|
input_source_bit_depths[channel_char] = primary_saved_file_info.get('bit_depth', 8)
|
||||||
|
else:
|
||||||
|
log.warning(f"{input_log_prefix}: Failed to load image from {input_file_path} (found in context). Attempting fallback.")
|
||||||
|
input_image_data = None # Ensure fallback is triggered
|
||||||
|
except Exception as e:
|
||||||
|
log.warning(f"{input_log_prefix}: Error loading image from {input_file_path} (found in context): {e}. Attempting fallback.")
|
||||||
|
input_image_data = None # Ensure fallback is triggered
|
||||||
|
else:
|
||||||
|
log.warning(f"{input_log_prefix}: Input file path '{input_file_path}' (from context) not found. Attempting fallback.")
|
||||||
|
input_image_data = None # Ensure fallback is triggered
|
||||||
|
else:
|
||||||
|
log.warning(f"{input_log_prefix}: Found map type '{required_map_type_from_rule}' in context, but 'path' is missing in saved_files_info. Attempting fallback.")
|
||||||
|
input_image_data = None # Ensure fallback is triggered
|
||||||
|
else:
|
||||||
|
log.info(f"{input_log_prefix}: Input map type '{required_map_type_from_rule}' not found in context.processed_maps_details. Attempting fallback.")
|
||||||
|
input_image_data = None # Ensure fallback is triggered
|
||||||
|
|
||||||
|
# 2. Apply Fallback if needed
|
||||||
|
if input_image_data is None:
|
||||||
|
fallback_value = merge_defaults.get(channel_char)
|
||||||
|
if fallback_value is not None:
|
||||||
|
try:
|
||||||
|
if not target_dimensions_hw:
|
||||||
|
result.error_message = f"Cannot create fallback for channel '{channel_char}': 'source_dimensions' (target_dimensions_hw) not defined in task_data."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result # Critical failure if dimensions for fallback are missing
|
||||||
|
h, w = target_dimensions_hw
|
||||||
|
# Infer shape/dtype for fallback (simplified)
|
||||||
|
num_channels = 1 if isinstance(fallback_value, (int, float)) else len(fallback_value) if isinstance(fallback_value, (list, tuple)) else 1
|
||||||
|
dtype = np.uint8 # Default dtype
|
||||||
|
shape = (h, w) if num_channels == 1 else (h, w, num_channels)
|
||||||
|
|
||||||
|
input_image_data = np.full(shape, fallback_value, dtype=dtype)
|
||||||
|
log.warning(f"{input_log_prefix}: Using fallback value {fallback_value} (Target Dims: {target_dimensions_hw}).")
|
||||||
|
input_source_desc = f"Fallback value {fallback_value}"
|
||||||
|
input_source_bit_depths[channel_char] = 8 # Assume 8-bit for fallbacks
|
||||||
|
channel_transform_notes.append(f"Used fallback value {fallback_value}")
|
||||||
|
except Exception as e:
|
||||||
|
result.error_message = f"Error creating fallback for channel '{channel_char}': {e}"
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result # Critical failure
|
||||||
|
else:
|
||||||
|
result.error_message = f"Missing input '{required_map_type_from_rule}' and no fallback default provided for channel '{channel_char}'."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result # Critical failure
|
||||||
|
|
||||||
|
# 3. Apply Transformations to the loaded/fallback input
|
||||||
|
if input_image_data is not None:
|
||||||
|
input_image_data, _, transform_notes = ipu.apply_common_map_transformations(
|
||||||
|
input_image_data.copy(), # Transform a copy
|
||||||
|
required_map_type_from_rule, # Use the type required by the rule
|
||||||
|
invert_normal_green,
|
||||||
|
file_type_definitions,
|
||||||
|
input_log_prefix
|
||||||
|
)
|
||||||
|
channel_transform_notes.extend(transform_notes)
|
||||||
|
else:
|
||||||
|
# This case should be prevented by fallback logic, but as a safeguard:
|
||||||
|
result.error_message = f"Input data for channel '{channel_char}' is None after load/fallback attempt."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message} This indicates an internal logic error.")
|
||||||
|
return result
|
||||||
|
|
||||||
|
loaded_inputs_for_merge[channel_char] = input_image_data
|
||||||
|
all_transform_notes[channel_char] = channel_transform_notes
|
||||||
|
|
||||||
|
result.transformations_applied_to_inputs = all_transform_notes # Store notes
|
||||||
|
|
||||||
|
# --- Handle Dimension Mismatches (using transformed inputs) ---
|
||||||
|
log.debug(f"{log_prefix}: Handling dimension mismatches...")
|
||||||
|
unique_dimensions = set(actual_input_dimensions)
|
||||||
|
target_merge_dims_hw = target_dimensions_hw # Default
|
||||||
|
|
||||||
|
if len(unique_dimensions) > 1:
|
||||||
|
log.warning(f"{log_prefix}: Mismatched dimensions found among loaded inputs: {unique_dimensions}. Applying strategy: {merge_dimension_mismatch_strategy}")
|
||||||
|
mismatch_note = f"Mismatched input dimensions ({unique_dimensions}), applied {merge_dimension_mismatch_strategy}"
|
||||||
|
# Add note to all relevant inputs? Or just a general note? Add general for now.
|
||||||
|
# result.status_notes.append(mismatch_note) # Need a place for general notes
|
||||||
|
|
||||||
|
if merge_dimension_mismatch_strategy == "ERROR_SKIP":
|
||||||
|
result.error_message = "Dimension mismatch and strategy is ERROR_SKIP."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result
|
||||||
|
elif merge_dimension_mismatch_strategy == "USE_LARGEST":
|
||||||
|
max_h = max(h for h, w in unique_dimensions)
|
||||||
|
max_w = max(w for h, w in unique_dimensions)
|
||||||
|
target_merge_dims_hw = (max_h, max_w)
|
||||||
|
elif merge_dimension_mismatch_strategy == "USE_FIRST":
|
||||||
|
target_merge_dims_hw = actual_input_dimensions[0] if actual_input_dimensions else target_dimensions_hw
|
||||||
|
# Add other strategies or default to USE_LARGEST
|
||||||
|
|
||||||
|
log.info(f"{log_prefix}: Resizing inputs to target merge dimensions: {target_merge_dims_hw}")
|
||||||
|
# Resize loaded inputs (not fallbacks unless they were treated as having target dims)
|
||||||
|
for channel_char, img_data in loaded_inputs_for_merge.items():
|
||||||
|
# Only resize if it was a loaded input that contributed to the mismatch check
|
||||||
|
if img_data.shape[:2] in unique_dimensions and img_data.shape[:2] != target_merge_dims_hw:
|
||||||
|
resized_img = ipu.resize_image(img_data, target_merge_dims_hw[1], target_merge_dims_hw[0]) # w, h
|
||||||
|
if resized_img is None:
|
||||||
|
result.error_message = f"Failed to resize input for channel '{channel_char}' to {target_merge_dims_hw}."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result
|
||||||
|
loaded_inputs_for_merge[channel_char] = resized_img
|
||||||
|
log.debug(f"{log_prefix}: Resized input for channel '{channel_char}'.")
|
||||||
|
|
||||||
|
# If target_merge_dims_hw is still None (no source_dimensions and no mismatch), use first loaded input's dimensions
|
||||||
|
if target_merge_dims_hw is None and actual_input_dimensions:
|
||||||
|
target_merge_dims_hw = actual_input_dimensions[0]
|
||||||
|
log.info(f"{log_prefix}: Using dimensions from first loaded input: {target_merge_dims_hw}")
|
||||||
|
|
||||||
|
# --- Perform Merge ---
|
||||||
|
log.debug(f"{log_prefix}: Performing merge operation for channels '{merge_channels_order}'.")
|
||||||
|
try:
|
||||||
|
# Final check for valid dimensions before unpacking
|
||||||
|
if not isinstance(target_merge_dims_hw, tuple) or len(target_merge_dims_hw) != 2:
|
||||||
|
result.error_message = "Could not determine valid target dimensions for merge operation."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message} (target_merge_dims_hw: {target_merge_dims_hw})")
|
||||||
|
return result
|
||||||
|
|
||||||
|
output_channels = len(merge_channels_order)
|
||||||
|
h, w = target_merge_dims_hw # Use the potentially adjusted dimensions
|
||||||
|
|
||||||
|
# Determine output dtype (e.g., based on inputs or config) - Assume uint8 for now
|
||||||
|
output_dtype = np.uint8
|
||||||
|
|
||||||
|
if output_channels == 1:
|
||||||
|
# Assume the first channel in order is the one to use
|
||||||
|
channel_char_to_use = merge_channels_order[0]
|
||||||
|
source_img = loaded_inputs_for_merge[channel_char_to_use]
|
||||||
|
# Ensure it's grayscale (take first channel if it's multi-channel)
|
||||||
|
if len(source_img.shape) == 3:
|
||||||
|
merged_image = source_img[:, :, 0].copy().astype(output_dtype)
|
||||||
|
else:
|
||||||
|
merged_image = source_img.copy().astype(output_dtype)
|
||||||
|
elif output_channels > 1:
|
||||||
|
merged_image = np.zeros((h, w, output_channels), dtype=output_dtype)
|
||||||
|
for i, channel_char in enumerate(merge_channels_order):
|
||||||
|
source_img = loaded_inputs_for_merge.get(channel_char)
|
||||||
|
if source_img is not None:
|
||||||
|
# Extract the correct channel (e.g., R from RGB, or use grayscale directly)
|
||||||
|
if len(source_img.shape) == 3:
|
||||||
|
# Simple approach: take the first channel if source is color. Needs refinement if specific channel mapping (R->R, G->G etc.) is needed.
|
||||||
|
merged_image[:, :, i] = source_img[:, :, 0]
|
||||||
|
else: # Grayscale source
|
||||||
|
merged_image[:, :, i] = source_img
|
||||||
|
else:
|
||||||
|
# This case should have been caught by fallback logic earlier
|
||||||
|
result.error_message = f"Internal error: Missing prepared input for channel '{channel_char}' during final merge assembly."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result
|
||||||
|
else:
|
||||||
|
result.error_message = f"Invalid channel_order '{merge_channels_order}' in merge config."
|
||||||
|
log.error(f"{log_prefix}: {result.error_message}")
|
||||||
|
return result
|
||||||
|
|
||||||
|
result.merged_image_data = merged_image
|
||||||
|
result.final_dimensions = (merged_image.shape[1], merged_image.shape[0]) # w, h
|
||||||
|
result.source_bit_depths = list(input_source_bit_depths.values()) # Collect bit depths used
|
||||||
|
log.info(f"{log_prefix}: Successfully merged inputs into image with shape {result.merged_image_data.shape}")
|
||||||
|
|
||||||
|
except Exception as e:
log.exception(f"{log_prefix}: Error during merge operation: {e}")
result.error_message = f"Merge operation failed: {e}"
return result

# --- Success ---
result.status = "Processed"
result.error_message = None
log.info(f"{log_prefix}: Successfully processed merge task.")

except Exception as e:
log.exception(f"{log_prefix}: Unhandled exception during processing: {e}")
result.status = "Failed"
result.error_message = f"Unhandled exception: {e}"
# Ensure image data is empty on failure
if result.merged_image_data is None or result.merged_image_data.size == 0:
result.merged_image_data = np.array([])

return result
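For reference, a minimal standalone sketch of the channel-packing idea the merge block above implements, assuming uint8 grayscale inputs that have already been resized to a common resolution (the function and variable names here are illustrative, not part of this module's API):

```python
import numpy as np

def pack_channels(inputs: dict, channel_order: str) -> np.ndarray:
    """Stack one grayscale array per channel letter into an (h, w, n) image."""
    h, w = next(iter(inputs.values())).shape[:2]
    packed = np.zeros((h, w, len(channel_order)), dtype=np.uint8)
    for i, ch in enumerate(channel_order):
        src = inputs[ch]
        # Use the first plane if a source is accidentally multi-channel.
        packed[:, :, i] = src[:, :, 0] if src.ndim == 3 else src
    return packed

# Example: pack roughness-, metalness- and AO-style inputs into R, G and B.
gray = {c: np.full((4, 4), v, dtype=np.uint8) for c, v in zip("RGB", (10, 20, 30))}
merged = pack_channels(gray, "RGB")
assert merged.shape == (4, 4, 3) and merged[0, 0, 1] == 20
```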
219
processing/pipeline/stages/metadata_finalization_save.py
Normal file
@@ -0,0 +1,219 @@
import datetime
import json
import logging
from pathlib import Path
from typing import Any, Dict

from ..asset_context import AssetProcessingContext
from .base_stage import ProcessingStage
from utils.path_utils import generate_path_from_pattern, sanitize_filename


logger = logging.getLogger(__name__)

class MetadataFinalizationAndSaveStage(ProcessingStage):
"""
This stage finalizes the asset_metadata (e.g., setting processing end time,
final status) and saves it as a JSON file.
"""

def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
"""
|
||||||
|
Finalizes metadata, determines output path, and saves the metadata JSON file.
|
||||||
|
"""
|
||||||
|
asset_name_for_log = "Unknown Asset"
|
||||||
|
if hasattr(context, 'asset_rule') and context.asset_rule and hasattr(context.asset_rule, 'asset_name'):
|
||||||
|
asset_name_for_log = context.asset_rule.asset_name
|
||||||
|
|
||||||
|
if not hasattr(context, 'asset_metadata') or not context.asset_metadata:
|
||||||
|
if context.status_flags.get('skip_asset'):
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': "
|
||||||
|
f"Skipped before metadata initialization. No metadata file will be saved."
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.warning(
|
||||||
|
f"Asset '{asset_name_for_log}': "
|
||||||
|
f"asset_metadata not initialized. Skipping metadata finalization and save."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
# Check Skip Flag
|
||||||
|
if context.status_flags.get('skip_asset'):
|
||||||
|
context.asset_metadata['status'] = "Skipped"
|
||||||
|
# context.asset_metadata['processing_end_time'] = datetime.datetime.now().isoformat()
|
||||||
|
context.asset_metadata['notes'] = context.status_flags.get('skip_reason', 'Skipped early in pipeline')
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': Marked as skipped. Reason: {context.asset_metadata['notes']}"
|
||||||
|
)
|
||||||
|
# Assuming we save metadata for skipped assets if it was initialized.
|
||||||
|
# If not, the logic to skip saving would be here or before path generation.
|
||||||
|
# However, if we are here, asset_metadata IS initialized.
|
||||||
|
|
||||||
|
# A. Finalize Metadata
|
||||||
|
# context.asset_metadata['processing_end_time'] = datetime.datetime.now().isoformat()
|
||||||
|
|
||||||
|
# Determine final status (if not already set to Skipped)
|
||||||
|
if context.asset_metadata.get('status') != "Skipped":
|
||||||
|
has_errors = any(
|
||||||
|
context.status_flags.get(error_flag)
|
||||||
|
for error_flag in ['file_processing_error', 'merge_error', 'critical_error',
|
||||||
|
'individual_map_processing_failed', 'metadata_save_error'] # Added more flags
|
||||||
|
)
|
||||||
|
if has_errors:
|
||||||
|
context.asset_metadata['status'] = "Failed"
|
||||||
|
else:
|
||||||
|
context.asset_metadata['status'] = "Processed"
|
||||||
|
|
||||||
|
# Add details of processed and merged maps
|
||||||
|
# Restructure processed_map_details before assigning
|
||||||
|
restructured_processed_maps = {}
|
||||||
|
# getattr(context, 'processed_maps_details', {}) is the source (plural 'maps')
|
||||||
|
original_processed_maps = getattr(context, 'processed_maps_details', {})
|
||||||
|
|
||||||
|
# Define keys to remove at the top level of each map entry
|
||||||
|
map_keys_to_remove = [
|
||||||
|
"status", "source_file_path", "temp_processed_file", # Assuming "source_file_path" is the correct key
|
||||||
|
"original_resolution_name", "base_pot_resolution_name", "processed_resolution_name"
|
||||||
|
]
|
||||||
|
# Define keys to remove from each variant
|
||||||
|
variant_keys_to_remove = ["temp_path", "dimensions"]
|
||||||
|
|
||||||
|
for map_key, map_detail_original in original_processed_maps.items():
|
||||||
|
# Create a new dictionary for the modified map entry
|
||||||
|
new_map_entry = {}
|
||||||
|
for key, value in map_detail_original.items():
|
||||||
|
if key not in map_keys_to_remove:
|
||||||
|
new_map_entry[key] = value
|
||||||
|
|
||||||
|
if "variants" in map_detail_original and isinstance(map_detail_original["variants"], dict):
|
||||||
|
new_variants_dict = {}
|
||||||
|
for variant_name, variant_data_original in map_detail_original["variants"].items():
|
||||||
|
new_variant_entry = {}
|
||||||
|
for key, value in variant_data_original.items():
|
||||||
|
if key not in variant_keys_to_remove:
|
||||||
|
new_variant_entry[key] = value
|
||||||
|
|
||||||
|
# Add 'path_to_file'
|
||||||
|
# This path is expected to be set by OutputOrganizationStage in the context.
|
||||||
|
# It should be a Path object representing the path relative to the metadata directory,
|
||||||
|
# or an absolute Path that make_serializable can convert.
|
||||||
|
# Using 'final_output_path_for_metadata' as the key from context.
|
||||||
|
if 'final_output_path_for_metadata' in variant_data_original:
|
||||||
|
new_variant_entry['path_to_file'] = variant_data_original['final_output_path_for_metadata']
|
||||||
|
else:
|
||||||
|
# Log a warning if the expected path is not found
|
||||||
|
logger.warning(
|
||||||
|
f"Asset '{asset_name_for_log}': 'final_output_path_for_metadata' "
|
||||||
|
f"missing for variant '{variant_name}' in map '{map_key}'. "
|
||||||
|
f"Metadata will be incomplete for this variant's path."
|
||||||
|
)
|
||||||
|
new_variant_entry['path_to_file'] = "ERROR_PATH_NOT_FOUND" # Placeholder
|
||||||
|
new_variants_dict[variant_name] = new_variant_entry
|
||||||
|
new_map_entry["variants"] = new_variants_dict
|
||||||
|
|
||||||
|
restructured_processed_maps[map_key] = new_map_entry
|
||||||
|
|
||||||
|
# Assign the restructured details. Note: 'processed_map_details' (singular 'map') is the key in asset_metadata.
|
||||||
|
# context.asset_metadata['processed_map_details'] = restructured_processed_maps
|
||||||
|
# context.asset_metadata['merged_map_details'] = getattr(context, 'merged_maps_details', {})
|
||||||
|
|
||||||
|
# (Optional) Add a list of all temporary files
|
||||||
|
# context.asset_metadata['temporary_files'] = getattr(context, 'temporary_files', []) # Assuming this is populated elsewhere
|
||||||
|
|
||||||
|
# B. Determine Metadata Output Path
|
||||||
|
# asset_name_for_log is defined at the top of the function if asset_metadata exists
|
||||||
|
|
||||||
|
source_rule_identifier_for_path = "unknown_source"
|
||||||
|
if hasattr(context, 'source_rule') and context.source_rule:
|
||||||
|
if hasattr(context.source_rule, 'supplier_identifier') and context.source_rule.supplier_identifier:
|
||||||
|
source_rule_identifier_for_path = context.source_rule.supplier_identifier
|
||||||
|
elif hasattr(context.source_rule, 'input_path') and context.source_rule.input_path:
|
||||||
|
source_rule_identifier_for_path = Path(context.source_rule.input_path).stem # Use stem of input path if no identifier
|
||||||
|
else:
|
||||||
|
source_rule_identifier_for_path = "unknown_source_details"
|
||||||
|
|
||||||
|
# Use the configured metadata filename from config_obj
|
||||||
|
metadata_filename_from_config = getattr(context.config_obj, 'metadata_filename', "metadata.json")
|
||||||
|
# Ensure asset_name_for_log is safe for filenames
|
||||||
|
safe_asset_name = sanitize_filename(asset_name_for_log) # asset_name_for_log is defined at the top
|
||||||
|
final_metadata_filename = f"{safe_asset_name}_{metadata_filename_from_config}"
|
||||||
|
|
||||||
|
# Output path pattern should come from config_obj, not asset_rule
|
||||||
|
output_path_pattern_from_config = getattr(context.config_obj, 'output_directory_pattern', "[supplier]/[assetname]")
|
||||||
|
|
||||||
|
sha_value = getattr(context, 'sha5_value', None) # Prefer sha5_value if explicitly set on context
|
||||||
|
if sha_value is None: # Fallback to sha256_value if that was the intended attribute
|
||||||
|
sha_value = getattr(context, 'sha256_value', None)
|
||||||
|
|
||||||
|
token_data = {
|
||||||
|
"assetname": asset_name_for_log,
|
||||||
|
"supplier": context.effective_supplier if context.effective_supplier else source_rule_identifier_for_path,
|
||||||
|
"sourcerulename": source_rule_identifier_for_path,
|
||||||
|
"incrementingvalue": getattr(context, 'incrementing_value', None),
|
||||||
|
"sha5": sha_value, # Assuming pattern uses [sha5] or similar for sha_value
|
||||||
|
"maptype": "metadata", # Added maptype to token_data
|
||||||
|
"filename": final_metadata_filename # Added filename to token_data
|
||||||
|
# Add other tokens if your output_path_pattern_from_config expects them
|
||||||
|
}
|
||||||
|
# Clean None values, as generate_path_from_pattern might not handle them well for all tokens
|
||||||
|
token_data_cleaned = {k: v for k, v in token_data.items() if v is not None}
|
||||||
|
|
||||||
|
# Generate the relative directory path using the pattern and tokens
|
||||||
|
relative_dir_path_str = generate_path_from_pattern(
|
||||||
|
pattern_string=output_path_pattern_from_config, # This pattern should resolve to a directory
|
||||||
|
token_data=token_data_cleaned
|
||||||
|
)
|
||||||
|
|
||||||
|
# Construct the full path by joining the base output path, the generated relative directory, and the final filename
|
||||||
|
metadata_save_path = Path(context.output_base_path) / Path(relative_dir_path_str) / Path(final_metadata_filename)
|
||||||
|
|
||||||
|
# C. Save Metadata File
|
||||||
|
try:
|
||||||
|
metadata_save_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
def make_serializable(data: Any) -> Any:
|
||||||
|
if isinstance(data, Path):
|
||||||
|
# metadata_save_path is available from the outer scope
|
||||||
|
metadata_dir = metadata_save_path.parent
|
||||||
|
try:
|
||||||
|
# Attempt to make the path relative if it's absolute and under the same root
|
||||||
|
if data.is_absolute():
|
||||||
|
# Check if the path can be made relative (e.g., same drive on Windows)
|
||||||
|
# This check might need to be more robust depending on os.path.relpath behavior
|
||||||
|
# For pathlib, relative_to will raise ValueError if not possible.
|
||||||
|
return str(data.relative_to(metadata_dir))
|
||||||
|
else:
|
||||||
|
# If it's already relative, assume it's correct or handle as needed
|
||||||
|
return str(data)
|
||||||
|
except ValueError:
|
||||||
|
# If paths are on different drives or cannot be made relative,
|
||||||
|
# log a warning and return the absolute path as a string.
|
||||||
|
# This can happen if an output path was explicitly set to an unrelated directory.
|
||||||
|
logger.warning(
|
||||||
|
f"Asset '{asset_name_for_log}': Could not make path {data} "
|
||||||
|
f"relative to {metadata_dir}. Storing as absolute."
|
||||||
|
)
|
||||||
|
return str(data)
|
||||||
|
if isinstance(data, datetime.datetime): # Ensure datetime is serializable
|
||||||
|
return data.isoformat()
|
||||||
|
if isinstance(data, dict):
|
||||||
|
return {k: make_serializable(v) for k, v in data.items()}
|
||||||
|
if isinstance(data, list):
|
||||||
|
return [make_serializable(i) for i in data]
|
||||||
|
return data
|
||||||
|
|
||||||
|
# final_output_files is populated by OutputOrganizationStage. Explicitly remove it as per user request.
|
||||||
|
context.asset_metadata.pop('final_output_files', None)
|
||||||
|
serializable_metadata = make_serializable(context.asset_metadata)
|
||||||
|
|
||||||
|
with open(metadata_save_path, 'w') as f:
|
||||||
|
json.dump(serializable_metadata, f, indent=4)
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Metadata saved to {metadata_save_path}") # Use asset_name_for_log
|
||||||
|
context.asset_metadata['metadata_file_path'] = str(metadata_save_path)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Asset '{asset_name_for_log}': Failed to save metadata to {metadata_save_path}. Error: {e}") # Use asset_name_for_log
|
||||||
|
context.asset_metadata['status'] = "Failed (Metadata Save Error)"
|
||||||
|
context.status_flags['metadata_save_error'] = True
|
||||||
|
|
||||||
|
return context
|
||||||
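As a rough illustration of what the serialization helper above does, here is a standalone sketch of the same Path/datetime-to-JSON conversion (simplified: paths are stored relative to a given directory when possible and absolute otherwise; this is not the project's exact implementation):

```python
import datetime
import json
from pathlib import Path

def to_jsonable(data, base_dir: Path):
    """Recursively convert Paths and datetimes so json.dumps can handle them."""
    if isinstance(data, Path):
        try:
            return str(data.relative_to(base_dir)) if data.is_absolute() else str(data)
        except ValueError:
            return str(data)  # e.g. different drive: fall back to the absolute path
    if isinstance(data, datetime.datetime):
        return data.isoformat()
    if isinstance(data, dict):
        return {k: to_jsonable(v, base_dir) for k, v in data.items()}
    if isinstance(data, list):
        return [to_jsonable(v, base_dir) for v in data]
    return data

meta = {"status": "Processed", "saved_at": datetime.datetime(2024, 1, 1),
        "maps": {"COL": Path("/out/asset/asset_COL_1K.png")}}
print(json.dumps(to_jsonable(meta, Path("/out/asset")), indent=2))
```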
178
processing/pipeline/stages/metadata_initialization.py
Normal file
@@ -0,0 +1,178 @@
import datetime
import logging

from .base_stage import ProcessingStage
from ..asset_context import AssetProcessingContext

# Note on imports: base_stage.py lives in processing/pipeline/stages/ and
# asset_context.py in processing/pipeline/, so the relative imports above
# (.base_stage and ..asset_context) match the actual package layout rather than
# the ..base_stage / ...asset_context variants mentioned in the original task notes.

logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class MetadataInitializationStage(ProcessingStage):
|
||||||
|
"""
|
||||||
|
Initializes metadata structures within the AssetProcessingContext.
|
||||||
|
This stage sets up asset_metadata, processed_maps_details, and
|
||||||
|
merged_maps_details.
|
||||||
|
"""
|
||||||
|
def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
logger.debug(f"METADATA_INIT_DEBUG: Entry - context.output_base_path = {context.output_base_path}") # Added
|
||||||
|
"""
|
||||||
|
Executes the metadata initialization logic.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
context: The AssetProcessingContext for the current asset.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The modified AssetProcessingContext.
|
||||||
|
"""
|
||||||
|
if context.status_flags.get('skip_asset', False):
|
||||||
|
logger.debug(f"Asset '{context.asset_rule.asset_name if context.asset_rule else 'Unknown'}': Skipping metadata initialization as 'skip_asset' is True.")
|
||||||
|
return context
|
||||||
|
|
||||||
|
logger.debug(f"Asset '{context.asset_rule.asset_name if context.asset_rule else 'Unknown'}': Initializing metadata.")
|
||||||
|
|
||||||
|
context.asset_metadata = {}
|
||||||
|
context.processed_maps_details = {}
|
||||||
|
context.merged_maps_details = {}
|
||||||
|
|
||||||
|
# Populate Initial asset_metadata
|
||||||
|
if context.asset_rule:
|
||||||
|
context.asset_metadata['asset_name'] = context.asset_rule.asset_name
|
||||||
|
# Attempt to get 'id' from common_metadata or use asset_name as a fallback
|
||||||
|
asset_id_val = context.asset_rule.common_metadata.get('id', context.asset_rule.common_metadata.get('asset_id'))
|
||||||
|
if asset_id_val is None:
|
||||||
|
logger.warning(f"Asset '{context.asset_rule.asset_name}': No 'id' or 'asset_id' found in common_metadata. Using asset_name as asset_id.")
|
||||||
|
asset_id_val = context.asset_rule.asset_name
|
||||||
|
context.asset_metadata['asset_id'] = str(asset_id_val)
|
||||||
|
|
||||||
|
# Assuming source_path, output_path_pattern, tags, custom_fields might also be in common_metadata
|
||||||
|
context.asset_metadata['source_path'] = str(context.asset_rule.common_metadata.get('source_path', 'N/A'))
|
||||||
|
context.asset_metadata['output_path_pattern'] = context.asset_rule.common_metadata.get('output_path_pattern', 'N/A')
|
||||||
|
context.asset_metadata['tags'] = list(context.asset_rule.common_metadata.get('tags', []))
|
||||||
|
context.asset_metadata['custom_fields'] = dict(context.asset_rule.common_metadata.get('custom_fields', {}))
|
||||||
|
else:
|
||||||
|
# Handle cases where asset_rule might be None, though typically it should be set
|
||||||
|
logger.warning("AssetRule is not set in context during metadata initialization.")
|
||||||
|
context.asset_metadata['asset_name'] = "Unknown Asset"
|
||||||
|
context.asset_metadata['asset_id'] = "N/A"
|
||||||
|
context.asset_metadata['source_path'] = "N/A"
|
||||||
|
context.asset_metadata['output_path_pattern'] = "N/A"
|
||||||
|
context.asset_metadata['tags'] = []
|
||||||
|
context.asset_metadata['custom_fields'] = {}
|
||||||
|
|
||||||
|
|
||||||
|
if context.source_rule:
|
||||||
|
# SourceRule also doesn't have 'name' or 'id' directly.
|
||||||
|
# Using 'input_path' as a proxy for name, and a placeholder for id.
|
||||||
|
source_rule_name_val = context.source_rule.input_path if context.source_rule.input_path else "Unknown Source Rule Path"
|
||||||
|
source_rule_id_val = context.source_rule.high_level_sorting_parameters.get('id', "N/A_SR_ID") # Check high_level_sorting_parameters
|
||||||
|
logger.debug(f"SourceRule: using input_path '{source_rule_name_val}' as name, and '{source_rule_id_val}' as id.")
|
||||||
|
context.asset_metadata['source_rule_name'] = source_rule_name_val
|
||||||
|
context.asset_metadata['source_rule_id'] = str(source_rule_id_val)
|
||||||
|
else:
|
||||||
|
logger.warning("SourceRule is not set in context during metadata initialization.")
|
||||||
|
context.asset_metadata['source_rule_name'] = "Unknown Source Rule"
|
||||||
|
context.asset_metadata['source_rule_id'] = "N/A"
|
||||||
|
|
||||||
|
context.asset_metadata['effective_supplier'] = context.effective_supplier
|
||||||
|
context.asset_metadata['processing_start_time'] = datetime.datetime.now().isoformat()
|
||||||
|
context.asset_metadata['status'] = "Pending"
|
||||||
|
|
||||||
|
app_version_value = None
|
||||||
|
if context.config_obj and hasattr(context.config_obj, 'app_version'):
|
||||||
|
app_version_value = context.config_obj.app_version
|
||||||
|
|
||||||
|
if app_version_value:
|
||||||
|
context.asset_metadata['version'] = app_version_value
|
||||||
|
else:
|
||||||
|
logger.warning("App version not found using config_obj.app_version. Setting version to 'N/A'.")
|
||||||
|
context.asset_metadata['version'] = "N/A"
|
||||||
|
|
||||||
|
if context.incrementing_value is not None:
|
||||||
|
context.asset_metadata['incrementing_value'] = context.incrementing_value
|
||||||
|
|
||||||
|
# The plan mentions sha5_value, which is likely a typo for sha256 or similar.
|
||||||
|
# Implementing as 'sha5_value' per instructions, but noting the potential typo.
|
||||||
|
if hasattr(context, 'sha5_value') and context.sha5_value is not None: # Check attribute existence
|
||||||
|
context.asset_metadata['sha5_value'] = context.sha5_value
|
||||||
|
elif hasattr(context, 'sha256_value') and context.sha256_value is not None: # Fallback if sha5 was a typo
|
||||||
|
logger.debug("sha5_value not found, using sha256_value if available for metadata.")
|
||||||
|
context.asset_metadata['sha256_value'] = context.sha256_value
|
||||||
|
|
||||||
|
|
||||||
|
logger.info(f"Asset '{context.asset_metadata.get('asset_name', 'Unknown')}': Metadata initialized.")
|
||||||
|
# Example of how you might log the full metadata for debugging:
|
||||||
|
# logger.debug(f"Initialized metadata: {context.asset_metadata}")
|
||||||
|
|
||||||
|
logger.debug(f"METADATA_INIT_DEBUG: Exit - context.output_base_path = {context.output_base_path}") # Added
|
||||||
|
return context
|
||||||
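For orientation, the dictionary this stage leaves on the context looks roughly like the following (values are made up for illustration; the keys mirror the assignments above):

```python
# Illustrative shape of context.asset_metadata right after MetadataInitializationStage runs.
example_asset_metadata = {
    "asset_name": "BrickWall01",
    "asset_id": "BrickWall01",          # falls back to asset_name when no id is found
    "source_path": "N/A",
    "output_path_pattern": "N/A",
    "tags": [],
    "custom_fields": {},
    "source_rule_name": "C:/input/supplier_pack.zip",
    "source_rule_id": "N/A_SR_ID",
    "effective_supplier": "SupplierX",
    "processing_start_time": "2024-01-01T12:00:00",
    "status": "Pending",
    "version": "N/A",
}
```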
155
processing/pipeline/stages/normal_map_green_channel.py
Normal file
@@ -0,0 +1,155 @@
import logging
import numpy as np
from pathlib import Path
from typing import List

from .base_stage import ProcessingStage
from ..asset_context import AssetProcessingContext
from rule_structure import FileRule
from ...utils import image_processing_utils as ipu
from utils.path_utils import sanitize_filename

logger = logging.getLogger(__name__)

class NormalMapGreenChannelStage(ProcessingStage):
"""
Processing stage to invert the green channel of normal maps if configured.
This is often needed when converting between DirectX (Y-) and OpenGL (Y+) normal map formats.
"""

def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
"""
|
||||||
|
Identifies NORMAL maps, checks configuration for green channel inversion,
|
||||||
|
performs inversion if needed, saves a new temporary file, and updates
|
||||||
|
the AssetProcessingContext.
|
||||||
|
"""
|
||||||
|
asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
|
||||||
|
if context.status_flags.get('skip_asset'):
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Skipping NormalMapGreenChannelStage due to skip_asset flag.")
|
||||||
|
return context
|
||||||
|
|
||||||
|
if not context.processed_maps_details: # Check processed_maps_details primarily
|
||||||
|
logger.debug(
|
||||||
|
f"Asset '{asset_name_for_log}': No processed_maps_details in NormalMapGreenChannelStage. Skipping."
|
||||||
|
)
|
||||||
|
return context
|
||||||
|
|
||||||
|
processed_a_normal_map = False
|
||||||
|
|
||||||
|
# Iterate through processed maps, as FileRule objects don't have IDs directly
|
||||||
|
for map_id_hex, map_details in context.processed_maps_details.items():
|
||||||
|
# Check if the map is a processed normal map using the standardized internal_map_type
|
||||||
|
internal_map_type = map_details.get('internal_map_type')
|
||||||
|
if internal_map_type and internal_map_type.startswith("MAP_NRM") and map_details.get('status') == 'Processed':
|
||||||
|
|
||||||
|
# Check configuration for inversion
|
||||||
|
# Assuming general_settings is an attribute of config_obj and might be a dict or an object
|
||||||
|
should_invert = False
|
||||||
|
if hasattr(context.config_obj, 'general_settings'):
|
||||||
|
if isinstance(context.config_obj.general_settings, dict):
|
||||||
|
should_invert = context.config_obj.general_settings.get('invert_normal_map_green_channel_globally', False)
|
||||||
|
elif hasattr(context.config_obj.general_settings, 'invert_normal_map_green_channel_globally'):
|
||||||
|
should_invert = getattr(context.config_obj.general_settings, 'invert_normal_map_green_channel_globally', False)
|
||||||
|
|
||||||
|
original_temp_path_str = map_details.get('temp_processed_file')
|
||||||
|
if not original_temp_path_str:
|
||||||
|
logger.warning(f"Asset '{asset_name_for_log}': Normal map (ID: {map_id_hex}) missing 'temp_processed_file' in details. Skipping.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
original_temp_path = Path(original_temp_path_str)
|
||||||
|
original_filename_for_log = original_temp_path.name
|
||||||
|
|
||||||
|
if not should_invert:
|
||||||
|
logger.debug(
|
||||||
|
f"Asset '{asset_name_for_log}': Normal map green channel inversion not enabled. "
|
||||||
|
f"Skipping for {original_filename_for_log} (ID: {map_id_hex})."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not original_temp_path.exists():
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Temporary file {original_temp_path} for normal map "
|
||||||
|
f"{original_filename_for_log} (ID: {map_id_hex}) does not exist. Cannot invert green channel."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
image_data = ipu.load_image(original_temp_path)
|
||||||
|
|
||||||
|
if image_data is None:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Failed to load image from {original_temp_path} "
|
||||||
|
f"for normal map {original_filename_for_log} (ID: {map_id_hex})."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if image_data.ndim != 3 or image_data.shape[2] < 2: # Must have at least R, G channels
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Image {original_temp_path} for normal map "
|
||||||
|
f"{original_filename_for_log} (ID: {map_id_hex}) is not a valid RGB/normal map "
|
||||||
|
f"(ndim={image_data.ndim}, channels={image_data.shape[2] if image_data.ndim == 3 else 'N/A'}) "
|
||||||
|
f"for green channel inversion."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Perform Green Channel Inversion
|
||||||
|
modified_image_data = image_data.copy()
|
||||||
|
try:
|
||||||
|
if np.issubdtype(modified_image_data.dtype, np.floating):
|
||||||
|
modified_image_data[:, :, 1] = 1.0 - modified_image_data[:, :, 1]
|
||||||
|
elif np.issubdtype(modified_image_data.dtype, np.integer):
|
||||||
|
max_val = np.iinfo(modified_image_data.dtype).max
|
||||||
|
modified_image_data[:, :, 1] = max_val - modified_image_data[:, :, 1]
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Unsupported image data type "
|
||||||
|
f"{modified_image_data.dtype} for normal map {original_temp_path}. Cannot invert green channel."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
except IndexError:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Image {original_temp_path} for normal map "
|
||||||
|
f"{original_filename_for_log} (ID: {map_id_hex}) does not have a green channel (index 1) "
|
||||||
|
f"or has unexpected dimensions ({modified_image_data.shape}). Cannot invert."
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Save New Temporary (Modified Normal) Map
|
||||||
|
# Sanitize map_details.get('map_type') in case it's missing, though it should be 'NORMAL' here
|
||||||
|
map_type_for_filename = sanitize_filename(map_details.get('map_type', 'NORMAL'))
|
||||||
|
new_temp_filename = f"normal_g_inv_{map_type_for_filename}_{map_id_hex}{original_temp_path.suffix}"
|
||||||
|
new_temp_path = context.engine_temp_dir / new_temp_filename
|
||||||
|
|
||||||
|
save_success = ipu.save_image(new_temp_path, modified_image_data)
|
||||||
|
|
||||||
|
if save_success:
|
||||||
|
logger.info(
|
||||||
|
f"Asset '{asset_name_for_log}': Inverted green channel for NORMAL map "
|
||||||
|
f"{original_filename_for_log}, saved to {new_temp_path.name}."
|
||||||
|
)
|
||||||
|
# Update processed_maps_details for this map_id_hex
|
||||||
|
context.processed_maps_details[map_id_hex]['temp_processed_file'] = str(new_temp_path)
|
||||||
|
current_notes = context.processed_maps_details[map_id_hex].get('notes', '')
|
||||||
|
context.processed_maps_details[map_id_hex]['notes'] = \
|
||||||
|
f"{current_notes}; Green channel inverted by NormalMapGreenChannelStage".strip('; ')
|
||||||
|
|
||||||
|
processed_a_normal_map = True
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Asset '{asset_name_for_log}': Failed to save inverted normal map to {new_temp_path} "
|
||||||
|
f"for original {original_filename_for_log}."
|
||||||
|
)
|
||||||
|
# No need to explicitly manage new_files_to_process list in this loop,
|
||||||
|
# as we are modifying the temp_processed_file path within processed_maps_details.
|
||||||
|
# The existing FileRule objects in context.files_to_process (if any) would
|
||||||
|
# be linked to these details by a previous stage (e.g. IndividualMapProcessing)
|
||||||
|
# if that stage populates a 'file_rule_id' in map_details.
|
||||||
|
|
||||||
|
# context.files_to_process remains unchanged by this stage directly,
|
||||||
|
# as we modify the data pointed to by processed_maps_details.
|
||||||
|
|
||||||
|
if processed_a_normal_map:
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': NormalMapGreenChannelStage processed relevant normal maps.")
|
||||||
|
else:
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': No normal maps found or processed in NormalMapGreenChannelStage.")
|
||||||
|
|
||||||
|
return context
|
||||||
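The inversion itself reduces to a one-line flip of the G channel per dtype family. A minimal standalone sketch of that operation, assuming an RGB(ish) array as the stage above requires:

```python
import numpy as np

def invert_green(normal_map: np.ndarray) -> np.ndarray:
    """Flip the Y direction of a normal map by inverting its green channel."""
    out = normal_map.copy()
    if np.issubdtype(out.dtype, np.floating):
        out[:, :, 1] = 1.0 - out[:, :, 1]                       # float maps in [0, 1]
    else:
        out[:, :, 1] = np.iinfo(out.dtype).max - out[:, :, 1]   # e.g. 255 - g for uint8
    return out

directx_normal = np.dstack([np.full((2, 2), v, dtype=np.uint8) for v in (128, 200, 255)])
opengl_normal = invert_green(directx_normal)
assert opengl_normal[0, 0, 1] == 55  # 255 - 200
```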
307
processing/pipeline/stages/output_organization.py
Normal file
@@ -0,0 +1,307 @@
import logging
import shutil
from pathlib import Path
from typing import List, Dict, Optional

from .base_stage import ProcessingStage
from ..asset_context import AssetProcessingContext
from utils.path_utils import generate_path_from_pattern, sanitize_filename, get_filename_friendly_map_type # Absolute import
from rule_structure import FileRule # Assuming these are needed for type hints if not directly in context

log = logging.getLogger(__name__)
logger = logging.getLogger(__name__)

class OutputOrganizationStage(ProcessingStage):
"""
Organizes output files by copying temporary processed files to their final destinations.
"""

def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
|
||||||
|
asset_name_for_log_early = context.asset_rule.asset_name if hasattr(context, 'asset_rule') and context.asset_rule else "Unknown Asset (early)"
|
||||||
|
log.info(f"OUTPUT_ORG_DEBUG: Stage execution started for asset '{asset_name_for_log_early}'.")
|
||||||
|
logger.debug(f"OUTPUT_ORG_DEBUG: Entry - context.output_base_path = {context.output_base_path}") # Modified
|
||||||
|
log.info(f"OUTPUT_ORG_DEBUG: Received context.config_obj.output_directory_base (raw from config) = {getattr(context.config_obj, 'output_directory_base', 'N/A')}")
|
||||||
|
# resolved_base = "N/A"
|
||||||
|
# if hasattr(context.config_obj, '_settings') and context.config_obj._settings.get('OUTPUT_BASE_DIR'):
|
||||||
|
# base_dir_from_settings = context.config_obj._settings.get('OUTPUT_BASE_DIR')
|
||||||
|
# Path resolution logic might be complex
|
||||||
|
# log.info(f"OUTPUT_ORG_DEBUG: Received context.config_obj._settings.OUTPUT_BASE_DIR (resolved guess) = {resolved_base}")
|
||||||
|
log.info(f"OUTPUT_ORG_DEBUG: context.processed_maps_details at start: {context.processed_maps_details}")
|
||||||
|
"""
|
||||||
|
Copies temporary processed and merged files to their final output locations
|
||||||
|
based on path patterns and updates AssetProcessingContext.
|
||||||
|
"""
|
||||||
|
asset_name_for_log = context.asset_rule.asset_name if hasattr(context, 'asset_rule') and context.asset_rule else "Unknown Asset"
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Starting output organization stage.")
|
||||||
|
|
||||||
|
if context.status_flags.get('skip_asset'):
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Output organization skipped as 'skip_asset' is True.")
|
||||||
|
return context
|
||||||
|
|
||||||
|
current_status = context.asset_metadata.get('status', '')
|
||||||
|
if current_status.startswith("Failed") or current_status == "Skipped":
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Output organization skipped due to prior status: {current_status}.")
|
||||||
|
return context
|
||||||
|
|
||||||
|
final_output_files: List[str] = []
|
||||||
|
overwrite_existing = context.config_obj.overwrite_existing
|
||||||
|
|
||||||
|
output_dir_pattern = getattr(context.config_obj, 'output_directory_pattern', "[supplier]/[assetname]")
|
||||||
|
output_filename_pattern_config = getattr(context.config_obj, 'output_filename_pattern', "[assetname]_[maptype]_[resolution].[ext]")
|
||||||
|
|
||||||
|
|
||||||
|
# A. Organize Processed Individual Maps
|
||||||
|
if context.processed_maps_details:
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Organizing {len(context.processed_maps_details)} processed individual map entries.")
|
||||||
|
for processed_map_key, details in context.processed_maps_details.items():
|
||||||
|
map_status = details.get('status')
|
||||||
|
# Retrieve the internal map type first
|
||||||
|
internal_map_type = details.get('internal_map_type', 'unknown_map_type')
|
||||||
|
# Convert internal type to filename-friendly type using the helper
|
||||||
|
file_type_definitions = getattr(context.config_obj, "FILE_TYPE_DEFINITIONS", {})
|
||||||
|
base_map_type = get_filename_friendly_map_type(internal_map_type, file_type_definitions) # Final filename-friendly type
|
||||||
|
|
||||||
|
# --- Handle maps processed by the SaveVariantsStage (identified by having saved_files_info) ---
|
||||||
|
saved_files_info = details.get('saved_files_info') # This is a list of dicts from SaveVariantsOutput
|
||||||
|
|
||||||
|
# Check if 'saved_files_info' exists and is a non-empty list.
|
||||||
|
# This indicates the item was processed by SaveVariantsStage.
|
||||||
|
if saved_files_info and isinstance(saved_files_info, list) and len(saved_files_info) > 0:
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Organizing {len(saved_files_info)} variants for map key '{processed_map_key}' (map type: {base_map_type}) from SaveVariantsStage.")
|
||||||
|
|
||||||
|
# Use base_map_type (e.g., "COL") as the key for the map entry
|
||||||
|
map_metadata_entry = context.asset_metadata.setdefault('maps', {}).setdefault(base_map_type, {})
|
||||||
|
# map_type is now the key, so no need to store it inside the entry
|
||||||
|
# map_metadata_entry['map_type'] = base_map_type
|
||||||
|
map_metadata_entry.setdefault('variant_paths', {}) # Initialize if not present
|
||||||
|
|
||||||
|
processed_any_variant_successfully = False
|
||||||
|
failed_any_variant = False
|
||||||
|
|
||||||
|
for variant_index, variant_detail in enumerate(saved_files_info):
|
||||||
|
# Extract info from the save utility's output structure
|
||||||
|
temp_variant_path_str = variant_detail.get('path') # Key is 'path'
|
||||||
|
if not temp_variant_path_str:
|
||||||
|
logger.warning(f"Asset '{asset_name_for_log}': Variant {variant_index} for map '{processed_map_key}' is missing 'path' in saved_files_info. Skipping.")
|
||||||
|
# Optionally update variant_detail status if it's mutable and tracked, otherwise just skip
|
||||||
|
continue
|
||||||
|
|
||||||
|
temp_variant_path = Path(temp_variant_path_str)
|
||||||
|
if not temp_variant_path.is_file():
|
||||||
|
logger.warning(f"Asset '{asset_name_for_log}': Temporary variant file '{temp_variant_path}' for map '{processed_map_key}' not found. Skipping.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
variant_resolution_key = variant_detail.get('resolution_key', f"varRes{variant_index}")
|
||||||
|
variant_ext = variant_detail.get('format', temp_variant_path.suffix.lstrip('.')) # Use 'format' key
|
||||||
|
|
||||||
|
token_data_variant = {
|
||||||
|
"assetname": asset_name_for_log,
|
||||||
|
"supplier": context.effective_supplier or "DefaultSupplier",
|
||||||
|
"maptype": base_map_type,
|
||||||
|
"resolution": variant_resolution_key,
|
||||||
|
"ext": variant_ext,
|
||||||
|
"incrementingvalue": getattr(context, 'incrementing_value', None),
|
||||||
|
"sha5": getattr(context, 'sha5_value', None)
|
||||||
|
}
|
||||||
|
token_data_variant_cleaned = {k: v for k, v in token_data_variant.items() if v is not None}
|
||||||
|
output_filename_variant = generate_path_from_pattern(output_filename_pattern_config, token_data_variant_cleaned)
|
||||||
|
|
||||||
|
try:
|
||||||
|
relative_dir_path_str_variant = generate_path_from_pattern(
|
||||||
|
pattern_string=output_dir_pattern,
|
||||||
|
token_data=token_data_variant_cleaned
|
||||||
|
)
|
||||||
|
logger.debug(f"OUTPUT_ORG_DEBUG: Variants - Using context.output_base_path = {context.output_base_path} for final_variant_path construction.") # Added
|
||||||
|
final_variant_path = Path(context.output_base_path) / Path(relative_dir_path_str_variant) / Path(output_filename_variant)
|
||||||
|
logger.debug(f"OUTPUT_ORG_DEBUG: Variants - Constructed final_variant_path = {final_variant_path}") # Added
|
||||||
|
final_variant_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
if final_variant_path.exists() and not overwrite_existing:
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Output variant file {final_variant_path} for map '{processed_map_key}' (res: {variant_resolution_key}) exists and overwrite is disabled. Skipping copy.")
|
||||||
|
# Optionally update variant_detail status if needed
|
||||||
|
else:
|
||||||
|
shutil.copy2(temp_variant_path, final_variant_path)
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Copied variant {temp_variant_path} to {final_variant_path} for map '{processed_map_key}'.")
|
||||||
|
final_output_files.append(str(final_variant_path))
|
||||||
|
# Optionally update variant_detail status if needed
|
||||||
|
|
||||||
|
# Store relative path in metadata
|
||||||
|
# Store only the filename, as it's relative to the metadata.json location
|
||||||
|
map_metadata_entry['variant_paths'][variant_resolution_key] = output_filename_variant
|
||||||
|
processed_any_variant_successfully = True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Asset '{asset_name_for_log}': Failed to copy variant {temp_variant_path} for map key '{processed_map_key}' (res: {variant_resolution_key}). Error: {e}", exc_info=True)
|
||||||
|
context.status_flags['output_organization_error'] = True
|
||||||
|
context.asset_metadata['status'] = "Failed (Output Organization Error - Variant)"
|
||||||
|
# Optionally update variant_detail status if needed
|
||||||
|
failed_any_variant = True
|
||||||
|
|
||||||
|
# Update parent map detail status based on variant outcomes
|
||||||
|
if failed_any_variant:
|
||||||
|
details['status'] = 'Organization Failed (Save Utility Variants)'
|
||||||
|
elif processed_any_variant_successfully:
|
||||||
|
details['status'] = 'Organized (Save Utility Variants)'
|
||||||
|
else: # No variants were successfully copied (e.g., all skipped due to existing file or missing temp file)
|
||||||
|
details['status'] = 'Organization Skipped (No Save Utility Variants Copied/Needed)'
|
||||||
|
|
||||||
|
# --- Handle older/other processing statuses (like single file processing) ---
|
||||||
|
elif map_status in ['Processed', 'Processed_No_Variants', 'Converted_To_Rough']: # Add other single-file statuses if needed
|
||||||
|
temp_file_path_str = details.get('temp_processed_file')
|
||||||
|
if not temp_file_path_str:
|
||||||
|
logger.warning(f"Asset '{asset_name_for_log}': Skipping map key '{processed_map_key}' (status '{map_status}') due to missing 'temp_processed_file'.")
|
||||||
|
details['status'] = 'Organization Skipped (Missing Temp File)'
|
||||||
|
continue
|
||||||
|
|
||||||
|
temp_file_path = Path(temp_file_path_str)
|
||||||
|
if not temp_file_path.is_file():
|
||||||
|
logger.warning(f"Asset '{asset_name_for_log}': Temporary file '{temp_file_path}' for map '{processed_map_key}' not found. Skipping.")
|
||||||
|
details['status'] = 'Organization Skipped (Temp File Not Found)'
|
||||||
|
continue
|
||||||
|
|
||||||
|
resolution_str = details.get('processed_resolution_name', details.get('original_resolution_name', 'resX'))
|
||||||
|
|
||||||
|
token_data = {
|
||||||
|
"assetname": asset_name_for_log,
|
||||||
|
"supplier": context.effective_supplier or "DefaultSupplier",
|
||||||
|
"maptype": base_map_type,
|
||||||
|
"resolution": resolution_str,
|
||||||
|
"ext": temp_file_path.suffix.lstrip('.'),
|
||||||
|
"incrementingvalue": getattr(context, 'incrementing_value', None),
|
||||||
|
"sha5": getattr(context, 'sha5_value', None)
|
||||||
|
}
|
||||||
|
token_data_cleaned = {k: v for k, v in token_data.items() if v is not None}
|
||||||
|
|
||||||
|
output_filename = generate_path_from_pattern(output_filename_pattern_config, token_data_cleaned)
|
||||||
|
|
||||||
|
try:
|
||||||
|
relative_dir_path_str = generate_path_from_pattern(
|
||||||
|
pattern_string=output_dir_pattern,
|
||||||
|
token_data=token_data_cleaned
|
||||||
|
)
|
||||||
|
logger.debug(f"OUTPUT_ORG_DEBUG: SingleFile - Using context.output_base_path = {context.output_base_path} for final_path construction.") # Added
|
||||||
|
final_path = Path(context.output_base_path) / Path(relative_dir_path_str) / Path(output_filename)
|
||||||
|
logger.debug(f"OUTPUT_ORG_DEBUG: SingleFile - Constructed final_path = {final_path}") # Added
|
||||||
|
final_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
if final_path.exists() and not overwrite_existing:
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Output file {final_path} for map '{processed_map_key}' exists and overwrite is disabled. Skipping copy.")
|
||||||
|
details['status'] = 'Organized (Exists, Skipped Copy)'
|
||||||
|
else:
|
||||||
|
shutil.copy2(temp_file_path, final_path)
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Copied {temp_file_path} to {final_path} for map '{processed_map_key}'.")
|
||||||
|
final_output_files.append(str(final_path))
|
||||||
|
details['status'] = 'Organized'
|
||||||
|
|
||||||
|
details['final_output_path'] = str(final_path)
|
||||||
|
|
||||||
|
# Update asset_metadata for metadata.json
|
||||||
|
# Use base_map_type (e.g., "COL") as the key for the map entry
|
||||||
|
map_metadata_entry = context.asset_metadata.setdefault('maps', {}).setdefault(base_map_type, {})
|
||||||
|
# map_type is now the key, so no need to store it inside the entry
|
||||||
|
# map_metadata_entry['map_type'] = base_map_type
|
||||||
|
# Store single path in variant_paths, keyed by its resolution string
|
||||||
|
# Store only the filename, as it's relative to the metadata.json location
|
||||||
|
map_metadata_entry.setdefault('variant_paths', {})[resolution_str] = output_filename
|
||||||
|
# Remove old cleanup logic, as variant_paths is now the standard
|
||||||
|
# if 'variant_paths' in map_metadata_entry:
|
||||||
|
# del map_metadata_entry['variant_paths']
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Asset '{asset_name_for_log}': Failed to copy {temp_file_path} for map key '{processed_map_key}'. Error: {e}", exc_info=True)
|
||||||
|
context.status_flags['output_organization_error'] = True
|
||||||
|
context.asset_metadata['status'] = "Failed (Output Organization Error)"
|
||||||
|
details['status'] = 'Organization Failed'
|
||||||
|
|
||||||
|
# --- Handle other statuses (Skipped, Failed, etc.) ---
|
||||||
|
else: # Catches statuses not explicitly handled above
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Skipping map key '{processed_map_key}' (status: '{map_status}') for organization as it's not a recognized final processed state or variant state.")
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': No processed individual maps to organize.")
|
||||||
|
|
||||||
|
# B. Organize Merged Maps (OBSOLETE BLOCK - Merged maps are handled by the main loop processing context.processed_maps_details)
|
||||||
|
# The log "No merged maps to organize" will no longer appear from here.
|
||||||
|
# If merged maps are not appearing, the issue is likely that they are not being added
|
||||||
|
# to context.processed_maps_details with 'saved_files_info' by the orchestrator/SaveVariantsStage.
|
||||||
|
|
||||||
|
# C. Organize Extra Files (e.g., previews, text files)
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Checking for EXTRA files to organize.")
|
||||||
|
extra_files_organized_count = 0
|
||||||
|
if hasattr(context, 'files_to_process') and context.files_to_process:
|
||||||
|
extra_subdir_name = getattr(context.config_obj, 'extra_files_subdir', 'Extra') # Default to 'Extra'
|
||||||
|
|
||||||
|
for file_rule in context.files_to_process:
|
||||||
|
if file_rule.item_type == 'EXTRA':
|
||||||
|
source_file_path = context.workspace_path / file_rule.file_path
|
||||||
|
if not source_file_path.is_file():
|
||||||
|
logger.warning(f"Asset '{asset_name_for_log}': EXTRA file '{source_file_path}' not found. Skipping.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Basic token data for the asset's base output directory
|
||||||
|
# We don't use map_type, resolution, or ext for the base directory of extras.
|
||||||
|
# However, generate_path_from_pattern might expect them or handle their absence.
|
||||||
|
# For the base asset directory, only assetname and supplier are typically primary.
|
||||||
|
base_token_data = {
|
||||||
|
"assetname": asset_name_for_log,
|
||||||
|
"supplier": context.effective_supplier or "DefaultSupplier",
|
||||||
|
# Add other tokens if your output_directory_pattern uses them at the asset level
|
||||||
|
"incrementingvalue": getattr(context, 'incrementing_value', None),
|
||||||
|
"sha5": getattr(context, 'sha5_value', None)
|
||||||
|
}
|
||||||
|
base_token_data_cleaned = {k: v for k, v in base_token_data.items() if v is not None}
|
||||||
|
|
||||||
|
try:
|
||||||
|
asset_base_output_dir_str = generate_path_from_pattern(
|
||||||
|
pattern_string=output_dir_pattern, # Uses the same pattern as other maps for base dir
|
||||||
|
token_data=base_token_data_cleaned
|
||||||
|
)
|
||||||
|
# Destination: <output_base_path>/<asset_base_output_dir_str>/<extra_subdir_name>/<original_filename>
|
||||||
|
logger.debug(f"OUTPUT_ORG_DEBUG: ExtraFiles - Using context.output_base_path = {context.output_base_path} for final_dest_path construction.") # Added
|
||||||
|
final_dest_path = (Path(context.output_base_path) /
|
||||||
|
Path(asset_base_output_dir_str) /
|
||||||
|
Path(extra_subdir_name) /
|
||||||
|
source_file_path.name) # Use original filename
|
||||||
|
logger.debug(f"OUTPUT_ORG_DEBUG: ExtraFiles - Constructed final_dest_path = {final_dest_path}") # Added
|
||||||
|
|
||||||
|
final_dest_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
if final_dest_path.exists() and not overwrite_existing:
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': EXTRA file destination {final_dest_path} exists and overwrite is disabled. Skipping copy.")
|
||||||
|
else:
|
||||||
|
shutil.copy2(source_file_path, final_dest_path)
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Copied EXTRA file {source_file_path} to {final_dest_path}")
|
||||||
|
final_output_files.append(str(final_dest_path))
|
||||||
|
extra_files_organized_count += 1
|
||||||
|
|
||||||
|
# Optionally, add more detailed tracking for extra files in context.asset_metadata
|
||||||
|
# For example:
|
||||||
|
# if 'extra_files_details' not in context.asset_metadata:
|
||||||
|
# context.asset_metadata['extra_files_details'] = []
|
||||||
|
# context.asset_metadata['extra_files_details'].append({
|
||||||
|
# 'source_path': str(source_file_path),
|
||||||
|
# 'destination_path': str(final_dest_path),
|
||||||
|
# 'status': 'Organized'
|
||||||
|
# })
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Asset '{asset_name_for_log}': Failed to copy EXTRA file {source_file_path} to destination. Error: {e}", exc_info=True)
|
||||||
|
context.status_flags['output_organization_error'] = True
|
||||||
|
context.asset_metadata['status'] = "Failed (Output Organization Error - Extra Files)"
|
||||||
|
# Optionally, update status for the specific file_rule if tracked
|
||||||
|
|
||||||
|
if extra_files_organized_count > 0:
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Successfully organized {extra_files_organized_count} EXTRA file(s).")
|
||||||
|
else:
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': No EXTRA files were processed or found to organize.")
|
||||||
|
|
||||||
|
|
||||||
|
context.asset_metadata['final_output_files'] = final_output_files
|
||||||
|
|
||||||
|
if context.status_flags.get('output_organization_error'):
|
||||||
|
logger.error(f"Asset '{asset_name_for_log}': Output organization encountered errors. Status: {context.asset_metadata['status']}")
|
||||||
|
else:
|
||||||
|
logger.info(f"Asset '{asset_name_for_log}': Output organization complete. {len(final_output_files)} files placed.")
|
||||||
|
|
||||||
|
logger.debug(f"Asset '{asset_name_for_log}': Output organization stage finished.")
|
||||||
|
return context
|
||||||
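The final destinations above are driven entirely by token substitution into the configured directory and filename patterns. A small standalone illustration of how the default patterns expand (this is a toy substitute, not the project's generate_path_from_pattern utility):

```python
import re

def expand_pattern(pattern: str, tokens: dict) -> str:
    """Replace [token] placeholders with their values; unknown tokens become empty."""
    return re.sub(r"\[(\w+)\]", lambda m: str(tokens.get(m.group(1), "")), pattern)

tokens = {"assetname": "BrickWall01", "supplier": "SupplierX",
          "maptype": "COL", "resolution": "2K", "ext": "png"}
directory = expand_pattern("[supplier]/[assetname]", tokens)
filename = expand_pattern("[assetname]_[maptype]_[resolution].[ext]", tokens)
print(directory, filename)  # SupplierX/BrickWall01 BrickWall01_COL_2K.png
```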
216
processing/pipeline/stages/prepare_processing_items.py
Normal file
@@ -0,0 +1,216 @@
import logging
from typing import List, Union, Optional, Tuple, Dict # Added Dict
from pathlib import Path # Added Path

from .base_stage import ProcessingStage
from ..asset_context import AssetProcessingContext, MergeTaskDefinition
from rule_structure import FileRule, ProcessingItem # Added ProcessingItem
from processing.utils import image_processing_utils as ipu # Added ipu

log = logging.getLogger(__name__)

class PrepareProcessingItemsStage(ProcessingStage):
"""
Identifies and prepares a unified list of ProcessingItem and MergeTaskDefinition objects
to be processed in subsequent stages. Performs initial validation and explodes
FileRules into specific ProcessingItems for each required output variant.
"""

def _get_target_resolutions(self, source_w: int, source_h: int, config_resolutions: dict, file_rule: FileRule) -> Dict[str, int]:
|
||||||
|
"""
|
||||||
|
Determines the target output resolutions for a given source image.
|
||||||
|
Placeholder logic: Uses all config resolutions smaller than or equal to source, plus PREVIEW if smaller.
|
||||||
|
Needs to be refined to consider FileRule.resolution_override and actual project requirements.
|
||||||
|
"""
|
||||||
|
# For now, very basic logic:
|
||||||
|
# If FileRule has a resolution_override (e.g., (1024,1024)), that might be the *only* target.
|
||||||
|
# This needs to be clarified. Assuming override means *only* that size.
|
||||||
|
if file_rule.resolution_override and isinstance(file_rule.resolution_override, tuple) and len(file_rule.resolution_override) == 2:
|
||||||
|
# How to get a "key" for an arbitrary override? For now, skip if overridden.
|
||||||
|
# This part of the design (how overrides interact with standard resolutions) is unclear.
|
||||||
|
# Let's assume for now that if resolution_override is set, we don't generate standard named resolutions.
|
||||||
|
# This is likely incorrect for a full implementation.
|
||||||
|
log.warning(f"FileRule '{file_rule.file_path}' has resolution_override. Standard resolution key generation skipped (needs design refinement).")
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
target_res = {}
|
||||||
|
max_source_dim = max(source_w, source_h)
|
||||||
|
|
||||||
|
for key, res_val in config_resolutions.items():
|
||||||
|
if key == "PREVIEW": # Always consider PREVIEW if its value is smaller
|
||||||
|
if res_val < max_source_dim : # Or just always include PREVIEW? For now, if smaller.
|
||||||
|
target_res[key] = res_val
|
||||||
|
elif res_val <= max_source_dim:
|
||||||
|
target_res[key] = res_val
|
||||||
|
|
||||||
|
# Ensure PREVIEW is included if it's defined and smaller than the smallest other target, or if no other targets.
|
||||||
|
# This logic is still a bit naive.
|
||||||
|
if "PREVIEW" in config_resolutions and config_resolutions["PREVIEW"] < max_source_dim:
|
||||||
|
if not target_res or config_resolutions["PREVIEW"] < min(v for k,v in target_res.items() if k != "PREVIEW" and isinstance(v,int)):
|
||||||
|
target_res["PREVIEW"] = config_resolutions["PREVIEW"]
|
||||||
|
elif "PREVIEW" in config_resolutions and not target_res : # if only preview is applicable
|
||||||
|
if config_resolutions["PREVIEW"] <= max_source_dim:
|
||||||
|
target_res["PREVIEW"] = config_resolutions["PREVIEW"]
|
||||||
|
|
||||||
|
|
||||||
|
if not target_res and max_source_dim > 0 : # If no standard res is smaller, but image exists
|
||||||
|
log.debug(f"No standard resolutions from config are <= source dimension {max_source_dim}. Only LOWRES (if applicable) or PREVIEW (if smaller) might be generated.")
|
||||||
|
|
||||||
|
log.debug(f"Determined target resolutions for source {source_w}x{source_h}: {target_res}")
|
||||||
|
return target_res
|
||||||
|
|
||||||

    def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
        """
        Populates context.processing_items with ProcessingItem and MergeTaskDefinition objects.
        """
        asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
        log.info(f"Asset '{asset_name_for_log}': Preparing processing items...")

        if context.status_flags.get('skip_asset', False):
            log.info(f"Asset '{asset_name_for_log}': Skipping item preparation due to skip_asset flag.")
            context.processing_items = []
            return context

        # Output list will now be List[Union[ProcessingItem, MergeTaskDefinition]]
        items_to_process: List[Union[ProcessingItem, MergeTaskDefinition]] = []
        preparation_failed = False
        config = context.config_obj

        # --- Process FileRules into ProcessingItems ---
        if context.files_to_process:
            source_path_valid = True
            if not context.source_rule or not context.source_rule.input_path:
                log.error(f"Asset '{asset_name_for_log}': SourceRule or SourceRule.input_path is not set.")
                source_path_valid = False
                preparation_failed = True
                context.status_flags['prepare_items_failed_reason'] = "SourceRule.input_path missing"
            elif not context.workspace_path or not context.workspace_path.is_dir():
                log.error(f"Asset '{asset_name_for_log}': Workspace path '{context.workspace_path}' is invalid.")
                source_path_valid = False
                preparation_failed = True
                context.status_flags['prepare_items_failed_reason'] = "Workspace path invalid"

            if source_path_valid:
                for file_rule in context.files_to_process:
                    log_prefix_fr = f"Asset '{asset_name_for_log}', FileRule '{file_rule.file_path}'"
                    if not file_rule.file_path:
                        log.warning(f"{log_prefix_fr}: Skipping FileRule with empty file_path.")
                        continue

                    item_type = file_rule.item_type_override or file_rule.item_type
                    if not item_type or item_type == "EXTRA" or not item_type.startswith("MAP_"):
                        log.debug(f"{log_prefix_fr}: Item type is '{item_type}'. Not creating map ProcessingItems.")
                        # Optionally, create a different kind of ProcessingItem for EXTRAs if they need pipeline processing
                        continue

                    source_image_path = context.workspace_path / file_rule.file_path
                    if not source_image_path.is_file():
                        log.error(f"{log_prefix_fr}: Source image file not found at '{source_image_path}'. Skipping this FileRule.")
                        preparation_failed = True  # An individual file error can contribute to overall stage failure
                        context.status_flags.setdefault('prepare_items_file_errors', []).append(str(source_image_path))
                        continue

                    # Load image data to get dimensions and for the LOWRES variant.
                    # This data will be passed to subsequent stages via ProcessingItem.
                    # Consider caching this load if RegularMapProcessorStage also loads.
                    # For now, load here because dimensions are needed for the LOWRES decision.
                    log.debug(f"{log_prefix_fr}: Loading image from '{source_image_path}' to determine dimensions and prepare items.")
                    source_image_data = ipu.load_image(str(source_image_path))
                    if source_image_data is None:
                        log.error(f"{log_prefix_fr}: Failed to load image from '{source_image_path}'. Skipping this FileRule.")
                        preparation_failed = True
                        context.status_flags.setdefault('prepare_items_file_errors', []).append(f"Failed to load {source_image_path}")
                        continue

                    orig_h, orig_w = source_image_data.shape[:2]
                    original_dimensions_wh = (orig_w, orig_h)
                    source_bit_depth = ipu.get_image_bit_depth(str(source_image_path))  # Get bit depth from file
                    source_channels = ipu.get_image_channels(source_image_data)

                    # Determine the standard resolutions to generate.
                    # This logic needs to be robust and consider file_rule.resolution_override, etc.
                    # Using the placeholder _get_target_resolutions for now.
                    target_resolutions = self._get_target_resolutions(orig_w, orig_h, config.image_resolutions, file_rule)

                    for res_key, _res_val in target_resolutions.items():
                        pi = ProcessingItem(
                            source_file_info_ref=str(source_image_path),  # Using the full path as ref
                            map_type_identifier=item_type,
                            resolution_key=res_key,
                            image_data=source_image_data.copy(),  # Give each PI its own copy
                            original_dimensions=original_dimensions_wh,
                            current_dimensions=original_dimensions_wh,
                            bit_depth=source_bit_depth,
                            channels=source_channels,
                            status="Pending"
                        )
                        items_to_process.append(pi)
                        log.debug(f"{log_prefix_fr}: Created standard ProcessingItem: {pi.map_type_identifier}_{pi.resolution_key}")

                    # Create LOWRES variant if applicable
                    if config.enable_low_resolution_fallback and max(orig_w, orig_h) < config.low_resolution_threshold:
                        # Check if a LOWRES item for this source_file_info_ref already exists (e.g. if target_resolutions was empty).
                        # This check is important if _get_target_resolutions might return empty for small images.
                        # A more robust way is to ensure LOWRES is distinct from standard resolutions.

                        # Avoid a duplicate LOWRES if _get_target_resolutions somehow already made one (unlikely with the current placeholder)
                        is_lowres_already_added = any(p.resolution_key == "LOWRES" and p.source_file_info_ref == str(source_image_path) for p in items_to_process if isinstance(p, ProcessingItem))

                        if not is_lowres_already_added:
                            pi_lowres = ProcessingItem(
                                source_file_info_ref=str(source_image_path),
                                map_type_identifier=item_type,
                                resolution_key="LOWRES",
                                image_data=source_image_data.copy(),  # Fresh copy for LOWRES
                                original_dimensions=original_dimensions_wh,
                                current_dimensions=original_dimensions_wh,
                                bit_depth=source_bit_depth,
                                channels=source_channels,
                                status="Pending"
                            )
                            items_to_process.append(pi_lowres)
                            log.info(f"{log_prefix_fr}: Created LOWRES ProcessingItem because {orig_w}x{orig_h} < {config.low_resolution_threshold}px threshold.")
                        else:
                            log.debug(f"{log_prefix_fr}: LOWRES item for this source already added by target resolution logic. Skipping duplicate LOWRES creation.")
                    elif config.enable_low_resolution_fallback:
                        log.debug(f"{log_prefix_fr}: Image {orig_w}x{orig_h} not below LOWRES threshold {config.low_resolution_threshold}px.")

            else:  # Source path not valid
                log.warning(f"Asset '{asset_name_for_log}': Skipping creation of ProcessingItems from FileRules due to invalid source/workspace path.")

        # --- Add MergeTaskDefinitions --- (This part remains largely the same)
        merged_tasks_list = getattr(config, 'map_merge_rules', None)
        if merged_tasks_list and isinstance(merged_tasks_list, list):
            log.debug(f"Asset '{asset_name_for_log}': Found {len(merged_tasks_list)} merge tasks in global config.")
            for task_idx, task_data in enumerate(merged_tasks_list):
                if isinstance(task_data, dict):
                    task_key = f"merged_task_{task_idx}"
                    if not task_data.get('output_map_type') or not isinstance(task_data.get('inputs'), dict):
                        log.warning(f"Asset '{asset_name_for_log}', Task Index {task_idx}: Skipping merge task due to missing 'output_map_type' or valid 'inputs'. Task data: {task_data}")
                        continue
                    merge_def = MergeTaskDefinition(task_data=task_data, task_key=task_key)
                    items_to_process.append(merge_def)
                    log.info(f"Asset '{asset_name_for_log}': Added MergeTaskDefinition: Key='{merge_def.task_key}', OutputType='{merge_def.task_data.get('output_map_type', 'N/A')}'")
                else:
                    log.warning(f"Asset '{asset_name_for_log}': Item at index {task_idx} in config.map_merge_rules is not a dict. Skipping. Item: {task_data}")
        # ... (rest of merge task handling) ...

        if not items_to_process and not preparation_failed:  # Check preparation_failed too
            log.info(f"Asset '{asset_name_for_log}': No valid items (ProcessingItem or MergeTaskDefinition) found to process.")

        context.processing_items = items_to_process
        context.intermediate_results = {}  # Initialize intermediate results storage

        if preparation_failed:
            # Set a flag indicating failure during preparation, even if some items were added before the failure
            context.status_flags['prepare_items_failed'] = True
            log.error(f"Asset '{asset_name_for_log}': Item preparation failed. Reason: {context.status_flags.get('prepare_items_failed_reason', 'Unknown')}")
            # Optionally, clear items if failure means nothing should proceed:
            # context.processing_items = []

        log.info(f"Asset '{asset_name_for_log}': Finished preparing items. Found {len(context.processing_items)} valid items.")
        return context
220
processing/pipeline/stages/regular_map_processor.py
Normal file
@@ -0,0 +1,220 @@
import logging
import re
from pathlib import Path
from typing import List, Optional, Tuple, Dict

import cv2
import numpy as np

from .base_stage import ProcessingStage  # Assuming base_stage is in the same directory
from ..asset_context import AssetProcessingContext, ProcessedRegularMapData
from rule_structure import FileRule, AssetRule
from processing.utils import image_processing_utils as ipu  # Absolute import
from utils.path_utils import get_filename_friendly_map_type  # Absolute import

log = logging.getLogger(__name__)


class RegularMapProcessorStage(ProcessingStage):
    """
    Processes a single regular texture map defined by a FileRule.
    Loads the image, determines the map type, applies transformations,
    and returns the processed data.
    """

    # --- Helper Methods (Adapted from IndividualMapProcessingStage) ---

    def _get_suffixed_internal_map_type(
        self,
        asset_rule: Optional[AssetRule],
        current_file_rule: FileRule,
        initial_internal_map_type: str,
        respect_variant_map_types: List[str],
        asset_name_for_log: str
    ) -> str:
        """
        Determines the potentially suffixed internal map type (e.g., MAP_COL-1).
        """
        final_internal_map_type = initial_internal_map_type  # Default

        base_map_type_match = re.match(r"(MAP_[A-Z]+)", initial_internal_map_type)
        if not base_map_type_match or not asset_rule or not asset_rule.files:
            return final_internal_map_type  # Cannot determine suffix without base type or asset rule files

        true_base_map_type = base_map_type_match.group(1)  # This is "MAP_XXX"

        # Find all FileRules in the asset with the same base map type
        peers_of_same_base_type = []
        for fr_asset in asset_rule.files:
            fr_asset_item_type = fr_asset.item_type_override or fr_asset.item_type or "UnknownMapType"
            fr_asset_base_match = re.match(r"(MAP_[A-Z]+)", fr_asset_item_type)
            if fr_asset_base_match and fr_asset_base_match.group(1) == true_base_map_type:
                peers_of_same_base_type.append(fr_asset)

        num_occurrences = len(peers_of_same_base_type)
        current_instance_index = 0  # 1-based index

        try:
            # Find the index based on the FileRule object itself (requires object identity)
            current_instance_index = peers_of_same_base_type.index(current_file_rule) + 1
        except ValueError:
            # Fallback: try matching by file_path if object identity fails (less reliable)
            try:
                current_instance_index = [fr.file_path for fr in peers_of_same_base_type].index(current_file_rule.file_path) + 1
                log.warning(f"Asset '{asset_name_for_log}', FileRule path '{current_file_rule.file_path}': Found peer index using file_path fallback for suffixing.")
            except (ValueError, AttributeError):  # Catch AttributeError if file_path is None
                log.warning(
                    f"Asset '{asset_name_for_log}', FileRule path '{current_file_rule.file_path}' (Initial Type: '{initial_internal_map_type}', Base: '{true_base_map_type}'): "
                    f"Could not find its own instance in the list of {num_occurrences} peers from asset_rule.files using object identity or path. Suffixing may be incorrect."
                )
                # Keep index 0; the suffix logic below will handle it

        # Determine the suffix
        map_type_for_respect_check = true_base_map_type.replace("MAP_", "")  # e.g., "COL"
        is_in_respect_list = map_type_for_respect_check in respect_variant_map_types

        suffix_to_append = ""
        if num_occurrences > 1:
            if current_instance_index > 0:
                suffix_to_append = f"-{current_instance_index}"
            else:
                # If the index is still 0 (not found), don't add a suffix, to avoid ambiguity
                log.warning(f"Asset '{asset_name_for_log}', FileRule path '{current_file_rule.file_path}': Index for multi-occurrence map type '{true_base_map_type}' (count: {num_occurrences}) not determined. Omitting numeric suffix.")
        elif num_occurrences == 1 and is_in_respect_list:
            suffix_to_append = "-1"  # Add a suffix even for a single instance if it is in the respect list

        if suffix_to_append:
            final_internal_map_type = true_base_map_type + suffix_to_append

        if final_internal_map_type != initial_internal_map_type:
            log.debug(f"Asset '{asset_name_for_log}', FileRule path '{current_file_rule.file_path}': Suffixed internal map type determined: '{initial_internal_map_type}' -> '{final_internal_map_type}'")

        return final_internal_map_type
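    # Illustrative sketch of the suffixing rules above (the asset layout is hypothetical):
    # if an AssetRule contains two FileRules whose item_type is "MAP_COL", the first
    # resolves to "MAP_COL-1" and the second to "MAP_COL-2". A single "MAP_COL" stays
    # unsuffixed unless "COL" is listed in respect_variant_map_types, in which case it
    # becomes "MAP_COL-1".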

    # --- Execute Method ---

    def execute(
        self,
        context: AssetProcessingContext,
        file_rule: FileRule  # Specific item passed by the orchestrator
    ) -> ProcessedRegularMapData:
        """
        Processes the given FileRule item.
        """
        asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
        log_prefix = f"Asset '{asset_name_for_log}', File '{file_rule.file_path}'"
        log.info(f"{log_prefix}: Processing Regular Map.")

        # Initialize the output object with a default failure state
        result = ProcessedRegularMapData(
            processed_image_data=np.array([]),  # Placeholder
            final_internal_map_type="Unknown",
            source_file_path=Path(file_rule.file_path or "InvalidPath"),
            original_bit_depth=None,
            original_dimensions=None,
            transformations_applied=[],
            status="Failed",
            error_message="Initialization error"
        )

        try:
            # --- Configuration ---
            config = context.config_obj
            file_type_definitions = getattr(config, "FILE_TYPE_DEFINITIONS", {})
            respect_variant_map_types = getattr(config, "respect_variant_map_types", [])
            invert_normal_green = config.invert_normal_green_globally

            # --- Determine Map Type (with suffix) ---
            initial_internal_map_type = file_rule.item_type_override or file_rule.item_type or "UnknownMapType"
            if not initial_internal_map_type or initial_internal_map_type == "UnknownMapType":
                result.error_message = "Map type (item_type) not defined in FileRule."
                log.error(f"{log_prefix}: {result.error_message}")
                return result  # Early exit

            # Explicitly skip if the determined type doesn't start with "MAP_"
            if not initial_internal_map_type.startswith("MAP_"):
                result.status = "Skipped (Invalid Type)"
                result.error_message = f"FileRule item_type '{initial_internal_map_type}' does not start with 'MAP_'. Skipping processing."
                log.warning(f"{log_prefix}: {result.error_message}")
                return result  # Early exit

            processing_map_type = self._get_suffixed_internal_map_type(
                context.asset_rule, file_rule, initial_internal_map_type, respect_variant_map_types, asset_name_for_log
            )
            result.final_internal_map_type = processing_map_type  # Store the initial suffixed type

            # --- Find and Load Source File ---
            if not file_rule.file_path:  # Should have been caught by the Prepare stage, but double-check
                result.error_message = "FileRule has empty file_path."
                log.error(f"{log_prefix}: {result.error_message}")
                return result

            source_base_path = context.workspace_path
            potential_source_path = source_base_path / file_rule.file_path
            source_file_path_found: Optional[Path] = None

            if potential_source_path.is_file():
                source_file_path_found = potential_source_path
                log.info(f"{log_prefix}: Found source file: {source_file_path_found}")
            else:
                # Optional: add a globbing fallback if needed, similar to the original stage
                log.warning(f"{log_prefix}: Source file not found directly at '{potential_source_path}'. Add globbing if necessary.")
                result.error_message = f"Source file not found at '{potential_source_path}'"
                log.error(f"{log_prefix}: {result.error_message}")
                return result

            result.source_file_path = source_file_path_found  # Update result with the found path

            # Load image
            source_image_data = ipu.load_image(str(source_file_path_found))
            if source_image_data is None:
                result.error_message = f"Failed to load image from '{source_file_path_found}'."
                log.error(f"{log_prefix}: {result.error_message}")
                return result

            original_height, original_width = source_image_data.shape[:2]
            result.original_dimensions = (original_width, original_height)
            log.debug(f"{log_prefix}: Loaded image {result.original_dimensions[0]}x{result.original_dimensions[1]}.")

            # Get the original bit depth
            try:
                result.original_bit_depth = ipu.get_image_bit_depth(str(source_file_path_found))
                log.info(f"{log_prefix}: Determined source bit depth: {result.original_bit_depth}")
            except Exception as e:
                log.warning(f"{log_prefix}: Could not determine source bit depth for {source_file_path_found}: {e}. Setting to None.")
                result.original_bit_depth = None  # Indicate failure to determine

            # --- Apply Transformations ---
            transformed_image_data, final_map_type, transform_notes = ipu.apply_common_map_transformations(
                source_image_data.copy(),  # Pass a copy to avoid modifying the original load
                processing_map_type,
                invert_normal_green,
                file_type_definitions,
                log_prefix
            )
            result.processed_image_data = transformed_image_data
            result.final_internal_map_type = final_map_type  # Update if Gloss->Rough changed it
            result.transformations_applied = transform_notes

            # --- Determine Resolution Key for LOWRES ---
            if config.enable_low_resolution_fallback and result.original_dimensions:
                w, h = result.original_dimensions
                if max(w, h) < config.low_resolution_threshold:
                    result.resolution_key = "LOWRES"
                    log.info(f"{log_prefix}: Image dimensions ({w}x{h}) are below threshold ({config.low_resolution_threshold}px). Flagging as LOWRES.")

            # --- Success ---
            result.status = "Processed"
            result.error_message = None
            log.info(f"{log_prefix}: Successfully processed regular map. Final type: '{result.final_internal_map_type}', ResolutionKey: {result.resolution_key}.")

        except Exception as e:
            log.exception(f"{log_prefix}: Unhandled exception during processing: {e}")
            result.status = "Failed"
            result.error_message = f"Unhandled exception: {e}"
            # Ensure image data is empty on failure if it wasn't set
            if result.processed_image_data is None or result.processed_image_data.size == 0:
                result.processed_image_data = np.array([])

        return result
98
processing/pipeline/stages/save_variants.py
Normal file
@@ -0,0 +1,98 @@
import logging
from typing import List, Dict, Optional  # Added Optional

import numpy as np

from .base_stage import ProcessingStage
# Import necessary context classes and utils
from ..asset_context import SaveVariantsInput, SaveVariantsOutput
from processing.utils import image_saving_utils as isu  # Absolute import
from utils.path_utils import get_filename_friendly_map_type  # Absolute import

log = logging.getLogger(__name__)


class SaveVariantsStage(ProcessingStage):
    """
    Takes final processed image data and configuration, calls the
    save_image_variants utility, and returns the results.
    """

    def execute(self, input_data: SaveVariantsInput) -> SaveVariantsOutput:
        """
        Calls isu.save_image_variants with data from input_data.
        """
        internal_map_type = input_data.internal_map_type
        # The input_data for SaveVariantsStage doesn't directly contain the ProcessingItem.
        # It receives data *derived* from a ProcessingItem by previous stages.
        # For debugging, we'd need to pass more context or rely on what's in output_filename_pattern_tokens.
        resolution_key_from_tokens = input_data.output_filename_pattern_tokens.get('resolution', 'UnknownResKey')
        log_prefix = f"Save Variants Stage (Type: {internal_map_type}, ResKey: {resolution_key_from_tokens})"

        log.info(f"{log_prefix}: Starting.")
        log.debug(f"{log_prefix}: Input image_data shape: {input_data.image_data.shape if input_data.image_data is not None else 'None'}")
        log.debug(f"{log_prefix}: Input source_bit_depth_info: {input_data.source_bit_depth_info}")
        log.debug(f"{log_prefix}: Configured image_resolutions for saving: {input_data.image_resolutions}")
        log.debug(f"{log_prefix}: Output filename pattern tokens: {input_data.output_filename_pattern_tokens}")

        # Initialize the output object with a default failure state
        result = SaveVariantsOutput(
            saved_files_details=[],
            status="Failed",
            error_message="Initialization error"
        )

        if input_data.image_data is None or input_data.image_data.size == 0:
            result.error_message = "Input image data is None or empty."
            log.error(f"{log_prefix}: {result.error_message}")
            return result

        try:
            # --- Prepare arguments for save_image_variants ---

            # Get the filename-friendly base map type using the helper.
            # This assumes the save utility expects the friendly type. Adjust if needed.
            base_map_type_friendly = get_filename_friendly_map_type(
                internal_map_type, input_data.file_type_defs
            )
            log.debug(f"{log_prefix}: Using filename-friendly base type '{base_map_type_friendly}' for saving.")

            save_args = {
                "source_image_data": input_data.image_data,
                "base_map_type": base_map_type_friendly,  # Use the friendly type
                "source_bit_depth_info": input_data.source_bit_depth_info,
                "image_resolutions": input_data.image_resolutions,
                "file_type_defs": input_data.file_type_defs,
                "output_format_8bit": input_data.output_format_8bit,
                "output_format_16bit_primary": input_data.output_format_16bit_primary,
                "output_format_16bit_fallback": input_data.output_format_16bit_fallback,
                "png_compression_level": input_data.png_compression_level,
                "jpg_quality": input_data.jpg_quality,
                "output_filename_pattern_tokens": input_data.output_filename_pattern_tokens,
                "output_filename_pattern": input_data.output_filename_pattern,
                "resolution_threshold_for_jpg": input_data.resolution_threshold_for_jpg,  # Added
            }

            log.debug(f"{log_prefix}: Calling save_image_variants utility with args: {save_args}")
            saved_files_details: List[Dict] = isu.save_image_variants(**save_args)

            if saved_files_details:
                log.info(f"{log_prefix}: Save utility completed successfully. Saved {len(saved_files_details)} variants: {[details.get('filepath') for details in saved_files_details]}")
                result.saved_files_details = saved_files_details
                result.status = "Processed"
                result.error_message = None
            else:
                # This might not be an error; maybe no variants were configured?
                log.warning(f"{log_prefix}: Save utility returned no saved file details. This might be expected if no resolutions/formats matched.")
                result.saved_files_details = []
                result.status = "Processed (No Output)"  # Indicate processing happened but nothing was saved
                result.error_message = "Save utility reported no files saved (check configuration/resolutions)."

        except Exception as e:
            log.exception(f"{log_prefix}: Error calling or executing save_image_variants: {e}")
            result.status = "Failed"
            result.error_message = f"Save utility call failed: {e}"
            result.saved_files_details = []  # Ensure an empty list on error

        return result
67
processing/pipeline/stages/supplier_determination.py
Normal file
@@ -0,0 +1,67 @@
import logging

from .base_stage import ProcessingStage
from ..asset_context import AssetProcessingContext


class SupplierDeterminationStage(ProcessingStage):
    """
    Determines the effective supplier for an asset based on asset and source rules.
    """

    def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
        """
        Determines and validates the effective supplier for the asset.

        Args:
            context: The asset processing context.

        Returns:
            The updated asset processing context.
        """
        effective_supplier = None
        logger = logging.getLogger(__name__)  # Using a logger specific to this module
        asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"

        # 1. Check source_rule.supplier_override (highest precedence)
        if context.source_rule and context.source_rule.supplier_override:
            effective_supplier = context.source_rule.supplier_override
            logger.debug(f"Asset '{asset_name_for_log}': Supplier override from source_rule found: '{effective_supplier}'.")
        # 2. If not overridden, check source_rule.supplier_identifier
        elif context.source_rule and context.source_rule.supplier_identifier:
            effective_supplier = context.source_rule.supplier_identifier
            logger.debug(f"Asset '{asset_name_for_log}': Supplier identifier from source_rule found: '{effective_supplier}'.")

        # 3. Validation
        if not effective_supplier:
            logger.error(f"Asset '{asset_name_for_log}': No supplier defined in source_rule (override or identifier).")
            context.effective_supplier = None
            if 'status_flags' not in context:  # Ensure status_flags exists
                context.status_flags = {}
            context.status_flags['supplier_error'] = True
        # Assuming context.config_obj.suppliers is a valid way to get the list of configured suppliers.
        # This might need further investigation if errors occur here later.
        elif context.config_obj and hasattr(context.config_obj, 'suppliers') and effective_supplier not in context.config_obj.suppliers:
            logger.warning(
                f"Asset '{asset_name_for_log}': Determined supplier '{effective_supplier}' not found in global supplier configuration. "
                f"Available: {list(context.config_obj.suppliers.keys()) if context.config_obj.suppliers else 'None'}"
            )
            context.effective_supplier = None
            if 'status_flags' not in context:  # Ensure status_flags exists
                context.status_flags = {}
            context.status_flags['supplier_error'] = True
        else:
            context.effective_supplier = effective_supplier
            logger.info(f"Asset '{asset_name_for_log}': Effective supplier set to '{effective_supplier}'.")
            # Optionally clear the error flag if it was previously set and is now resolved.
            if 'supplier_error' in context.status_flags:
                del context.status_flags['supplier_error']

        # merged_image_tasks are loaded from app_settings.json into the Configuration object,
        # not from supplier-specific presets.
        # Ensure the attribute exists on the context for PrepareProcessingItemsStage,
        # which will get it from context.config_obj.
        if not hasattr(context, 'merged_image_tasks'):
            context.merged_image_tasks = []

        return context
1
processing/utils/__init__.py
Normal file
@@ -0,0 +1 @@
# This file makes the 'utils' directory a Python package.
525
processing/utils/image_processing_utils.py
Normal file
@@ -0,0 +1,525 @@
import cv2
import numpy as np
from pathlib import Path
import math
from typing import Optional, Union, List, Tuple, Dict

# --- Basic Power-of-Two Utilities ---

def is_power_of_two(n: int) -> bool:
    """Checks if a number is a power of two."""
    return (n > 0) and (n & (n - 1) == 0)

def get_nearest_pot(value: int) -> int:
    """Finds the nearest power of two to the given value."""
    if value <= 0:
        return 1  # POT must be positive; return 1 as a fallback
    if is_power_of_two(value):
        return value

    lower_pot = 1 << (value.bit_length() - 1)
    upper_pot = 1 << value.bit_length()

    if (value - lower_pot) < (upper_pot - value):
        return lower_pot
    else:
        return upper_pot

def get_nearest_power_of_two_downscale(value: int) -> int:
    """
    Finds the nearest power of two that is less than or equal to the given value.
    If the value is already a power of two, it returns the value itself.
    Returns 1 if the value is less than 1.
    """
    if value < 1:
        return 1
    if is_power_of_two(value):
        return value
    # Find the largest power of two strictly less than value,
    # unless value itself is a POT.
    # (1 << (value.bit_length() - 1)) achieves this.
    # Example: value=7 (0111, bl=3), 1<<2 = 4.
    # Example: value=8 (1000, bl=4), 1<<3 = 8.
    # Example: value=9 (1001, bl=4), 1<<3 = 8.
    return 1 << (value.bit_length() - 1)
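# Quick sanity examples for the POT helpers above (doctest-style, values traced by hand):
#     >>> is_power_of_two(64)
#     True
#     >>> get_nearest_pot(3000)                      # 2048 and 4096 bracket 3000; 2048 is closer
#     2048
#     >>> get_nearest_power_of_two_downscale(3000)   # never rounds up
#     2048
#     >>> get_nearest_power_of_two_downscale(2048)
#     2048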

# --- Dimension Calculation ---

def calculate_target_dimensions(
    original_width: int,
    original_height: int,
    target_width: Optional[int] = None,
    target_height: Optional[int] = None,
    resize_mode: str = "fit",  # e.g., "fit", "stretch", "max_dim_pot"
    ensure_pot: bool = False,
    allow_upscale: bool = False,
    target_max_dim_for_pot_mode: Optional[int] = None  # Specific to "max_dim_pot"
) -> Tuple[int, int]:
    """
    Calculates target dimensions based on various modes and constraints.

    Args:
        original_width: Original width of the image.
        original_height: Original height of the image.
        target_width: Desired target width.
        target_height: Desired target height.
        resize_mode:
            - "fit": Scales to fit within target_width/target_height, maintaining aspect ratio.
              Requires at least one of target_width or target_height.
            - "stretch": Scales to exactly target_width and target_height, ignoring aspect ratio.
              Requires both target_width and target_height.
            - "max_dim_pot": Scales to fit target_max_dim_for_pot_mode while maintaining aspect ratio,
              then finds the nearest POT for each dimension. Requires target_max_dim_for_pot_mode.
        ensure_pot: If True, final dimensions will be adjusted to the nearest power of two.
        allow_upscale: If False, dimensions will not exceed original dimensions unless ensure_pot forces it.
        target_max_dim_for_pot_mode: Max dimension to use when resize_mode is "max_dim_pot".

    Returns:
        A tuple (new_width, new_height).
    """
    if original_width <= 0 or original_height <= 0:
        # Fallback for invalid original dimensions
        fallback_dim = 1
        if ensure_pot:
            if target_width and target_height:
                fallback_dim = get_nearest_pot(max(target_width, target_height, 1))
            elif target_width:
                fallback_dim = get_nearest_pot(target_width)
            elif target_height:
                fallback_dim = get_nearest_pot(target_height)
            elif target_max_dim_for_pot_mode:
                fallback_dim = get_nearest_pot(target_max_dim_for_pot_mode)
            else:  # Default POT if no target given
                fallback_dim = 256
            return (fallback_dim, fallback_dim)
        return (target_width or 1, target_height or 1)

    w, h = original_width, original_height

    if resize_mode == "max_dim_pot":
        if target_max_dim_for_pot_mode is None:
            raise ValueError("target_max_dim_for_pot_mode must be provided for 'max_dim_pot' resize_mode.")

        # Logic adapted from the old processing_engine.calculate_target_dimensions
        ratio = w / h
        if ratio > 1:  # Width is dominant
            scaled_w = target_max_dim_for_pot_mode
            scaled_h = max(1, round(scaled_w / ratio))
        else:  # Height is dominant or square
            scaled_h = target_max_dim_for_pot_mode
            scaled_w = max(1, round(scaled_h * ratio))

        # The upscale check for this mode is implicitly handled by target_max_dim.
        # If ensure_pot is true (as it was in the original logic), it's applied here.
        # For this mode, ensure_pot is effectively always true for the final step.
        w = get_nearest_pot(scaled_w)
        h = get_nearest_pot(scaled_h)
        return int(w), int(h)

    elif resize_mode == "fit":
        if target_width is None and target_height is None:
            raise ValueError("At least one of target_width or target_height must be provided for 'fit' mode.")

        if target_width and target_height:
            ratio_orig = w / h
            ratio_target = target_width / target_height
            if ratio_orig > ratio_target:  # Original is wider than the target aspect
                w_new = target_width
                h_new = max(1, round(w_new / ratio_orig))
            else:  # Original is taller or the same aspect
                h_new = target_height
                w_new = max(1, round(h_new * ratio_orig))
        elif target_width:
            w_new = target_width
            h_new = max(1, round(w_new / (w / h)))
        else:  # target_height is not None
            h_new = target_height
            w_new = max(1, round(h_new * (w / h)))
        w, h = w_new, h_new

    elif resize_mode == "stretch":
        if target_width is None or target_height is None:
            raise ValueError("Both target_width and target_height must be provided for 'stretch' mode.")
        w, h = target_width, target_height

    else:
        raise ValueError(f"Unsupported resize_mode: {resize_mode}")

    if not allow_upscale:
        if w > original_width: w = original_width
        if h > original_height: h = original_height

    if ensure_pot:
        w = get_nearest_pot(w)
        h = get_nearest_pot(h)
        # Re-check upscale if the POT adjustment made it larger than the original and upscaling is not allowed
        if not allow_upscale:
            if w > original_width: w = get_nearest_pot(original_width)  # Get the closest POT to the original
            if h > original_height: h = get_nearest_pot(original_height)

    return int(max(1, w)), int(max(1, h))
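# Illustrative calls for calculate_target_dimensions (a sketch; the values follow from the
# logic above rather than from any project configuration):
#     >>> calculate_target_dimensions(3000, 2000, resize_mode="max_dim_pot",
#     ...                             target_max_dim_for_pot_mode=2048)
#     (2048, 1024)
#     >>> calculate_target_dimensions(4000, 2000, target_width=1024, target_height=1024,
#     ...                             resize_mode="fit")
#     (1024, 512)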

# --- Image Statistics ---

def get_image_bit_depth(image_path_str: str) -> Optional[int]:
    """
    Determines the bit depth of an image file.
    """
    try:
        # Use IMREAD_UNCHANGED to preserve the original bit depth
        img = cv2.imread(image_path_str, cv2.IMREAD_UNCHANGED)
        if img is None:
            # logger.error(f"Failed to read image for bit depth: {image_path_str}")  # Use print for utils
            print(f"Warning: Failed to read image for bit depth: {image_path_str}")
            return None

        dtype_to_bit_depth = {
            np.dtype('uint8'): 8,
            np.dtype('uint16'): 16,
            np.dtype('float32'): 32,  # Typically for EXR etc.
            np.dtype('int8'): 8,      # Unlikely for images but good to have
            np.dtype('int16'): 16,    # Unlikely
            # Add other dtypes if necessary
        }
        bit_depth = dtype_to_bit_depth.get(img.dtype)
        if bit_depth is None:
            # logger.warning(f"Unknown dtype {img.dtype} for image {image_path_str}, cannot determine bit depth.")  # Use print for utils
            print(f"Warning: Unknown dtype {img.dtype} for image {image_path_str}, cannot determine bit depth.")
            pass  # Return None
        return bit_depth
    except Exception as e:
        # logger.error(f"Error getting bit depth for {image_path_str}: {e}")  # Use print for utils
        print(f"Error getting bit depth for {image_path_str}: {e}")
        return None

def get_image_channels(image_data: np.ndarray) -> Optional[int]:
    """Determines the number of channels in an image."""
    if image_data is None:
        return None
    if len(image_data.shape) == 2:  # Grayscale
        return 1
    elif len(image_data.shape) == 3:  # Color
        return image_data.shape[2]
    return None  # Unknown shape

def calculate_image_stats(image_data: np.ndarray) -> Optional[Dict]:
    """
    Calculates min, max, mean for a given numpy image array.
    Handles grayscale and multi-channel images. Converts to float64 for calculation.
    Normalizes uint8/uint16 data to the 0-1 range before calculating stats.
    """
    if image_data is None:
        return None
    try:
        data_float = image_data.astype(np.float64)

        if image_data.dtype == np.uint16:
            data_float /= 65535.0
        elif image_data.dtype == np.uint8:
            data_float /= 255.0

        stats = {}
        if len(data_float.shape) == 2:  # Grayscale (H, W)
            stats["min"] = float(np.min(data_float))
            stats["max"] = float(np.max(data_float))
            stats["mean"] = float(np.mean(data_float))
            stats["median"] = float(np.median(data_float))
        elif len(data_float.shape) == 3:  # Color (H, W, C)
            stats["min"] = [float(v) for v in np.min(data_float, axis=(0, 1))]
            stats["max"] = [float(v) for v in np.max(data_float, axis=(0, 1))]
            stats["mean"] = [float(v) for v in np.mean(data_float, axis=(0, 1))]
            stats["median"] = [float(v) for v in np.median(data_float, axis=(0, 1))]
        else:
            return None  # Unsupported shape
        return stats
    except Exception:
        return {"error": "Error calculating image stats"}

# --- Aspect Ratio String ---

def normalize_aspect_ratio_change(original_width: int, original_height: int, resized_width: int, resized_height: int, decimals: int = 2) -> str:
    """
    Calculates the aspect ratio change string (e.g., "EVEN", "X133").
    """
    if original_width <= 0 or original_height <= 0:
        return "InvalidInput"
    if resized_width <= 0 or resized_height <= 0:
        return "InvalidResize"

    width_change_percentage = ((resized_width - original_width) / original_width) * 100
    height_change_percentage = ((resized_height - original_height) / original_height) * 100

    normalized_width_change = width_change_percentage / 100
    normalized_height_change = height_change_percentage / 100

    normalized_width_change = min(max(normalized_width_change + 1, 0), 2)
    normalized_height_change = min(max(normalized_height_change + 1, 0), 2)

    epsilon = 1e-9
    if abs(normalized_width_change) < epsilon and abs(normalized_height_change) < epsilon:
        closest_value_to_one = 1.0
    elif abs(normalized_width_change) < epsilon:
        closest_value_to_one = abs(normalized_height_change)
    elif abs(normalized_height_change) < epsilon:
        closest_value_to_one = abs(normalized_width_change)
    else:
        closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change))

    scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one

    scaled_normalized_width_change = scale_factor * normalized_width_change
    scaled_normalized_height_change = scale_factor * normalized_height_change

    output_width = round(scaled_normalized_width_change, decimals)
    output_height = round(scaled_normalized_height_change, decimals)

    if abs(output_width - 1.0) < epsilon: output_width = 1
    if abs(output_height - 1.0) < epsilon: output_height = 1

    # Helper to format the number part
    def format_value(val, dec):
        # Multiply by 10^decimals and convert to int so trailing zeros stay in effect,
        # e.g. val=1.1, dec=2 -> 1.1 * 100 = 110
        # e.g. val=1.0, dec=2 -> 1.0 * 100 = 100 (though this may become "1" if it's exactly 1.0 before this).
        # The existing logic already handles output_width/height being 1.0 to produce "EVEN" or skip a component.
        # This formatting is for when output_width/height is NOT 1.0.
        return str(int(round(val * (10 ** dec))))

    if abs(output_width - output_height) < epsilon:  # Handles original square or aspect maintained
        output = "EVEN"
    elif output_width != 1 and abs(output_height - 1.0) < epsilon:  # Width changed, height maintained relative to width
        output = f"X{format_value(output_width, decimals)}"
    elif output_height != 1 and abs(output_width - 1.0) < epsilon:  # Height changed, width maintained relative to height
        output = f"Y{format_value(output_height, decimals)}"
    else:  # Both changed relative to each other
        output = f"X{format_value(output_width, decimals)}Y{format_value(output_height, decimals)}"
    return output
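# Hand-traced illustrative outputs of normalize_aspect_ratio_change as implemented above
# (not taken from project data):
#     >>> normalize_aspect_ratio_change(2048, 2048, 1024, 1024)   # uniform downscale
#     'EVEN'
#     >>> normalize_aspect_ratio_change(1024, 1024, 2048, 1024)   # width doubled, height kept
#     'X200'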

# --- Image Loading, Conversion, Resizing ---

def load_image(image_path: Union[str, Path], read_flag: int = cv2.IMREAD_UNCHANGED) -> Optional[np.ndarray]:
    """Loads an image from the specified path. Converts BGR/BGRA to RGB/RGBA if color."""
    try:
        img = cv2.imread(str(image_path), read_flag)
        if img is None:
            # print(f"Warning: Failed to load image: {image_path}")  # Optional: for debugging utils
            return None

        # Ensure RGB/RGBA for color images
        if len(img.shape) == 3:
            if img.shape[2] == 4:  # BGRA from OpenCV
                img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
            elif img.shape[2] == 3:  # BGR from OpenCV
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
    except Exception:  # as e:
        # print(f"Error loading image {image_path}: {e}")  # Optional: for debugging utils
        return None

def convert_bgr_to_rgb(image: np.ndarray) -> np.ndarray:
    """Converts an image from BGR/BGRA to RGB/RGBA color space."""
    if image is None or len(image.shape) < 3:
        return image  # Return as is if not a color image or None

    if image.shape[2] == 4:  # BGRA
        return cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)  # Keep alpha, convert to RGBA
    elif image.shape[2] == 3:  # BGR
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image  # Return as is if not 3 or 4 channels

def convert_rgb_to_bgr(image: np.ndarray) -> np.ndarray:
    """Converts an image from RGB/RGBA to BGR/BGRA color space."""
    if image is None or len(image.shape) < 3:
        return image  # Return as is if not a color image or None

    if image.shape[2] == 4:  # RGBA
        return cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA)
    elif image.shape[2] == 3:  # RGB
        return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image  # Return as is if not 3 or 4 channels


def resize_image(image: np.ndarray, target_width: int, target_height: int, interpolation: Optional[int] = None) -> np.ndarray:
    """Resizes an image to target_width and target_height."""
    if image is None:
        raise ValueError("Cannot resize a None image.")
    if target_width <= 0 or target_height <= 0:
        raise ValueError("Target width and height must be positive.")

    original_height, original_width = image.shape[:2]

    if interpolation is None:
        # Default interpolation: Lanczos for downscaling, Cubic for upscaling/same size
        if (target_width * target_height) < (original_width * original_height):
            interpolation = cv2.INTER_LANCZOS4
        else:
            interpolation = cv2.INTER_CUBIC

    return cv2.resize(image, (target_width, target_height), interpolation=interpolation)

# --- Image Saving ---

def save_image(
    image_path: Union[str, Path],
    image_data: np.ndarray,
    output_format: Optional[str] = None,  # e.g. "png", "jpg", "exr"
    output_dtype_target: Optional[np.dtype] = None,  # e.g. np.uint8, np.uint16, np.float16
    params: Optional[List[int]] = None,
    convert_to_bgr_before_save: bool = True  # True for most formats except EXR
) -> bool:
    """
    Saves image data to a file. Handles data type and color space conversions.

    Args:
        image_path: Path to save the image.
        image_data: NumPy array of the image.
        output_format: Desired output format (e.g., 'png', 'jpg'). If None, derived from the extension.
        output_dtype_target: Target NumPy dtype for saving (e.g., np.uint8, np.uint16).
            If None, tries to use image_data.dtype or a sensible default.
        params: OpenCV imwrite parameters (e.g., [cv2.IMWRITE_JPEG_QUALITY, 90]).
        convert_to_bgr_before_save: If True and the image is 3-channel, converts RGB to BGR.
            Set to False for formats like EXR that expect RGB.

    Returns:
        True if saving was successful, False otherwise.
    """
    if image_data is None:
        return False

    img_to_save = image_data.copy()
    path_obj = Path(image_path)
    path_obj.parent.mkdir(parents=True, exist_ok=True)

    # 1. Data Type Conversion
    if output_dtype_target is not None:
        if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:
            if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)
            elif img_to_save.dtype in [np.float16, np.float32, np.float64]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)
            else: img_to_save = img_to_save.astype(np.uint8)
        elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:
            if img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0 * 65535.0).astype(np.uint16)  # More accurate
            elif img_to_save.dtype in [np.float16, np.float32, np.float64]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)
            else: img_to_save = img_to_save.astype(np.uint16)
        elif output_dtype_target == np.float16 and img_to_save.dtype != np.float16:
            if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)
            elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)
            elif img_to_save.dtype in [np.float32, np.float64]: img_to_save = img_to_save.astype(np.float16)
            # else: cannot convert to float16 easily
        elif output_dtype_target == np.float32 and img_to_save.dtype != np.float32:
            if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0)
            elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0)
            elif img_to_save.dtype == np.float16: img_to_save = img_to_save.astype(np.float32)

    # 2. Color Space Conversion (Internal RGB/RGBA -> BGR/BGRA for OpenCV)
    # The input `image_data` is assumed to be in RGB/RGBA format (due to the `load_image` changes).
    # OpenCV's `imwrite` typically expects BGR/BGRA for formats like PNG, JPG.
    # The EXR format usually expects RGB/RGBA.
    # The `convert_to_bgr_before_save` flag controls this behavior.
    current_format = output_format if output_format else path_obj.suffix.lower().lstrip('.')

    if convert_to_bgr_before_save and current_format != 'exr':
        # If the image is 3-channel (RGB) or 4-channel (RGBA), convert to BGR/BGRA.
        if len(img_to_save.shape) == 3 and (img_to_save.shape[2] == 3 or img_to_save.shape[2] == 4):
            img_to_save = convert_rgb_to_bgr(img_to_save)  # Handles RGB->BGR and RGBA->BGRA
    # If `convert_to_bgr_before_save` is False or the format is 'exr',
    # the image (assumed RGB/RGBA) is saved as is.

    # 3. Save Image
    try:
        if params:
            cv2.imwrite(str(path_obj), img_to_save, params)
        else:
            cv2.imwrite(str(path_obj), img_to_save)
        return True
    except Exception:  # as e:
        # print(f"Error saving image {path_obj}: {e}")  # Optional: for debugging utils
        return False
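# Minimal usage sketch for save_image (the path and array are illustrative; the cv2 flag
# shown is the standard OpenCV PNG compression parameter):
#     rgb_u8 = np.zeros((256, 256, 3), dtype=np.uint8)
#     ok = save_image("output/demo_COL_256.png", rgb_u8,
#                     output_format="png", output_dtype_target=np.uint8,
#                     params=[cv2.IMWRITE_PNG_COMPRESSION, 3])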

# --- Common Map Transformations ---

import re
import logging

ipu_log = logging.getLogger(__name__)

def apply_common_map_transformations(
    image_data: np.ndarray,
    processing_map_type: str,  # The potentially suffixed internal type
    invert_normal_green: bool,
    file_type_definitions: Dict[str, Dict],
    log_prefix: str
) -> Tuple[np.ndarray, str, List[str]]:
    """
    Applies common in-memory transformations (Gloss-to-Rough, Normal Green Invert).
    Returns the potentially transformed image data, a potentially updated map type, and notes.
    """
    transformation_notes = []
    current_image_data = image_data  # Start with the original data
    updated_processing_map_type = processing_map_type  # Start with the original type

    # Gloss-to-Rough
    # Check if the base type is Gloss (before the suffix)
    base_map_type_match = re.match(r"(MAP_GLOSS)", processing_map_type)
    if base_map_type_match:
        ipu_log.info(f"{log_prefix}: Applying Gloss-to-Rough conversion.")
        inversion_succeeded = False
        if np.issubdtype(current_image_data.dtype, np.floating):
            current_image_data = 1.0 - current_image_data
            current_image_data = np.clip(current_image_data, 0.0, 1.0)
            ipu_log.debug(f"{log_prefix}: Inverted float image data for Gloss->Rough.")
            inversion_succeeded = True
        elif np.issubdtype(current_image_data.dtype, np.integer):
            max_val = np.iinfo(current_image_data.dtype).max
            current_image_data = max_val - current_image_data
            ipu_log.debug(f"{log_prefix}: Inverted integer image data (max_val: {max_val}) for Gloss->Rough.")
            inversion_succeeded = True
        else:
            ipu_log.error(f"{log_prefix}: Unsupported image data type {current_image_data.dtype} for GLOSS map. Cannot invert.")
            transformation_notes.append("Gloss-to-Rough FAILED (unsupported dtype)")

        if inversion_succeeded:
            # Update the type string itself (e.g., MAP_GLOSS-1 -> MAP_ROUGH-1)
            updated_processing_map_type = processing_map_type.replace("GLOSS", "ROUGH")
            ipu_log.info(f"{log_prefix}: Map type updated: '{processing_map_type}' -> '{updated_processing_map_type}'")
            transformation_notes.append("Gloss-to-Rough applied")

    # Normal Green Invert
    # Check if the base type is Normal (before the suffix)
    base_map_type_match_nrm = re.match(r"(MAP_NRM)", processing_map_type)
    if base_map_type_match_nrm and invert_normal_green:
        ipu_log.info(f"{log_prefix}: Applying Normal Map Green Channel Inversion (Global Setting).")
        current_image_data = invert_normal_map_green_channel(current_image_data)
        transformation_notes.append("Normal Green Inverted (Global)")

    return current_image_data, updated_processing_map_type, transformation_notes
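# Illustrative sketch of the transformation above: a 16-bit gloss map comes back inverted
# and renamed (the variable names here are hypothetical):
#     data, map_type, notes = apply_common_map_transformations(
#         gloss_u16, "MAP_GLOSS-1", invert_normal_green=False,
#         file_type_definitions={}, log_prefix="demo")
#     # map_type == "MAP_ROUGH-1", notes == ["Gloss-to-Rough applied"]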
|
|
||||||
|
# --- Normal Map Utilities ---
|
||||||
|
|
||||||
|
def invert_normal_map_green_channel(normal_map: np.ndarray) -> np.ndarray:
|
||||||
|
"""
|
||||||
|
Inverts the green channel of a normal map.
|
||||||
|
Assumes the normal map is in RGB or RGBA format (channel order R, G, B, A).
|
||||||
|
"""
|
||||||
|
if normal_map is None or len(normal_map.shape) < 3 or normal_map.shape[2] < 3:
|
||||||
|
# Not a valid color image with at least 3 channels
|
||||||
|
return normal_map
|
||||||
|
|
||||||
|
# Ensure data is mutable
|
||||||
|
inverted_map = normal_map.copy()
|
||||||
|
|
||||||
|
# Invert the green channel (index 1)
|
||||||
|
# Handle different data types
|
||||||
|
if np.issubdtype(inverted_map.dtype, np.floating):
|
||||||
|
inverted_map[:, :, 1] = 1.0 - inverted_map[:, :, 1]
|
||||||
|
elif np.issubdtype(inverted_map.dtype, np.integer):
|
||||||
|
max_val = np.iinfo(inverted_map.dtype).max
|
||||||
|
inverted_map[:, :, 1] = max_val - inverted_map[:, :, 1]
|
||||||
|
else:
|
||||||
|
# Unsupported dtype, return original
|
||||||
|
print(f"Warning: Unsupported dtype {inverted_map.dtype} for normal map green channel inversion.")
|
||||||
|
return normal_map
|
||||||
|
|
||||||
|
return inverted_map
|
||||||
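For reference, a minimal in-memory usage sketch of the two helpers above; the map-type string, array values, and empty file-type definition dict are illustrative assumptions, not values taken from the project's configuration.

import numpy as np

# Hypothetical 16-bit gloss map; "MAP_GLOSS-1" and the empty definitions dict are assumptions.
gloss = np.full((4, 4), 60000, dtype=np.uint16)
data, map_type, notes = apply_common_map_transformations(
    image_data=gloss,
    processing_map_type="MAP_GLOSS-1",
    invert_normal_green=False,
    file_type_definitions={},
    log_prefix="Demo",
)
# data now holds 65535 - 60000 everywhere, map_type == "MAP_ROUGH-1",
# and notes contains "Gloss-to-Rough applied".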
297  processing/utils/image_saving_utils.py  Normal file
@@ -0,0 +1,297 @@
import logging
import cv2
import numpy as np
from pathlib import Path
from typing import List, Dict, Any, Tuple, Optional

# Potentially import ipu from ...utils import image_processing_utils as ipu
# Assuming ipu is available in the same utils directory or parent
try:
    from . import image_processing_utils as ipu
except ImportError:
    # Fallback for different import structures if needed, adjust based on actual project structure
    # For this project structure, the relative import should work.
    logging.warning("Could not import image_processing_utils using relative path. Attempting absolute import.")
    try:
        from processing.utils import image_processing_utils as ipu
    except ImportError:
        logging.error("Could not import image_processing_utils.")
        ipu = None  # Handle case where ipu is not available

logger = logging.getLogger(__name__)


def save_image_variants(
    source_image_data: np.ndarray,
    base_map_type: str,  # Filename-friendly map type
    source_bit_depth_info: List[Optional[int]],
    image_resolutions: Dict[str, int],
    file_type_defs: Dict[str, Dict[str, Any]],
    output_format_8bit: str,
    output_format_16bit_primary: str,
    output_format_16bit_fallback: str,
    png_compression_level: int,
    jpg_quality: int,
    output_filename_pattern_tokens: Dict[str, Any],  # Must include 'output_base_directory': Path and 'asset_name': str
    output_filename_pattern: str,
    resolution_threshold_for_jpg: Optional[int] = None,  # Added
    # Consider adding ipu or relevant parts of it if not importing globally
) -> List[Dict[str, Any]]:
    """
    Centralizes image saving logic, generating and saving various resolution variants
    according to configuration.

    Args:
        source_image_data (np.ndarray): High-res image data (in memory, potentially transformed).
        base_map_type (str): Final map type (e.g., "COL", "ROUGH", "NORMAL", "MAP_NRMRGH").
            This is the filename-friendly map type.
        source_bit_depth_info (List[Optional[int]]): List of original source bit depth(s)
            (e.g., [8], [16], [8, 16]). Can contain None.
        image_resolutions (Dict[str, int]): Dictionary mapping resolution keys (e.g., "4K")
            to max dimensions (e.g., 4096).
        file_type_defs (Dict[str, Dict[str, Any]]): Dictionary defining properties for map types,
            including 'bit_depth_rule'.
        output_format_8bit (str): File extension for 8-bit output (e.g., "jpg", "png").
        output_format_16bit_primary (str): Primary file extension for 16-bit output (e.g., "png", "tif").
        output_format_16bit_fallback (str): Fallback file extension for 16-bit output.
        png_compression_level (int): Compression level for PNG output (0-9).
        jpg_quality (int): Quality level for JPG output (0-100).
        output_filename_pattern_tokens (Dict[str, Any]): Dictionary of tokens for filename
            pattern replacement. Must include 'output_base_directory' (Path) and 'asset_name' (str).
        output_filename_pattern (str): Pattern string for generating output filenames
            (e.g., "[assetname]_[maptype]_[resolution].[ext]").

    Returns:
        List[Dict[str, Any]]: A list of dictionaries, each containing details about a saved file.
            Example: [{'path': str, 'resolution_key': str, 'format': str,
                       'bit_depth': int, 'dimensions': (w,h)}, ...]
    """
    if ipu is None:
        logger.error("image_processing_utils is not available. Cannot save images.")
        return []

    saved_file_details = []
    source_h, source_w = source_image_data.shape[:2]
    source_max_dim = max(source_h, source_w)

    # 1. Use provided configuration inputs (already available as function arguments)
    logger.info(f"SaveImageVariants: Starting for map type: {base_map_type}. Source shape: {source_image_data.shape}, Source bit depths: {source_bit_depth_info}")
    logger.debug(f"SaveImageVariants: Resolutions: {image_resolutions}, File Type Defs: {file_type_defs.keys()}, Output Formats: 8bit={output_format_8bit}, 16bit_pri={output_format_16bit_primary}, 16bit_fall={output_format_16bit_fallback}")
    logger.debug(f"SaveImageVariants: PNG Comp: {png_compression_level}, JPG Qual: {jpg_quality}")
    logger.debug(f"SaveImageVariants: Output Tokens: {output_filename_pattern_tokens}, Output Pattern: {output_filename_pattern}")
    logger.debug(f"SaveImageVariants: Received resolution_threshold_for_jpg: {resolution_threshold_for_jpg}")  # Log received threshold

    # 2. Determine Target Bit Depth
    target_bit_depth = 8  # Default
    bit_depth_rule = file_type_defs.get(base_map_type, {}).get('bit_depth_rule', 'force_8bit')
    if bit_depth_rule not in ['force_8bit', 'respect_inputs']:
        logger.warning(f"Unknown bit_depth_rule '{bit_depth_rule}' for map type '{base_map_type}'. Defaulting to 'force_8bit'.")
        bit_depth_rule = 'force_8bit'

    if bit_depth_rule == 'respect_inputs':
        # Check if any source bit depth is > 8, ignoring None
        if any(depth is not None and depth > 8 for depth in source_bit_depth_info):
            target_bit_depth = 16
        else:
            target_bit_depth = 8
        logger.info(f"Bit depth rule 'respect_inputs' applied. Source bit depths: {source_bit_depth_info}. Target bit depth: {target_bit_depth}")
    else:  # force_8bit
        target_bit_depth = 8
        logger.info(f"Bit depth rule 'force_8bit' applied. Target bit depth: {target_bit_depth}")

    # 3. Determine Output File Format(s)
    if target_bit_depth == 8:
        output_ext = output_format_8bit.lstrip('.').lower()
    elif target_bit_depth == 16:
        # Prioritize primary, fallback to fallback if primary is not supported/desired
        # For now, just use primary. More complex logic might be needed later.
        output_ext = output_format_16bit_primary.lstrip('.').lower()
        # Basic fallback logic example (can be expanded)
        if output_ext not in ['png', 'tif']:  # Assuming common 16-bit formats
            output_ext = output_format_16bit_fallback.lstrip('.').lower()
            logger.warning(f"Primary 16-bit format '{output_format_16bit_primary}' might not be suitable. Using fallback '{output_format_16bit_fallback}'.")
    else:
        logger.error(f"Unsupported target bit depth: {target_bit_depth}. Defaulting to 8-bit format.")
        output_ext = output_format_8bit.lstrip('.').lower()

    current_output_ext = output_ext  # Store the initial extension based on bit depth

    logger.info(f"SaveImageVariants: Determined target bit depth: {target_bit_depth}, Initial output format: {current_output_ext} for map type {base_map_type}")

    # 4. Generate and Save Resolution Variants
    # Sort resolutions by max dimension descending
    sorted_resolutions = sorted(image_resolutions.items(), key=lambda item: item[1], reverse=True)

    for res_key, res_max_dim in sorted_resolutions:
        logger.info(f"SaveImageVariants: Processing variant {res_key} ({res_max_dim}px) for {base_map_type}")

        # --- Prevent Upscaling ---
        # Skip this resolution variant if its target dimension is larger than the source image's largest dimension.
        if res_max_dim > source_max_dim:
            logger.info(f"SaveImageVariants: Skipping variant {res_key} ({res_max_dim}px) for {base_map_type} because target resolution is larger than source ({source_max_dim}px).")
            continue  # Skip to the next resolution

        # Calculate target dimensions for valid variants (equal or smaller than source)
        if source_max_dim == res_max_dim:
            # Use source dimensions if target is equal
            target_w_res, target_h_res = source_w, source_h
            logger.info(f"SaveImageVariants: Using source resolution ({source_w}x{source_h}) for {res_key} variant of {base_map_type} as target matches source.")
        else:  # Downscale (source_max_dim > res_max_dim)
            # Downscale, maintaining aspect ratio
            aspect_ratio = source_w / source_h
            if source_w >= source_h:  # Use >= to handle square images correctly
                target_w_res = res_max_dim
                target_h_res = max(1, int(res_max_dim / aspect_ratio))  # Ensure height is at least 1
            else:
                target_h_res = res_max_dim
                target_w_res = max(1, int(res_max_dim * aspect_ratio))  # Ensure width is at least 1
            logger.info(f"SaveImageVariants: Calculated downscale for {base_map_type} {res_key}: from ({source_w}x{source_h}) to ({target_w_res}x{target_h_res})")

        # Resize source_image_data (only if necessary)
        if (target_w_res, target_h_res) == (source_w, source_h):
            # No resize needed if dimensions match
            variant_data = source_image_data.copy()  # Copy to avoid modifying original if needed later
            logger.debug(f"SaveImageVariants: No resize needed for {base_map_type} {res_key}, using copy of source data.")
        else:
            # Perform resize only if dimensions differ (i.e., downscaling)
            interpolation_method = cv2.INTER_AREA  # Good for downscaling
            try:
                variant_data = ipu.resize_image(source_image_data, target_w_res, target_h_res, interpolation=interpolation_method)
                if variant_data is None:  # Check if resize failed
                    raise ValueError("ipu.resize_image returned None")
                logger.debug(f"SaveImageVariants: Resized variant data shape for {base_map_type} {res_key}: {variant_data.shape}")
            except Exception as e:
                logger.error(f"SaveImageVariants: Error resizing image for {base_map_type} {res_key} variant: {e}")
                continue  # Skip this variant if resizing fails

        # Filename Construction
        current_tokens = output_filename_pattern_tokens.copy()
        current_tokens['maptype'] = base_map_type
        current_tokens['resolution'] = res_key

        # Determine final extension for this variant, considering JPG threshold
        final_variant_ext = current_output_ext

        # --- Start JPG Threshold Logging ---
        logger.debug(f"SaveImageVariants: JPG Threshold Check for {base_map_type} {res_key}:")
        logger.debug(f"  - target_bit_depth: {target_bit_depth}")
        logger.debug(f"  - resolution_threshold_for_jpg: {resolution_threshold_for_jpg}")
        logger.debug(f"  - target_w_res: {target_w_res}, target_h_res: {target_h_res}")
        logger.debug(f"  - max(target_w_res, target_h_res): {max(target_w_res, target_h_res)}")
        logger.debug(f"  - current_output_ext: {current_output_ext}")

        cond_bit_depth = target_bit_depth == 8
        cond_threshold_not_none = resolution_threshold_for_jpg is not None
        cond_res_exceeded = False
        if cond_threshold_not_none:  # Avoid comparison if threshold is None
            cond_res_exceeded = max(target_w_res, target_h_res) > resolution_threshold_for_jpg
        cond_is_png = current_output_ext == 'png'

        logger.debug(f"  - Condition (target_bit_depth == 8): {cond_bit_depth}")
        logger.debug(f"  - Condition (resolution_threshold_for_jpg is not None): {cond_threshold_not_none}")
        logger.debug(f"  - Condition (max(res) > threshold): {cond_res_exceeded}")
        logger.debug(f"  - Condition (current_output_ext == 'png'): {cond_is_png}")
        # --- End JPG Threshold Logging ---

        if cond_bit_depth and cond_threshold_not_none and cond_res_exceeded and cond_is_png:
            final_variant_ext = 'jpg'
            logger.info(f"SaveImageVariants: Overriding 8-bit PNG to JPG for {base_map_type} {res_key} due to resolution {max(target_w_res, target_h_res)}px > threshold {resolution_threshold_for_jpg}px.")

        current_tokens['ext'] = final_variant_ext

        try:
            # Replace placeholders in the pattern
            filename = output_filename_pattern
            for token, value in current_tokens.items():
                # Ensure value is string for replacement, handle Path objects later
                filename = filename.replace(f"[{token}]", str(value))

            # Construct full output path
            output_base_directory = current_tokens.get('output_base_directory')
            if not isinstance(output_base_directory, Path):
                logger.error(f"'output_base_directory' token is missing or not a Path object: {output_base_directory}. Cannot save file.")
                continue  # Skip this variant

            output_path = output_base_directory / filename
            logger.info(f"SaveImageVariants: Constructed output path for {base_map_type} {res_key}: {output_path}")

            # Ensure parent directory exists
            output_path.parent.mkdir(parents=True, exist_ok=True)
            logger.debug(f"SaveImageVariants: Ensured directory exists for {base_map_type} {res_key}: {output_path.parent}")

        except Exception as e:
            logger.error(f"SaveImageVariants: Error constructing filepath for {base_map_type} {res_key} variant: {e}")
            continue  # Skip this variant if path construction fails

        # Prepare Save Parameters
        save_params_cv2 = []
        if final_variant_ext == 'jpg':  # Check against final_variant_ext
            save_params_cv2.append(cv2.IMWRITE_JPEG_QUALITY)
            save_params_cv2.append(jpg_quality)
            logger.debug(f"SaveImageVariants: Using JPG quality: {jpg_quality} for {base_map_type} {res_key}")
        elif final_variant_ext == 'png':  # Check against final_variant_ext
            save_params_cv2.append(cv2.IMWRITE_PNG_COMPRESSION)
            save_params_cv2.append(png_compression_level)
            logger.debug(f"SaveImageVariants: Using PNG compression level: {png_compression_level} for {base_map_type} {res_key}")
        # Add other format specific parameters if needed (e.g., TIFF compression)

        # Bit Depth Conversion is handled by ipu.save_image via output_dtype_target
        image_data_for_save = variant_data  # Use the resized variant data directly

        # Determine the target dtype for ipu.save_image
        output_dtype_for_save: Optional[np.dtype] = None
        if target_bit_depth == 8:
            output_dtype_for_save = np.uint8
        elif target_bit_depth == 16:
            output_dtype_for_save = np.uint16
        # Add other target bit depths like float16/float32 if necessary
        # elif target_bit_depth == 32:  # Assuming float32 for EXR etc.
        #     output_dtype_for_save = np.float32

        # Saving
        try:
            # ipu.save_image is expected to handle the actual cv2.imwrite call
            logger.debug(f"SaveImageVariants: Attempting to save {base_map_type} {res_key} to {output_path} with params {save_params_cv2}, target_dtype: {output_dtype_for_save}")
            success = ipu.save_image(
                str(output_path),
                image_data_for_save,
                output_dtype_target=output_dtype_for_save,  # Pass the target dtype
                params=save_params_cv2
            )
            if success:
                logger.info(f"SaveImageVariants: Successfully saved {base_map_type} {res_key} variant to {output_path}")
                # Collect details for the returned list
                saved_file_details.append({
                    'path': str(output_path),
                    'resolution_key': res_key,
                    'format': final_variant_ext,  # Log the actual saved format
                    'bit_depth': target_bit_depth,
                    'dimensions': (target_w_res, target_h_res)
                })
            else:
                logger.error(f"SaveImageVariants: Failed to save {base_map_type} {res_key} variant to {output_path} (ipu.save_image returned False)")

        except Exception as e:
            logger.error(f"SaveImageVariants: Error during ipu.save_image for {base_map_type} {res_key} variant to {output_path}: {e}", exc_info=True)
            # Continue to next variant even if one fails

        # Discard in-memory variant after saving (Python's garbage collection handles this)
        del variant_data
        del image_data_for_save

    # 5. Return List of Saved File Details
    logger.info(f"Finished saving variants for map type: {base_map_type}. Saved {len(saved_file_details)} variants.")
    return saved_file_details


# Optional Helper Functions (can be added here if needed)
# def _determine_target_bit_depth(...): ...
# def _determine_output_format(...): ...
# def _construct_variant_filepath(...): ...
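A minimal calling sketch for save_image_variants; the resolution keys, bit-depth rule, output pattern, and all paths below are illustrative assumptions rather than values from the shipped presets.

from pathlib import Path
import numpy as np

details = save_image_variants(
    source_image_data=np.zeros((4096, 4096, 3), dtype=np.uint16),
    base_map_type="ROUGH",
    source_bit_depth_info=[16],
    image_resolutions={"4K": 4096, "2K": 2048, "1K": 1024},
    file_type_defs={"ROUGH": {"bit_depth_rule": "respect_inputs"}},
    output_format_8bit="jpg",
    output_format_16bit_primary="png",
    output_format_16bit_fallback="tif",
    png_compression_level=3,
    jpg_quality=90,
    output_filename_pattern_tokens={
        "output_base_directory": Path("D:/AssetLibrary/MyAsset"),  # hypothetical path
        "asset_name": "MyAsset",
    },
    output_filename_pattern="[asset_name]_[maptype]_[resolution].[ext]",
    resolution_threshold_for_jpg=None,
)
# 'details' would contain one entry per saved variant (4K, 2K, 1K PNGs at 16 bit here),
# since no requested resolution exceeds the 4096 px source and the threshold is unset.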
1615  processing_engine.py
File diff suppressed because it is too large.
44  projectBrief.md  Normal file
@@ -0,0 +1,44 @@
# Project Brief: Asset Processor Tool

## 1. Main Goal & Purpose

The primary goal of the Asset Processor Tool is to provide **CG artists and 3D content teams with a friendly, fast, and flexible interface to process and organize 3D asset source files into a standardized library format.** It automates repetitive and complex tasks involved in preparing assets from various suppliers for use in production pipelines.

## 2. Key Features & Components

* **Automated Asset Processing:** Ingests 3D asset source files (texture sets, models, etc.) from `.zip`, `.rar`, `.7z` archives, or folders.
* **Preset-Driven Workflow:** Utilizes configurable JSON presets to interpret different asset sources (e.g., from various online vendors or internal standards), defining rules for file classification and processing.
* **Comprehensive File Operations:**
    * **Classification:** Automatically identifies map types (Color, Normal, Roughness, etc.), models, and other file categories based on preset rules.
    * **Image Processing:** Performs tasks like image resizing (to standard resolutions like 1K, 2K, 4K, avoiding upscaling), glossiness-to-roughness conversion, normal map green channel inversion (OpenGL/DirectX handling), alpha channel extraction, bit-depth adjustments, and low-resolution fallback generation for small source images.
    * **Channel Merging:** Combines channels from different source maps into packed textures (e.g., Normal + Roughness + Metallic into a single NRMRGH map).
* **Metadata Generation:** Creates a detailed `metadata.json` file for each processed asset, containing information about maps, categories, processing settings, and more, for downstream tool integration.
* **Flexible Output Organization:** Generates a clean, structured output directory based on user-configurable naming patterns and tokens.
* **Multiple User Interfaces:**
    * **Graphical User Interface (GUI):** The primary interface, designed to be user-friendly, offering drag-and-drop functionality, an integrated preset editor, a live preview table for rule validation and overrides, and clear processing controls.
    * **Directory Monitor:** An automated script that watches a specified folder for new asset archives and processes them based on preset names embedded in the archive filename.
    * **Command-Line Interface (CLI):** Intended for batch processing and scripting (currently with limited core functionality).
* **Optional Blender Integration:** Can automatically run Blender scripts post-processing to create PBR node groups and materials in specified `.blend` files, linking to the newly processed textures.
* **Hierarchical Rule System:** Allows for dynamic, granular overrides of preset configurations at the source, asset, or individual file level via the GUI.
* **Experimental LLM Prediction:** Includes an option to use a Large Language Model for file interpretation and rule prediction.

## 3. Target Audience

* **CG Artists:** Individual artists looking for an efficient way to manage and prepare their personal or downloaded asset libraries.
* **3D Content Creation Teams:** Studios or groups needing a standardized pipeline for processing and organizing assets from multiple sources.
* **Technical Artists/Pipeline Developers:** Who may extend or integrate the tool into broader production workflows.

## 4. Overall Architectural Style & Key Technologies

* **Core Language:** Python
* **GUI Framework:** PySide6
* **Configuration:** Primarily JSON-based (application settings, user overrides, type definitions, supplier settings, presets, LLM settings).
* **Processing Architecture:** A modular, staged processing pipeline orchestrated by a central engine. Each stage performs a discrete task on an `AssetProcessingContext` object.
* **Key Libraries:** OpenCV (image processing), NumPy (numerical operations), py7zr/rarfile (archive handling), watchdog (directory monitoring).
* **Design Principles:** Modularity, configurability, and user-friendliness (especially for the GUI).

## 5. Foundational Information

* The tool aims to significantly reduce manual effort and ensure consistency in asset preparation.
* It is designed to be adaptable to various asset sources and pipeline requirements through its extensive configuration options and preset system.
* The output `metadata.json` is key for enabling further automation and integration with other tools or digital content creation (DCC) applications.
@@ -1,6 +1,7 @@
import dataclasses
import json
from typing import List, Dict, Any, Tuple, Optional
import numpy as np  # Added for ProcessingItem

@dataclasses.dataclass
class FileRule:
    file_path: str = None
@@ -10,8 +11,12 @@ class FileRule:
    resolution_override: Tuple[int, int] = None
    channel_merge_instructions: Dict[str, Any] = dataclasses.field(default_factory=dict)
    output_format_override: str = None
    processing_items: List['ProcessingItem'] = dataclasses.field(default_factory=list)  # Added field

    def to_json(self) -> str:
        # Need to handle ProcessingItem serialization if it contains non-serializable types like np.ndarray
        # For now, assume asdict handles it or it's handled before calling to_json for persistence.
        # A custom asdict_factory might be needed for robust serialization.
        return json.dumps(dataclasses.asdict(self), indent=4)

    @classmethod
@@ -54,4 +59,43 @@ class SourceRule:
        data = json.loads(json_string)
        # Manually deserialize nested AssetRule objects
        data['assets'] = [AssetRule.from_json(json.dumps(asset_data)) for asset_data in data.get('assets', [])]
        # Need to handle ProcessingItem deserialization if it was serialized
        # For now, from_json for FileRule doesn't explicitly handle processing_items from JSON.
        return cls(**data)


@dataclasses.dataclass
class ProcessingItem:
    """
    Represents a specific version of an image map to be processed and saved.
    This could be a standard resolution (1K, 2K), a preview, or a special
    variant like 'LOWRES'.
    """
    source_file_info_ref: str  # Reference to the original SourceFileInfo or unique ID of the source image
    map_type_identifier: str  # The internal map type (e.g., "MAP_COL", "MAP_ROUGH")
    resolution_key: str  # The resolution identifier (e.g., "1K", "PREVIEW", "LOWRES")
    image_data: np.ndarray  # The actual image data for this item
    original_dimensions: Tuple[int, int]  # (width, height) of the source image for this item
    current_dimensions: Tuple[int, int]  # (width, height) of the image_data in this item
    target_filename: str = ""  # Will be populated by SaveVariantsStage
    is_extra: bool = False  # If this item should be treated as an 'extra' file
    bit_depth: Optional[int] = None
    channels: Optional[int] = None
    file_extension: Optional[str] = None  # Determined during saving based on format
    processing_applied_log: List[str] = dataclasses.field(default_factory=list)
    status: str = "Pending"  # e.g., Pending, Processed, Failed
    error_message: Optional[str] = None

    # __getstate__ and __setstate__ might be needed if we pickle these objects
    # and np.ndarray causes issues. For JSON, image_data would typically not be serialized.
    def __getstate__(self):
        state = self.__dict__.copy()
        # Don't pickle image_data if it's large or not needed for state
        if 'image_data' in state:  # Or a more sophisticated check
            del state['image_data']  # Example: remove it
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Potentially re-initialize or handle missing 'image_data'
        if 'image_data' not in self.__dict__:
            self.image_data = None  # Or load it if a path was stored instead
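A small sketch of how a ProcessingItem behaves when pickled, given the __getstate__/__setstate__ pair above; the reference string, map type, and dimensions are placeholder assumptions.

import pickle
import numpy as np

# Hypothetical item; only the required (non-default) fields are supplied here.
item = ProcessingItem(
    source_file_info_ref="src-0001",
    map_type_identifier="MAP_COL",
    resolution_key="1K",
    image_data=np.zeros((1024, 1024, 3), dtype=np.uint8),
    original_dimensions=(4096, 4096),
    current_dimensions=(1024, 1024),
)
restored = pickle.loads(pickle.dumps(item))
# __getstate__ drops the array before pickling, and __setstate__ re-creates the
# attribute as None, so restored.image_data is None while the other fields survive.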
1  tests/__init__.py  Normal file
@@ -0,0 +1 @@
# This file makes the 'tests' directory a Python package.

1  tests/processing/pipeline/__init__.py  Normal file
@@ -0,0 +1 @@
# This file makes Python treat the directory as a package.

1  tests/processing/pipeline/stages/__init__.py  Normal file
@@ -0,0 +1 @@
# This file makes Python treat the directory as a package.
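These package markers let pytest import and collect the stage test suites that follow; assuming pytest is installed in the project environment, they could be run with, for example:

python -m pytest tests/processing/pipeline/stages -q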
@@ -0,0 +1,273 @@
import pytest
from unittest import mock
from pathlib import Path
import uuid
import numpy as np

from processing.pipeline.stages.alpha_extraction_to_mask import AlphaExtractionToMaskStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule, FileRule
from configuration import Configuration, GeneralSettings
import processing.utils.image_processing_utils as ipu  # Ensure ipu is available for mocking

# Helper Functions
def create_mock_file_rule_for_alpha_test(
    id_val: uuid.UUID = None,
    map_type: str = "ALBEDO",
    filename_pattern: str = "albedo.png",
    item_type: str = "MAP_COL",
    active: bool = True
) -> mock.MagicMock:
    mock_fr = mock.MagicMock(spec=FileRule)
    mock_fr.id = id_val if id_val else uuid.uuid4()
    mock_fr.map_type = map_type
    mock_fr.filename_pattern = filename_pattern
    mock_fr.item_type = item_type
    mock_fr.active = active
    # NOTE: TransformSettings is referenced below but is not imported in this hunk;
    # it must be imported from its defining module for this spec to resolve at runtime.
    mock_fr.transform_settings = mock.MagicMock(spec=TransformSettings)
    return mock_fr

def create_alpha_extraction_mock_context(
    initial_file_rules: list = None,
    initial_processed_details: dict = None,
    skip_asset_flag: bool = False,
    asset_name: str = "AlphaAsset",
    # extract_alpha_globally: bool = True  # If stage checks this
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name

    mock_source_rule = mock.MagicMock(spec=SourceRule)

    mock_gs = mock.MagicMock(spec=GeneralSettings)
    # if your stage uses a global flag:
    # mock_gs.extract_alpha_to_mask_globally = extract_alpha_globally

    mock_config = mock.MagicMock(spec=Configuration)
    mock_config.general_settings = mock_gs

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp_engine_dir"),
        output_base_path=Path("/fake/output"),
        effective_supplier="ValidSupplier",
        asset_metadata={'asset_name': asset_name},
        processed_maps_details=initial_processed_details if initial_processed_details is not None else {},
        merged_maps_details={},
        files_to_process=list(initial_file_rules) if initial_file_rules else [],
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={'skip_asset': skip_asset_flag},
        incrementing_value=None,
        sha5_value=None
    )
    return context

# Unit Tests
@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.save_image')
@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.load_image')
@mock.patch('logging.info')  # Mock logging to avoid console output during tests
def test_asset_skipped(mock_log_info, mock_load_image, mock_save_image):
    stage = AlphaExtractionToMaskStage()
    context = create_alpha_extraction_mock_context(skip_asset_flag=True)

    updated_context = stage.execute(context)

    assert updated_context == context  # Context should be unchanged
    mock_load_image.assert_not_called()
    mock_save_image.assert_not_called()
    assert len(updated_context.files_to_process) == 0
    assert not updated_context.processed_maps_details

@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.save_image')
@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.load_image')
@mock.patch('logging.info')
def test_existing_mask_map(mock_log_info, mock_load_image, mock_save_image):
    stage = AlphaExtractionToMaskStage()

    existing_mask_rule = create_mock_file_rule_for_alpha_test(map_type="MASK", filename_pattern="mask.png")
    context = create_alpha_extraction_mock_context(initial_file_rules=[existing_mask_rule])

    updated_context = stage.execute(context)

    assert updated_context == context
    mock_load_image.assert_not_called()
    mock_save_image.assert_not_called()
    assert len(updated_context.files_to_process) == 1
    assert updated_context.files_to_process[0].map_type == "MASK"

@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.save_image')
@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.load_image')
@mock.patch('logging.info')
def test_alpha_extraction_success(mock_log_info, mock_load_image, mock_save_image):
    stage = AlphaExtractionToMaskStage()

    albedo_rule_id = uuid.uuid4()
    albedo_fr = create_mock_file_rule_for_alpha_test(id_val=albedo_rule_id, map_type="ALBEDO")

    initial_processed_details = {
        albedo_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_albedo.png', 'status': 'Processed', 'map_type': 'ALBEDO', 'source_file_path': Path('/fake/source/albedo.png')}
    }
    context = create_alpha_extraction_mock_context(
        initial_file_rules=[albedo_fr],
        initial_processed_details=initial_processed_details
    )

    mock_rgba_data = np.zeros((10, 10, 4), dtype=np.uint8)
    mock_rgba_data[:, :, 3] = 128  # Example alpha data
    mock_load_image.side_effect = [mock_rgba_data, mock_rgba_data]

    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    assert mock_load_image.call_count == 2
    # First call to check for alpha, second to get data for saving
    mock_load_image.assert_any_call(Path('/fake/temp_engine_dir/processed_albedo.png'))

    mock_save_image.assert_called_once()
    saved_path_arg = mock_save_image.call_args[0][0]
    saved_data_arg = mock_save_image.call_args[0][1]

    assert isinstance(saved_path_arg, Path)
    assert "mask_from_alpha_" in saved_path_arg.name
    assert np.array_equal(saved_data_arg, mock_rgba_data[:, :, 3])

    assert len(updated_context.files_to_process) == 2
    new_mask_rule = None
    for fr in updated_context.files_to_process:
        if fr.map_type == "MASK":
            new_mask_rule = fr
            break
    assert new_mask_rule is not None
    assert new_mask_rule.item_type == "MAP_DER"  # Derived map

    assert new_mask_rule.id.hex in updated_context.processed_maps_details
    new_mask_detail = updated_context.processed_maps_details[new_mask_rule.id.hex]
    assert new_mask_detail['map_type'] == "MASK"
    assert "mask_from_alpha_" in new_mask_detail['temp_processed_file']
    assert "Generated from alpha of ALBEDO" in new_mask_detail['notes']  # Check for specific note
    assert new_mask_detail['status'] == 'Processed'

@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.save_image')
@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.load_image')
@mock.patch('logging.info')
def test_no_alpha_channel_in_source(mock_log_info, mock_load_image, mock_save_image):
    stage = AlphaExtractionToMaskStage()

    albedo_rule_id = uuid.uuid4()
    albedo_fr = create_mock_file_rule_for_alpha_test(id_val=albedo_rule_id, map_type="ALBEDO")
    initial_processed_details = {
        albedo_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_rgb_albedo.png', 'status': 'Processed', 'map_type': 'ALBEDO', 'source_file_path': Path('/fake/source/albedo_rgb.png')}
    }
    context = create_alpha_extraction_mock_context(
        initial_file_rules=[albedo_fr],
        initial_processed_details=initial_processed_details
    )

    mock_rgb_data = np.zeros((10, 10, 3), dtype=np.uint8)  # RGB, no alpha
    mock_load_image.return_value = mock_rgb_data  # Only called once for check

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path('/fake/temp_engine_dir/processed_rgb_albedo.png'))
    mock_save_image.assert_not_called()
    assert len(updated_context.files_to_process) == 1  # No new MASK rule
    assert albedo_fr.id.hex in updated_context.processed_maps_details
    assert len(updated_context.processed_maps_details) == 1

@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.save_image')
@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.load_image')
@mock.patch('logging.info')
def test_no_suitable_source_map_type(mock_log_info, mock_load_image, mock_save_image):
    stage = AlphaExtractionToMaskStage()

    normal_rule_id = uuid.uuid4()
    normal_fr = create_mock_file_rule_for_alpha_test(id_val=normal_rule_id, map_type="NORMAL")
    initial_processed_details = {
        normal_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_normal.png', 'status': 'Processed', 'map_type': 'NORMAL'}
    }
    context = create_alpha_extraction_mock_context(
        initial_file_rules=[normal_fr],
        initial_processed_details=initial_processed_details
    )

    updated_context = stage.execute(context)

    mock_load_image.assert_not_called()
    mock_save_image.assert_not_called()
    assert len(updated_context.files_to_process) == 1
    assert normal_fr.id.hex in updated_context.processed_maps_details
    assert len(updated_context.processed_maps_details) == 1

@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.save_image')
@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.load_image')
@mock.patch('logging.warning')  # Expect a warning log
def test_load_image_fails(mock_log_warning, mock_load_image, mock_save_image):
    stage = AlphaExtractionToMaskStage()

    albedo_rule_id = uuid.uuid4()
    albedo_fr = create_mock_file_rule_for_alpha_test(id_val=albedo_rule_id, map_type="ALBEDO")
    initial_processed_details = {
        albedo_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_albedo_load_fail.png', 'status': 'Processed', 'map_type': 'ALBEDO', 'source_file_path': Path('/fake/source/albedo_load_fail.png')}
    }
    context = create_alpha_extraction_mock_context(
        initial_file_rules=[albedo_fr],
        initial_processed_details=initial_processed_details
    )

    mock_load_image.return_value = None  # Simulate load failure

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path('/fake/temp_engine_dir/processed_albedo_load_fail.png'))
    mock_save_image.assert_not_called()
    assert len(updated_context.files_to_process) == 1
    assert albedo_fr.id.hex in updated_context.processed_maps_details
    assert len(updated_context.processed_maps_details) == 1
    mock_log_warning.assert_called_once()  # Check that a warning was logged

@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.save_image')
@mock.patch('processing.pipeline.stages.alpha_extraction_to_mask.ipu.load_image')
@mock.patch('logging.error')  # Expect an error log
def test_save_image_fails(mock_log_error, mock_load_image, mock_save_image):
    stage = AlphaExtractionToMaskStage()

    albedo_rule_id = uuid.uuid4()
    albedo_fr = create_mock_file_rule_for_alpha_test(id_val=albedo_rule_id, map_type="ALBEDO")
    initial_processed_details = {
        albedo_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_albedo_save_fail.png', 'status': 'Processed', 'map_type': 'ALBEDO', 'source_file_path': Path('/fake/source/albedo_save_fail.png')}
    }
    context = create_alpha_extraction_mock_context(
        initial_file_rules=[albedo_fr],
        initial_processed_details=initial_processed_details
    )

    mock_rgba_data = np.zeros((10, 10, 4), dtype=np.uint8)
    mock_rgba_data[:, :, 3] = 128
    mock_load_image.side_effect = [mock_rgba_data, mock_rgba_data]  # Load succeeds

    mock_save_image.return_value = False  # Simulate save failure

    updated_context = stage.execute(context)

    assert mock_load_image.call_count == 2
    mock_save_image.assert_called_once()  # Save was attempted

    assert len(updated_context.files_to_process) == 1  # No new MASK rule should be successfully added and detailed

    # Check that no new MASK details were added, or if they were, they reflect failure.
    # The current stage logic returns context early, so no new rule or details should be present.
    mask_rule_found = any(fr.map_type == "MASK" for fr in updated_context.files_to_process)
    assert not mask_rule_found

    mask_details_found = any(
        details['map_type'] == "MASK"
        for fr_id, details in updated_context.processed_maps_details.items()
        if fr_id != albedo_fr.id.hex  # Exclude the original albedo
    )
    assert not mask_details_found
    mock_log_error.assert_called_once()  # Check that an error was logged
213  tests/processing/pipeline/stages/test_asset_skip_logic.py  Normal file
@@ -0,0 +1,213 @@
import pytest
from unittest import mock
from pathlib import Path
from typing import Dict, Optional, Any

from processing.pipeline.stages.asset_skip_logic import AssetSkipLogicStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule
from configuration import Configuration, GeneralSettings

# Helper function to create a mock AssetProcessingContext
def create_skip_logic_mock_context(
    effective_supplier: Optional[str] = "ValidSupplier",
    asset_process_status: str = "PENDING",
    overwrite_existing: bool = False,
    asset_name: str = "TestAssetSkip"
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_asset_rule.process_status = asset_process_status
    mock_asset_rule.source_path = "fake/source"  # Added for completeness
    mock_asset_rule.output_path = "fake/output"  # Added for completeness
    mock_asset_rule.maps = []  # Added for completeness
    mock_asset_rule.metadata = {}  # Added for completeness
    mock_asset_rule.material_name = None  # Added for completeness
    mock_asset_rule.notes = None  # Added for completeness
    mock_asset_rule.tags = []  # Added for completeness
    mock_asset_rule.enabled = True  # Added for completeness

    mock_source_rule = mock.MagicMock(spec=SourceRule)
    mock_source_rule.name = "TestSourceRule"  # Added for completeness
    mock_source_rule.path = "fake/source_rule_path"  # Added for completeness
    mock_source_rule.default_supplier = None  # Added for completeness
    mock_source_rule.assets = [mock_asset_rule]  # Added for completeness
    mock_source_rule.enabled = True  # Added for completeness

    mock_general_settings = mock.MagicMock(spec=GeneralSettings)
    mock_general_settings.overwrite_existing = overwrite_existing

    mock_config = mock.MagicMock(spec=Configuration)
    mock_config.general_settings = mock_general_settings
    mock_config.suppliers = {"ValidSupplier": mock.MagicMock()}

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp"),
        output_base_path=Path("/fake/output"),
        effective_supplier=effective_supplier,
        asset_metadata={},
        processed_maps_details={},
        merged_maps_details={},
        files_to_process=[],
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={},
        incrementing_value=None,
        sha5_value=None  # Corrected from sha5_value to sha256_value if that's the actual field
    )
    # Ensure status_flags is initialized if AssetSkipLogicStage expects it
    # context.status_flags = {}  # Already done in constructor
    return context

@mock.patch('logging.info')
def test_skip_due_to_missing_supplier(mock_log_info):
    """
    Test that the asset is skipped if effective_supplier is None.
    """
    stage = AssetSkipLogicStage()
    context = create_skip_logic_mock_context(effective_supplier=None, asset_name="MissingSupplierAsset")

    updated_context = stage.execute(context)

    assert updated_context.status_flags.get('skip_asset') is True
    assert updated_context.status_flags.get('skip_reason') == "Invalid or missing supplier"
    mock_log_info.assert_any_call(f"Asset 'MissingSupplierAsset': Skipping due to missing or invalid supplier.")

@mock.patch('logging.info')
def test_skip_due_to_process_status_skip(mock_log_info):
    """
    Test that the asset is skipped if asset_rule.process_status is "SKIP".
    """
    stage = AssetSkipLogicStage()
    context = create_skip_logic_mock_context(asset_process_status="SKIP", asset_name="SkipStatusAsset")

    updated_context = stage.execute(context)

    assert updated_context.status_flags.get('skip_asset') is True
    assert updated_context.status_flags.get('skip_reason') == "Process status set to SKIP"
    mock_log_info.assert_any_call(f"Asset 'SkipStatusAsset': Skipping because process_status is 'SKIP'.")

@mock.patch('logging.info')
def test_skip_due_to_processed_and_overwrite_disabled(mock_log_info):
    """
    Test that the asset is skipped if asset_rule.process_status is "PROCESSED"
    and overwrite_existing is False.
    """
    stage = AssetSkipLogicStage()
    context = create_skip_logic_mock_context(
        asset_process_status="PROCESSED",
        overwrite_existing=False,
        asset_name="ProcessedNoOverwriteAsset"
    )

    updated_context = stage.execute(context)

    assert updated_context.status_flags.get('skip_asset') is True
    assert updated_context.status_flags.get('skip_reason') == "Already processed, overwrite disabled"
    mock_log_info.assert_any_call(f"Asset 'ProcessedNoOverwriteAsset': Skipping because already processed and overwrite is disabled.")

@mock.patch('logging.info')
def test_no_skip_when_processed_and_overwrite_enabled(mock_log_info):
    """
    Test that the asset is NOT skipped if asset_rule.process_status is "PROCESSED"
    but overwrite_existing is True.
    """
    stage = AssetSkipLogicStage()
    context = create_skip_logic_mock_context(
        asset_process_status="PROCESSED",
        overwrite_existing=True,
        effective_supplier="ValidSupplier",  # Ensure supplier is valid
        asset_name="ProcessedOverwriteAsset"
    )

    updated_context = stage.execute(context)

    assert updated_context.status_flags.get('skip_asset', False) is False  # Default to False if key not present
    # No specific skip_reason to check if not skipped
    # Check that no skip log message was called for this specific reason
    for call_args in mock_log_info.call_args_list:
        assert "Skipping because already processed and overwrite is disabled" not in call_args[0][0]
        assert "Skipping due to missing or invalid supplier" not in call_args[0][0]
        assert "Skipping because process_status is 'SKIP'" not in call_args[0][0]

@mock.patch('logging.info')
def test_no_skip_when_process_status_pending(mock_log_info):
    """
    Test that the asset is NOT skipped if asset_rule.process_status is "PENDING".
    """
    stage = AssetSkipLogicStage()
    context = create_skip_logic_mock_context(
        asset_process_status="PENDING",
        effective_supplier="ValidSupplier",  # Ensure supplier is valid
        asset_name="PendingAsset"
    )

    updated_context = stage.execute(context)

    assert updated_context.status_flags.get('skip_asset', False) is False
    # Check that no skip log message was called
    for call_args in mock_log_info.call_args_list:
        assert "Skipping" not in call_args[0][0]

@mock.patch('logging.info')
def test_no_skip_when_process_status_failed_previously(mock_log_info):
    """
    Test that the asset is NOT skipped if asset_rule.process_status is "FAILED_PREVIOUSLY".
    """
    stage = AssetSkipLogicStage()
    context = create_skip_logic_mock_context(
        asset_process_status="FAILED_PREVIOUSLY",
        effective_supplier="ValidSupplier",  # Ensure supplier is valid
        asset_name="FailedPreviouslyAsset"
    )

    updated_context = stage.execute(context)

    assert updated_context.status_flags.get('skip_asset', False) is False
    # Check that no skip log message was called
    for call_args in mock_log_info.call_args_list:
        assert "Skipping" not in call_args[0][0]

@mock.patch('logging.info')
def test_no_skip_when_process_status_other_valid_status(mock_log_info):
    """
    Test that the asset is NOT skipped for other valid, non-skip process statuses.
    """
    stage = AssetSkipLogicStage()
    context = create_skip_logic_mock_context(
        asset_process_status="READY_FOR_PROCESSING",  # Example of another non-skip status
        effective_supplier="ValidSupplier",
        asset_name="ReadyAsset"
    )
    updated_context = stage.execute(context)
    assert updated_context.status_flags.get('skip_asset', False) is False
    for call_args in mock_log_info.call_args_list:
        assert "Skipping" not in call_args[0][0]

@mock.patch('logging.info')
def test_skip_asset_flag_initialized_if_not_present(mock_log_info):
    """
    Test that 'skip_asset' is initialized to False in status_flags if not skipped and not present.
    """
    stage = AssetSkipLogicStage()
    context = create_skip_logic_mock_context(
        asset_process_status="PENDING",
        effective_supplier="ValidSupplier",
        asset_name="InitFlagAsset"
    )
    # Ensure status_flags is empty before execute
    context.status_flags = {}

    updated_context = stage.execute(context)

    # If not skipped, 'skip_asset' should be explicitly False.
    assert updated_context.status_flags.get('skip_asset') is False
    # No skip reason should be set
    assert 'skip_reason' not in updated_context.status_flags
    for call_args in mock_log_info.call_args_list:
        assert "Skipping" not in call_args[0][0]
330  tests/processing/pipeline/stages/test_file_rule_filter.py  Normal file
@@ -0,0 +1,330 @@
import pytest
from unittest import mock
from pathlib import Path
import uuid
from typing import Optional  # Added Optional for type hinting

from processing.pipeline.stages.file_rule_filter import FileRuleFilterStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule, FileRule  # FileRule is key here
from configuration import Configuration  # Minimal config needed

def create_mock_file_rule(
    id_val: Optional[uuid.UUID] = None,
    map_type: str = "Diffuse",
    filename_pattern: str = "*.tif",
    item_type: str = "MAP_COL",  # e.g., MAP_COL, FILE_IGNORE
    active: bool = True
) -> mock.MagicMock:  # Return MagicMock to easily set other attributes if needed
    mock_fr = mock.MagicMock(spec=FileRule)
    mock_fr.id = id_val if id_val else uuid.uuid4()
    mock_fr.map_type = map_type
    mock_fr.filename_pattern = filename_pattern
    mock_fr.item_type = item_type
    mock_fr.active = active
    return mock_fr

def create_file_filter_mock_context(
    file_rules_list: Optional[list] = None,  # List of mock FileRule objects
    skip_asset_flag: bool = False,
    asset_name: str = "FileFilterAsset"
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_asset_rule.file_rules = file_rules_list if file_rules_list is not None else []

    mock_source_rule = mock.MagicMock(spec=SourceRule)
    mock_config = mock.MagicMock(spec=Configuration)

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp"),
        output_base_path=Path("/fake/output"),
        effective_supplier="ValidSupplier",  # Assume valid for this stage
        asset_metadata={'asset_name': asset_name},  # Assume metadata init happened
        processed_maps_details={},
        merged_maps_details={},
        files_to_process=[],  # Stage will populate this
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={'skip_asset': skip_asset_flag},
        incrementing_value=None,
        sha5_value=None  # Corrected from sha5_value to sha256_value based on AssetProcessingContext
    )
    return context

# Test Cases for FileRuleFilterStage.execute()

@mock.patch('logging.info')
@mock.patch('logging.debug')
def test_file_rule_filter_asset_skipped(mock_log_debug, mock_log_info):
    """
    Test case: Asset Skipped - status_flags['skip_asset'] is True.
    Assert context.files_to_process remains empty.
    """
    stage = FileRuleFilterStage()
    context = create_file_filter_mock_context(skip_asset_flag=True)

    updated_context = stage.execute(context)

    assert len(updated_context.files_to_process) == 0
    mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule filtering as 'skip_asset' is True.")

@mock.patch('logging.info')
@mock.patch('logging.debug')
def test_file_rule_filter_no_file_rules(mock_log_debug, mock_log_info):
    """
    Test case: No File Rules - asset_rule.file_rules is empty.
    Assert context.files_to_process is empty.
    """
    stage = FileRuleFilterStage()
    context = create_file_filter_mock_context(file_rules_list=[])

    updated_context = stage.execute(context)

    assert len(updated_context.files_to_process) == 0
    mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': No file rules defined. Skipping file rule filtering.")

@mock.patch('logging.info')
@mock.patch('logging.debug')
def test_file_rule_filter_only_active_processable_rules(mock_log_debug, mock_log_info):
    """
    Test case: Only Active, Processable Rules - All FileRules are active=True and item_type="MAP_COL".
    Assert all are added to context.files_to_process.
    """
    stage = FileRuleFilterStage()
    fr1 = create_mock_file_rule(filename_pattern="diffuse.png", item_type="MAP_COL", active=True)
    fr2 = create_mock_file_rule(filename_pattern="normal.png", item_type="MAP_COL", active=True)
    context = create_file_filter_mock_context(file_rules_list=[fr1, fr2])

    updated_context = stage.execute(context)

    assert len(updated_context.files_to_process) == 2
    assert fr1 in updated_context.files_to_process
    assert fr2 in updated_context.files_to_process
    mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 2 file rules queued for processing after filtering.")

@mock.patch('logging.info')
@mock.patch('logging.debug')
def test_file_rule_filter_inactive_rules(mock_log_debug, mock_log_info):
    """
    Test case: Inactive Rules - Some FileRules have active=False.
    Assert only active rules are added.
    """
    stage = FileRuleFilterStage()
    fr_active = create_mock_file_rule(filename_pattern="active.png", item_type="MAP_COL", active=True)
    fr_inactive = create_mock_file_rule(filename_pattern="inactive.png", item_type="MAP_COL", active=False)
    fr_another_active = create_mock_file_rule(filename_pattern="another_active.jpg", item_type="MAP_COL", active=True)
    context = create_file_filter_mock_context(file_rules_list=[fr_active, fr_inactive, fr_another_active])

    updated_context = stage.execute(context)

    assert len(updated_context.files_to_process) == 2
    assert fr_active in updated_context.files_to_process
    assert fr_another_active in updated_context.files_to_process
|
||||||
|
assert fr_inactive not in updated_context.files_to_process
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping inactive file rule: '{fr_inactive.filename_pattern}'")
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 2 file rules queued for processing after filtering.")
|
||||||
|
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
@mock.patch('logging.debug')
|
||||||
|
def test_file_rule_filter_file_ignore_simple_match(mock_log_debug, mock_log_info):
|
||||||
|
"""
|
||||||
|
Test case: FILE_IGNORE Rule (Simple Match).
|
||||||
|
One FILE_IGNORE rule with filename_pattern="*_ignore.png".
|
||||||
|
One MAP_COL rule with filename_pattern="diffuse_ignore.png".
|
||||||
|
One MAP_COL rule with filename_pattern="normal_process.png".
|
||||||
|
Assert only "normal_process.png" rule is added.
|
||||||
|
"""
|
||||||
|
stage = FileRuleFilterStage()
|
||||||
|
fr_ignore = create_mock_file_rule(filename_pattern="*_ignore.png", item_type="FILE_IGNORE", active=True)
|
||||||
|
fr_ignored_map = create_mock_file_rule(filename_pattern="diffuse_ignore.png", item_type="MAP_COL", active=True)
|
||||||
|
fr_process_map = create_mock_file_rule(filename_pattern="normal_process.png", item_type="MAP_COL", active=True)
|
||||||
|
context = create_file_filter_mock_context(file_rules_list=[fr_ignore, fr_ignored_map, fr_process_map])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
assert fr_process_map in updated_context.files_to_process
|
||||||
|
assert fr_ignored_map not in updated_context.files_to_process
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Registering ignore pattern: '{fr_ignore.filename_pattern}'")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule '{fr_ignored_map.filename_pattern}' due to matching ignore pattern.")
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 1 file rules queued for processing after filtering.")
|
||||||
|
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
@mock.patch('logging.debug')
|
||||||
|
def test_file_rule_filter_file_ignore_glob_pattern(mock_log_debug, mock_log_info):
|
||||||
|
"""
|
||||||
|
Test case: FILE_IGNORE Rule (Glob Pattern).
|
||||||
|
One FILE_IGNORE rule with filename_pattern="*_ignore.*".
|
||||||
|
MAP_COL rules: "tex_ignore.tif", "tex_process.png".
|
||||||
|
Assert only "tex_process.png" rule is added.
|
||||||
|
"""
|
||||||
|
stage = FileRuleFilterStage()
|
||||||
|
fr_ignore_glob = create_mock_file_rule(filename_pattern="*_ignore.*", item_type="FILE_IGNORE", active=True)
|
||||||
|
fr_ignored_tif = create_mock_file_rule(filename_pattern="tex_ignore.tif", item_type="MAP_COL", active=True)
|
||||||
|
fr_process_png = create_mock_file_rule(filename_pattern="tex_process.png", item_type="MAP_COL", active=True)
|
||||||
|
context = create_file_filter_mock_context(file_rules_list=[fr_ignore_glob, fr_ignored_tif, fr_process_png])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
assert fr_process_png in updated_context.files_to_process
|
||||||
|
assert fr_ignored_tif not in updated_context.files_to_process
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Registering ignore pattern: '{fr_ignore_glob.filename_pattern}'")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule '{fr_ignored_tif.filename_pattern}' due to matching ignore pattern.")
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 1 file rules queued for processing after filtering.")
|
||||||
|
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
@mock.patch('logging.debug')
|
||||||
|
def test_file_rule_filter_multiple_file_ignore_rules(mock_log_debug, mock_log_info):
|
||||||
|
"""
|
||||||
|
Test case: Multiple FILE_IGNORE Rules.
|
||||||
|
Test with several ignore patterns and ensure they are all respected.
|
||||||
|
"""
|
||||||
|
stage = FileRuleFilterStage()
|
||||||
|
fr_ignore1 = create_mock_file_rule(filename_pattern="*.tmp", item_type="FILE_IGNORE", active=True)
|
||||||
|
fr_ignore2 = create_mock_file_rule(filename_pattern="backup_*", item_type="FILE_IGNORE", active=True)
|
||||||
|
fr_ignore3 = create_mock_file_rule(filename_pattern="*_old.png", item_type="FILE_IGNORE", active=True)
|
||||||
|
|
||||||
|
fr_map_ignored1 = create_mock_file_rule(filename_pattern="data.tmp", item_type="MAP_COL", active=True)
|
||||||
|
fr_map_ignored2 = create_mock_file_rule(filename_pattern="backup_diffuse.jpg", item_type="MAP_COL", active=True)
|
||||||
|
fr_map_ignored3 = create_mock_file_rule(filename_pattern="normal_old.png", item_type="MAP_COL", active=True)
|
||||||
|
fr_map_process = create_mock_file_rule(filename_pattern="final_texture.tif", item_type="MAP_COL", active=True)
|
||||||
|
|
||||||
|
context = create_file_filter_mock_context(file_rules_list=[
|
||||||
|
fr_ignore1, fr_ignore2, fr_ignore3,
|
||||||
|
fr_map_ignored1, fr_map_ignored2, fr_map_ignored3, fr_map_process
|
||||||
|
])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
assert fr_map_process in updated_context.files_to_process
|
||||||
|
assert fr_map_ignored1 not in updated_context.files_to_process
|
||||||
|
assert fr_map_ignored2 not in updated_context.files_to_process
|
||||||
|
assert fr_map_ignored3 not in updated_context.files_to_process
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Registering ignore pattern: '{fr_ignore1.filename_pattern}'")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Registering ignore pattern: '{fr_ignore2.filename_pattern}'")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Registering ignore pattern: '{fr_ignore3.filename_pattern}'")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule '{fr_map_ignored1.filename_pattern}' due to matching ignore pattern.")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule '{fr_map_ignored2.filename_pattern}' due to matching ignore pattern.")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule '{fr_map_ignored3.filename_pattern}' due to matching ignore pattern.")
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 1 file rules queued for processing after filtering.")
|
||||||
|
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
@mock.patch('logging.debug')
|
||||||
|
def test_file_rule_filter_file_ignore_rule_is_inactive(mock_log_debug, mock_log_info):
|
||||||
|
"""
|
||||||
|
Test case: FILE_IGNORE Rule is Inactive.
|
||||||
|
An ignore rule itself is active=False. Assert its pattern is NOT used for filtering.
|
||||||
|
"""
|
||||||
|
stage = FileRuleFilterStage()
|
||||||
|
fr_inactive_ignore = create_mock_file_rule(filename_pattern="*_ignore.tif", item_type="FILE_IGNORE", active=False)
|
||||||
|
fr_should_process1 = create_mock_file_rule(filename_pattern="diffuse_ignore.tif", item_type="MAP_COL", active=True) # Should be processed
|
||||||
|
fr_should_process2 = create_mock_file_rule(filename_pattern="normal_ok.png", item_type="MAP_COL", active=True)
|
||||||
|
context = create_file_filter_mock_context(file_rules_list=[fr_inactive_ignore, fr_should_process1, fr_should_process2])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 2
|
||||||
|
assert fr_should_process1 in updated_context.files_to_process
|
||||||
|
assert fr_should_process2 in updated_context.files_to_process
|
||||||
|
# Ensure the inactive ignore rule's pattern was not registered
|
||||||
|
# We check this by ensuring no debug log for registering *that specific* pattern was made.
|
||||||
|
# A more robust way would be to check mock_log_debug.call_args_list, but this is simpler for now.
|
||||||
|
for call in mock_log_debug.call_args_list:
|
||||||
|
args, kwargs = call
|
||||||
|
if "Registering ignore pattern" in args[0] and fr_inactive_ignore.filename_pattern in args[0]:
|
||||||
|
pytest.fail(f"Inactive ignore pattern '{fr_inactive_ignore.filename_pattern}' was incorrectly registered.")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping inactive file rule: '{fr_inactive_ignore.filename_pattern}' (type: FILE_IGNORE)")
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 2 file rules queued for processing after filtering.")
|
||||||
|
|
||||||
|
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
@mock.patch('logging.debug')
|
||||||
|
def test_file_rule_filter_no_file_ignore_rules(mock_log_debug, mock_log_info):
|
||||||
|
"""
|
||||||
|
Test case: No FILE_IGNORE Rules.
|
||||||
|
All rules are MAP_COL or other processable types.
|
||||||
|
Assert all active, processable rules are included.
|
||||||
|
"""
|
||||||
|
stage = FileRuleFilterStage()
|
||||||
|
fr1 = create_mock_file_rule(filename_pattern="diffuse.png", item_type="MAP_COL", active=True)
|
||||||
|
fr2 = create_mock_file_rule(filename_pattern="normal.png", item_type="MAP_COL", active=True)
|
||||||
|
fr_other_type = create_mock_file_rule(filename_pattern="spec.tif", item_type="MAP_SPEC", active=True) # Assuming MAP_SPEC is processable
|
||||||
|
fr_inactive = create_mock_file_rule(filename_pattern="ao.jpg", item_type="MAP_AO", active=False)
|
||||||
|
|
||||||
|
context = create_file_filter_mock_context(file_rules_list=[fr1, fr2, fr_other_type, fr_inactive])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 3
|
||||||
|
assert fr1 in updated_context.files_to_process
|
||||||
|
assert fr2 in updated_context.files_to_process
|
||||||
|
assert fr_other_type in updated_context.files_to_process
|
||||||
|
assert fr_inactive not in updated_context.files_to_process
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping inactive file rule: '{fr_inactive.filename_pattern}'")
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 3 file rules queued for processing after filtering.")
|
||||||
|
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
@mock.patch('logging.debug')
|
||||||
|
def test_file_rule_filter_item_type_not_processable(mock_log_debug, mock_log_info):
|
||||||
|
"""
|
||||||
|
Test case: Item type is not processable (e.g., not MAP_COL, MAP_AO etc., but something else like 'METADATA_ONLY').
|
||||||
|
Assert such rules are not added to files_to_process, unless they are FILE_IGNORE.
|
||||||
|
"""
|
||||||
|
stage = FileRuleFilterStage()
|
||||||
|
fr_processable = create_mock_file_rule(filename_pattern="diffuse.png", item_type="MAP_COL", active=True)
|
||||||
|
fr_not_processable = create_mock_file_rule(filename_pattern="info.txt", item_type="METADATA_ONLY", active=True)
|
||||||
|
fr_ignore = create_mock_file_rule(filename_pattern="*.bak", item_type="FILE_IGNORE", active=True)
|
||||||
|
fr_ignored_by_bak = create_mock_file_rule(filename_pattern="diffuse.bak", item_type="MAP_COL", active=True)
|
||||||
|
|
||||||
|
context = create_file_filter_mock_context(file_rules_list=[fr_processable, fr_not_processable, fr_ignore, fr_ignored_by_bak])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
assert fr_processable in updated_context.files_to_process
|
||||||
|
assert fr_not_processable not in updated_context.files_to_process
|
||||||
|
assert fr_ignored_by_bak not in updated_context.files_to_process
|
||||||
|
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Registering ignore pattern: '{fr_ignore.filename_pattern}'")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule '{fr_not_processable.filename_pattern}' as its item_type '{fr_not_processable.item_type}' is not processable.")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule '{fr_ignored_by_bak.filename_pattern}' due to matching ignore pattern.")
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 1 file rules queued for processing after filtering.")
|
||||||
|
|
||||||
|
# Example tests from instructions (can be adapted or used as a base)
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
@mock.patch('logging.debug')
|
||||||
|
def test_file_rule_filter_basic_active_example(mock_log_debug, mock_log_info): # Renamed to avoid conflict
|
||||||
|
stage = FileRuleFilterStage()
|
||||||
|
fr1 = create_mock_file_rule(filename_pattern="diffuse.png", item_type="MAP_COL", active=True)
|
||||||
|
fr2 = create_mock_file_rule(filename_pattern="normal.png", item_type="MAP_COL", active=True)
|
||||||
|
context = create_file_filter_mock_context(file_rules_list=[fr1, fr2])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 2
|
||||||
|
assert fr1 in updated_context.files_to_process
|
||||||
|
assert fr2 in updated_context.files_to_process
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 2 file rules queued for processing after filtering.")
|
||||||
|
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
@mock.patch('logging.debug')
|
||||||
|
def test_file_rule_filter_with_file_ignore_example(mock_log_debug, mock_log_info): # Renamed to avoid conflict
|
||||||
|
stage = FileRuleFilterStage()
|
||||||
|
fr_ignore = create_mock_file_rule(filename_pattern="*_ignore.tif", item_type="FILE_IGNORE", active=True)
|
||||||
|
fr_process = create_mock_file_rule(filename_pattern="diffuse_ok.tif", item_type="MAP_COL", active=True)
|
||||||
|
fr_skip = create_mock_file_rule(filename_pattern="normal_ignore.tif", item_type="MAP_COL", active=True)
|
||||||
|
context = create_file_filter_mock_context(file_rules_list=[fr_ignore, fr_process, fr_skip])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
assert fr_process in updated_context.files_to_process
|
||||||
|
assert fr_skip not in updated_context.files_to_process
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Registering ignore pattern: '{fr_ignore.filename_pattern}'")
|
||||||
|
mock_log_debug.assert_any_call(f"Asset '{context.asset_rule.name}': Skipping file rule '{fr_skip.filename_pattern}' due to matching ignore pattern.")
|
||||||
|
mock_log_info.assert_any_call(f"Asset '{context.asset_rule.name}': 1 file rules queued for processing after filtering.")
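The following new file (486 lines) tests GlossToRoughConversionStage. The conversion its success cases assert is a plain inversion of the gloss map; a minimal sketch under the same assumptions the tests encode (8-bit data inverted against 255, float data against 1.0; the function name is illustrative only):

import numpy as np

def gloss_to_roughness(gloss: np.ndarray) -> np.ndarray:
    # Roughness is the inverse of gloss: a shiny (high-gloss) texel maps to low roughness.
    if np.issubdtype(gloss.dtype, np.floating):
        return 1.0 - gloss  # float data assumed to be in [0.0, 1.0]
    return 255 - gloss      # 8-bit data inverted against the full range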
@@ -0,0 +1,486 @@
import pytest
from unittest import mock
from pathlib import Path
import uuid
import numpy as np
from typing import Optional, List, Dict

from processing.pipeline.stages.gloss_to_rough_conversion import GlossToRoughConversionStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule, FileRule
from configuration import Configuration, GeneralSettings
# No direct ipu import needed in test if we mock its usage by the stage


def create_mock_file_rule_for_gloss_test(
    id_val: Optional[uuid.UUID] = None,
    map_type: str = "GLOSS",  # Test with GLOSS and other types
    filename_pattern: str = "gloss.png"
) -> mock.MagicMock:
    mock_fr = mock.MagicMock(spec=FileRule)
    mock_fr.id = id_val if id_val else uuid.uuid4()
    mock_fr.map_type = map_type
    mock_fr.filename_pattern = filename_pattern
    mock_fr.item_type = "MAP_COL"
    mock_fr.active = True
    return mock_fr


def create_gloss_conversion_mock_context(
    initial_file_rules: Optional[List[FileRule]] = None,  # Type hint corrected
    initial_processed_details: Optional[Dict] = None,  # Type hint corrected
    skip_asset_flag: bool = False,
    asset_name: str = "GlossAsset",
    # Add a mock for general_settings if your stage checks a global flag
    # convert_gloss_globally: bool = True
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_asset_rule.file_rules = initial_file_rules if initial_file_rules is not None else []

    mock_source_rule = mock.MagicMock(spec=SourceRule)

    mock_gs = mock.MagicMock(spec=GeneralSettings)
    # if your stage uses a global flag:
    # mock_gs.convert_gloss_to_rough_globally = convert_gloss_globally

    mock_config = mock.MagicMock(spec=Configuration)
    mock_config.general_settings = mock_gs

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp_engine_dir"),  # Important for new temp file paths
        output_base_path=Path("/fake/output"),
        effective_supplier="ValidSupplier",
        asset_metadata={'asset_name': asset_name},
        processed_maps_details=initial_processed_details if initial_processed_details is not None else {},
        merged_maps_details={},
        files_to_process=list(initial_file_rules) if initial_file_rules else [],  # Stage modifies this list
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={'skip_asset': skip_asset_flag},
        incrementing_value=None,  # Added as per AssetProcessingContext definition
        sha5_value=None  # Added as per AssetProcessingContext definition
    )
    return context


# Unit tests will be added below
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.save_image')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.load_image')
|
||||||
|
def test_asset_skipped(mock_load_image, mock_save_image):
|
||||||
|
"""
|
||||||
|
Test that if 'skip_asset' is True, no processing occurs.
|
||||||
|
"""
|
||||||
|
stage = GlossToRoughConversionStage()
|
||||||
|
|
||||||
|
gloss_rule_id = uuid.uuid4()
|
||||||
|
gloss_fr = create_mock_file_rule_for_gloss_test(id_val=gloss_rule_id, map_type="GLOSS")
|
||||||
|
|
||||||
|
initial_details = {
|
||||||
|
gloss_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_gloss_map.png', 'status': 'Processed', 'map_type': 'GLOSS'}
|
||||||
|
}
|
||||||
|
context = create_gloss_conversion_mock_context(
|
||||||
|
initial_file_rules=[gloss_fr],
|
||||||
|
initial_processed_details=initial_details,
|
||||||
|
skip_asset_flag=True # Asset is skipped
|
||||||
|
)
|
||||||
|
|
||||||
|
# Keep a copy of files_to_process and processed_maps_details to compare
|
||||||
|
original_files_to_process = list(context.files_to_process)
|
||||||
|
original_processed_maps_details = context.processed_maps_details.copy()
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_load_image.assert_not_called()
|
||||||
|
mock_save_image.assert_not_called()
|
||||||
|
|
||||||
|
assert updated_context.files_to_process == original_files_to_process, "files_to_process should not change if asset is skipped"
|
||||||
|
assert updated_context.processed_maps_details == original_processed_maps_details, "processed_maps_details should not change if asset is skipped"
|
||||||
|
assert updated_context.status_flags['skip_asset'] is True
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.save_image')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.load_image')
|
||||||
|
def test_no_gloss_map_present(mock_load_image, mock_save_image):
|
||||||
|
"""
|
||||||
|
Test that if no GLOSS maps are in files_to_process, no conversion occurs.
|
||||||
|
"""
|
||||||
|
stage = GlossToRoughConversionStage()
|
||||||
|
|
||||||
|
normal_rule_id = uuid.uuid4()
|
||||||
|
normal_fr = create_mock_file_rule_for_gloss_test(id_val=normal_rule_id, map_type="NORMAL", filename_pattern="normal.png")
|
||||||
|
albedo_fr = create_mock_file_rule_for_gloss_test(map_type="ALBEDO", filename_pattern="albedo.jpg")
|
||||||
|
|
||||||
|
initial_details = {
|
||||||
|
normal_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_normal_map.png', 'status': 'Processed', 'map_type': 'NORMAL'}
|
||||||
|
}
|
||||||
|
context = create_gloss_conversion_mock_context(
|
||||||
|
initial_file_rules=[normal_fr, albedo_fr],
|
||||||
|
initial_processed_details=initial_details
|
||||||
|
)
|
||||||
|
|
||||||
|
original_files_to_process = list(context.files_to_process)
|
||||||
|
original_processed_maps_details = context.processed_maps_details.copy()
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_load_image.assert_not_called()
|
||||||
|
mock_save_image.assert_not_called()
|
||||||
|
|
||||||
|
assert updated_context.files_to_process == original_files_to_process, "files_to_process should not change if no GLOSS maps are present"
|
||||||
|
assert updated_context.processed_maps_details == original_processed_maps_details, "processed_maps_details should not change if no GLOSS maps are present"
|
||||||
|
|
||||||
|
# Ensure map types of existing rules are unchanged
|
||||||
|
for fr_in_list in updated_context.files_to_process:
|
||||||
|
if fr_in_list.id == normal_fr.id:
|
||||||
|
assert fr_in_list.map_type == "NORMAL"
|
||||||
|
elif fr_in_list.id == albedo_fr.id:
|
||||||
|
assert fr_in_list.map_type == "ALBEDO"
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.logging') # Mock logging
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.save_image')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.load_image')
|
||||||
|
def test_gloss_conversion_uint8_success(mock_load_image, mock_save_image, mock_logging):
|
||||||
|
"""
|
||||||
|
Test successful conversion of a GLOSS map (uint8 data) to ROUGHNESS.
|
||||||
|
"""
|
||||||
|
stage = GlossToRoughConversionStage()
|
||||||
|
|
||||||
|
gloss_rule_id = uuid.uuid4()
|
||||||
|
# Use a distinct filename for the gloss map to ensure correct path construction
|
||||||
|
gloss_fr = create_mock_file_rule_for_gloss_test(id_val=gloss_rule_id, map_type="GLOSS", filename_pattern="my_gloss_map.png")
|
||||||
|
other_fr_id = uuid.uuid4()
|
||||||
|
other_fr = create_mock_file_rule_for_gloss_test(id_val=other_fr_id, map_type="NORMAL", filename_pattern="normal_map.png")
|
||||||
|
|
||||||
|
initial_gloss_temp_path = Path("/fake/temp_engine_dir/processed_gloss_map.png")
|
||||||
|
initial_other_temp_path = Path("/fake/temp_engine_dir/processed_normal_map.png")
|
||||||
|
|
||||||
|
initial_details = {
|
||||||
|
gloss_fr.id.hex: {'temp_processed_file': str(initial_gloss_temp_path), 'status': 'Processed', 'map_type': 'GLOSS'},
|
||||||
|
other_fr.id.hex: {'temp_processed_file': str(initial_other_temp_path), 'status': 'Processed', 'map_type': 'NORMAL'}
|
||||||
|
}
|
||||||
|
context = create_gloss_conversion_mock_context(
|
||||||
|
initial_file_rules=[gloss_fr, other_fr],
|
||||||
|
initial_processed_details=initial_details
|
||||||
|
)
|
||||||
|
|
||||||
|
mock_loaded_gloss_data = np.array([10, 50, 250], dtype=np.uint8)
|
||||||
|
mock_load_image.return_value = mock_loaded_gloss_data
|
||||||
|
mock_save_image.return_value = True # Simulate successful save
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_load_image.assert_called_once_with(initial_gloss_temp_path)
|
||||||
|
|
||||||
|
# Check that save_image was called with inverted data and correct path
|
||||||
|
expected_inverted_data = 255 - mock_loaded_gloss_data
|
||||||
|
|
||||||
|
# call_args[0] is a tuple of positional args, call_args[1] is a dict of kwargs
|
||||||
|
saved_path_arg = mock_save_image.call_args[0][0]
|
||||||
|
saved_data_arg = mock_save_image.call_args[0][1]
|
||||||
|
|
||||||
|
assert np.array_equal(saved_data_arg, expected_inverted_data), "Image data passed to save_image is not correctly inverted."
|
||||||
|
assert "rough_from_gloss_" in saved_path_arg.name, "Saved file name should indicate conversion from gloss."
|
||||||
|
assert saved_path_arg.parent == Path("/fake/temp_engine_dir"), "Saved file should be in the engine temp directory."
|
||||||
|
# Ensure the new filename is based on the original gloss map's ID for uniqueness
|
||||||
|
assert gloss_fr.id.hex in saved_path_arg.name
|
||||||
|
|
||||||
|
# Check context.files_to_process
|
||||||
|
assert len(updated_context.files_to_process) == 2, "Number of file rules in context should remain the same."
|
||||||
|
converted_rule_found = False
|
||||||
|
other_rule_untouched = False
|
||||||
|
for fr_in_list in updated_context.files_to_process:
|
||||||
|
if fr_in_list.id == gloss_fr.id: # Should be the same rule object, modified
|
||||||
|
assert fr_in_list.map_type == "ROUGHNESS", "GLOSS map_type should be changed to ROUGHNESS."
|
||||||
|
# Check if filename_pattern was updated (optional, depends on stage logic)
|
||||||
|
# For now, assume it might not be, as the primary identifier is map_type and ID
|
||||||
|
converted_rule_found = True
|
||||||
|
elif fr_in_list.id == other_fr.id:
|
||||||
|
assert fr_in_list.map_type == "NORMAL", "Other map_type should remain unchanged."
|
||||||
|
other_rule_untouched = True
|
||||||
|
assert converted_rule_found, "The converted GLOSS rule was not found or not updated correctly in files_to_process."
|
||||||
|
assert other_rule_untouched, "The non-GLOSS rule was modified unexpectedly."
|
||||||
|
|
||||||
|
# Check context.processed_maps_details
|
||||||
|
assert len(updated_context.processed_maps_details) == 2, "Number of entries in processed_maps_details should remain the same."
|
||||||
|
|
||||||
|
gloss_detail = updated_context.processed_maps_details[gloss_fr.id.hex]
|
||||||
|
assert "rough_from_gloss_" in gloss_detail['temp_processed_file'], "temp_processed_file for gloss map not updated."
|
||||||
|
assert Path(gloss_detail['temp_processed_file']).name == saved_path_arg.name, "Path in details should match saved path."
|
||||||
|
assert gloss_detail['original_map_type_before_conversion'] == "GLOSS", "original_map_type_before_conversion not set correctly."
|
||||||
|
assert "Converted from GLOSS to ROUGHNESS" in gloss_detail['notes'], "Conversion notes not added or incorrect."
|
||||||
|
assert gloss_detail['map_type'] == "ROUGHNESS", "map_type in details not updated to ROUGHNESS."
|
||||||
|
|
||||||
|
|
||||||
|
other_detail = updated_context.processed_maps_details[other_fr.id.hex]
|
||||||
|
assert other_detail['temp_processed_file'] == str(initial_other_temp_path), "Other map's temp_processed_file should be unchanged."
|
||||||
|
assert other_detail['map_type'] == "NORMAL", "Other map's map_type should be unchanged."
|
||||||
|
assert 'original_map_type_before_conversion' not in other_detail, "Other map should not have conversion history."
|
||||||
|
assert 'notes' not in other_detail or "Converted from GLOSS" not in other_detail['notes'], "Other map should not have conversion notes."
|
||||||
|
|
||||||
|
mock_logging.info.assert_any_call(f"Successfully converted GLOSS map {gloss_fr.id.hex} to ROUGHNESS.")
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.logging') # Mock logging
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.save_image')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.load_image')
|
||||||
|
def test_gloss_conversion_float_success(mock_load_image, mock_save_image, mock_logging):
|
||||||
|
"""
|
||||||
|
Test successful conversion of a GLOSS map (float data) to ROUGHNESS.
|
||||||
|
"""
|
||||||
|
stage = GlossToRoughConversionStage()
|
||||||
|
|
||||||
|
gloss_rule_id = uuid.uuid4()
|
||||||
|
gloss_fr = create_mock_file_rule_for_gloss_test(id_val=gloss_rule_id, map_type="GLOSS", filename_pattern="gloss_float.hdr") # Example float format
|
||||||
|
|
||||||
|
initial_gloss_temp_path = Path("/fake/temp_engine_dir/processed_gloss_float.hdr")
|
||||||
|
initial_details = {
|
||||||
|
gloss_fr.id.hex: {'temp_processed_file': str(initial_gloss_temp_path), 'status': 'Processed', 'map_type': 'GLOSS'}
|
||||||
|
}
|
||||||
|
context = create_gloss_conversion_mock_context(
|
||||||
|
initial_file_rules=[gloss_fr],
|
||||||
|
initial_processed_details=initial_details
|
||||||
|
)
|
||||||
|
|
||||||
|
mock_loaded_gloss_data = np.array([0.1, 0.5, 0.9], dtype=np.float32)
|
||||||
|
mock_load_image.return_value = mock_loaded_gloss_data
|
||||||
|
mock_save_image.return_value = True # Simulate successful save
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_load_image.assert_called_once_with(initial_gloss_temp_path)
|
||||||
|
|
||||||
|
expected_inverted_data = 1.0 - mock_loaded_gloss_data
|
||||||
|
|
||||||
|
saved_path_arg = mock_save_image.call_args[0][0]
|
||||||
|
saved_data_arg = mock_save_image.call_args[0][1]
|
||||||
|
|
||||||
|
assert np.allclose(saved_data_arg, expected_inverted_data), "Image data (float) passed to save_image is not correctly inverted."
|
||||||
|
assert "rough_from_gloss_" in saved_path_arg.name, "Saved file name should indicate conversion from gloss."
|
||||||
|
assert saved_path_arg.parent == Path("/fake/temp_engine_dir"), "Saved file should be in the engine temp directory."
|
||||||
|
assert gloss_fr.id.hex in saved_path_arg.name
|
||||||
|
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
converted_rule = updated_context.files_to_process[0]
|
||||||
|
assert converted_rule.id == gloss_fr.id
|
||||||
|
assert converted_rule.map_type == "ROUGHNESS"
|
||||||
|
|
||||||
|
gloss_detail = updated_context.processed_maps_details[gloss_fr.id.hex]
|
||||||
|
assert "rough_from_gloss_" in gloss_detail['temp_processed_file']
|
||||||
|
assert Path(gloss_detail['temp_processed_file']).name == saved_path_arg.name
|
||||||
|
assert gloss_detail['original_map_type_before_conversion'] == "GLOSS"
|
||||||
|
assert "Converted from GLOSS to ROUGHNESS" in gloss_detail['notes']
|
||||||
|
assert gloss_detail['map_type'] == "ROUGHNESS"
|
||||||
|
|
||||||
|
mock_logging.info.assert_any_call(f"Successfully converted GLOSS map {gloss_fr.id.hex} to ROUGHNESS.")
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.logging')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.save_image')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.load_image')
|
||||||
|
def test_load_image_fails(mock_load_image, mock_save_image, mock_logging):
|
||||||
|
"""
|
||||||
|
Test behavior when ipu.load_image fails (returns None).
|
||||||
|
The original FileRule should be kept, and an error logged.
|
||||||
|
"""
|
||||||
|
stage = GlossToRoughConversionStage()
|
||||||
|
|
||||||
|
gloss_rule_id = uuid.uuid4()
|
||||||
|
gloss_fr = create_mock_file_rule_for_gloss_test(id_val=gloss_rule_id, map_type="GLOSS", filename_pattern="gloss_fails_load.png")
|
||||||
|
|
||||||
|
initial_gloss_temp_path = Path("/fake/temp_engine_dir/processed_gloss_fails_load.png")
|
||||||
|
initial_details = {
|
||||||
|
gloss_fr.id.hex: {'temp_processed_file': str(initial_gloss_temp_path), 'status': 'Processed', 'map_type': 'GLOSS'}
|
||||||
|
}
|
||||||
|
context = create_gloss_conversion_mock_context(
|
||||||
|
initial_file_rules=[gloss_fr],
|
||||||
|
initial_processed_details=initial_details
|
||||||
|
)
|
||||||
|
|
||||||
|
# Keep a copy for comparison
|
||||||
|
original_file_rule_map_type = gloss_fr.map_type
|
||||||
|
original_details_entry = context.processed_maps_details[gloss_fr.id.hex].copy()
|
||||||
|
|
||||||
|
mock_load_image.return_value = None # Simulate load failure
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_load_image.assert_called_once_with(initial_gloss_temp_path)
|
||||||
|
mock_save_image.assert_not_called() # Save should not be attempted
|
||||||
|
|
||||||
|
# Check context.files_to_process: rule should be unchanged
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
processed_rule = updated_context.files_to_process[0]
|
||||||
|
assert processed_rule.id == gloss_fr.id
|
||||||
|
assert processed_rule.map_type == original_file_rule_map_type, "FileRule map_type should not change if load fails."
|
||||||
|
assert processed_rule.map_type == "GLOSS" # Explicitly check it's still GLOSS
|
||||||
|
|
||||||
|
# Check context.processed_maps_details: details should be unchanged
|
||||||
|
current_details_entry = updated_context.processed_maps_details[gloss_fr.id.hex]
|
||||||
|
assert current_details_entry['temp_processed_file'] == str(initial_gloss_temp_path)
|
||||||
|
assert current_details_entry['map_type'] == "GLOSS"
|
||||||
|
assert 'original_map_type_before_conversion' not in current_details_entry
|
||||||
|
assert 'notes' not in current_details_entry or "Converted from GLOSS" not in current_details_entry['notes']
|
||||||
|
|
||||||
|
mock_logging.error.assert_called_once_with(
|
||||||
|
f"Failed to load image data for GLOSS map {gloss_fr.id.hex} from {initial_gloss_temp_path}. Skipping conversion for this map."
|
||||||
|
)
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.logging')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.save_image')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.load_image')
|
||||||
|
def test_save_image_fails(mock_load_image, mock_save_image, mock_logging):
|
||||||
|
"""
|
||||||
|
Test behavior when ipu.save_image fails (returns False).
|
||||||
|
The original FileRule should be kept, and an error logged.
|
||||||
|
"""
|
||||||
|
stage = GlossToRoughConversionStage()
|
||||||
|
|
||||||
|
gloss_rule_id = uuid.uuid4()
|
||||||
|
gloss_fr = create_mock_file_rule_for_gloss_test(id_val=gloss_rule_id, map_type="GLOSS", filename_pattern="gloss_fails_save.png")
|
||||||
|
|
||||||
|
initial_gloss_temp_path = Path("/fake/temp_engine_dir/processed_gloss_fails_save.png")
|
||||||
|
initial_details = {
|
||||||
|
gloss_fr.id.hex: {'temp_processed_file': str(initial_gloss_temp_path), 'status': 'Processed', 'map_type': 'GLOSS'}
|
||||||
|
}
|
||||||
|
context = create_gloss_conversion_mock_context(
|
||||||
|
initial_file_rules=[gloss_fr],
|
||||||
|
initial_processed_details=initial_details
|
||||||
|
)
|
||||||
|
|
||||||
|
original_file_rule_map_type = gloss_fr.map_type
|
||||||
|
original_details_entry = context.processed_maps_details[gloss_fr.id.hex].copy()
|
||||||
|
|
||||||
|
mock_loaded_gloss_data = np.array([10, 50, 250], dtype=np.uint8)
|
||||||
|
mock_load_image.return_value = mock_loaded_gloss_data
|
||||||
|
mock_save_image.return_value = False # Simulate save failure
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_load_image.assert_called_once_with(initial_gloss_temp_path)
|
||||||
|
|
||||||
|
# Check that save_image was called with correct data and path
|
||||||
|
expected_inverted_data = 255 - mock_loaded_gloss_data
|
||||||
|
# call_args[0] is a tuple of positional args
|
||||||
|
saved_path_arg = mock_save_image.call_args[0][0]
|
||||||
|
saved_data_arg = mock_save_image.call_args[0][1]
|
||||||
|
|
||||||
|
assert np.array_equal(saved_data_arg, expected_inverted_data), "Image data passed to save_image is not correctly inverted even on failure."
|
||||||
|
assert "rough_from_gloss_" in saved_path_arg.name, "Attempted save file name should indicate conversion from gloss."
|
||||||
|
assert saved_path_arg.parent == Path("/fake/temp_engine_dir"), "Attempted save file should be in the engine temp directory."
|
||||||
|
|
||||||
|
# Check context.files_to_process: rule should be unchanged
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
processed_rule = updated_context.files_to_process[0]
|
||||||
|
assert processed_rule.id == gloss_fr.id
|
||||||
|
assert processed_rule.map_type == original_file_rule_map_type, "FileRule map_type should not change if save fails."
|
||||||
|
assert processed_rule.map_type == "GLOSS"
|
||||||
|
|
||||||
|
# Check context.processed_maps_details: details should be unchanged
|
||||||
|
current_details_entry = updated_context.processed_maps_details[gloss_fr.id.hex]
|
||||||
|
assert current_details_entry['temp_processed_file'] == str(initial_gloss_temp_path)
|
||||||
|
assert current_details_entry['map_type'] == "GLOSS"
|
||||||
|
assert 'original_map_type_before_conversion' not in current_details_entry
|
||||||
|
assert 'notes' not in current_details_entry or "Converted from GLOSS" not in current_details_entry['notes']
|
||||||
|
|
||||||
|
mock_logging.error.assert_called_once_with(
|
||||||
|
f"Failed to save inverted GLOSS map {gloss_fr.id.hex} to {saved_path_arg}. Retaining original GLOSS map."
|
||||||
|
)
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.logging')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.save_image')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.load_image')
|
||||||
|
def test_gloss_map_in_files_to_process_but_not_in_details(mock_load_image, mock_save_image, mock_logging):
|
||||||
|
"""
|
||||||
|
Test behavior when a GLOSS FileRule is in files_to_process but its details
|
||||||
|
are missing from processed_maps_details.
|
||||||
|
The stage should log an error and skip this FileRule.
|
||||||
|
"""
|
||||||
|
stage = GlossToRoughConversionStage()
|
||||||
|
|
||||||
|
gloss_rule_id = uuid.uuid4()
|
||||||
|
# This FileRule is in files_to_process
|
||||||
|
gloss_fr_in_list = create_mock_file_rule_for_gloss_test(id_val=gloss_rule_id, map_type="GLOSS", filename_pattern="orphan_gloss.png")
|
||||||
|
|
||||||
|
# processed_maps_details is empty or does not contain gloss_fr_in_list.id.hex
|
||||||
|
initial_details = {}
|
||||||
|
|
||||||
|
context = create_gloss_conversion_mock_context(
|
||||||
|
initial_file_rules=[gloss_fr_in_list],
|
||||||
|
initial_processed_details=initial_details
|
||||||
|
)
|
||||||
|
|
||||||
|
original_files_to_process = list(context.files_to_process)
|
||||||
|
original_processed_maps_details = context.processed_maps_details.copy()
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_load_image.assert_not_called() # Load should not be attempted if details are missing
|
||||||
|
mock_save_image.assert_not_called() # Save should not be attempted
|
||||||
|
|
||||||
|
# Check context.files_to_process: rule should be unchanged
|
||||||
|
assert len(updated_context.files_to_process) == 1
|
||||||
|
processed_rule = updated_context.files_to_process[0]
|
||||||
|
assert processed_rule.id == gloss_fr_in_list.id
|
||||||
|
assert processed_rule.map_type == "GLOSS", "FileRule map_type should not change if its details are missing."
|
||||||
|
|
||||||
|
# Check context.processed_maps_details: should remain unchanged
|
||||||
|
assert updated_context.processed_maps_details == original_processed_maps_details, "processed_maps_details should not change."
|
||||||
|
|
||||||
|
mock_logging.error.assert_called_once_with(
|
||||||
|
f"GLOSS map {gloss_fr_in_list.id.hex} found in files_to_process but missing from processed_maps_details. Skipping conversion."
|
||||||
|
)
|
||||||
|
|
||||||
|
# Test for Case 8.2 (GLOSS map ID in processed_maps_details but no corresponding FileRule in files_to_process)
|
||||||
|
# This case is implicitly handled because the stage iterates files_to_process.
|
||||||
|
# If a FileRule isn't in files_to_process, its corresponding entry in processed_maps_details (if any) won't be acted upon.
|
||||||
|
# We can add a simple test to ensure no errors occur and non-relevant details are untouched.
|
||||||
|
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.logging')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.save_image')
|
||||||
|
@mock.patch('processing.pipeline.stages.gloss_to_rough_conversion.ipu.load_image')
|
||||||
|
def test_gloss_detail_exists_but_not_in_files_to_process(mock_load_image, mock_save_image, mock_logging):
|
||||||
|
"""
|
||||||
|
Test that if a GLOSS map detail exists in processed_maps_details but
|
||||||
|
no corresponding FileRule is in files_to_process, it's simply ignored
|
||||||
|
without error, and other valid conversions proceed.
|
||||||
|
"""
|
||||||
|
stage = GlossToRoughConversionStage()
|
||||||
|
|
||||||
|
# This rule will be processed
|
||||||
|
convert_rule_id = uuid.uuid4()
|
||||||
|
convert_fr = create_mock_file_rule_for_gloss_test(id_val=convert_rule_id, map_type="GLOSS", filename_pattern="convert_me.png")
|
||||||
|
convert_initial_temp_path = Path("/fake/temp_engine_dir/processed_convert_me.png")
|
||||||
|
|
||||||
|
# This rule's details exist, but the rule itself is not in files_to_process
|
||||||
|
orphan_detail_id = uuid.uuid4()
|
||||||
|
|
||||||
|
initial_details = {
|
||||||
|
convert_fr.id.hex: {'temp_processed_file': str(convert_initial_temp_path), 'status': 'Processed', 'map_type': 'GLOSS'},
|
||||||
|
orphan_detail_id.hex: {'temp_processed_file': '/fake/temp_engine_dir/orphan.png', 'status': 'Processed', 'map_type': 'GLOSS', 'notes': 'This is an orphan'}
|
||||||
|
}
|
||||||
|
|
||||||
|
context = create_gloss_conversion_mock_context(
|
||||||
|
initial_file_rules=[convert_fr], # Only convert_fr is in files_to_process
|
||||||
|
initial_processed_details=initial_details
|
||||||
|
)
|
||||||
|
|
||||||
|
mock_loaded_data = np.array([100], dtype=np.uint8)
|
||||||
|
mock_load_image.return_value = mock_loaded_data
|
||||||
|
mock_save_image.return_value = True
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
# Assert that load/save were called only for the rule in files_to_process
|
||||||
|
mock_load_image.assert_called_once_with(convert_initial_temp_path)
|
||||||
|
mock_save_image.assert_called_once() # Check it was called, details checked in other tests
|
||||||
|
|
||||||
|
# Check that the orphan detail in processed_maps_details is untouched
|
||||||
|
assert orphan_detail_id.hex in updated_context.processed_maps_details
|
||||||
|
orphan_entry = updated_context.processed_maps_details[orphan_detail_id.hex]
|
||||||
|
assert orphan_entry['temp_processed_file'] == '/fake/temp_engine_dir/orphan.png'
|
||||||
|
assert orphan_entry['map_type'] == 'GLOSS'
|
||||||
|
assert orphan_entry['notes'] == 'This is an orphan'
|
||||||
|
assert 'original_map_type_before_conversion' not in orphan_entry
|
||||||
|
|
||||||
|
# Check that the processed rule was indeed converted
|
||||||
|
assert convert_fr.id.hex in updated_context.processed_maps_details
|
||||||
|
converted_entry = updated_context.processed_maps_details[convert_fr.id.hex]
|
||||||
|
assert converted_entry['map_type'] == 'ROUGHNESS'
|
||||||
|
assert "rough_from_gloss_" in converted_entry['temp_processed_file']
|
||||||
|
|
||||||
|
# No errors should have been logged regarding the orphan detail
|
||||||
|
for call_args in mock_logging.error.call_args_list:
|
||||||
|
assert str(orphan_detail_id.hex) not in call_args[0][0], "Error logged for orphan detail"
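The last new file (555 lines) covers IndividualMapProcessingStage. Its success-path test patches pathlib.Path.glob at class level so the stage's source-file lookup yields a known path; the comments inside that test discuss why this works. A minimal standalone illustration of the same patching pattern (paths and glob pattern are made up for the example):

from pathlib import Path
from unittest import mock

def test_class_level_glob_patch():
    found = Path("/fake/asset_source/albedo_source.png")
    # Patching glob on the class replaces it with a plain MagicMock, so any Path
    # instance created inside the code under test returns the canned result and
    # the mock records only the pattern argument (no bound self).
    with mock.patch.object(Path, "glob", return_value=[found]) as glob_mock:
        results = list(Path("/fake/asset_source").glob("albedo_*.png"))
    assert results == [found]
    glob_mock.assert_called_once_with("albedo_*.png")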
@@ -0,0 +1,555 @@
import pytest
from unittest import mock
from pathlib import Path
import uuid
import numpy as np
from typing import Optional  # Added for type hinting in helper functions

from processing.pipeline.stages.individual_map_processing import IndividualMapProcessingStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule, FileRule, TransformSettings  # Key models; TransformSettings assumed to live here, needed for spec= below
from configuration import Configuration, GeneralSettings
# cv2 might be imported by the stage for interpolation constants, ensure it's mockable if so.
# For now, assume ipu handles interpolation details.


def create_mock_transform_settings(
    target_width=0, target_height=0, resize_mode="FIT",
    ensure_pot=False, allow_upscale=True, target_color_profile="RGB"  # Add other fields as needed
) -> mock.MagicMock:
    ts = mock.MagicMock(spec=TransformSettings)
    ts.target_width = target_width
    ts.target_height = target_height
    ts.resize_mode = resize_mode
    ts.ensure_pot = ensure_pot
    ts.allow_upscale = allow_upscale
    ts.target_color_profile = target_color_profile
    # ts.resize_filter = "AREA"  # if your stage uses this
    return ts


def create_mock_file_rule_for_individual_processing(
    id_val: Optional[uuid.UUID] = None,
    map_type: str = "ALBEDO",
    filename_pattern: str = "albedo_*.png",  # Pattern for glob
    item_type: str = "MAP_COL",
    active: bool = True,
    transform_settings: Optional[mock.MagicMock] = None
) -> mock.MagicMock:
    mock_fr = mock.MagicMock(spec=FileRule)
    mock_fr.id = id_val if id_val else uuid.uuid4()
    mock_fr.map_type = map_type
    mock_fr.filename_pattern = filename_pattern
    mock_fr.item_type = item_type
    mock_fr.active = active
    mock_fr.transform_settings = transform_settings if transform_settings else create_mock_transform_settings()
    return mock_fr


def create_individual_map_proc_mock_context(
    initial_file_rules: Optional[list] = None,
    asset_source_path_str: str = "/fake/asset_source",
    skip_asset_flag: bool = False,
    asset_name: str = "IndividualMapAsset"
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_asset_rule.source_path = Path(asset_source_path_str)
    # file_rules on AssetRule are not used directly by the stage; context.files_to_process is

    mock_source_rule = mock.MagicMock(spec=SourceRule)
    mock_config = mock.MagicMock(spec=Configuration)
    # mock_config.general_settings = mock.MagicMock(spec=GeneralSettings)  # If needed

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp_engine_dir"),
        output_base_path=Path("/fake/output"),
        effective_supplier="ValidSupplier",
        asset_metadata={'asset_name': asset_name},
        processed_maps_details={},  # Stage populates this
        merged_maps_details={},
        files_to_process=list(initial_file_rules) if initial_file_rules else [],
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={'skip_asset': skip_asset_flag},
        incrementing_value=None,
        sha5_value=None  # NOTE: confirm whether AssetProcessingContext expects sha5_value or another parameter name
    )
    return context


# Placeholder for tests to be added next
|
||||||
|
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu')
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
def test_asset_skipped_if_flag_is_true(mock_log_info, mock_ipu):
|
||||||
|
stage = IndividualMapProcessingStage()
|
||||||
|
context = create_individual_map_proc_mock_context(skip_asset_flag=True)
|
||||||
|
|
||||||
|
# Add a dummy file rule to ensure it's not processed
|
||||||
|
file_rule = create_mock_file_rule_for_individual_processing()
|
||||||
|
context.files_to_process = [file_rule]
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_ipu.load_image.assert_not_called()
|
||||||
|
mock_ipu.save_image.assert_not_called()
|
||||||
|
assert not updated_context.processed_maps_details # No details should be added
|
||||||
|
# Check for a log message indicating skip, if applicable (depends on stage's logging)
|
||||||
|
# mock_log_info.assert_any_call("Skipping asset IndividualMapAsset due to status_flags['skip_asset'] = True") # Example
|
||||||
|
|
||||||
|
|
||||||
|
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu')
|
||||||
|
@mock.patch('logging.info')
|
||||||
|
def test_no_processing_if_no_map_col_rules(mock_log_info, mock_ipu):
|
||||||
|
stage = IndividualMapProcessingStage()
|
||||||
|
|
||||||
|
# Create a file rule that is NOT of item_type MAP_COL
|
||||||
|
non_map_col_rule = create_mock_file_rule_for_individual_processing(item_type="METADATA")
|
||||||
|
context = create_individual_map_proc_mock_context(initial_file_rules=[non_map_col_rule])
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_ipu.load_image.assert_not_called()
|
||||||
|
mock_ipu.save_image.assert_not_called()
|
||||||
|
assert not updated_context.processed_maps_details
|
||||||
|
# mock_log_info.assert_any_call("No FileRules of item_type 'MAP_COL' to process for asset IndividualMapAsset.") # Example
|
||||||
|
|
||||||
|
|
||||||
|
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.save_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.resize_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.calculate_target_dimensions')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.load_image')
@mock.patch('pathlib.Path.glob')  # Mocks Path.glob, which the stage's _find_source_file calls
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_individual_map_processing_success_no_resize(
    mock_log_error, mock_log_info, mock_path_glob, mock_load_image,
    mock_calc_dims, mock_resize_image, mock_save_image
):
    stage = IndividualMapProcessingStage()

    source_file_name = "albedo_source.png"
    mock_asset_source_path = Path("/fake/asset_source")
    mock_found_source_path = mock_asset_source_path / source_file_name

    # The stage's _find_source_file builds Path(context.asset_rule.source_path)
    # and calls glob on it. Patching pathlib.Path.glob at class level means any
    # Path instance created inside the stage uses this mock; a more targeted
    # alternative would be mock.patch.object(Path, 'glob', ...).
    mock_path_glob.return_value = [mock_found_source_path]  # Glob finds one file

    ts = create_mock_transform_settings(target_width=100, target_height=100)
    file_rule = create_mock_file_rule_for_individual_processing(
        map_type="ALBEDO", filename_pattern="albedo_*.png", transform_settings=ts
    )
    context = create_individual_map_proc_mock_context(
        initial_file_rules=[file_rule],
        asset_source_path_str=str(mock_asset_source_path)  # Ensure the context uses this path
    )

    mock_img_data = np.zeros((100, 100, 3), dtype=np.uint8)  # Original dimensions
    mock_load_image.return_value = mock_img_data
    mock_calc_dims.return_value = (100, 100)  # No resize needed
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    # The stage calls Path(context.asset_rule.source_path).glob(pattern); with the
    # class-level patch above, that call is recorded on mock_path_glob.
    mock_path_glob.assert_called_once_with(file_rule.filename_pattern)

    mock_load_image.assert_called_once_with(mock_found_source_path)
    # The stage calls ipu.calculate_target_dimensions(original_dims, target_width,
    # target_height, resize_mode, ensure_pot, allow_upscale).
    mock_calc_dims.assert_called_once_with(
        (100, 100), ts.target_width, ts.target_height, ts.resize_mode, ts.ensure_pot, ts.allow_upscale
    )
    mock_resize_image.assert_not_called()  # Crucial for this test case
    mock_save_image.assert_called_once()

    # Check save path and data
    saved_image_arg, saved_path_arg = mock_save_image.call_args[0]
    assert np.array_equal(saved_image_arg, mock_img_data)  # Correct image data is passed to save
    assert "processed_ALBEDO_" in saved_path_arg.name  # Based on map_type
    assert file_rule.id.hex in saved_path_arg.name  # Unique name including the FileRule ID
    assert saved_path_arg.parent == context.engine_temp_dir

    assert file_rule.id.hex in updated_context.processed_maps_details
    details = updated_context.processed_maps_details[file_rule.id.hex]
    assert details['status'] == 'Processed'
    assert details['source_file'] == str(mock_found_source_path)
    assert Path(details['temp_processed_file']) == saved_path_arg
    assert details['original_dimensions'] == (100, 100)
    assert details['processed_dimensions'] == (100, 100)
    assert details['map_type'] == file_rule.map_type
    mock_log_error.assert_not_called()
    mock_log_info.assert_any_call(f"Successfully processed map {file_rule.map_type} (ID: {file_rule.id.hex}) for asset {context.asset_rule.name}. Output: {saved_path_arg}")

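The comments in the test above weigh a class-level patch of `pathlib.Path.glob` against an instance-scoped one. A minimal sketch of the instance-scoped alternative, assuming the stage builds `Path(context.asset_rule.source_path)` internally (an assumption about the stage, which is not shown in this diff):

```python
from pathlib import Path
from unittest import mock

def test_glob_patched_per_block():
    found = Path("/fake/asset_source") / "albedo_source.png"
    # Patch glob on the Path class only for this block; every Path instance
    # created inside the with-statement sees the same mock.
    with mock.patch.object(Path, "glob", return_value=[found]) as glob_mock:
        results = list(Path("/fake/asset_source").glob("albedo_*.png"))
    assert results == [found]
    glob_mock.assert_called_once_with("albedo_*.png")
```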
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.save_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.resize_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.calculate_target_dimensions')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.load_image')
@mock.patch('pathlib.Path.glob')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_source_file_not_found(
    mock_log_error, mock_log_info, mock_path_glob, mock_load_image,
    mock_calc_dims, mock_resize_image, mock_save_image
):
    stage = IndividualMapProcessingStage()
    mock_asset_source_path = Path("/fake/asset_source")

    mock_path_glob.return_value = []  # Glob finds no files

    file_rule = create_mock_file_rule_for_individual_processing(filename_pattern="nonexistent_*.png")
    context = create_individual_map_proc_mock_context(
        initial_file_rules=[file_rule],
        asset_source_path_str=str(mock_asset_source_path)
    )

    updated_context = stage.execute(context)

    mock_path_glob.assert_called_once_with(file_rule.filename_pattern)
    mock_load_image.assert_not_called()
    mock_calc_dims.assert_not_called()
    mock_resize_image.assert_not_called()
    mock_save_image.assert_not_called()

    assert file_rule.id.hex in updated_context.processed_maps_details
    details = updated_context.processed_maps_details[file_rule.id.hex]
    assert details['status'] == 'Source Not Found'
    assert details['source_file'] is None
    assert details['temp_processed_file'] is None
    assert details['error_message'] is not None  # An error message is present
    mock_log_error.assert_called_once()
    # Example: mock_log_error.assert_called_with(f"Could not find source file for rule {file_rule.id} (pattern: {file_rule.filename_pattern}) in {context.asset_rule.source_path}")

@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.save_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.resize_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.calculate_target_dimensions')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.load_image')
@mock.patch('pathlib.Path.glob')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_load_image_fails(
    mock_log_error, mock_log_info, mock_path_glob, mock_load_image,
    mock_calc_dims, mock_resize_image, mock_save_image
):
    stage = IndividualMapProcessingStage()
    source_file_name = "albedo_corrupt.png"
    mock_asset_source_path = Path("/fake/asset_source")
    mock_found_source_path = mock_asset_source_path / source_file_name
    mock_path_glob.return_value = [mock_found_source_path]

    mock_load_image.return_value = None  # Simulate load failure

    file_rule = create_mock_file_rule_for_individual_processing(filename_pattern="albedo_*.png")
    context = create_individual_map_proc_mock_context(
        initial_file_rules=[file_rule],
        asset_source_path_str=str(mock_asset_source_path)
    )

    updated_context = stage.execute(context)

    mock_path_glob.assert_called_once_with(file_rule.filename_pattern)
    mock_load_image.assert_called_once_with(mock_found_source_path)
    mock_calc_dims.assert_not_called()
    mock_resize_image.assert_not_called()
    mock_save_image.assert_not_called()

    assert file_rule.id.hex in updated_context.processed_maps_details
    details = updated_context.processed_maps_details[file_rule.id.hex]
    assert details['status'] == 'Load Failed'
    assert details['source_file'] == str(mock_found_source_path)
    assert details['temp_processed_file'] is None
    assert details['error_message'] is not None
    mock_log_error.assert_called_once()
    # Example: mock_log_error.assert_called_with(f"Failed to load image {mock_found_source_path} for rule {file_rule.id}")

@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.save_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.resize_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.calculate_target_dimensions')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.load_image')
@mock.patch('pathlib.Path.glob')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_resize_occurs_when_dimensions_differ(
    mock_log_error, mock_log_info, mock_path_glob, mock_load_image,
    mock_calc_dims, mock_resize_image, mock_save_image
):
    stage = IndividualMapProcessingStage()
    source_file_name = "albedo_resize.png"
    mock_asset_source_path = Path("/fake/asset_source")
    mock_found_source_path = mock_asset_source_path / source_file_name
    mock_path_glob.return_value = [mock_found_source_path]

    original_dims = (100, 100)
    target_dims = (50, 50)  # Different dimensions
    mock_img_data = np.zeros((*original_dims, 3), dtype=np.uint8)
    mock_resized_img_data = np.zeros((*target_dims, 3), dtype=np.uint8)

    mock_load_image.return_value = mock_img_data
    ts = create_mock_transform_settings(target_width=target_dims[0], target_height=target_dims[1])
    # The stage calls ipu.resize_image(loaded_image, target_dims, file_rule.transform_settings.resize_filter),
    # so the filter attribute must be configured before the stage runs.
    ts.resize_filter = "LANCZOS4"  # Example filter
    file_rule = create_mock_file_rule_for_individual_processing(transform_settings=ts)
    context = create_individual_map_proc_mock_context(
        initial_file_rules=[file_rule],
        asset_source_path_str=str(mock_asset_source_path)
    )

    mock_calc_dims.return_value = target_dims  # Simulate calc_dims returning new dimensions
    mock_resize_image.return_value = mock_resized_img_data  # Simulate resize returning new image data
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(mock_found_source_path)
    mock_calc_dims.assert_called_once_with(
        original_dims, ts.target_width, ts.target_height, ts.resize_mode, ts.ensure_pot, ts.allow_upscale
    )
    mock_resize_image.assert_called_once_with(mock_img_data, target_dims, ts.resize_filter)

    saved_image_arg, saved_path_arg = mock_save_image.call_args[0]
    assert np.array_equal(saved_image_arg, mock_resized_img_data)  # Resized data is saved
    assert "processed_ALBEDO_" in saved_path_arg.name
    assert saved_path_arg.parent == context.engine_temp_dir

    assert file_rule.id.hex in updated_context.processed_maps_details
    details = updated_context.processed_maps_details[file_rule.id.hex]
    assert details['status'] == 'Processed'
    assert details['original_dimensions'] == original_dims
    assert details['processed_dimensions'] == target_dims
    mock_log_error.assert_not_called()

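The resize test above configures `ts.resize_filter` before running the stage; with a `MagicMock`, any attribute the code under test reads must be set up front, otherwise it is an auto-created child mock rather than a usable value. A small illustration of that behaviour (the `stage_like` helper is hypothetical, purely for demonstration):

```python
from unittest import mock

ts = mock.MagicMock()
ts.resize_filter = "LANCZOS4"  # configure before the code under test reads it

def stage_like(settings):
    # hypothetical stand-in for the stage: it reads the attribute at call time
    return settings.resize_filter

assert stage_like(ts) == "LANCZOS4"
# An attribute that was never assigned is an auto-created child mock instead:
assert isinstance(ts.some_unset_attribute, mock.MagicMock)
```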
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.save_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.resize_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.calculate_target_dimensions')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.load_image')
@mock.patch('pathlib.Path.glob')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_save_image_fails(
    mock_log_error, mock_log_info, mock_path_glob, mock_load_image,
    mock_calc_dims, mock_resize_image, mock_save_image
):
    stage = IndividualMapProcessingStage()
    source_file_name = "albedo_save_fail.png"
    mock_asset_source_path = Path("/fake/asset_source")
    mock_found_source_path = mock_asset_source_path / source_file_name
    mock_path_glob.return_value = [mock_found_source_path]

    mock_img_data = np.zeros((100, 100, 3), dtype=np.uint8)
    mock_load_image.return_value = mock_img_data
    mock_calc_dims.return_value = (100, 100)  # No resize
    mock_save_image.return_value = False  # Simulate save failure

    ts = create_mock_transform_settings()
    file_rule = create_mock_file_rule_for_individual_processing(transform_settings=ts)
    context = create_individual_map_proc_mock_context(
        initial_file_rules=[file_rule],
        asset_source_path_str=str(mock_asset_source_path)
    )

    updated_context = stage.execute(context)

    mock_save_image.assert_called_once()  # Attempt to save should still happen

    assert file_rule.id.hex in updated_context.processed_maps_details
    details = updated_context.processed_maps_details[file_rule.id.hex]
    assert details['status'] == 'Save Failed'
    assert details['source_file'] == str(mock_found_source_path)
    assert details['temp_processed_file'] is not None  # Path was generated
    assert details['error_message'] is not None
    mock_log_error.assert_called_once()
    # Example: mock_log_error.assert_called_with(f"Failed to save processed image for rule {file_rule.id} to {details['temp_processed_file']}")

@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.save_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.resize_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.calculate_target_dimensions')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.load_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.convert_bgr_to_rgb')
@mock.patch('pathlib.Path.glob')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_color_conversion_bgr_to_rgb(
    mock_log_error, mock_log_info, mock_path_glob, mock_convert_bgr, mock_load_image,
    mock_calc_dims, mock_resize_image, mock_save_image
):
    stage = IndividualMapProcessingStage()
    source_file_name = "albedo_bgr.png"
    mock_asset_source_path = Path("/fake/asset_source")
    mock_found_source_path = mock_asset_source_path / source_file_name
    mock_path_glob.return_value = [mock_found_source_path]

    mock_bgr_img_data = np.zeros((100, 100, 3), dtype=np.uint8)  # Loaded as BGR
    mock_rgb_img_data = np.zeros((100, 100, 3), dtype=np.uint8)  # After conversion

    mock_load_image.return_value = mock_bgr_img_data  # Image is loaded (assume BGR by default from cv2)
    mock_convert_bgr.return_value = mock_rgb_img_data  # Mock the conversion
    mock_calc_dims.return_value = (100, 100)  # No resize
    mock_save_image.return_value = True

    # Transform settings request RGB, and the stage assumes the loaded image may be BGR
    ts = create_mock_transform_settings(target_color_profile="RGB")
    file_rule = create_mock_file_rule_for_individual_processing(transform_settings=ts)
    context = create_individual_map_proc_mock_context(
        initial_file_rules=[file_rule],
        asset_source_path_str=str(mock_asset_source_path)
    )
    # The stage code is:
    # if file_rule.transform_settings.target_color_profile == "RGB" and loaded_image.shape[2] == 3:
    #     logger.info(f"Attempting to convert image from BGR to RGB for {file_rule_id_hex}")
    #     processed_image_data = ipu.convert_bgr_to_rgb(processed_image_data)

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(mock_found_source_path)
    mock_convert_bgr.assert_called_once_with(mock_bgr_img_data)
    mock_resize_image.assert_not_called()

    saved_image_arg, _ = mock_save_image.call_args[0]
    assert np.array_equal(saved_image_arg, mock_rgb_img_data)  # Ensure RGB data is saved
    mock_log_error.assert_not_called()
    mock_log_info.assert_any_call(f"Attempting to convert image from BGR to RGB for {file_rule.id.hex}")

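The colour-conversion test only asserts that `ipu.convert_bgr_to_rgb` is called; the helper itself is not part of this diff. A plausible sketch of such a conversion as a pure numpy channel flip — an assumption for illustration, not the project's actual implementation:

```python
import numpy as np

def convert_bgr_to_rgb(image: np.ndarray) -> np.ndarray:
    """Reverse the channel axis of a 3-channel image (BGR <-> RGB)."""
    if image.ndim != 3 or image.shape[2] != 3:
        raise ValueError("expected an H x W x 3 image")
    return image[:, :, ::-1].copy()

bgr = np.dstack([
    np.full((2, 2), 10, np.uint8),   # blue plane
    np.full((2, 2), 20, np.uint8),   # green plane
    np.full((2, 2), 30, np.uint8),   # red plane
])
rgb = convert_bgr_to_rgb(bgr)
assert rgb[0, 0].tolist() == [30, 20, 10]
```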
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.save_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.resize_image')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.calculate_target_dimensions')
@mock.patch('processing.pipeline.stages.individual_map_processing.ipu.load_image')
@mock.patch('pathlib.Path.glob')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_multiple_map_col_rules_processed(
    mock_log_error, mock_log_info, mock_path_glob, mock_load_image,
    mock_calc_dims, mock_resize_image, mock_save_image
):
    stage = IndividualMapProcessingStage()
    mock_asset_source_path = Path("/fake/asset_source")

    # Rule 1: Albedo
    ts1 = create_mock_transform_settings(target_width=100, target_height=100)
    file_rule1_id = uuid.uuid4()
    file_rule1 = create_mock_file_rule_for_individual_processing(
        id_val=file_rule1_id, map_type="ALBEDO", filename_pattern="albedo_*.png", transform_settings=ts1
    )
    source_file1 = mock_asset_source_path / "albedo_map.png"
    img_data1 = np.zeros((100, 100, 3), dtype=np.uint8)

    # Rule 2: Roughness
    ts2 = create_mock_transform_settings(target_width=50, target_height=50)  # Resize
    ts2.resize_filter = "AREA"
    file_rule2_id = uuid.uuid4()
    file_rule2 = create_mock_file_rule_for_individual_processing(
        id_val=file_rule2_id, map_type="ROUGHNESS", filename_pattern="rough_*.png", transform_settings=ts2
    )
    source_file2 = mock_asset_source_path / "rough_map.png"
    img_data2_orig = np.zeros((200, 200, 1), dtype=np.uint8)  # Original, needs resize
    img_data2_resized = np.zeros((50, 50, 1), dtype=np.uint8)  # Resized

    context = create_individual_map_proc_mock_context(
        initial_file_rules=[file_rule1, file_rule2],
        asset_source_path_str=str(mock_asset_source_path)
    )

    # Mock behaviours for Path.glob, load_image, calc_dims, resize and save.
    # Path.glob will be called twice, once per rule.
    mock_path_glob.side_effect = [
        [source_file1],  # For albedo_*.png
        [source_file2]   # For rough_*.png
    ]
    mock_load_image.side_effect = [img_data1, img_data2_orig]
    mock_calc_dims.side_effect = [
        (100, 100),  # For rule1 (no change)
        (50, 50)     # For rule2 (change)
    ]
    mock_resize_image.return_value = img_data2_resized  # Only called for rule2
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    # Assertions for Rule 1 (Albedo)
    assert mock_path_glob.call_args_list[0][0][0] == file_rule1.filename_pattern
    assert mock_load_image.call_args_list[0][0][0] == source_file1
    assert mock_calc_dims.call_args_list[0][0] == ((100, 100), ts1.target_width, ts1.target_height, ts1.resize_mode, ts1.ensure_pot, ts1.allow_upscale)

    # Assertions for Rule 2 (Roughness)
    assert mock_path_glob.call_args_list[1][0][0] == file_rule2.filename_pattern
    assert mock_load_image.call_args_list[1][0][0] == source_file2
    assert mock_calc_dims.call_args_list[1][0] == ((200, 200), ts2.target_width, ts2.target_height, ts2.resize_mode, ts2.ensure_pot, ts2.allow_upscale)
    mock_resize_image.assert_called_once_with(img_data2_orig, (50, 50), ts2.resize_filter)

    assert mock_save_image.call_count == 2
    # Check saved image for rule 1
    saved_img1_arg, saved_path1_arg = mock_save_image.call_args_list[0][0]
    assert np.array_equal(saved_img1_arg, img_data1)
    assert "processed_ALBEDO_" in saved_path1_arg.name
    assert file_rule1_id.hex in saved_path1_arg.name

    # Check saved image for rule 2
    saved_img2_arg, saved_path2_arg = mock_save_image.call_args_list[1][0]
    assert np.array_equal(saved_img2_arg, img_data2_resized)
    assert "processed_ROUGHNESS_" in saved_path2_arg.name
    assert file_rule2_id.hex in saved_path2_arg.name

    # Check context details
    assert file_rule1_id.hex in updated_context.processed_maps_details
    details1 = updated_context.processed_maps_details[file_rule1_id.hex]
    assert details1['status'] == 'Processed'
    assert details1['original_dimensions'] == (100, 100)
    assert details1['processed_dimensions'] == (100, 100)

    assert file_rule2_id.hex in updated_context.processed_maps_details
    details2 = updated_context.processed_maps_details[file_rule2_id.hex]
    assert details2['status'] == 'Processed'
    assert details2['original_dimensions'] == (200, 200)  # Original dims of img_data2_orig
    assert details2['processed_dimensions'] == (50, 50)

    mock_log_error.assert_not_called()

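The multi-rule test relies on `side_effect` being a sequence, so each call to the mock returns the next prepared value in order. A minimal sketch of that behaviour:

```python
from unittest import mock

loader = mock.MagicMock()
loader.side_effect = ["first.png", "second.png"]  # one return value per call, in order

assert loader() == "first.png"
assert loader() == "second.png"
# A third call would raise StopIteration because the prepared sequence is exhausted.
```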
538
tests/processing/pipeline/stages/test_map_merging.py
Normal file
@@ -0,0 +1,538 @@
import pytest
from unittest import mock
from pathlib import Path
import uuid
import numpy as np
from typing import Optional  # Optional for type hinting

from processing.pipeline.stages.map_merging import MapMergingStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule, FileRule, MergeInputChannel, MergeSettings  # MergeInputChannel/MergeSettings are assumed to live in rule_structure; adjust if they are defined elsewhere
from configuration import Configuration

# Mock Helper Functions
def create_mock_merge_input_channel(
    file_rule_id: uuid.UUID, source_channel: int = 0, target_channel: int = 0, invert: bool = False
) -> mock.MagicMock:
    mic = mock.MagicMock(spec=MergeInputChannel)
    mic.file_rule_id = file_rule_id
    mic.source_channel = source_channel
    mic.target_channel = target_channel
    mic.invert_source_channel = invert
    mic.default_value_if_missing = 0  # Or some other default
    return mic


def create_mock_merge_settings(
    input_maps: Optional[list] = None,  # List of mock MergeInputChannel
    output_channels: int = 3
) -> mock.MagicMock:
    ms = mock.MagicMock(spec=MergeSettings)
    ms.input_maps = input_maps if input_maps is not None else []
    ms.output_channels = output_channels
    return ms


def create_mock_file_rule_for_merging(
    id_val: Optional[uuid.UUID] = None,
    map_type: str = "ORM",  # Output map type
    item_type: str = "MAP_MERGE",
    merge_settings: Optional[mock.MagicMock] = None
) -> mock.MagicMock:
    mock_fr = mock.MagicMock(spec=FileRule)
    mock_fr.id = id_val if id_val else uuid.uuid4()
    mock_fr.map_type = map_type
    mock_fr.filename_pattern = f"{map_type.lower()}_merged.png"  # Placeholder
    mock_fr.item_type = item_type
    mock_fr.active = True
    mock_fr.merge_settings = merge_settings if merge_settings else create_mock_merge_settings()
    return mock_fr


def create_map_merging_mock_context(
    initial_file_rules: Optional[list] = None,  # Will contain the MAP_MERGE rule
    initial_processed_details: Optional[dict] = None,  # Pre-processed inputs for merge
    skip_asset_flag: bool = False,
    asset_name: str = "MergeAsset"
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_source_rule = mock.MagicMock(spec=SourceRule)
    mock_config = mock.MagicMock(spec=Configuration)

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp_engine_dir"),
        output_base_path=Path("/fake/output"),
        effective_supplier="ValidSupplier",
        asset_metadata={'asset_name': asset_name},
        processed_maps_details=initial_processed_details if initial_processed_details is not None else {},
        merged_maps_details={},  # Stage populates this
        files_to_process=list(initial_file_rules) if initial_file_rules else [],
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={'skip_asset': skip_asset_flag},
        incrementing_value=None,
        sha5_value=None  # Keyword must match the AssetProcessingContext constructor parameter
    )
    return context

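The merging tests below assume the stage packs each selected source channel into a target channel of the output array, falling back to a default value when a source channel is absent. A rough numpy sketch of that idea (the `pack_channels` helper is hypothetical, not the stage's actual code):

```python
import numpy as np

def pack_channels(sources, height, width, output_channels):
    """sources: list of (2-D array or None, target_channel, default) tuples."""
    out = np.zeros((height, width, output_channels), dtype=np.uint8)
    for data, target_channel, default in sources:
        out[:, :, target_channel] = default if data is None else data
    return out

r = np.full((4, 4), 200, np.uint8)
g = np.full((4, 4), 100, np.uint8)
merged = pack_channels([(r, 0, 0), (g, 1, 0), (None, 2, 50)], 4, 4, 3)
assert merged[0, 0].tolist() == [200, 100, 50]
```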
def test_asset_skipped():
    stage = MapMergingStage()
    context = create_map_merging_mock_context(skip_asset_flag=True)

    updated_context = stage.execute(context)

    assert updated_context == context  # No changes expected
    assert not updated_context.merged_maps_details  # No maps should be merged


def test_no_map_merge_rules():
    stage = MapMergingStage()
    # Context with a non-MAP_MERGE rule
    non_merge_rule = create_mock_file_rule_for_merging(item_type="TEXTURE_MAP", map_type="Diffuse")
    context = create_map_merging_mock_context(initial_file_rules=[non_merge_rule])

    updated_context = stage.execute(context)

    assert updated_context == context  # No changes expected
    assert not updated_context.merged_maps_details  # No maps should be merged

@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.resize_image')  # If testing resize
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_map_merging_rgb_success(mock_log_error, mock_log_info, mock_load_image, mock_resize_image, mock_save_image):
    stage = MapMergingStage()

    # Input FileRules (mocked as already processed)
    r_id, g_id, b_id = uuid.uuid4(), uuid.uuid4(), uuid.uuid4()
    processed_details = {
        r_id.hex: {'temp_processed_file': '/fake/red.png', 'status': 'Processed', 'map_type': 'RED_SRC'},
        g_id.hex: {'temp_processed_file': '/fake/green.png', 'status': 'Processed', 'map_type': 'GREEN_SRC'},
        b_id.hex: {'temp_processed_file': '/fake/blue.png', 'status': 'Processed', 'map_type': 'BLUE_SRC'}
    }
    # Mock loaded image data (grayscale for inputs)
    mock_r_data = np.full((10, 10), 200, dtype=np.uint8)
    mock_g_data = np.full((10, 10), 100, dtype=np.uint8)
    mock_b_data = np.full((10, 10), 50, dtype=np.uint8)
    mock_load_image.side_effect = [mock_r_data, mock_g_data, mock_b_data]

    # Merge Rule setup
    merge_inputs = [
        create_mock_merge_input_channel(file_rule_id=r_id, source_channel=0, target_channel=0),  # R to R
        create_mock_merge_input_channel(file_rule_id=g_id, source_channel=0, target_channel=1),  # G to G
        create_mock_merge_input_channel(file_rule_id=b_id, source_channel=0, target_channel=2)   # B to B
    ]
    merge_settings = create_mock_merge_settings(input_maps=merge_inputs, output_channels=3)
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="RGB_Combined", merge_settings=merge_settings)

    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details=processed_details
    )
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    assert mock_load_image.call_count == 3
    mock_resize_image.assert_not_called()  # Assuming all inputs are the same size for this test
    mock_save_image.assert_called_once()

    # Check that the correct filename was passed to save_image
    # The filename is constructed as: f"{context.asset_rule.name}_merged_{merge_rule.map_type}{Path(first_input_path).suffix}"
    # In this case, first_input_path is '/fake/red.png', so suffix is '.png'
    # Asset name is "MergeAsset"
    expected_filename_part = f"{context.asset_rule.name}_merged_{merge_rule.map_type}.png"
    saved_path_arg = mock_save_image.call_args[0][0]
    assert expected_filename_part in str(saved_path_arg)

    saved_data = mock_save_image.call_args[0][1]
    assert saved_data.shape == (10, 10, 3)
    assert np.all(saved_data[:, :, 0] == 200)  # Red channel
    assert np.all(saved_data[:, :, 1] == 100)  # Green channel
    assert np.all(saved_data[:, :, 2] == 50)   # Blue channel

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Processed'
    # The temp_merged_file path is engine_temp_dir / asset_name / filename
    assert f"{context.engine_temp_dir / context.asset_rule.name / expected_filename_part}" == details['temp_merged_file']
    mock_log_error.assert_not_called()
    mock_log_info.assert_any_call(f"Successfully merged map '{merge_rule.map_type}' for asset '{context.asset_rule.name}'.")


# Unit tests will be added below this line
@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.resize_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_map_merging_channel_inversion(mock_log_error, mock_log_info, mock_load_image, mock_resize_image, mock_save_image):
    stage = MapMergingStage()

    # Input FileRule
    input_id = uuid.uuid4()
    processed_details = {
        input_id.hex: {'temp_processed_file': '/fake/source.png', 'status': 'Processed', 'map_type': 'SOURCE_MAP'}
    }
    # Mock loaded image data (single channel for simplicity, to be inverted)
    mock_source_data = np.array([[0, 100], [155, 255]], dtype=np.uint8)
    mock_load_image.return_value = mock_source_data

    # Merge Rule setup: one input, inverted, mapped to one output channel
    merge_inputs = [
        create_mock_merge_input_channel(file_rule_id=input_id, source_channel=0, target_channel=0, invert=True)
    ]
    merge_settings = create_mock_merge_settings(input_maps=merge_inputs, output_channels=1)
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="Inverted_Gray", merge_settings=merge_settings)

    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details=processed_details
    )
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path('/fake/source.png'))
    mock_resize_image.assert_not_called()
    mock_save_image.assert_called_once()

    saved_data = mock_save_image.call_args[0][1]
    assert saved_data.shape == (2, 2)  # Grayscale output

    # Expected inverted data: 255 - original
    expected_inverted_data = np.array([[255, 155], [100, 0]], dtype=np.uint8)
    assert np.all(saved_data == expected_inverted_data)

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Processed'
    assert "merged_Inverted_Gray" in details['temp_merged_file']
    mock_log_error.assert_not_called()
    mock_log_info.assert_any_call(f"Successfully merged map '{merge_rule.map_type}' for asset '{context.asset_rule.name}'.")

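The inversion the test above expects is a plain 8-bit complement; a one-line numpy check of the exact values used in the test:

```python
import numpy as np

src = np.array([[0, 100], [155, 255]], dtype=np.uint8)
inverted = 255 - src  # uint8 inversion, matching the expected array in the test
assert inverted.tolist() == [[255, 155], [100, 0]]
```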
@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.error')
def test_map_merging_input_map_missing(mock_log_error, mock_load_image, mock_save_image):
    stage = MapMergingStage()

    # Input FileRule ID that will be missing from processed_details
    missing_input_id = uuid.uuid4()

    # Merge Rule setup
    merge_inputs = [
        create_mock_merge_input_channel(file_rule_id=missing_input_id, source_channel=0, target_channel=0)
    ]
    merge_settings = create_mock_merge_settings(input_maps=merge_inputs, output_channels=1)
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="TestMissing", merge_settings=merge_settings)

    # processed_details is empty, so missing_input_id will not be found
    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details={}
    )

    updated_context = stage.execute(context)

    mock_load_image.assert_not_called()
    mock_save_image.assert_not_called()

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Failed'
    assert 'error_message' in details
    assert f"Input map FileRule ID {missing_input_id.hex} not found in processed_maps_details or not successfully processed" in details['error_message']

    mock_log_error.assert_called_once()
    assert f"Failed to merge map '{merge_rule.map_type}' for asset '{context.asset_rule.name}'" in mock_log_error.call_args[0][0]
    assert f"Input map FileRule ID {missing_input_id.hex} not found in processed_maps_details or not successfully processed" in mock_log_error.call_args[0][0]

@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.error')
def test_map_merging_input_map_status_not_processed(mock_log_error, mock_load_image, mock_save_image):
    stage = MapMergingStage()

    input_id = uuid.uuid4()
    processed_details = {
        # Status is 'Failed', not 'Processed'
        input_id.hex: {'temp_processed_file': '/fake/source.png', 'status': 'Failed', 'map_type': 'SOURCE_MAP'}
    }

    merge_inputs = [
        create_mock_merge_input_channel(file_rule_id=input_id, source_channel=0, target_channel=0)
    ]
    merge_settings = create_mock_merge_settings(input_maps=merge_inputs, output_channels=1)
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="TestNotProcessed", merge_settings=merge_settings)

    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details=processed_details
    )

    updated_context = stage.execute(context)

    mock_load_image.assert_not_called()
    mock_save_image.assert_not_called()

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Failed'
    assert 'error_message' in details
    assert f"Input map FileRule ID {input_id.hex} not found in processed_maps_details or not successfully processed" in details['error_message']

    mock_log_error.assert_called_once()
    assert f"Failed to merge map '{merge_rule.map_type}' for asset '{context.asset_rule.name}'" in mock_log_error.call_args[0][0]
    assert f"Input map FileRule ID {input_id.hex} not found in processed_maps_details or not successfully processed" in mock_log_error.call_args[0][0]

@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.error')
def test_map_merging_load_image_fails(mock_log_error, mock_load_image, mock_save_image):
    stage = MapMergingStage()

    input_id = uuid.uuid4()
    processed_details = {
        input_id.hex: {'temp_processed_file': '/fake/source.png', 'status': 'Processed', 'map_type': 'SOURCE_MAP'}
    }

    # Configure mock_load_image to raise an exception
    mock_load_image.side_effect = Exception("Failed to load image")

    merge_inputs = [
        create_mock_merge_input_channel(file_rule_id=input_id, source_channel=0, target_channel=0)
    ]
    merge_settings = create_mock_merge_settings(input_maps=merge_inputs, output_channels=1)
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="TestLoadFail", merge_settings=merge_settings)

    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details=processed_details
    )

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path('/fake/source.png'))
    mock_save_image.assert_not_called()

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Failed'
    assert 'error_message' in details
    assert "Failed to load image for merge input" in details['error_message']
    assert str(Path('/fake/source.png')) in details['error_message']

    mock_log_error.assert_called_once()
    assert f"Failed to merge map '{merge_rule.map_type}' for asset '{context.asset_rule.name}'" in mock_log_error.call_args[0][0]
    assert "Failed to load image for merge input" in mock_log_error.call_args[0][0]

@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.error')
def test_map_merging_save_image_fails(mock_log_error, mock_load_image, mock_save_image):
    stage = MapMergingStage()

    input_id = uuid.uuid4()
    processed_details = {
        input_id.hex: {'temp_processed_file': '/fake/source.png', 'status': 'Processed', 'map_type': 'SOURCE_MAP'}
    }
    mock_source_data = np.full((10, 10), 128, dtype=np.uint8)
    mock_load_image.return_value = mock_source_data

    # Configure mock_save_image to return False (indicating failure)
    mock_save_image.return_value = False

    merge_inputs = [
        create_mock_merge_input_channel(file_rule_id=input_id, source_channel=0, target_channel=0)
    ]
    merge_settings = create_mock_merge_settings(input_maps=merge_inputs, output_channels=1)
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="TestSaveFail", merge_settings=merge_settings)

    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details=processed_details
    )

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path('/fake/source.png'))
    mock_save_image.assert_called_once()  # save_image is called, but returns False

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Failed'
    assert 'error_message' in details
    assert "Failed to save merged map" in details['error_message']

    mock_log_error.assert_called_once()
    assert f"Failed to merge map '{merge_rule.map_type}' for asset '{context.asset_rule.name}'" in mock_log_error.call_args[0][0]
    assert "Failed to save merged map" in mock_log_error.call_args[0][0]

@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.resize_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_map_merging_dimension_mismatch_handling(mock_log_error, mock_log_info, mock_load_image, mock_resize_image, mock_save_image):
    stage = MapMergingStage()

    # Input FileRules
    id1, id2 = uuid.uuid4(), uuid.uuid4()
    processed_details = {
        id1.hex: {'temp_processed_file': '/fake/img1.png', 'status': 'Processed', 'map_type': 'IMG1_SRC'},
        id2.hex: {'temp_processed_file': '/fake/img2.png', 'status': 'Processed', 'map_type': 'IMG2_SRC'}
    }

    # Mock loaded image data with different dimensions
    mock_img1_data = np.full((10, 10), 100, dtype=np.uint8)         # 10x10
    mock_img2_data_original = np.full((5, 5), 200, dtype=np.uint8)  # 5x5, will be resized

    mock_load_image.side_effect = [mock_img1_data, mock_img2_data_original]

    # Mock resize_image to return an image of the target dimensions.
    # For simplicity it just creates a new array of the target size filled with a value.
    mock_img2_data_resized = np.full((10, 10), 210, dtype=np.uint8)  # Resized to 10x10
    mock_resize_image.return_value = mock_img2_data_resized

    # Merge Rule setup: two inputs. If both targeted the same channel, the current stage
    # logic would simply keep the last one, so they target different channels here to
    # give a clearer test of the resize behaviour.
    merge_inputs = [
        create_mock_merge_input_channel(file_rule_id=id1, source_channel=0, target_channel=0),
        create_mock_merge_input_channel(file_rule_id=id2, source_channel=0, target_channel=1)
    ]
    merge_settings = create_mock_merge_settings(input_maps=merge_inputs, output_channels=2)  # Outputting 2 channels
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="ResizedMerge", merge_settings=merge_settings)

    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details=processed_details
    )
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    assert mock_load_image.call_count == 2
    mock_load_image.assert_any_call(Path('/fake/img1.png'))
    mock_load_image.assert_any_call(Path('/fake/img2.png'))

    # Assert resize_image was called for the second image to match the first's dimensions.
    # The first argument is the image data, the second the target (height, width) tuple;
    # np.array_equal is needed for comparing numpy arrays in mock calls.
    mock_resize_image.assert_called_once()
    assert np.array_equal(mock_resize_image.call_args[0][0], mock_img2_data_original)
    assert mock_resize_image.call_args[0][1] == (10, 10)

    mock_save_image.assert_called_once()

    saved_data = mock_save_image.call_args[0][1]
    assert saved_data.shape == (10, 10, 2)  # 2 output channels
    assert np.all(saved_data[:, :, 0] == mock_img1_data)          # First channel from img1
    assert np.all(saved_data[:, :, 1] == mock_img2_data_resized)  # Second channel from resized img2

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Processed'
    assert "merged_ResizedMerge" in details['temp_merged_file']
    mock_log_error.assert_not_called()
    mock_log_info.assert_any_call(f"Resized input map from {Path('/fake/img2.png')} from {mock_img2_data_original.shape} to {(10,10)} to match first loaded map.")
    mock_log_info.assert_any_call(f"Successfully merged map '{merge_rule.map_type}' for asset '{context.asset_rule.name}'.")

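The dimension-mismatch test asserts that later inputs are conformed to the first loaded map's shape (the stage presumably delegates this to `ipu.resize_image`). A crude numpy sketch of that idea for the integer-factor case, purely illustrative:

```python
import numpy as np

first = np.full((10, 10), 100, np.uint8)
second = np.full((5, 5), 200, np.uint8)

target_shape = first.shape[:2]  # later inputs are conformed to the first map
if second.shape[:2] != target_shape:
    # nearest-neighbour style upscale, valid only for integer factors in this sketch
    factor = target_shape[0] // second.shape[0]
    second = np.kron(second, np.ones((factor, factor), dtype=np.uint8))
assert second.shape[:2] == target_shape
```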
@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.resize_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_map_merging_to_grayscale_output(mock_log_error, mock_log_info, mock_load_image, mock_resize_image, mock_save_image):
    stage = MapMergingStage()

    # Input FileRule (e.g., an RGB image)
    input_id = uuid.uuid4()
    processed_details = {
        input_id.hex: {'temp_processed_file': '/fake/rgb_source.png', 'status': 'Processed', 'map_type': 'RGB_SRC'}
    }
    # Mock loaded image data (3 channels)
    mock_rgb_data = np.full((10, 10, 3), [50, 100, 150], dtype=np.uint8)
    mock_load_image.return_value = mock_rgb_data

    # Merge Rule setup: take the Green channel (source_channel=1) from the input and map it
    # to the single output channel (target_channel=0)
    merge_inputs = [
        create_mock_merge_input_channel(file_rule_id=input_id, source_channel=1, target_channel=0)  # G to Grayscale
    ]
    # output_channels = 1 for grayscale
    merge_settings = create_mock_merge_settings(input_maps=merge_inputs, output_channels=1)
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="GrayscaleFromGreen", merge_settings=merge_settings)

    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details=processed_details
    )
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path('/fake/rgb_source.png'))
    mock_resize_image.assert_not_called()
    mock_save_image.assert_called_once()

    saved_data = mock_save_image.call_args[0][1]
    assert saved_data.shape == (10, 10)  # Grayscale output (2D)
    assert np.all(saved_data == 100)  # Green channel's value

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Processed'
    assert "merged_GrayscaleFromGreen" in details['temp_merged_file']
    mock_log_error.assert_not_called()
    mock_log_info.assert_any_call(f"Successfully merged map '{merge_rule.map_type}' for asset '{context.asset_rule.name}'.")

@mock.patch('processing.pipeline.stages.map_merging.ipu.save_image')
@mock.patch('processing.pipeline.stages.map_merging.ipu.load_image')
@mock.patch('logging.error')
def test_map_merging_default_value_if_missing_channel(mock_log_error, mock_load_image, mock_save_image):
    stage = MapMergingStage()

    input_id = uuid.uuid4()
    processed_details = {
        # Input is a grayscale image (1 channel)
        input_id.hex: {'temp_processed_file': '/fake/gray_source.png', 'status': 'Processed', 'map_type': 'GRAY_SRC'}
    }
    mock_gray_data = np.full((10, 10), 50, dtype=np.uint8)
    mock_load_image.return_value = mock_gray_data

    # Merge Rule: try to read source_channel 1 (which doesn't exist in grayscale)
    # and use default_value_if_missing for target_channel 0.
    # Also, read source_channel 0 (which exists) for target_channel 1.
    mic1 = create_mock_merge_input_channel(file_rule_id=input_id, source_channel=1, target_channel=0)
    mic1.default_value_if_missing = 128  # Set a specific default value
    mic2 = create_mock_merge_input_channel(file_rule_id=input_id, source_channel=0, target_channel=1)

    merge_settings = create_mock_merge_settings(input_maps=[mic1, mic2], output_channels=2)
    merge_rule_id = uuid.uuid4()
    merge_rule = create_mock_file_rule_for_merging(id_val=merge_rule_id, map_type="DefaultValueTest", merge_settings=merge_settings)

    context = create_map_merging_mock_context(
        initial_file_rules=[merge_rule],
        initial_processed_details=processed_details
    )
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path('/fake/gray_source.png'))
    mock_save_image.assert_called_once()

    saved_data = mock_save_image.call_args[0][1]
    assert saved_data.shape == (10, 10, 2)
    assert np.all(saved_data[:, :, 0] == 128)  # Default value for missing source channel 1
    assert np.all(saved_data[:, :, 1] == 50)   # Value from existing source channel 0

    assert merge_rule.id.hex in updated_context.merged_maps_details
    details = updated_context.merged_maps_details[merge_rule.id.hex]
    assert details['status'] == 'Processed'
    mock_log_error.assert_not_called()

359
tests/processing/pipeline/stages/test_metadata_finalization_save.py
Normal file
@@ -0,0 +1,359 @@
import pytest
from unittest import mock
from pathlib import Path
import datetime
import json  # For comparing dumped content
import uuid
from typing import Optional, Dict, Any

from processing.pipeline.stages.metadata_finalization_save import MetadataFinalizationAndSaveStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule
from configuration import Configuration, GeneralSettings  # GeneralSettings is referenced in the helper

def create_metadata_save_mock_context(
    status_flags: Optional[Dict[str, Any]] = None,
    initial_asset_metadata: Optional[Dict[str, Any]] = None,
    processed_details: Optional[Dict[str, Any]] = None,
    merged_details: Optional[Dict[str, Any]] = None,
    asset_name: str = "MetaSaveAsset",
    output_path_pattern_val: str = "{asset_name}/metadata/{filename}",
    # ... other common context fields ...
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_asset_rule.output_path_pattern = output_path_pattern_val
    mock_asset_rule.id = uuid.uuid4()  # Needed for generate_path_from_pattern if it uses it

    mock_source_rule = mock.MagicMock(spec=SourceRule)
    mock_source_rule.name = "MetaSaveSource"

    mock_config = mock.MagicMock(spec=Configuration)
    # mock_config.general_settings = mock.MagicMock(spec=GeneralSettings)  # If needed

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp_engine_dir"),
        output_base_path=Path("/fake/output_base"),  # For generate_path
        effective_supplier="ValidSupplier",
        asset_metadata=initial_asset_metadata if initial_asset_metadata is not None else {},
        processed_maps_details=processed_details if processed_details is not None else {},
        merged_maps_details=merged_details if merged_details is not None else {},
        files_to_process=[],
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags=status_flags if status_flags is not None else {},
        incrementing_value="001",  # Example for path generation
        sha5_value="abc"  # Example for path generation
    )
    return context

@mock.patch('processing.pipeline.stages.metadata_finalization_save.json.dump')
@mock.patch('builtins.open', new_callable=mock.mock_open)
@mock.patch('pathlib.Path.mkdir')
@mock.patch('processing.pipeline.stages.metadata_finalization_save.generate_path_from_pattern')
@mock.patch('datetime.datetime')
def test_asset_skipped_before_metadata_init(
    mock_dt, mock_gen_path, mock_mkdir, mock_file_open, mock_json_dump
):
    """
    Tests that if an asset is marked for skipping and has no initial metadata,
    the stage returns early without attempting to save metadata.
    """
    stage = MetadataFinalizationAndSaveStage()
    context = create_metadata_save_mock_context(
        status_flags={'skip_asset': True},
        initial_asset_metadata={}  # Explicitly empty
    )

    updated_context = stage.execute(context)

    # Assert that no processing or saving attempts were made
    mock_dt.now.assert_not_called()  # Should not even try to set end time if no metadata
    mock_gen_path.assert_not_called()
    mock_mkdir.assert_not_called()
    mock_file_open.assert_not_called()
    mock_json_dump.assert_not_called()

    assert updated_context.asset_metadata == {}  # Metadata remains empty
    assert 'metadata_file_path' not in updated_context.asset_metadata
    assert updated_context.status_flags.get('metadata_save_error') is None

@mock.patch('processing.pipeline.stages.metadata_finalization_save.json.dump')
|
||||||
|
@mock.patch('builtins.open', new_callable=mock.mock_open)
|
||||||
|
@mock.patch('pathlib.Path.mkdir')
|
||||||
|
@mock.patch('processing.pipeline.stages.metadata_finalization_save.generate_path_from_pattern')
|
||||||
|
@mock.patch('datetime.datetime')
|
||||||
|
def test_asset_skipped_after_metadata_init(
|
||||||
|
mock_dt, mock_gen_path, mock_mkdir, mock_file_open, mock_json_dump
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Tests that if an asset is marked for skipping but has initial metadata,
|
||||||
|
the status is updated to 'Skipped' and metadata is saved.
|
||||||
|
"""
|
||||||
|
stage = MetadataFinalizationAndSaveStage()
|
||||||
|
|
||||||
|
fixed_now = datetime.datetime(2023, 1, 1, 12, 0, 0)
|
||||||
|
mock_dt.now.return_value = fixed_now
|
||||||
|
|
||||||
|
fake_metadata_path_str = "/fake/output_base/SkippedAsset/metadata/SkippedAsset_metadata.json"
|
||||||
|
mock_gen_path.return_value = fake_metadata_path_str
|
||||||
|
|
||||||
|
initial_meta = {'asset_name': "SkippedAsset", 'status': "Pending"}
|
||||||
|
|
||||||
|
context = create_metadata_save_mock_context(
|
||||||
|
asset_name="SkippedAsset",
|
||||||
|
status_flags={'skip_asset': True},
|
||||||
|
initial_asset_metadata=initial_meta
|
||||||
|
)
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_dt.now.assert_called_once()
|
||||||
|
mock_gen_path.assert_called_once_with(
|
||||||
|
context.asset_rule.output_path_pattern,
|
||||||
|
context.asset_rule,
|
||||||
|
context.source_rule,
|
||||||
|
context.output_base_path,
|
||||||
|
context.asset_metadata, # Original metadata passed for path gen
|
||||||
|
context.incrementing_value,
|
||||||
|
context.sha5_value,
|
||||||
|
filename_override=f"{context.asset_rule.name}_metadata.json"
|
||||||
|
)
|
||||||
|
mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
|
||||||
|
mock_file_open.assert_called_once_with(Path(fake_metadata_path_str), 'w')
|
||||||
|
mock_json_dump.assert_called_once()
|
||||||
|
|
||||||
|
dumped_data = mock_json_dump.call_args[0][0]
|
||||||
|
assert dumped_data['status'] == "Skipped"
|
||||||
|
assert dumped_data['processing_end_time'] == fixed_now.isoformat()
|
||||||
|
assert 'processed_map_details' not in dumped_data # Should not be present if skipped early
|
||||||
|
assert 'merged_map_details' not in dumped_data # Should not be present if skipped early
|
||||||
|
|
||||||
|
assert updated_context.asset_metadata['status'] == "Skipped"
|
||||||
|
assert updated_context.asset_metadata['processing_end_time'] == fixed_now.isoformat()
|
||||||
|
assert updated_context.asset_metadata['metadata_file_path'] == fake_metadata_path_str
|
||||||
|
assert updated_context.status_flags.get('metadata_save_error') is None
|
||||||
|
@mock.patch('processing.pipeline.stages.metadata_finalization_save.json.dump')
@mock.patch('builtins.open', new_callable=mock.mock_open)  # Mocks open()
@mock.patch('pathlib.Path.mkdir')
@mock.patch('processing.pipeline.stages.metadata_finalization_save.generate_path_from_pattern')
@mock.patch('datetime.datetime')
def test_metadata_save_success(mock_dt, mock_gen_path, mock_mkdir, mock_file_open, mock_json_dump):
    """
    Tests successful metadata finalization and saving, including serialization of Path objects.
    """
    stage = MetadataFinalizationAndSaveStage()

    fixed_now = datetime.datetime(2023, 1, 1, 12, 30, 0)
    mock_dt.now.return_value = fixed_now

    fake_metadata_path_str = "/fake/output_base/MetaSaveAsset/metadata/MetaSaveAsset_metadata.json"
    mock_gen_path.return_value = fake_metadata_path_str

    initial_meta = {'asset_name': "MetaSaveAsset", 'status': "Pending", 'processing_start_time': "2023-01-01T12:00:00"}
    # Example of a Path object that needs serialization
    proc_details = {'map1': {'temp_processed_file': Path('/fake/temp_engine_dir/map1.png'), 'final_file_path': Path('/fake/output_base/MetaSaveAsset/map1.png')}}
    merged_details = {'merged_map_A': {'output_path': Path('/fake/output_base/MetaSaveAsset/merged_A.png')}}

    context = create_metadata_save_mock_context(
        initial_asset_metadata=initial_meta,
        processed_details=proc_details,
        merged_details=merged_details,
        status_flags={}  # No errors, no skip
    )

    updated_context = stage.execute(context)

    mock_dt.now.assert_called_once()
    mock_gen_path.assert_called_once_with(
        context.asset_rule.output_path_pattern,
        context.asset_rule,
        context.source_rule,
        context.output_base_path,
        context.asset_metadata,  # The metadata *before* adding end_time, status etc.
        context.incrementing_value,
        context.sha5_value,
        filename_override=f"{context.asset_rule.name}_metadata.json"
    )
    mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)  # Checks parent dir of fake_metadata_path_str
    mock_file_open.assert_called_once_with(Path(fake_metadata_path_str), 'w')
    mock_json_dump.assert_called_once()

    # Check what was passed to json.dump
    dumped_data = mock_json_dump.call_args[0][0]
    assert dumped_data['status'] == "Processed"
    assert dumped_data['processing_end_time'] == fixed_now.isoformat()
    assert 'processing_start_time' in dumped_data  # Ensure existing fields are preserved

    # Verify processed_map_details and Path serialization
    assert 'processed_map_details' in dumped_data
    assert dumped_data['processed_map_details']['map1']['temp_processed_file'] == '/fake/temp_engine_dir/map1.png'
    assert dumped_data['processed_map_details']['map1']['final_file_path'] == '/fake/output_base/MetaSaveAsset/map1.png'

    # Verify merged_map_details and Path serialization
    assert 'merged_map_details' in dumped_data
    assert dumped_data['merged_map_details']['merged_map_A']['output_path'] == '/fake/output_base/MetaSaveAsset/merged_A.png'

    assert updated_context.asset_metadata['metadata_file_path'] == fake_metadata_path_str
    assert updated_context.asset_metadata['status'] == "Processed"
    assert updated_context.status_flags.get('metadata_save_error') is None


@mock.patch('processing.pipeline.stages.metadata_finalization_save.json.dump')
@mock.patch('builtins.open', new_callable=mock.mock_open)
@mock.patch('pathlib.Path.mkdir')
@mock.patch('processing.pipeline.stages.metadata_finalization_save.generate_path_from_pattern')
@mock.patch('datetime.datetime')
def test_processing_failed_due_to_previous_error(
    mock_dt, mock_gen_path, mock_mkdir, mock_file_open, mock_json_dump
):
    """
    Tests that if a previous stage set an error flag, the status is 'Failed'
    and metadata (including any existing details) is saved.
    """
    stage = MetadataFinalizationAndSaveStage()

    fixed_now = datetime.datetime(2023, 1, 1, 12, 45, 0)
    mock_dt.now.return_value = fixed_now

    fake_metadata_path_str = "/fake/output_base/FailedAsset/metadata/FailedAsset_metadata.json"
    mock_gen_path.return_value = fake_metadata_path_str

    initial_meta = {'asset_name': "FailedAsset", 'status': "Processing"}
    # Simulate some details might exist even if a later stage failed
    proc_details = {'map1_partial': {'temp_processed_file': Path('/fake/temp_engine_dir/map1_partial.png')}}

    context = create_metadata_save_mock_context(
        asset_name="FailedAsset",
        initial_asset_metadata=initial_meta,
        processed_details=proc_details,
        merged_details={},  # No merged details if processing failed before that
        status_flags={'file_processing_error': True, 'error_message': "Something went wrong"}
    )

    updated_context = stage.execute(context)

    mock_dt.now.assert_called_once()
    mock_gen_path.assert_called_once()  # Path generation should still occur
    mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
    mock_file_open.assert_called_once_with(Path(fake_metadata_path_str), 'w')
    mock_json_dump.assert_called_once()

    dumped_data = mock_json_dump.call_args[0][0]
    assert dumped_data['status'] == "Failed"
    assert dumped_data['processing_end_time'] == fixed_now.isoformat()
    assert 'error_message' in dumped_data  # Assuming error messages from status_flags are copied
    assert dumped_data['error_message'] == "Something went wrong"

    # Check that existing details are included
    assert 'processed_map_details' in dumped_data
    assert dumped_data['processed_map_details']['map1_partial']['temp_processed_file'] == '/fake/temp_engine_dir/map1_partial.png'
    assert 'merged_map_details' in dumped_data  # Should be present, even if empty
    assert dumped_data['merged_map_details'] == {}

    assert updated_context.asset_metadata['status'] == "Failed"
    assert updated_context.asset_metadata['metadata_file_path'] == fake_metadata_path_str
    assert updated_context.status_flags.get('metadata_save_error') is None
    # Ensure the original error flag is preserved
    assert updated_context.status_flags['file_processing_error'] is True

@mock.patch('processing.pipeline.stages.metadata_finalization_save.json.dump')
@mock.patch('builtins.open', new_callable=mock.mock_open)
@mock.patch('pathlib.Path.mkdir')
@mock.patch('processing.pipeline.stages.metadata_finalization_save.generate_path_from_pattern')
@mock.patch('datetime.datetime')
@mock.patch('logging.error')  # To check if error is logged
def test_generate_path_fails(
    mock_log_error, mock_dt, mock_gen_path, mock_mkdir, mock_file_open, mock_json_dump
):
    """
    Tests behavior when generate_path_from_pattern raises an exception.
    Ensures status is updated, error flag is set, and no save is attempted.
    """
    stage = MetadataFinalizationAndSaveStage()

    fixed_now = datetime.datetime(2023, 1, 1, 12, 50, 0)
    mock_dt.now.return_value = fixed_now

    mock_gen_path.side_effect = Exception("Simulated path generation error")

    initial_meta = {'asset_name': "PathFailAsset", 'status': "Processing"}
    context = create_metadata_save_mock_context(
        asset_name="PathFailAsset",
        initial_asset_metadata=initial_meta,
        status_flags={}
    )

    updated_context = stage.execute(context)

    mock_dt.now.assert_called_once()  # Time is set before path generation
    mock_gen_path.assert_called_once()  # generate_path_from_pattern is called

    # File operations should NOT be called if path generation fails
    mock_mkdir.assert_not_called()
    mock_file_open.assert_not_called()
    mock_json_dump.assert_not_called()

    mock_log_error.assert_called_once()  # Check that an error was logged
    # Example: check if the log message contains relevant info, if needed
    # assert "Failed to generate metadata path" in mock_log_error.call_args[0][0]

    assert updated_context.asset_metadata['status'] == "Failed"  # Or a more specific error status
    assert 'processing_end_time' in updated_context.asset_metadata  # End time should still be set
    assert updated_context.asset_metadata['processing_end_time'] == fixed_now.isoformat()
    assert 'metadata_file_path' not in updated_context.asset_metadata  # Path should not be set

    assert updated_context.status_flags.get('metadata_save_error') is True
    assert 'error_message' in updated_context.asset_metadata  # Check if error message is populated
    assert "Simulated path generation error" in updated_context.asset_metadata['error_message']


@mock.patch('processing.pipeline.stages.metadata_finalization_save.json.dump')
@mock.patch('builtins.open', new_callable=mock.mock_open)
@mock.patch('pathlib.Path.mkdir')
@mock.patch('processing.pipeline.stages.metadata_finalization_save.generate_path_from_pattern')
@mock.patch('datetime.datetime')
@mock.patch('logging.error')  # To check if error is logged
def test_json_dump_fails(
    mock_log_error, mock_dt, mock_gen_path, mock_mkdir, mock_file_open, mock_json_dump
):
    """
    Tests behavior when json.dump raises an exception during saving.
    Ensures status is updated, error flag is set, and error is logged.
    """
    stage = MetadataFinalizationAndSaveStage()

    fixed_now = datetime.datetime(2023, 1, 1, 12, 55, 0)
    mock_dt.now.return_value = fixed_now

    fake_metadata_path_str = "/fake/output_base/JsonDumpFailAsset/metadata/JsonDumpFailAsset_metadata.json"
    mock_gen_path.return_value = fake_metadata_path_str

    mock_json_dump.side_effect = IOError("Simulated JSON dump error")  # Or TypeError for non-serializable

    initial_meta = {'asset_name': "JsonDumpFailAsset", 'status': "Processing"}
    context = create_metadata_save_mock_context(
        asset_name="JsonDumpFailAsset",
        initial_asset_metadata=initial_meta,
        status_flags={}
    )

    updated_context = stage.execute(context)

    mock_dt.now.assert_called_once()
    mock_gen_path.assert_called_once()
    mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
    mock_file_open.assert_called_once_with(Path(fake_metadata_path_str), 'w')
    mock_json_dump.assert_called_once()  # json.dump was attempted

    mock_log_error.assert_called_once()
    # assert "Failed to save metadata JSON" in mock_log_error.call_args[0][0]

    assert updated_context.asset_metadata['status'] == "Failed"  # Or specific "Metadata Save Failed"
    assert 'processing_end_time' in updated_context.asset_metadata
    assert updated_context.asset_metadata['processing_end_time'] == fixed_now.isoformat()
    # metadata_file_path might be set if path generation succeeded, even if dump failed.
    # Depending on desired behavior, this could be asserted or not.
    # For now, let's assume it's set if path generation was successful.
    assert updated_context.asset_metadata['metadata_file_path'] == fake_metadata_path_str

    assert updated_context.status_flags.get('metadata_save_error') is True
    assert 'error_message' in updated_context.asset_metadata
    assert "Simulated JSON dump error" in updated_context.asset_metadata['error_message']

169 tests/processing/pipeline/stages/test_metadata_initialization.py Normal file
@@ -0,0 +1,169 @@
import pytest
from unittest import mock
from pathlib import Path
import datetime
import uuid
from typing import Optional

from processing.pipeline.stages.metadata_initialization import MetadataInitializationStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule
from configuration import Configuration, GeneralSettings


# Helper function to create a mock AssetProcessingContext
def create_metadata_init_mock_context(
    skip_asset_flag: bool = False,
    asset_name: str = "MetaAsset",
    asset_id: uuid.UUID = None,  # Allow None to default to uuid.uuid4()
    source_path_str: str = "source/meta_asset",
    output_pattern: str = "{asset_name}/{map_type}",
    tags: list = None,
    custom_fields: dict = None,
    source_rule_name: str = "MetaSource",
    source_rule_id: uuid.UUID = None,  # Allow None to default to uuid.uuid4()
    eff_supplier: Optional[str] = "SupplierMeta",
    app_version_str: str = "1.0.0-test",
    inc_val: Optional[str] = None,
    sha_val: Optional[str] = None
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_asset_rule.id = asset_id if asset_id is not None else uuid.uuid4()
    mock_asset_rule.source_path = Path(source_path_str)
    mock_asset_rule.output_path_pattern = output_pattern
    mock_asset_rule.tags = tags if tags is not None else ["tag1", "test_tag"]
    mock_asset_rule.custom_fields = custom_fields if custom_fields is not None else {"custom_key": "custom_value"}

    mock_source_rule = mock.MagicMock(spec=SourceRule)
    mock_source_rule.name = source_rule_name
    mock_source_rule.id = source_rule_id if source_rule_id is not None else uuid.uuid4()

    mock_general_settings = mock.MagicMock(spec=GeneralSettings)
    mock_general_settings.app_version = app_version_str

    mock_config = mock.MagicMock(spec=Configuration)
    mock_config.general_settings = mock_general_settings

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp"),
        output_base_path=Path("/fake/output"),
        effective_supplier=eff_supplier,
        asset_metadata={},
        processed_maps_details={},
        merged_maps_details={},
        files_to_process=[],
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={'skip_asset': skip_asset_flag},
        incrementing_value=inc_val,
        sha5_value=sha_val
    )
    return context

@mock.patch('processing.pipeline.stages.metadata_initialization.datetime')
def test_metadata_initialization_not_skipped(mock_datetime_module):
    stage = MetadataInitializationStage()

    fixed_now = datetime.datetime(2023, 10, 26, 12, 0, 0, tzinfo=datetime.timezone.utc)
    mock_datetime_module.datetime.now.return_value = fixed_now

    asset_id_val = uuid.uuid4()
    source_id_val = uuid.uuid4()

    context = create_metadata_init_mock_context(
        skip_asset_flag=False,
        asset_id=asset_id_val,
        source_rule_id=source_id_val,
        inc_val="001",
        sha_val="abcde"
    )

    updated_context = stage.execute(context)

    assert isinstance(updated_context.asset_metadata, dict)
    assert isinstance(updated_context.processed_maps_details, dict)
    assert isinstance(updated_context.merged_maps_details, dict)

    md = updated_context.asset_metadata
    assert md['asset_name'] == "MetaAsset"
    assert md['asset_id'] == str(asset_id_val)
    assert md['source_rule_name'] == "MetaSource"
    assert md['source_rule_id'] == str(source_id_val)
    assert md['source_path'] == "source/meta_asset"
    assert md['effective_supplier'] == "SupplierMeta"
    assert md['output_path_pattern'] == "{asset_name}/{map_type}"
    assert md['processing_start_time'] == fixed_now.isoformat()
    assert md['status'] == "Pending"
    assert md['version'] == "1.0.0-test"
    assert md['tags'] == ["tag1", "test_tag"]
    assert md['custom_fields'] == {"custom_key": "custom_value"}
    assert md['incrementing_value'] == "001"
    assert md['sha5_value'] == "abcde"


@mock.patch('processing.pipeline.stages.metadata_initialization.datetime')
def test_metadata_initialization_not_skipped_none_inc_sha(mock_datetime_module):
    stage = MetadataInitializationStage()

    fixed_now = datetime.datetime(2023, 10, 26, 12, 0, 0, tzinfo=datetime.timezone.utc)
    mock_datetime_module.datetime.now.return_value = fixed_now

    context = create_metadata_init_mock_context(
        skip_asset_flag=False,
        inc_val=None,
        sha_val=None
    )

    updated_context = stage.execute(context)

    md = updated_context.asset_metadata
    assert 'incrementing_value' not in md  # Or assert md['incrementing_value'] is None, depending on desired behavior
    assert 'sha5_value' not in md  # Or assert md['sha5_value'] is None


def test_metadata_initialization_skipped():
    stage = MetadataInitializationStage()
    context = create_metadata_init_mock_context(skip_asset_flag=True)

    # Make copies of initial state to ensure they are not modified
    initial_asset_metadata = dict(context.asset_metadata)
    initial_processed_maps = dict(context.processed_maps_details)
    initial_merged_maps = dict(context.merged_maps_details)

    updated_context = stage.execute(context)

    assert updated_context.asset_metadata == initial_asset_metadata
    assert updated_context.processed_maps_details == initial_processed_maps
    assert updated_context.merged_maps_details == initial_merged_maps
    assert not updated_context.asset_metadata  # Explicitly check it's empty as per initial setup
    assert not updated_context.processed_maps_details
    assert not updated_context.merged_maps_details


@mock.patch('processing.pipeline.stages.metadata_initialization.datetime')
def test_tags_and_custom_fields_are_copies(mock_datetime_module):
    stage = MetadataInitializationStage()
    fixed_now = datetime.datetime(2023, 10, 26, 12, 0, 0, tzinfo=datetime.timezone.utc)
    mock_datetime_module.datetime.now.return_value = fixed_now

    original_tags = ["original_tag"]
    original_custom_fields = {"original_key": "original_value"}

    context = create_metadata_init_mock_context(
        skip_asset_flag=False,
        tags=original_tags,
        custom_fields=original_custom_fields
    )

    # Modify originals after context creation but before stage execution
    original_tags.append("modified_after_creation")
    original_custom_fields["new_key_after_creation"] = "new_value"

    updated_context = stage.execute(context)

    md = updated_context.asset_metadata
    assert md['tags'] == ["original_tag"]  # Should not have "modified_after_creation"
    assert md['tags'] is not original_tags  # Ensure it's a different object

    assert md['custom_fields'] == {"original_key": "original_value"}  # Should not have "new_key_after_creation"
    assert md['custom_fields'] is not original_custom_fields  # Ensure it's a different object
323 tests/processing/pipeline/stages/test_normal_map_green_channel.py Normal file
@@ -0,0 +1,323 @@
import pytest
from unittest import mock
from pathlib import Path
import uuid
import numpy as np
import logging  # Added for mocking logger

from processing.pipeline.stages.normal_map_green_channel import NormalMapGreenChannelStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule, FileRule
from configuration import Configuration, GeneralSettings


# Helper functions
def create_mock_file_rule_for_normal_test(
    id_val: uuid.UUID = None,  # Corrected type hint from Optional[uuid.UUID]
    map_type: str = "NORMAL",
    filename_pattern: str = "normal.png"
) -> mock.MagicMock:
    mock_fr = mock.MagicMock(spec=FileRule)
    mock_fr.id = id_val if id_val else uuid.uuid4()
    mock_fr.map_type = map_type
    mock_fr.filename_pattern = filename_pattern
    mock_fr.item_type = "MAP_COL"  # As per example, though not directly used by stage
    mock_fr.active = True  # As per example
    return mock_fr


def create_normal_map_mock_context(
    initial_file_rules: list = None,  # Corrected type hint
    initial_processed_details: dict = None,  # Corrected type hint
    invert_green_globally: bool = False,
    skip_asset_flag: bool = False,
    asset_name: str = "NormalMapAsset"
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name

    mock_source_rule = mock.MagicMock(spec=SourceRule)

    mock_gs = mock.MagicMock(spec=GeneralSettings)
    mock_gs.invert_normal_map_green_channel_globally = invert_green_globally

    mock_config = mock.MagicMock(spec=Configuration)
    mock_config.general_settings = mock_gs

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp_engine_dir"),
        output_base_path=Path("/fake/output"),
        effective_supplier="ValidSupplier",
        asset_metadata={'asset_name': asset_name},
        processed_maps_details=initial_processed_details if initial_processed_details is not None else {},
        merged_maps_details={},
        files_to_process=list(initial_file_rules) if initial_file_rules else [],
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={'skip_asset': skip_asset_flag},
        incrementing_value=None,  # Added as per AssetProcessingContext constructor
        sha5_value=None  # Added as per AssetProcessingContext constructor
    )
    return context

# Unit tests will be added below
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.save_image')
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.load_image')
def test_asset_skipped(mock_load_image, mock_save_image):
    stage = NormalMapGreenChannelStage()
    normal_fr = create_mock_file_rule_for_normal_test(map_type="NORMAL")
    initial_details = {
        normal_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_normal.png', 'status': 'Processed', 'map_type': 'NORMAL', 'notes': ''}
    }
    context = create_normal_map_mock_context(
        initial_file_rules=[normal_fr],
        initial_processed_details=initial_details,
        invert_green_globally=True,
        skip_asset_flag=True  # Asset is skipped
    )
    original_details = context.processed_maps_details.copy()

    updated_context = stage.execute(context)

    mock_load_image.assert_not_called()
    mock_save_image.assert_not_called()
    assert updated_context.processed_maps_details == original_details
    assert normal_fr in updated_context.files_to_process  # Ensure rule is still there


@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.save_image')
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.load_image')
def test_no_normal_map_present(mock_load_image, mock_save_image):
    stage = NormalMapGreenChannelStage()
    # Create a non-normal map rule
    diffuse_fr = create_mock_file_rule_for_normal_test(map_type="DIFFUSE", filename_pattern="diffuse.png")
    initial_details = {
        diffuse_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_diffuse.png', 'status': 'Processed', 'map_type': 'DIFFUSE', 'notes': ''}
    }
    context = create_normal_map_mock_context(
        initial_file_rules=[diffuse_fr],
        initial_processed_details=initial_details,
        invert_green_globally=True  # Inversion enabled, but no normal map
    )
    original_details = context.processed_maps_details.copy()

    updated_context = stage.execute(context)

    mock_load_image.assert_not_called()
    mock_save_image.assert_not_called()
    assert updated_context.processed_maps_details == original_details
    assert diffuse_fr in updated_context.files_to_process


@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.save_image')
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.load_image')
def test_normal_map_present_inversion_disabled(mock_load_image, mock_save_image):
    stage = NormalMapGreenChannelStage()
    normal_rule_id = uuid.uuid4()
    normal_fr = create_mock_file_rule_for_normal_test(id_val=normal_rule_id, map_type="NORMAL")
    initial_details = {
        normal_fr.id.hex: {'temp_processed_file': '/fake/temp_engine_dir/processed_normal.png', 'status': 'Processed', 'map_type': 'NORMAL', 'notes': 'Initial note'}
    }
    context = create_normal_map_mock_context(
        initial_file_rules=[normal_fr],
        initial_processed_details=initial_details,
        invert_green_globally=False  # Inversion disabled
    )
    original_details_entry = context.processed_maps_details[normal_fr.id.hex].copy()

    updated_context = stage.execute(context)

    mock_load_image.assert_not_called()
    mock_save_image.assert_not_called()
    assert updated_context.processed_maps_details[normal_fr.id.hex] == original_details_entry
    assert normal_fr in updated_context.files_to_process

@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.save_image')
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.load_image')
@mock.patch('logging.info')
@mock.patch('logging.debug')
def test_normal_map_inversion_uint8_success(mock_log_debug, mock_log_info, mock_load_image, mock_save_image):
    stage = NormalMapGreenChannelStage()

    normal_rule_id = uuid.uuid4()
    normal_fr = create_mock_file_rule_for_normal_test(id_val=normal_rule_id, map_type="NORMAL")

    initial_temp_path = Path('/fake/temp_engine_dir/processed_normal.png')
    initial_details = {
        normal_fr.id.hex: {'temp_processed_file': str(initial_temp_path), 'status': 'Processed', 'map_type': 'NORMAL', 'notes': 'Initial note'}
    }
    context = create_normal_map_mock_context(
        initial_file_rules=[normal_fr],
        initial_processed_details=initial_details,
        invert_green_globally=True  # Enable inversion
    )

    # R=10, G=50, B=100
    mock_loaded_normal_data = np.array([[[10, 50, 100]]], dtype=np.uint8)
    mock_load_image.return_value = mock_loaded_normal_data
    mock_save_image.return_value = True  # Simulate successful save

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(initial_temp_path)

    # Check that save_image was called with green channel inverted
    assert mock_save_image.call_count == 1
    saved_path_arg, saved_data_arg = mock_save_image.call_args[0]

    assert saved_data_arg[0, 0, 0] == 10  # R unchanged
    assert saved_data_arg[0, 0, 1] == 255 - 50  # G inverted
    assert saved_data_arg[0, 0, 2] == 100  # B unchanged

    assert isinstance(saved_path_arg, Path)
    assert "normal_g_inv_" in saved_path_arg.name
    assert saved_path_arg.parent == initial_temp_path.parent  # Should be in same temp dir

    normal_detail = updated_context.processed_maps_details[normal_fr.id.hex]
    assert "normal_g_inv_" in normal_detail['temp_processed_file']
    assert Path(normal_detail['temp_processed_file']).name == saved_path_arg.name
    assert "Green channel inverted" in normal_detail['notes']
    assert "Initial note" in normal_detail['notes']  # Check existing notes preserved

    assert normal_fr in updated_context.files_to_process


@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.save_image')
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.load_image')
@mock.patch('logging.info')
@mock.patch('logging.debug')
def test_normal_map_inversion_float_success(mock_log_debug, mock_log_info, mock_load_image, mock_save_image):
    stage = NormalMapGreenChannelStage()
    normal_rule_id = uuid.uuid4()
    normal_fr = create_mock_file_rule_for_normal_test(id_val=normal_rule_id, map_type="NORMAL")
    initial_temp_path = Path('/fake/temp_engine_dir/processed_normal_float.png')
    initial_details = {
        normal_fr.id.hex: {'temp_processed_file': str(initial_temp_path), 'status': 'Processed', 'map_type': 'NORMAL', 'notes': 'Float image'}
    }
    context = create_normal_map_mock_context(
        initial_file_rules=[normal_fr],
        initial_processed_details=initial_details,
        invert_green_globally=True
    )

    # R=0.1, G=0.25, B=0.75
    mock_loaded_normal_data = np.array([[[0.1, 0.25, 0.75]]], dtype=np.float32)
    mock_load_image.return_value = mock_loaded_normal_data
    mock_save_image.return_value = True

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(initial_temp_path)

    assert mock_save_image.call_count == 1
    saved_path_arg, saved_data_arg = mock_save_image.call_args[0]

    assert np.isclose(saved_data_arg[0, 0, 0], 0.1)  # R unchanged
    assert np.isclose(saved_data_arg[0, 0, 1], 1.0 - 0.25)  # G inverted
    assert np.isclose(saved_data_arg[0, 0, 2], 0.75)  # B unchanged

    assert "normal_g_inv_" in saved_path_arg.name
    normal_detail = updated_context.processed_maps_details[normal_fr.id.hex]
    assert "normal_g_inv_" in normal_detail['temp_processed_file']
    assert "Green channel inverted" in normal_detail['notes']
    assert "Float image" in normal_detail['notes']
    assert normal_fr in updated_context.files_to_process

@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.save_image')
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.load_image')
@mock.patch('logging.error')
def test_load_image_fails(mock_log_error, mock_load_image, mock_save_image):
    stage = NormalMapGreenChannelStage()
    normal_rule_id = uuid.uuid4()
    normal_fr = create_mock_file_rule_for_normal_test(id_val=normal_rule_id, map_type="NORMAL")
    initial_temp_path_str = '/fake/temp_engine_dir/processed_normal_load_fail.png'
    initial_details = {
        normal_fr.id.hex: {'temp_processed_file': initial_temp_path_str, 'status': 'Processed', 'map_type': 'NORMAL', 'notes': 'Load fail test'}
    }
    context = create_normal_map_mock_context(
        initial_file_rules=[normal_fr],
        initial_processed_details=initial_details,
        invert_green_globally=True
    )
    original_details_entry = context.processed_maps_details[normal_fr.id.hex].copy()

    mock_load_image.return_value = None  # Simulate load failure

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path(initial_temp_path_str))
    mock_save_image.assert_not_called()
    mock_log_error.assert_called_once()
    assert f"Failed to load image {Path(initial_temp_path_str)} for green channel inversion." in mock_log_error.call_args[0][0]

    # Details should be unchanged
    assert updated_context.processed_maps_details[normal_fr.id.hex] == original_details_entry
    assert normal_fr in updated_context.files_to_process


@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.save_image')
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.load_image')
@mock.patch('logging.error')
def test_save_image_fails(mock_log_error, mock_load_image, mock_save_image):
    stage = NormalMapGreenChannelStage()
    normal_rule_id = uuid.uuid4()
    normal_fr = create_mock_file_rule_for_normal_test(id_val=normal_rule_id, map_type="NORMAL")
    initial_temp_path = Path('/fake/temp_engine_dir/processed_normal_save_fail.png')
    initial_details = {
        normal_fr.id.hex: {'temp_processed_file': str(initial_temp_path), 'status': 'Processed', 'map_type': 'NORMAL', 'notes': 'Save fail test'}
    }
    context = create_normal_map_mock_context(
        initial_file_rules=[normal_fr],
        initial_processed_details=initial_details,
        invert_green_globally=True
    )
    original_details_entry = context.processed_maps_details[normal_fr.id.hex].copy()

    mock_loaded_normal_data = np.array([[[10, 50, 100]]], dtype=np.uint8)
    mock_load_image.return_value = mock_loaded_normal_data
    mock_save_image.return_value = False  # Simulate save failure

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(initial_temp_path)
    mock_save_image.assert_called_once()  # Save is attempted

    saved_path_arg = mock_save_image.call_args[0][0]  # Get the path it tried to save to
    mock_log_error.assert_called_once()
    assert f"Failed to save green channel inverted image to {saved_path_arg}." in mock_log_error.call_args[0][0]

    # Details should be unchanged
    assert updated_context.processed_maps_details[normal_fr.id.hex] == original_details_entry
    assert normal_fr in updated_context.files_to_process


@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.save_image')
@mock.patch('processing.pipeline.stages.normal_map_green_channel.ipu.load_image')
@mock.patch('logging.error')
@pytest.mark.parametrize("unsuitable_data, description", [
    (np.array([[1, 2], [3, 4]], dtype=np.uint8), "2D array"),  # 2D array
    (np.array([[[1, 2]]], dtype=np.uint8), "2-channel image")  # Image with less than 3 channels
])
def test_image_not_suitable_for_inversion(mock_log_error, mock_load_image, mock_save_image, unsuitable_data, description):
    stage = NormalMapGreenChannelStage()
    normal_rule_id = uuid.uuid4()
    normal_fr = create_mock_file_rule_for_normal_test(id_val=normal_rule_id, map_type="NORMAL")
    initial_temp_path_str = f'/fake/temp_engine_dir/unsuitable_{description.replace(" ", "_")}.png'
    initial_details = {
        normal_fr.id.hex: {'temp_processed_file': initial_temp_path_str, 'status': 'Processed', 'map_type': 'NORMAL', 'notes': f'Unsuitable: {description}'}
    }
    context = create_normal_map_mock_context(
        initial_file_rules=[normal_fr],
        initial_processed_details=initial_details,
        invert_green_globally=True
    )
    original_details_entry = context.processed_maps_details[normal_fr.id.hex].copy()

    mock_load_image.return_value = unsuitable_data

    updated_context = stage.execute(context)

    mock_load_image.assert_called_once_with(Path(initial_temp_path_str))
    mock_save_image.assert_not_called()  # Save should not be attempted
    mock_log_error.assert_called_once()
    assert f"Image at {Path(initial_temp_path_str)} is not suitable for green channel inversion (e.g., not RGB/RGBA)." in mock_log_error.call_args[0][0]

    # Details should be unchanged
    assert updated_context.processed_maps_details[normal_fr.id.hex] == original_details_entry
    assert normal_fr in updated_context.files_to_process
417 tests/processing/pipeline/stages/test_output_organization.py Normal file
@@ -0,0 +1,417 @@
import pytest
from unittest import mock
from pathlib import Path
import shutil  # To check if shutil.copy2 is called
import uuid
from typing import Optional  # Added for type hinting in helper

from processing.pipeline.stages.output_organization import OutputOrganizationStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule, FileRule  # For context setup
from configuration import Configuration, GeneralSettings


def create_output_org_mock_context(
    status_flags: Optional[dict] = None,
    asset_metadata_status: str = "Processed",  # Default to processed for testing copy
    processed_map_details: Optional[dict] = None,
    merged_map_details: Optional[dict] = None,
    overwrite_setting: bool = False,
    asset_name: str = "OutputOrgAsset",
    output_path_pattern_val: str = "{asset_name}/{map_type}/{filename}"
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_asset_rule.output_path_pattern = output_path_pattern_val
    # Need FileRules on AssetRule if stage tries to look up output_filename_pattern from them.
    # For simplicity, assume the stage constructs output_filename if not found on a FileRule.
    mock_asset_rule.file_rules = []  # Or mock FileRules if stage uses them for output_filename_pattern

    mock_source_rule = mock.MagicMock(spec=SourceRule)
    mock_source_rule.name = "OutputOrgSource"

    mock_gs = mock.MagicMock(spec=GeneralSettings)
    mock_gs.overwrite_existing = overwrite_setting

    mock_config = mock.MagicMock(spec=Configuration)
    mock_config.general_settings = mock_gs

    # Ensure asset_metadata has a status
    initial_asset_metadata = {'asset_name': asset_name, 'status': asset_metadata_status}

    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp_engine_dir"),
        output_base_path=Path("/fake/output_final"),
        effective_supplier="ValidSupplier",
        asset_metadata=initial_asset_metadata,
        processed_maps_details=processed_map_details if processed_map_details is not None else {},
        merged_maps_details=merged_map_details if merged_map_details is not None else {},
        files_to_process=[],  # Not directly used by this stage, but good to have
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags=status_flags if status_flags is not None else {},
        incrementing_value="001",
        sha5_value="xyz"  # Keyword kept as 'sha5_value' to match the AssetProcessingContext constructor parameter name
    )
    return context

@mock.patch('shutil.copy2')
@mock.patch('logging.info')  # To check for log messages
def test_output_organization_asset_skipped_by_status_flag(mock_log_info, mock_shutil_copy):
    stage = OutputOrganizationStage()
    context = create_output_org_mock_context(status_flags={'skip_asset': True})

    updated_context = stage.execute(context)

    mock_shutil_copy.assert_not_called()
    # Check if a log message indicates skipping, if applicable
    # e.g., mock_log_info.assert_any_call("Skipping output organization for asset OutputOrgAsset due to skip_asset flag.")
    assert 'final_output_files' not in updated_context.asset_metadata  # Or assert it's empty
    assert updated_context.asset_metadata['status'] == "Processed"  # Status should not change if skipped due to flag before stage logic
    # Add specific log check if the stage logs this event
    # For now, assume no copy is the primary check


@mock.patch('shutil.copy2')
@mock.patch('logging.warning')  # Or info, depending on how failure is logged
def test_output_organization_asset_failed_by_metadata_status(mock_log_warning, mock_shutil_copy):
    stage = OutputOrganizationStage()
    context = create_output_org_mock_context(asset_metadata_status="Failed")

    updated_context = stage.execute(context)

    mock_shutil_copy.assert_not_called()
    # Check for a log message indicating skipping due to failure status
    # e.g., mock_log_warning.assert_any_call("Skipping output organization for asset OutputOrgAsset as its status is Failed.")
    assert 'final_output_files' not in updated_context.asset_metadata  # Or assert it's empty
    assert updated_context.asset_metadata['status'] == "Failed"  # Status remains Failed

@mock.patch('shutil.copy2')
@mock.patch('pathlib.Path.mkdir')
@mock.patch('pathlib.Path.exists')
@mock.patch('processing.pipeline.stages.output_organization.generate_path_from_pattern')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_output_organization_success_no_overwrite(
    mock_log_error, mock_log_info, mock_gen_path, mock_path_exists, mock_mkdir, mock_shutil_copy
):
    stage = OutputOrganizationStage()

    proc_id_1 = uuid.uuid4().hex
    merged_id_1 = uuid.uuid4().hex

    processed_details = {
        proc_id_1: {'status': 'Processed', 'temp_processed_file': '/fake/temp_engine_dir/proc1.png', 'map_type': 'Diffuse', 'output_filename': 'OutputOrgAsset_Diffuse.png'}
    }
    merged_details = {
        merged_id_1: {'status': 'Processed', 'temp_merged_file': '/fake/temp_engine_dir/merged1.png', 'map_type': 'ORM', 'output_filename': 'OutputOrgAsset_ORM.png'}
    }

    context = create_output_org_mock_context(
        processed_map_details=processed_details,
        merged_map_details=merged_details,
        overwrite_setting=False
    )

    # Mock generate_path_from_pattern to return different paths for each call
    final_path_proc1 = Path("/fake/output_final/OutputOrgAsset/Diffuse/OutputOrgAsset_Diffuse.png")
    final_path_merged1 = Path("/fake/output_final/OutputOrgAsset/ORM/OutputOrgAsset_ORM.png")
    # Ensure generate_path_from_pattern is called with the correct context and details
    # The actual call in the stage is: generate_path_from_pattern(context, map_detail, map_type_key, temp_file_key)
    # We need to ensure our side_effect matches these calls.

    def gen_path_side_effect(ctx, detail, map_type_key, temp_file_key, output_filename_key):
        # Use .get() so the merged-map detail (which has no 'temp_processed_file' key)
        # falls through to the elif instead of raising a KeyError.
        if detail.get('temp_processed_file') == '/fake/temp_engine_dir/proc1.png':
            return final_path_proc1
        elif detail.get('temp_merged_file') == '/fake/temp_engine_dir/merged1.png':
            return final_path_merged1
        raise ValueError("Unexpected call to generate_path_from_pattern")

    mock_gen_path.side_effect = gen_path_side_effect

    mock_path_exists.return_value = False  # Files do not exist at destination

    updated_context = stage.execute(context)

    assert mock_shutil_copy.call_count == 2
    mock_shutil_copy.assert_any_call(Path(processed_details[proc_id_1]['temp_processed_file']), final_path_proc1)
    mock_shutil_copy.assert_any_call(Path(merged_details[merged_id_1]['temp_merged_file']), final_path_merged1)

    # Check mkdir calls
    # It should be called for each unique parent directory
    expected_mkdir_calls = [
        mock.call(Path("/fake/output_final/OutputOrgAsset/Diffuse"), parents=True, exist_ok=True),
        mock.call(Path("/fake/output_final/OutputOrgAsset/ORM"), parents=True, exist_ok=True)
    ]
    mock_mkdir.assert_has_calls(expected_mkdir_calls, any_order=True)
    # Ensure mkdir was called for the parent of each file
    assert mock_mkdir.call_count >= 1  # Could be 1 or 2 if paths share a base that's created once

    assert len(updated_context.asset_metadata['final_output_files']) == 2
    assert str(final_path_proc1) in updated_context.asset_metadata['final_output_files']
    assert str(final_path_merged1) in updated_context.asset_metadata['final_output_files']

    assert updated_context.processed_maps_details[proc_id_1]['final_output_path'] == str(final_path_proc1)
    assert updated_context.merged_maps_details[merged_id_1]['final_output_path'] == str(final_path_merged1)
    mock_log_error.assert_not_called()
    # Check for specific info logs if necessary
    # mock_log_info.assert_any_call(f"Copying {processed_details[proc_id_1]['temp_processed_file']} to {final_path_proc1}")
    # mock_log_info.assert_any_call(f"Copying {merged_details[merged_id_1]['temp_merged_file']} to {final_path_merged1}")

@mock.patch('shutil.copy2')
@mock.patch('pathlib.Path.mkdir')  # Still might be called if other files are processed
@mock.patch('pathlib.Path.exists')
@mock.patch('processing.pipeline.stages.output_organization.generate_path_from_pattern')
@mock.patch('logging.info')
def test_output_organization_overwrite_disabled_file_exists(
    mock_log_info, mock_gen_path, mock_path_exists, mock_mkdir, mock_shutil_copy
):
    stage = OutputOrganizationStage()
    proc_id_1 = uuid.uuid4().hex
    processed_details = {
        proc_id_1: {'status': 'Processed', 'temp_processed_file': '/fake/temp_engine_dir/proc_exists.png', 'map_type': 'Diffuse', 'output_filename': 'OutputOrgAsset_Diffuse_Exists.png'}
    }
    context = create_output_org_mock_context(
        processed_map_details=processed_details,
        overwrite_setting=False
    )

    final_path_proc1 = Path("/fake/output_final/OutputOrgAsset/Diffuse/OutputOrgAsset_Diffuse_Exists.png")
    mock_gen_path.return_value = final_path_proc1  # Only one file
    mock_path_exists.return_value = True  # File exists at destination

    updated_context = stage.execute(context)

    mock_shutil_copy.assert_not_called()
    mock_log_info.assert_any_call(
        f"Skipping copy for {final_path_proc1} as it already exists and overwrite is disabled."
    )
    # final_output_files should still be populated if the file exists and is considered "organized"
    assert str(final_path_proc1) in updated_context.asset_metadata['final_output_files']
    assert updated_context.processed_maps_details[proc_id_1]['final_output_path'] == str(final_path_proc1)

@mock.patch('shutil.copy2')
@mock.patch('pathlib.Path.mkdir')
@mock.patch('pathlib.Path.exists')
@mock.patch('processing.pipeline.stages.output_organization.generate_path_from_pattern')
@mock.patch('logging.info')
@mock.patch('logging.error')
def test_output_organization_overwrite_enabled_file_exists(
    mock_log_error, mock_log_info, mock_gen_path, mock_path_exists, mock_mkdir, mock_shutil_copy
):
    stage = OutputOrganizationStage()
    proc_id_1 = uuid.uuid4().hex
    processed_details = {
        proc_id_1: {'status': 'Processed', 'temp_processed_file': '/fake/temp_engine_dir/proc_overwrite.png', 'map_type': 'Diffuse', 'output_filename': 'OutputOrgAsset_Diffuse_Overwrite.png'}
    }
    context = create_output_org_mock_context(
        processed_map_details=processed_details,
        overwrite_setting=True  # Overwrite is enabled
    )

    final_path_proc1 = Path("/fake/output_final/OutputOrgAsset/Diffuse/OutputOrgAsset_Diffuse_Overwrite.png")
    mock_gen_path.return_value = final_path_proc1
    mock_path_exists.return_value = True  # File exists, but we should overwrite

    updated_context = stage.execute(context)

    mock_shutil_copy.assert_called_once_with(Path(processed_details[proc_id_1]['temp_processed_file']), final_path_proc1)
    mock_mkdir.assert_called_once_with(final_path_proc1.parent, parents=True, exist_ok=True)
    assert str(final_path_proc1) in updated_context.asset_metadata['final_output_files']
    assert updated_context.processed_maps_details[proc_id_1]['final_output_path'] == str(final_path_proc1)
    mock_log_error.assert_not_called()
    # Optionally check for a log message indicating overwrite, if implemented
    # mock_log_info.assert_any_call(f"Overwriting existing file {final_path_proc1}...")

@mock.patch('shutil.copy2')
@mock.patch('pathlib.Path.mkdir')
@mock.patch('pathlib.Path.exists')
@mock.patch('processing.pipeline.stages.output_organization.generate_path_from_pattern')
@mock.patch('logging.error')
def test_output_organization_only_processed_maps(
    mock_log_error, mock_gen_path, mock_path_exists, mock_mkdir, mock_shutil_copy
):
    stage = OutputOrganizationStage()
    proc_id_1 = uuid.uuid4().hex
    processed_details = {
        proc_id_1: {'status': 'Processed', 'temp_processed_file': '/fake/temp_engine_dir/proc_only.png', 'map_type': 'Albedo', 'output_filename': 'OutputOrgAsset_Albedo.png'}
    }
    context = create_output_org_mock_context(
        processed_map_details=processed_details,
        merged_map_details={},  # No merged maps
        overwrite_setting=False
    )

    final_path_proc1 = Path("/fake/output_final/OutputOrgAsset/Albedo/OutputOrgAsset_Albedo.png")
    mock_gen_path.return_value = final_path_proc1
    mock_path_exists.return_value = False

    updated_context = stage.execute(context)

    mock_shutil_copy.assert_called_once_with(Path(processed_details[proc_id_1]['temp_processed_file']), final_path_proc1)
    mock_mkdir.assert_called_once_with(final_path_proc1.parent, parents=True, exist_ok=True)
    assert len(updated_context.asset_metadata['final_output_files']) == 1
    assert str(final_path_proc1) in updated_context.asset_metadata['final_output_files']
    assert updated_context.processed_maps_details[proc_id_1]['final_output_path'] == str(final_path_proc1)
    assert not updated_context.merged_maps_details  # Should remain empty
    mock_log_error.assert_not_called()

@mock.patch('shutil.copy2')
|
||||||
|
@mock.patch('pathlib.Path.mkdir')
|
||||||
|
@mock.patch('pathlib.Path.exists')
|
||||||
|
@mock.patch('processing.pipeline.stages.output_organization.generate_path_from_pattern')
|
||||||
|
@mock.patch('logging.error')
|
||||||
|
def test_output_organization_only_merged_maps(
|
||||||
|
mock_log_error, mock_gen_path, mock_path_exists, mock_mkdir, mock_shutil_copy
|
||||||
|
):
|
||||||
|
stage = OutputOrganizationStage()
|
||||||
|
merged_id_1 = uuid.uuid4().hex
|
||||||
|
merged_details = {
|
||||||
|
merged_id_1: {'status': 'Processed', 'temp_merged_file': '/fake/temp_engine_dir/merged_only.png', 'map_type': 'Metallic', 'output_filename': 'OutputOrgAsset_Metallic.png'}
|
||||||
|
}
|
||||||
|
context = create_output_org_mock_context(
|
||||||
|
processed_map_details={}, # No processed maps
|
||||||
|
merged_map_details=merged_details,
|
||||||
|
overwrite_setting=False
|
||||||
|
)
|
||||||
|
|
||||||
|
final_path_merged1 = Path("/fake/output_final/OutputOrgAsset/Metallic/OutputOrgAsset_Metallic.png")
|
||||||
|
mock_gen_path.return_value = final_path_merged1
|
||||||
|
mock_path_exists.return_value = False
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_shutil_copy.assert_called_once_with(Path(merged_details[merged_id_1]['temp_merged_file']), final_path_merged1)
|
||||||
|
mock_mkdir.assert_called_once_with(final_path_merged1.parent, parents=True, exist_ok=True)
|
||||||
|
assert len(updated_context.asset_metadata['final_output_files']) == 1
|
||||||
|
assert str(final_path_merged1) in updated_context.asset_metadata['final_output_files']
|
||||||
|
assert updated_context.merged_maps_details[merged_id_1]['final_output_path'] == str(final_path_merged1)
|
||||||
|
assert not updated_context.processed_maps_details # Should remain empty
|
||||||
|
mock_log_error.assert_not_called()
|
||||||
|
|
||||||
|
@mock.patch('shutil.copy2')
|
||||||
|
@mock.patch('pathlib.Path.mkdir')
|
||||||
|
@mock.patch('pathlib.Path.exists')
|
||||||
|
@mock.patch('processing.pipeline.stages.output_organization.generate_path_from_pattern')
|
||||||
|
@mock.patch('logging.warning') # Expect a warning for skipped map
|
||||||
|
@mock.patch('logging.error')
|
||||||
|
def test_output_organization_map_status_not_processed(
|
||||||
|
mock_log_error, mock_log_warning, mock_gen_path, mock_path_exists, mock_mkdir, mock_shutil_copy
|
||||||
|
):
|
||||||
|
stage = OutputOrganizationStage()
|
||||||
|
|
||||||
|
proc_id_1_failed = uuid.uuid4().hex
|
||||||
|
proc_id_2_ok = uuid.uuid4().hex
|
||||||
|
|
||||||
|
processed_details = {
|
||||||
|
proc_id_1_failed: {'status': 'Failed', 'temp_processed_file': '/fake/temp_engine_dir/proc_failed.png', 'map_type': 'Diffuse', 'output_filename': 'OutputOrgAsset_Diffuse_Failed.png'},
|
||||||
|
proc_id_2_ok: {'status': 'Processed', 'temp_processed_file': '/fake/temp_engine_dir/proc_ok.png', 'map_type': 'Normal', 'output_filename': 'OutputOrgAsset_Normal_OK.png'}
|
||||||
|
}
|
||||||
|
context = create_output_org_mock_context(
|
||||||
|
processed_map_details=processed_details,
|
||||||
|
overwrite_setting=False
|
||||||
|
)
|
||||||
|
|
||||||
|
final_path_proc_ok = Path("/fake/output_final/OutputOrgAsset/Normal/OutputOrgAsset_Normal_OK.png")
|
||||||
|
# generate_path_from_pattern should only be called for the 'Processed' map
|
||||||
|
mock_gen_path.return_value = final_path_proc_ok
|
||||||
|
mock_path_exists.return_value = False
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
# Assert copy was only called for the 'Processed' map
|
||||||
|
mock_shutil_copy.assert_called_once_with(Path(processed_details[proc_id_2_ok]['temp_processed_file']), final_path_proc_ok)
|
||||||
|
mock_mkdir.assert_called_once_with(final_path_proc_ok.parent, parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Assert final_output_files only contains the successfully processed map
|
||||||
|
assert len(updated_context.asset_metadata['final_output_files']) == 1
|
||||||
|
assert str(final_path_proc_ok) in updated_context.asset_metadata['final_output_files']
|
||||||
|
|
||||||
|
# Assert final_output_path is set for the processed map
|
||||||
|
assert updated_context.processed_maps_details[proc_id_2_ok]['final_output_path'] == str(final_path_proc_ok)
|
||||||
|
# Assert final_output_path is NOT set for the failed map
|
||||||
|
assert 'final_output_path' not in updated_context.processed_maps_details[proc_id_1_failed]
|
||||||
|
|
||||||
|
mock_log_warning.assert_any_call(
|
||||||
|
f"Skipping output organization for map with ID {proc_id_1_failed} (type: Diffuse) as its status is 'Failed'."
|
||||||
|
)
|
||||||
|
mock_log_error.assert_not_called()
|
||||||
|
@mock.patch('shutil.copy2')
|
||||||
|
@mock.patch('pathlib.Path.mkdir')
|
||||||
|
@mock.patch('pathlib.Path.exists')
|
||||||
|
@mock.patch('processing.pipeline.stages.output_organization.generate_path_from_pattern')
|
||||||
|
@mock.patch('logging.error')
|
||||||
|
def test_output_organization_generate_path_fails(
|
||||||
|
mock_log_error, mock_gen_path, mock_path_exists, mock_mkdir, mock_shutil_copy
|
||||||
|
):
|
||||||
|
stage = OutputOrganizationStage()
|
||||||
|
proc_id_1 = uuid.uuid4().hex
|
||||||
|
processed_details = {
|
||||||
|
proc_id_1: {'status': 'Processed', 'temp_processed_file': '/fake/temp_engine_dir/proc_path_fail.png', 'map_type': 'Roughness', 'output_filename': 'OutputOrgAsset_Roughness_PathFail.png'}
|
||||||
|
}
|
||||||
|
context = create_output_org_mock_context(
|
||||||
|
processed_map_details=processed_details,
|
||||||
|
overwrite_setting=False
|
||||||
|
)
|
||||||
|
|
||||||
|
mock_gen_path.side_effect = Exception("Simulated path generation error")
|
||||||
|
mock_path_exists.return_value = False # Should not matter if path gen fails
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_shutil_copy.assert_not_called() # No copy if path generation fails
|
||||||
|
mock_mkdir.assert_not_called() # No mkdir if path generation fails
|
||||||
|
|
||||||
|
assert not updated_context.asset_metadata.get('final_output_files') # No files should be listed
|
||||||
|
assert 'final_output_path' not in updated_context.processed_maps_details[proc_id_1]
|
||||||
|
|
||||||
|
assert updated_context.status_flags.get('output_organization_error') is True
|
||||||
|
assert updated_context.asset_metadata['status'] == "Error" # Or "Failed" depending on desired behavior
|
||||||
|
|
||||||
|
mock_log_error.assert_any_call(
|
||||||
|
f"Error generating output path for map ID {proc_id_1} (type: Roughness): Simulated path generation error"
|
||||||
|
)
|
||||||
|
|
||||||
|
@mock.patch('shutil.copy2')
|
||||||
|
@mock.patch('pathlib.Path.mkdir')
|
||||||
|
@mock.patch('pathlib.Path.exists')
|
||||||
|
@mock.patch('processing.pipeline.stages.output_organization.generate_path_from_pattern')
|
||||||
|
@mock.patch('logging.error')
|
||||||
|
def test_output_organization_shutil_copy_fails(
|
||||||
|
mock_log_error, mock_gen_path, mock_path_exists, mock_mkdir, mock_shutil_copy
|
||||||
|
):
|
||||||
|
stage = OutputOrganizationStage()
|
||||||
|
proc_id_1 = uuid.uuid4().hex
|
||||||
|
processed_details = {
|
||||||
|
proc_id_1: {'status': 'Processed', 'temp_processed_file': '/fake/temp_engine_dir/proc_copy_fail.png', 'map_type': 'AO', 'output_filename': 'OutputOrgAsset_AO_CopyFail.png'}
|
||||||
|
}
|
||||||
|
context = create_output_org_mock_context(
|
||||||
|
processed_map_details=processed_details,
|
||||||
|
overwrite_setting=False
|
||||||
|
)
|
||||||
|
|
||||||
|
final_path_proc1 = Path("/fake/output_final/OutputOrgAsset/AO/OutputOrgAsset_AO_CopyFail.png")
|
||||||
|
mock_gen_path.return_value = final_path_proc1
|
||||||
|
mock_path_exists.return_value = False
|
||||||
|
mock_shutil_copy.side_effect = shutil.Error("Simulated copy error") # Can also be IOError, OSError
|
||||||
|
|
||||||
|
updated_context = stage.execute(context)
|
||||||
|
|
||||||
|
mock_mkdir.assert_called_once_with(final_path_proc1.parent, parents=True, exist_ok=True) # mkdir would be called before copy
|
||||||
|
mock_shutil_copy.assert_called_once_with(Path(processed_details[proc_id_1]['temp_processed_file']), final_path_proc1)
|
||||||
|
|
||||||
|
# Even if copy fails, the path might be added to final_output_files before the error is caught,
|
||||||
|
# or the design might be to not add it. Let's assume it's not added on error.
|
||||||
|
# Check the stage's actual behavior for this.
|
||||||
|
# If the intention is to record the *attempted* path, this assertion might change.
|
||||||
|
# For now, assume failure means it's not a "final" output.
|
||||||
|
assert not updated_context.asset_metadata.get('final_output_files')
|
||||||
|
assert 'final_output_path' not in updated_context.processed_maps_details[proc_id_1] # Or it might contain the path but status is error
|
||||||
|
|
||||||
|
assert updated_context.status_flags.get('output_organization_error') is True
|
||||||
|
assert updated_context.asset_metadata['status'] == "Error" # Or "Failed"
|
||||||
|
|
||||||
|
mock_log_error.assert_any_call(
|
||||||
|
f"Error copying file {processed_details[proc_id_1]['temp_processed_file']} to {final_path_proc1}: Simulated copy error"
|
||||||
|
)
|
||||||
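For orientation, the copy/skip behaviour these output-organization tests pin down can be summarised in a minimal sketch. This is not the actual OutputOrganizationStage implementation; the status values, dictionary keys and log wording are taken from the assertions above, and everything else (the function name and its exact signature) is assumed.

# Illustrative sketch only -- mirrors the behaviour asserted by the tests above:
# skip maps whose status is not 'Processed', skip existing files unless overwrite
# is enabled, otherwise create the parent directory and copy the temp file.
import logging
import shutil
from pathlib import Path
from typing import Optional


def organize_map(detail: dict, final_path: Path, overwrite: bool) -> Optional[str]:
    """Copy one temporary map to its final location; return the final path on success."""
    if detail.get('status') != 'Processed':
        logging.warning(
            f"Skipping output organization for map (type: {detail.get('map_type')}) "
            f"as its status is '{detail.get('status')}'."
        )
        return None
    if final_path.exists() and not overwrite:
        logging.info(f"Skipping copy for {final_path} as it already exists and overwrite is disabled.")
        return str(final_path)  # still recorded as an organized output
    final_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy2(Path(detail['temp_processed_file']), final_path)
    return str(final_path)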
213
tests/processing/pipeline/stages/test_supplier_determination.py
Normal file
@@ -0,0 +1,213 @@
import pytest
from unittest import mock
from pathlib import Path
from typing import Dict, List, Optional, Any

# Assuming pytest is run from project root, adjust if necessary
from processing.pipeline.stages.supplier_determination import SupplierDeterminationStage
from processing.pipeline.asset_context import AssetProcessingContext
from rule_structure import AssetRule, SourceRule, FileRule # For constructing mock context
from configuration import Configuration, GeneralSettings, Supplier # For mock config


# Example helper (can be a pytest fixture too)
def create_mock_context(
    asset_rule_supplier_override: Optional[str] = None,
    source_rule_supplier: Optional[str] = None,
    config_suppliers: Optional[Dict[str, Any]] = None, # Mocked Supplier objects or dicts
    asset_name: str = "TestAsset"
) -> AssetProcessingContext:
    mock_asset_rule = mock.MagicMock(spec=AssetRule)
    mock_asset_rule.name = asset_name
    mock_asset_rule.supplier_override = asset_rule_supplier_override
    # ... other AssetRule fields if needed by the stage ...

    mock_source_rule = mock.MagicMock(spec=SourceRule)
    mock_source_rule.supplier = source_rule_supplier
    # ... other SourceRule fields ...

    mock_config = mock.MagicMock(spec=Configuration)
    mock_config.suppliers = config_suppliers if config_suppliers is not None else {}

    # Basic AssetProcessingContext fields
    context = AssetProcessingContext(
        source_rule=mock_source_rule,
        asset_rule=mock_asset_rule,
        workspace_path=Path("/fake/workspace"),
        engine_temp_dir=Path("/fake/temp"),
        output_base_path=Path("/fake/output"),
        effective_supplier=None,
        asset_metadata={},
        processed_maps_details={},
        merged_maps_details={},
        files_to_process=[],
        loaded_data_cache={},
        config_obj=mock_config,
        status_flags={},
        incrementing_value=None,
        sha5_value=None # Corrected from sha5_value to sha256_value if that's the actual field name
    )
    return context


@pytest.fixture
def supplier_stage():
    return SupplierDeterminationStage()


@mock.patch('logging.error')
@mock.patch('logging.info')
def test_supplier_from_asset_rule_override_valid(mock_log_info, mock_log_error, supplier_stage):
    mock_suppliers_config = {"SupplierA": mock.MagicMock(spec=Supplier)}
    context = create_mock_context(
        asset_rule_supplier_override="SupplierA",
        config_suppliers=mock_suppliers_config
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier == "SupplierA"
    assert not updated_context.status_flags.get('supplier_error')
    mock_log_info.assert_any_call("Effective supplier for asset 'TestAsset' set to 'SupplierA' from asset rule override.")
    mock_log_error.assert_not_called()


@mock.patch('logging.error')
@mock.patch('logging.info')
def test_supplier_from_source_rule_fallback_valid(mock_log_info, mock_log_error, supplier_stage):
    mock_suppliers_config = {"SupplierB": mock.MagicMock(spec=Supplier)}
    context = create_mock_context(
        asset_rule_supplier_override=None,
        source_rule_supplier="SupplierB",
        config_suppliers=mock_suppliers_config
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier == "SupplierB"
    assert not updated_context.status_flags.get('supplier_error')
    mock_log_info.assert_any_call("Effective supplier for asset 'TestAsset' set to 'SupplierB' from source rule.")
    mock_log_error.assert_not_called()


@mock.patch('logging.error')
@mock.patch('logging.warning') # supplier_determination uses logging.warning for invalid suppliers
def test_asset_rule_override_invalid_supplier(mock_log_warning, mock_log_error, supplier_stage):
    context = create_mock_context(
        asset_rule_supplier_override="InvalidSupplier",
        config_suppliers={"SupplierA": mock.MagicMock(spec=Supplier)} # "InvalidSupplier" not in config
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier is None
    assert updated_context.status_flags.get('supplier_error') is True
    mock_log_warning.assert_any_call(
        "Asset 'TestAsset' has supplier_override 'InvalidSupplier' which is not defined in global suppliers. No supplier set."
    )
    mock_log_error.assert_not_called()


@mock.patch('logging.error')
@mock.patch('logging.warning')
def test_source_rule_fallback_invalid_supplier(mock_log_warning, mock_log_error, supplier_stage):
    context = create_mock_context(
        asset_rule_supplier_override=None,
        source_rule_supplier="InvalidSupplierB",
        config_suppliers={"SupplierA": mock.MagicMock(spec=Supplier)} # "InvalidSupplierB" not in config
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier is None
    assert updated_context.status_flags.get('supplier_error') is True
    mock_log_warning.assert_any_call(
        "Asset 'TestAsset' has source rule supplier 'InvalidSupplierB' which is not defined in global suppliers. No supplier set."
    )
    mock_log_error.assert_not_called()


@mock.patch('logging.error')
@mock.patch('logging.warning')
def test_no_supplier_defined(mock_log_warning, mock_log_error, supplier_stage):
    context = create_mock_context(
        asset_rule_supplier_override=None,
        source_rule_supplier=None,
        config_suppliers={"SupplierA": mock.MagicMock(spec=Supplier)}
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier is None
    assert updated_context.status_flags.get('supplier_error') is True
    mock_log_warning.assert_any_call(
        "No supplier could be determined for asset 'TestAsset'. "
        "AssetRule override is None and SourceRule supplier is None or empty."
    )
    mock_log_error.assert_not_called()


@mock.patch('logging.error')
@mock.patch('logging.warning')
def test_empty_config_suppliers_with_asset_override(mock_log_warning, mock_log_error, supplier_stage):
    context = create_mock_context(
        asset_rule_supplier_override="SupplierX",
        config_suppliers={} # Empty global supplier config
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier is None
    assert updated_context.status_flags.get('supplier_error') is True
    mock_log_warning.assert_any_call(
        "Asset 'TestAsset' has supplier_override 'SupplierX' which is not defined in global suppliers. No supplier set."
    )
    mock_log_error.assert_not_called()


@mock.patch('logging.error')
@mock.patch('logging.warning')
def test_empty_config_suppliers_with_source_rule(mock_log_warning, mock_log_error, supplier_stage):
    context = create_mock_context(
        source_rule_supplier="SupplierY",
        config_suppliers={} # Empty global supplier config
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier is None
    assert updated_context.status_flags.get('supplier_error') is True
    mock_log_warning.assert_any_call(
        "Asset 'TestAsset' has source rule supplier 'SupplierY' which is not defined in global suppliers. No supplier set."
    )
    mock_log_error.assert_not_called()


@mock.patch('logging.error')
@mock.patch('logging.info')
def test_asset_rule_override_empty_string(mock_log_info, mock_log_error, supplier_stage):
    # This scenario should fall back to source_rule.supplier if asset_rule.supplier_override is ""
    mock_suppliers_config = {"SupplierB": mock.MagicMock(spec=Supplier)}
    context = create_mock_context(
        asset_rule_supplier_override="", # Empty string override
        source_rule_supplier="SupplierB",
        config_suppliers=mock_suppliers_config
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier == "SupplierB" # Falls back to SourceRule
    assert not updated_context.status_flags.get('supplier_error')
    mock_log_info.assert_any_call("Effective supplier for asset 'TestAsset' set to 'SupplierB' from source rule.")
    mock_log_error.assert_not_called()


@mock.patch('logging.error')
@mock.patch('logging.warning')
def test_source_rule_supplier_empty_string(mock_log_warning, mock_log_error, supplier_stage):
    # This scenario should result in an error if asset_rule.supplier_override is None and source_rule.supplier is ""
    context = create_mock_context(
        asset_rule_supplier_override=None,
        source_rule_supplier="", # Empty string source supplier
        config_suppliers={"SupplierA": mock.MagicMock(spec=Supplier)}
    )

    updated_context = supplier_stage.execute(context)

    assert updated_context.effective_supplier is None
    assert updated_context.status_flags.get('supplier_error') is True
    mock_log_warning.assert_any_call(
        "No supplier could be determined for asset 'TestAsset'. "
        "AssetRule override is None and SourceRule supplier is None or empty."
    )
    mock_log_error.assert_not_called()
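The precedence these supplier tests encode (asset-rule override first, then the source-rule supplier, both validated against the globally configured suppliers, with empty strings treated like None) can be sketched roughly as below. The function name and signature are assumed for illustration; only the precedence and error conditions come from the assertions above.

# Minimal sketch of the supplier resolution order the tests describe --
# not the actual SupplierDeterminationStage code.
from typing import Dict, Optional


def resolve_supplier(override: Optional[str], source_supplier: Optional[str],
                     known_suppliers: Dict[str, object]) -> Optional[str]:
    candidate = override or source_supplier  # empty strings fall through like None
    if not candidate:
        return None  # stage flags supplier_error and logs a warning
    if candidate not in known_suppliers:
        return None  # an unknown supplier name is also treated as an error
    return candidate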
383
tests/processing/pipeline/test_orchestrator.py
Normal file
@@ -0,0 +1,383 @@
import pytest
from unittest import mock
from pathlib import Path
import uuid
import shutil # For checking rmtree
import tempfile # For mocking mkdtemp

from processing.pipeline.orchestrator import PipelineOrchestrator
from processing.pipeline.asset_context import AssetProcessingContext
from processing.pipeline.stages.base_stage import ProcessingStage # For mocking stages
from rule_structure import SourceRule, AssetRule, FileRule
from configuration import Configuration, GeneralSettings


# Mock Stage that modifies context
class MockPassThroughStage(ProcessingStage):
    def __init__(self, stage_name="mock_stage"):
        self.stage_name = stage_name
        self.execute_call_count = 0
        self.contexts_called_with = [] # To store contexts for verification

    def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
        self.execute_call_count += 1
        self.contexts_called_with.append(context)
        # Optionally, modify context for testing
        context.asset_metadata[f'{self.stage_name}_executed'] = True
        if self.stage_name == "skipper_stage": # Example conditional logic
            context.status_flags['skip_asset'] = True
            context.status_flags['skip_reason'] = "Skipped by skipper_stage"
        elif self.stage_name == "error_stage": # Example error-raising stage
            raise ValueError("Simulated error in error_stage")

        # Simulate status update based on stage execution
        if not context.status_flags.get('skip_asset') and not context.status_flags.get('asset_failed'):
            context.asset_metadata['status'] = "Processed" # Default to processed if not skipped/failed
        return context


def create_orchestrator_test_config() -> mock.MagicMock:
    mock_config = mock.MagicMock(spec=Configuration)
    mock_config.general_settings = mock.MagicMock(spec=GeneralSettings)
    mock_config.general_settings.temp_dir_override = None # Default, can be overridden in tests
    # Add other config details if orchestrator or stages depend on them directly
    return mock_config


def create_orchestrator_test_asset_rule(name: str, num_file_rules: int = 1) -> mock.MagicMock:
    asset_rule = mock.MagicMock(spec=AssetRule)
    asset_rule.name = name
    asset_rule.id = uuid.uuid4()
    asset_rule.source_path = Path(f"/fake/source/{name}") # Using Path object
    asset_rule.file_rules = [mock.MagicMock(spec=FileRule) for _ in range(num_file_rules)]
    asset_rule.enabled = True
    asset_rule.map_types = {} # Initialize as dict
    asset_rule.material_name_scheme = "{asset_name}"
    asset_rule.texture_name_scheme = "{asset_name}_{map_type}"
    asset_rule.output_path_scheme = "{source_name}/{asset_name}"
    # ... other necessary AssetRule fields ...
    return asset_rule


def create_orchestrator_test_source_rule(name: str, num_assets: int = 1, asset_names: list = None) -> mock.MagicMock:
    source_rule = mock.MagicMock(spec=SourceRule)
    source_rule.name = name
    source_rule.id = uuid.uuid4()
    if asset_names:
        source_rule.assets = [create_orchestrator_test_asset_rule(an) for an in asset_names]
    else:
        source_rule.assets = [create_orchestrator_test_asset_rule(f"Asset_{i+1}_in_{name}") for i in range(num_assets)]
    source_rule.enabled = True
    source_rule.source_path = Path(f"/fake/source_root/{name}") # Using Path object
    # ... other necessary SourceRule fields ...
    return source_rule


# --- Test Cases for PipelineOrchestrator.process_source_rule() ---

@mock.patch('shutil.rmtree')
@mock.patch('tempfile.mkdtemp')
def test_orchestrator_basic_flow_mock_stages(mock_mkdtemp, mock_rmtree):
    mock_mkdtemp.return_value = "/fake/engine_temp_dir_path" # Path for mkdtemp

    config = create_orchestrator_test_config()
    stage1 = MockPassThroughStage("stage1")
    stage2 = MockPassThroughStage("stage2")
    orchestrator = PipelineOrchestrator(config_obj=config, stages=[stage1, stage2])

    source_rule = create_orchestrator_test_source_rule("MySourceRule", num_assets=2)
    asset1_name = source_rule.assets[0].name
    asset2_name = source_rule.assets[1].name

    # Mock asset_metadata to be updated by stages for status check
    # The MockPassThroughStage already sets a 'status' = "Processed" if not skipped/failed
    # and adds '{stage_name}_executed' = True to asset_metadata.

    results = orchestrator.process_source_rule(
        source_rule, Path("/ws"), Path("/out"), False, "inc_val_123", "sha_val_abc"
    )

    assert stage1.execute_call_count == 2 # Called for each asset
    assert stage2.execute_call_count == 2 # Called for each asset

    assert asset1_name in results['processed']
    assert asset2_name in results['processed']
    assert not results['skipped']
    assert not results['failed']

    # Verify context modifications by stages
    for i in range(2): # For each asset
        # Stage 1 context checks
        s1_context_asset = stage1.contexts_called_with[i]
        assert s1_context_asset.asset_metadata.get('stage1_executed') is True
        assert s1_context_asset.asset_metadata.get('stage2_executed') is None # Stage 2 not yet run for this asset

        # Stage 2 context checks
        s2_context_asset = stage2.contexts_called_with[i]
        assert s2_context_asset.asset_metadata.get('stage1_executed') is True # From stage 1
        assert s2_context_asset.asset_metadata.get('stage2_executed') is True
        assert s2_context_asset.asset_metadata.get('status') == "Processed"

    mock_mkdtemp.assert_called_once()
    # The orchestrator creates a subdirectory within the mkdtemp path
    expected_temp_path = Path(mock_mkdtemp.return_value) / source_rule.id.hex
    mock_rmtree.assert_called_once_with(expected_temp_path, ignore_errors=True)


@mock.patch('shutil.rmtree')
@mock.patch('tempfile.mkdtemp')
def test_orchestrator_asset_skipping_by_stage(mock_mkdtemp, mock_rmtree):
    mock_mkdtemp.return_value = "/fake/engine_temp_dir_path_skip"

    config = create_orchestrator_test_config()
    skipper_stage = MockPassThroughStage("skipper_stage") # This stage will set skip_asset = True
    stage_after_skip = MockPassThroughStage("stage_after_skip")

    orchestrator = PipelineOrchestrator(config_obj=config, stages=[skipper_stage, stage_after_skip])

    source_rule = create_orchestrator_test_source_rule("SkipSourceRule", num_assets=1)
    asset_to_skip_name = source_rule.assets[0].name

    results = orchestrator.process_source_rule(
        source_rule, Path("/ws_skip"), Path("/out_skip"), False, "inc_skip", "sha_skip"
    )

    assert skipper_stage.execute_call_count == 1 # Called for the asset
    assert stage_after_skip.execute_call_count == 0 # Not called because asset was skipped

    assert asset_to_skip_name in results['skipped']
    assert not results['processed']
    assert not results['failed']

    # Verify skip reason in context if needed (MockPassThroughStage stores contexts)
    skipped_context = skipper_stage.contexts_called_with[0]
    assert skipped_context.status_flags['skip_asset'] is True
    assert skipped_context.status_flags['skip_reason'] == "Skipped by skipper_stage"

    mock_mkdtemp.assert_called_once()
    expected_temp_path = Path(mock_mkdtemp.return_value) / source_rule.id.hex
    mock_rmtree.assert_called_once_with(expected_temp_path, ignore_errors=True)


@mock.patch('shutil.rmtree')
@mock.patch('tempfile.mkdtemp')
def test_orchestrator_no_assets_in_source_rule(mock_mkdtemp, mock_rmtree):
    mock_mkdtemp.return_value = "/fake/engine_temp_dir_no_assets"

    config = create_orchestrator_test_config()
    stage1 = MockPassThroughStage("stage1_no_assets")
    orchestrator = PipelineOrchestrator(config_obj=config, stages=[stage1])

    source_rule = create_orchestrator_test_source_rule("NoAssetSourceRule", num_assets=0)

    results = orchestrator.process_source_rule(
        source_rule, Path("/ws_no_assets"), Path("/out_no_assets"), False, "inc_no", "sha_no"
    )

    assert stage1.execute_call_count == 0
    assert not results['processed']
    assert not results['skipped']
    assert not results['failed']

    # mkdtemp should still be called for the source rule processing, even if no assets
    mock_mkdtemp.assert_called_once()
    expected_temp_path = Path(mock_mkdtemp.return_value) / source_rule.id.hex
    mock_rmtree.assert_called_once_with(expected_temp_path, ignore_errors=True)


@mock.patch('shutil.rmtree')
@mock.patch('tempfile.mkdtemp')
def test_orchestrator_error_during_stage_execution(mock_mkdtemp, mock_rmtree):
    mock_mkdtemp.return_value = "/fake/engine_temp_dir_error"

    config = create_orchestrator_test_config()
    error_stage = MockPassThroughStage("error_stage") # This stage will raise an error
    stage_after_error = MockPassThroughStage("stage_after_error")

    orchestrator = PipelineOrchestrator(config_obj=config, stages=[error_stage, stage_after_error])

    # Test with two assets, one fails, one processes (if orchestrator continues)
    # The current orchestrator's process_asset is per asset, so an error in one
    # should not stop processing of other assets in the same source_rule.
    source_rule = create_orchestrator_test_source_rule("ErrorSourceRule", asset_names=["AssetFails", "AssetSucceeds"])
    asset_fails_name = source_rule.assets[0].name
    asset_succeeds_name = source_rule.assets[1].name

    # Make only the first asset's processing trigger the error
    original_execute = error_stage.execute
    def error_execute_side_effect(context: AssetProcessingContext):
        if context.asset_rule.name == asset_fails_name:
            # The MockPassThroughStage is already configured to raise ValueError for "error_stage"
            # but we need to ensure it's only for the first asset.
            # We can achieve this by modifying the stage_name temporarily or by checking asset_rule.name
            # For simplicity, let's assume the mock stage's error logic is fine,
            # and we just need to check the outcome.
            # The error_stage will raise ValueError("Simulated error in error_stage")
            # The orchestrator's _process_single_asset catches generic Exception.
            return original_execute(context) # This will call the erroring logic
        else:
            # For the second asset, make it pass through without error
            context.asset_metadata[f'{error_stage.stage_name}_executed'] = True
            context.asset_metadata['status'] = "Processed"
            return context

    error_stage.execute = mock.MagicMock(side_effect=error_execute_side_effect)
    # stage_after_error should still be called for the successful asset

    results = orchestrator.process_source_rule(
        source_rule, Path("/ws_error"), Path("/out_error"), False, "inc_err", "sha_err"
    )

    assert error_stage.execute.call_count == 2 # Called for both assets
    # stage_after_error is only called for the asset that didn't fail in error_stage
    assert stage_after_error.execute_call_count == 1

    assert asset_fails_name in results['failed']
    assert asset_succeeds_name in results['processed']
    assert not results['skipped']

    # Verify the context of the failed asset
    failed_context = None
    for ctx in error_stage.contexts_called_with:
        if ctx.asset_rule.name == asset_fails_name:
            failed_context = ctx
            break
    assert failed_context is not None
    assert failed_context.status_flags['asset_failed'] is True
    assert "Simulated error in error_stage" in failed_context.status_flags['failure_reason']

    # Verify the context of the successful asset after stage_after_error
    successful_context_after_s2 = None
    for ctx in stage_after_error.contexts_called_with:
        if ctx.asset_rule.name == asset_succeeds_name:
            successful_context_after_s2 = ctx
            break
    assert successful_context_after_s2 is not None
    assert successful_context_after_s2.asset_metadata.get('error_stage_executed') is True # from the non-erroring path
    assert successful_context_after_s2.asset_metadata.get('stage_after_error_executed') is True
    assert successful_context_after_s2.asset_metadata.get('status') == "Processed"

    mock_mkdtemp.assert_called_once()
    expected_temp_path = Path(mock_mkdtemp.return_value) / source_rule.id.hex
    mock_rmtree.assert_called_once_with(expected_temp_path, ignore_errors=True)


@mock.patch('shutil.rmtree')
@mock.patch('tempfile.mkdtemp')
def test_orchestrator_asset_processing_context_initialization(mock_mkdtemp, mock_rmtree):
    mock_engine_temp_dir = "/fake/engine_temp_dir_context_init"
    mock_mkdtemp.return_value = mock_engine_temp_dir

    config = create_orchestrator_test_config()
    mock_stage = MockPassThroughStage("context_check_stage")
    orchestrator = PipelineOrchestrator(config_obj=config, stages=[mock_stage])

    source_rule = create_orchestrator_test_source_rule("ContextSourceRule", num_assets=1)
    asset_rule = source_rule.assets[0]

    workspace_path = Path("/ws_context")
    output_base_path = Path("/out_context")
    incrementing_value = "inc_context_123"
    sha5_value = "sha_context_abc"

    orchestrator.process_source_rule(
        source_rule, workspace_path, output_base_path, False, incrementing_value, sha5_value
    )

    assert mock_stage.execute_call_count == 1

    # Retrieve the context passed to the mock stage
    captured_context = mock_stage.contexts_called_with[0]

    assert captured_context.source_rule == source_rule
    assert captured_context.asset_rule == asset_rule
    assert captured_context.workspace_path == workspace_path

    # engine_temp_dir for the asset is a sub-directory of the source_rule's temp dir
    # which itself is a sub-directory of the main engine_temp_dir from mkdtemp
    expected_source_rule_temp_dir = Path(mock_engine_temp_dir) / source_rule.id.hex
    expected_asset_temp_dir = expected_source_rule_temp_dir / asset_rule.id.hex
    assert captured_context.engine_temp_dir == expected_asset_temp_dir

    assert captured_context.output_base_path == output_base_path
    assert captured_context.config_obj == config
    assert captured_context.incrementing_value == incrementing_value
    assert captured_context.sha5_value == sha5_value

    # Check initial state of other context fields
    assert captured_context.asset_metadata == {} # Should be empty initially for an asset
    assert captured_context.status_flags == {} # Should be empty initially
    assert captured_context.shared_data == {} # Should be empty initially
    assert captured_context.current_files == [] # Should be empty initially

    mock_mkdtemp.assert_called_once()
    mock_rmtree.assert_called_once_with(expected_source_rule_temp_dir, ignore_errors=True)


@mock.patch('shutil.rmtree')
@mock.patch('tempfile.mkdtemp')
def test_orchestrator_temp_dir_override_from_config(mock_mkdtemp, mock_rmtree):
    # This test verifies that if config.general_settings.temp_dir_override is set,
    # mkdtemp is NOT called, and the override path is used and cleaned up.

    config = create_orchestrator_test_config()
    override_temp_path_str = "/override/temp/path"
    config.general_settings.temp_dir_override = override_temp_path_str

    stage1 = MockPassThroughStage("stage_temp_override")
    orchestrator = PipelineOrchestrator(config_obj=config, stages=[stage1])

    source_rule = create_orchestrator_test_source_rule("TempOverrideRule", num_assets=1)
    asset_rule = source_rule.assets[0]

    results = orchestrator.process_source_rule(
        source_rule, Path("/ws_override"), Path("/out_override"), False, "inc_override", "sha_override"
    )

    assert stage1.execute_call_count == 1
    assert asset_rule.name in results['processed']

    mock_mkdtemp.assert_not_called() # mkdtemp should not be called due to override

    # The orchestrator should create its source-rule specific subdir within the override
    expected_source_rule_temp_dir_in_override = Path(override_temp_path_str) / source_rule.id.hex

    # Verify the context passed to the stage uses the overridden path structure
    captured_context = stage1.contexts_called_with[0]
    expected_asset_temp_dir_in_override = expected_source_rule_temp_dir_in_override / asset_rule.id.hex
    assert captured_context.engine_temp_dir == expected_asset_temp_dir_in_override

    # rmtree should be called on the source_rule's directory within the override path
    mock_rmtree.assert_called_once_with(expected_source_rule_temp_dir_in_override, ignore_errors=True)


@mock.patch('shutil.rmtree')
@mock.patch('tempfile.mkdtemp')
def test_orchestrator_disabled_asset_rule_is_skipped(mock_mkdtemp, mock_rmtree):
    mock_mkdtemp.return_value = "/fake/engine_temp_dir_disabled_asset"

    config = create_orchestrator_test_config()
    stage1 = MockPassThroughStage("stage_disabled_check")
    orchestrator = PipelineOrchestrator(config_obj=config, stages=[stage1])

    source_rule = create_orchestrator_test_source_rule("DisabledAssetSourceRule", asset_names=["EnabledAsset", "DisabledAsset"])
    enabled_asset = source_rule.assets[0]
    disabled_asset = source_rule.assets[1]
    disabled_asset.enabled = False # Disable this asset rule

    results = orchestrator.process_source_rule(
        source_rule, Path("/ws_disabled"), Path("/out_disabled"), False, "inc_dis", "sha_dis"
    )

    assert stage1.execute_call_count == 1 # Only called for the enabled asset

    assert enabled_asset.name in results['processed']
    assert disabled_asset.name in results['skipped']
    assert not results['failed']

    # Verify context for the processed asset
    assert stage1.contexts_called_with[0].asset_rule.name == enabled_asset.name

    # Verify skip reason for the disabled asset (this is set by the orchestrator itself)
    # The orchestrator's _process_single_asset checks asset_rule.enabled
    # We need to inspect the results dictionary for the skip reason if it's stored there,
    # or infer it. The current structure of `results` doesn't store detailed skip reasons directly,
    # but the test ensures it's in the 'skipped' list.
    # For a more detailed check, one might need to adjust how results are reported or mock deeper.
    # For now, confirming it's in 'skipped' and stage1 wasn't called for it is sufficient.

    mock_mkdtemp.assert_called_once()
    expected_temp_path = Path(mock_mkdtemp.return_value) / source_rule.id.hex
    mock_rmtree.assert_called_once_with(expected_temp_path, ignore_errors=True)
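The overall shape of the orchestration loop these tests exercise (one temporary directory per source rule, one sub-directory per asset, stages run in order, errors caught per asset, results bucketed into processed/skipped/failed, and the temp directory removed at the end) can be sketched roughly as follows. This is an assumed illustration, not the real PipelineOrchestrator; the function name, the SimpleNamespace stand-in for the context and the loop structure are guesses constrained only by the assertions above.

# Rough, assumed sketch of the per-source-rule processing loop.
import shutil
import tempfile
from pathlib import Path
from types import SimpleNamespace


def run_source_rule(source_rule, stages, temp_dir_override=None):
    base = Path(temp_dir_override) if temp_dir_override else Path(tempfile.mkdtemp())
    rule_temp = base / source_rule.id.hex
    results = {'processed': [], 'skipped': [], 'failed': []}
    try:
        for asset in source_rule.assets:
            if not asset.enabled:
                results['skipped'].append(asset.name)
                continue
            # Minimal stand-in for AssetProcessingContext; the real one carries more fields.
            context = SimpleNamespace(asset_rule=asset, engine_temp_dir=rule_temp / asset.id.hex,
                                      asset_metadata={}, status_flags={})
            try:
                for stage in stages:
                    context = stage.execute(context)
                    if context.status_flags.get('skip_asset'):
                        break
            except Exception as exc:
                context.status_flags['asset_failed'] = True
                context.status_flags['failure_reason'] = str(exc)
            if context.status_flags.get('asset_failed'):
                results['failed'].append(asset.name)
            elif context.status_flags.get('skip_asset'):
                results['skipped'].append(asset.name)
            else:
                results['processed'].append(asset.name)
    finally:
        shutil.rmtree(rule_temp, ignore_errors=True)
    return results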
504
tests/processing/utils/test_image_processing_utils.py
Normal file
@@ -0,0 +1,504 @@
import pytest
from unittest import mock
import numpy as np
from pathlib import Path
import sys

# Attempt to import the module under test
# This assumes that the 'tests' directory is at the same level as the 'processing' directory,
# and pytest handles the PYTHONPATH correctly.
try:
    from processing.utils import image_processing_utils as ipu
    import cv2 # Import cv2 here if it's used for constants like cv2.COLOR_BGR2RGB
except ImportError:
    # Fallback for environments where PYTHONPATH might not be set up as expected by pytest initially
    # This adds the project root to sys.path to find the 'processing' module
    # Adjust the number of Path.parent calls if your test structure is deeper or shallower
    project_root = Path(__file__).parent.parent.parent.parent
    sys.path.insert(0, str(project_root))
    from processing.utils import image_processing_utils as ipu
    import cv2 # Import cv2 here as well

# If cv2 is imported directly in image_processing_utils, you might need to mock it globally for some tests
# For example, at the top of the test file:
# sys.modules['cv2'] = mock.MagicMock() # Basic global mock if needed
# We will use more targeted mocks with @mock.patch where cv2 is used.

# --- Tests for Mathematical Helpers ---

def test_is_power_of_two():
    assert ipu.is_power_of_two(1) is True
    assert ipu.is_power_of_two(2) is True
    assert ipu.is_power_of_two(4) is True
    assert ipu.is_power_of_two(16) is True
    assert ipu.is_power_of_two(1024) is True
    assert ipu.is_power_of_two(0) is False
    assert ipu.is_power_of_two(-2) is False
    assert ipu.is_power_of_two(3) is False
    assert ipu.is_power_of_two(100) is False


def test_get_nearest_pot():
    assert ipu.get_nearest_pot(1) == 1
    assert ipu.get_nearest_pot(2) == 2
    # Based on current implementation:
    # For 3: lower=2, upper=4. (3-2)=1, (4-3)=1. Else branch returns upper_pot. So 4.
    assert ipu.get_nearest_pot(3) == 4
    assert ipu.get_nearest_pot(50) == 64 # (50-32)=18, (64-50)=14 -> upper
    assert ipu.get_nearest_pot(100) == 128 # (100-64)=36, (128-100)=28 -> upper
    assert ipu.get_nearest_pot(256) == 256
    assert ipu.get_nearest_pot(0) == 1
    assert ipu.get_nearest_pot(-10) == 1
    # For 700: value.bit_length() = 10. lower_pot = 1<<(10-1) = 512. upper_pot = 1<<10 = 1024.
    # (700-512) = 188. (1024-700) = 324. (188 < 324) is True. Returns lower_pot. So 512.
    assert ipu.get_nearest_pot(700) == 512
    assert ipu.get_nearest_pot(6) == 8 # (6-4)=2, (8-6)=2. Returns upper.
    assert ipu.get_nearest_pot(5) == 4 # (5-4)=1, (8-5)=3. Returns lower.


@pytest.mark.parametrize(
    "orig_w, orig_h, target_w, target_h, resize_mode, ensure_pot, allow_upscale, target_max_dim, expected_w, expected_h",
    [
        # FIT mode
        (1000, 800, 500, None, "fit", False, False, None, 500, 400), # Fit width
        (1000, 800, None, 400, "fit", False, False, None, 500, 400), # Fit height
        (1000, 800, 500, 500, "fit", False, False, None, 500, 400), # Fit to box (width constrained)
        (800, 1000, 500, 500, "fit", False, False, None, 400, 500), # Fit to box (height constrained)
        (100, 80, 200, None, "fit", False, False, None, 100, 80), # Fit width, no upscale
        (100, 80, 200, None, "fit", False, True, None, 200, 160), # Fit width, allow upscale
        (100, 80, 128, None, "fit", True, False, None, 128, 64), # Re-evaluated
        (100, 80, 128, None, "fit", True, True, None, 128, 128), # Fit width, ensure_pot, allow upscale (128, 102 -> pot 128, 128)

        # STRETCH mode
        (1000, 800, 500, 400, "stretch", False, False, None, 500, 400),
        (100, 80, 200, 160, "stretch", False, True, None, 200, 160), # Stretch, allow upscale
        (100, 80, 200, 160, "stretch", False, False, None, 100, 80), # Stretch, no upscale
        (100, 80, 128, 128, "stretch", True, True, None, 128, 128), # Stretch, ensure_pot, allow upscale
        (100, 80, 70, 70, "stretch", True, False, None, 64, 64), # Stretch, ensure_pot, no upscale (70,70 -> pot 64,64)

        # MAX_DIM_POT mode
        (1000, 800, None, None, "max_dim_pot", True, False, 512, 512, 512),
        (800, 1000, None, None, "max_dim_pot", True, False, 512, 512, 512),
        (1920, 1080, None, None, "max_dim_pot", True, False, 1024, 1024, 512),
        (100, 100, None, None, "max_dim_pot", True, False, 60, 64, 64),
        # Edge cases for calculate_target_dimensions
        (0, 0, 512, 512, "fit", False, False, None, 512, 512),
        (10, 10, 512, 512, "fit", True, False, None, 8, 8),
        (100, 100, 150, 150, "fit", True, False, None, 128, 128),
    ]
)
def test_calculate_target_dimensions(orig_w, orig_h, target_w, target_h, resize_mode, ensure_pot, allow_upscale, target_max_dim, expected_w, expected_h):
    if resize_mode == "max_dim_pot" and target_max_dim is None:
        with pytest.raises(ValueError, match="target_max_dim_for_pot_mode must be provided"):
            ipu.calculate_target_dimensions(orig_w, orig_h, target_width=target_w, target_height=target_h,
                                            resize_mode=resize_mode, ensure_pot=ensure_pot, allow_upscale=allow_upscale,
                                            target_max_dim_for_pot_mode=target_max_dim)
    elif (resize_mode == "fit" and target_w is None and target_h is None) or \
         (resize_mode == "stretch" and (target_w is None or target_h is None)):
        with pytest.raises(ValueError):
            ipu.calculate_target_dimensions(orig_w, orig_h, target_width=target_w, target_height=target_h,
                                            resize_mode=resize_mode, ensure_pot=ensure_pot, allow_upscale=allow_upscale,
                                            target_max_dim_for_pot_mode=target_max_dim)
    else:
        actual_w, actual_h = ipu.calculate_target_dimensions(
            orig_w, orig_h, target_width=target_w, target_height=target_h,
            resize_mode=resize_mode, ensure_pot=ensure_pot, allow_upscale=allow_upscale,
            target_max_dim_for_pot_mode=target_max_dim
        )
        assert (actual_w, actual_h) == (expected_w, expected_h), \
            f"Input: ({orig_w},{orig_h}), T=({target_w},{target_h}), M={resize_mode}, POT={ensure_pot}, UPSC={allow_upscale}, TMAX={target_max_dim}"


def test_calculate_target_dimensions_invalid_mode():
    with pytest.raises(ValueError, match="Unsupported resize_mode"):
        ipu.calculate_target_dimensions(100, 100, 50, 50, resize_mode="invalid_mode")


@pytest.mark.parametrize(
    "ow, oh, rw, rh, expected_str",
    [
        (100, 100, 100, 100, "EVEN"),
        (100, 100, 200, 200, "EVEN"),
        (200, 200, 100, 100, "EVEN"),
        (100, 100, 150, 100, "X15Y1"),
        (100, 100, 50, 100, "X05Y1"),
        (100, 100, 100, 150, "X1Y15"),
        (100, 100, 100, 50, "X1Y05"),
        (100, 50, 150, 75, "EVEN"),
        (100, 50, 150, 50, "X15Y1"),
        (100, 50, 100, 75, "X1Y15"),
        (100, 50, 120, 60, "EVEN"),
        (100, 50, 133, 66, "EVEN"),
        (100, 100, 133, 100, "X133Y1"),
        (100, 100, 100, 133, "X1Y133"),
        (100, 100, 133, 133, "EVEN"),
        (100, 100, 67, 100, "X067Y1"),
        (100, 100, 100, 67, "X1Y067"),
        (100, 100, 67, 67, "EVEN"),
        (1920, 1080, 1024, 576, "EVEN"),
        (1920, 1080, 1024, 512, "X112Y1"),
        (0, 100, 50, 50, "InvalidInput"),
        (100, 0, 50, 50, "InvalidInput"),
        (100, 100, 0, 50, "InvalidResize"),
        (100, 100, 50, 0, "InvalidResize"),
    ]
)
def test_normalize_aspect_ratio_change(ow, oh, rw, rh, expected_str):
    assert ipu.normalize_aspect_ratio_change(ow, oh, rw, rh) == expected_str


# --- Tests for Image Manipulation ---

@mock.patch('cv2.imread')
def test_load_image_success_str_path(mock_cv2_imread):
    mock_img_data = np.array([[[1, 2, 3]]], dtype=np.uint8)
    mock_cv2_imread.return_value = mock_img_data

    result = ipu.load_image("dummy/path.png")

    mock_cv2_imread.assert_called_once_with("dummy/path.png", cv2.IMREAD_UNCHANGED)
    assert np.array_equal(result, mock_img_data)


@mock.patch('cv2.imread')
def test_load_image_success_path_obj(mock_cv2_imread):
    mock_img_data = np.array([[[1, 2, 3]]], dtype=np.uint8)
    mock_cv2_imread.return_value = mock_img_data
    dummy_path = Path("dummy/path.png")

    result = ipu.load_image(dummy_path)

    mock_cv2_imread.assert_called_once_with(str(dummy_path), cv2.IMREAD_UNCHANGED)
    assert np.array_equal(result, mock_img_data)


@mock.patch('cv2.imread')
def test_load_image_failure(mock_cv2_imread):
    mock_cv2_imread.return_value = None

    result = ipu.load_image("dummy/path.png")

    mock_cv2_imread.assert_called_once_with("dummy/path.png", cv2.IMREAD_UNCHANGED)
    assert result is None


@mock.patch('cv2.imread', side_effect=Exception("CV2 Read Error"))
def test_load_image_exception(mock_cv2_imread):
    result = ipu.load_image("dummy/path.png")
    mock_cv2_imread.assert_called_once_with("dummy/path.png", cv2.IMREAD_UNCHANGED)
    assert result is None


@mock.patch('cv2.cvtColor')
def test_convert_bgr_to_rgb_3_channel(mock_cv2_cvtcolor):
    bgr_image = np.random.randint(0, 255, (10, 10, 3), dtype=np.uint8)
    rgb_image_mock = np.random.randint(0, 255, (10, 10, 3), dtype=np.uint8)
    mock_cv2_cvtcolor.return_value = rgb_image_mock

    result = ipu.convert_bgr_to_rgb(bgr_image)

    mock_cv2_cvtcolor.assert_called_once_with(bgr_image, cv2.COLOR_BGR2RGB)
    assert np.array_equal(result, rgb_image_mock)


@mock.patch('cv2.cvtColor')
def test_convert_bgr_to_rgb_4_channel_bgra(mock_cv2_cvtcolor):
    bgra_image = np.random.randint(0, 255, (10, 10, 4), dtype=np.uint8)
    rgb_image_mock = np.random.randint(0, 255, (10, 10, 3), dtype=np.uint8) # cvtColor BGRA2RGB drops alpha
    mock_cv2_cvtcolor.return_value = rgb_image_mock # Mocking the output of BGRA2RGB

    result = ipu.convert_bgr_to_rgb(bgra_image)

    mock_cv2_cvtcolor.assert_called_once_with(bgra_image, cv2.COLOR_BGRA2RGB)
    assert np.array_equal(result, rgb_image_mock)


def test_convert_bgr_to_rgb_none_input():
    assert ipu.convert_bgr_to_rgb(None) is None


def test_convert_bgr_to_rgb_grayscale_input():
    gray_image = np.random.randint(0, 255, (10, 10), dtype=np.uint8)
    result = ipu.convert_bgr_to_rgb(gray_image)
    assert np.array_equal(result, gray_image) # Should return as is


@mock.patch('cv2.cvtColor')
def test_convert_rgb_to_bgr_3_channel(mock_cv2_cvtcolor):
    rgb_image = np.random.randint(0, 255, (10, 10, 3), dtype=np.uint8)
    bgr_image_mock = np.random.randint(0, 255, (10, 10, 3), dtype=np.uint8)
    mock_cv2_cvtcolor.return_value = bgr_image_mock

    result = ipu.convert_rgb_to_bgr(rgb_image)

    mock_cv2_cvtcolor.assert_called_once_with(rgb_image, cv2.COLOR_RGB2BGR)
    assert np.array_equal(result, bgr_image_mock)


def test_convert_rgb_to_bgr_none_input():
    assert ipu.convert_rgb_to_bgr(None) is None


def test_convert_rgb_to_bgr_grayscale_input():
    gray_image = np.random.randint(0, 255, (10, 10), dtype=np.uint8)
    result = ipu.convert_rgb_to_bgr(gray_image)
    assert np.array_equal(result, gray_image) # Should return as is


def test_convert_rgb_to_bgr_4_channel_input():
    rgba_image = np.random.randint(0, 255, (10, 10, 4), dtype=np.uint8)
    result = ipu.convert_rgb_to_bgr(rgba_image)
    assert np.array_equal(result, rgba_image) # Should return as is


@mock.patch('cv2.resize')
def test_resize_image_downscale(mock_cv2_resize):
    original_image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    resized_image_mock = np.random.randint(0, 255, (50, 50, 3), dtype=np.uint8)
    mock_cv2_resize.return_value = resized_image_mock
    target_w, target_h = 50, 50

    result = ipu.resize_image(original_image, target_w, target_h)

    mock_cv2_resize.assert_called_once_with(original_image, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)
    assert np.array_equal(result, resized_image_mock)


@mock.patch('cv2.resize')
def test_resize_image_upscale(mock_cv2_resize):
    original_image = np.random.randint(0, 255, (50, 50, 3), dtype=np.uint8)
    resized_image_mock = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    mock_cv2_resize.return_value = resized_image_mock
    target_w, target_h = 100, 100

    result = ipu.resize_image(original_image, target_w, target_h)

    mock_cv2_resize.assert_called_once_with(original_image, (target_w, target_h), interpolation=cv2.INTER_CUBIC)
    assert np.array_equal(result, resized_image_mock)


@mock.patch('cv2.resize')
def test_resize_image_custom_interpolation(mock_cv2_resize):
    original_image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    resized_image_mock = np.random.randint(0, 255, (50, 50, 3), dtype=np.uint8)
    mock_cv2_resize.return_value = resized_image_mock
    target_w, target_h = 50, 50

    result = ipu.resize_image(original_image, target_w, target_h, interpolation=cv2.INTER_NEAREST)

    mock_cv2_resize.assert_called_once_with(original_image, (target_w, target_h), interpolation=cv2.INTER_NEAREST)
    assert np.array_equal(result, resized_image_mock)


def test_resize_image_none_input():
    with pytest.raises(ValueError, match="Cannot resize a None image."):
        ipu.resize_image(None, 50, 50)


@pytest.mark.parametrize("w, h", [(0, 50), (50, 0), (-1, 50)])
def test_resize_image_invalid_dims(w, h):
    original_image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    with pytest.raises(ValueError, match="Target width and height must be positive."):
        ipu.resize_image(original_image, w, h)


@mock.patch('cv2.imwrite')
@mock.patch('pathlib.Path.mkdir') # Mock mkdir to avoid actual directory creation
def test_save_image_success(mock_mkdir, mock_cv2_imwrite):
    mock_cv2_imwrite.return_value = True
    img_data = np.zeros((10,10,3), dtype=np.uint8) # RGB
    save_path = "output/test.png"

    # ipu.save_image converts RGB to BGR by default for non-EXR
    # So we expect convert_rgb_to_bgr to be called internally,
    # and cv2.imwrite to receive BGR data.
    # We can mock convert_rgb_to_bgr if we want to be very specific,
    # or trust its own unit tests and check the data passed to imwrite.
    # For simplicity, let's assume convert_rgb_to_bgr works and imwrite gets BGR.
    # The function copies data, so we can check the mock call.

    success = ipu.save_image(save_path, img_data, convert_to_bgr_before_save=True)

    assert success is True
    mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)

    # Check that imwrite was called. The first arg to assert_called_once_with is the path.
    # The second arg is the image data. We need to compare it carefully.
    # Since convert_rgb_to_bgr is called internally, the data passed to imwrite will be BGR.
    # Let's create expected BGR data.
    expected_bgr_data = cv2.cvtColor(img_data, cv2.COLOR_RGB2BGR)

    args, kwargs = mock_cv2_imwrite.call_args
    assert args[0] == str(Path(save_path))
    assert np.array_equal(args[1], expected_bgr_data)


@mock.patch('cv2.imwrite')
@mock.patch('pathlib.Path.mkdir')
def test_save_image_success_exr_no_bgr_conversion(mock_mkdir, mock_cv2_imwrite):
    mock_cv2_imwrite.return_value = True
    img_data_rgb_float = np.random.rand(10,10,3).astype(np.float32) # RGB float for EXR
    save_path = "output/test.exr"

    success = ipu.save_image(save_path, img_data_rgb_float, output_format="exr", convert_to_bgr_before_save=False)

    assert success is True
    mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
|
||||||
|
args, kwargs = mock_cv2_imwrite.call_args
|
||||||
|
assert args[0] == str(Path(save_path))
|
||||||
|
assert np.array_equal(args[1], img_data_rgb_float) # Should be original RGB data
|
||||||
|
|
||||||
|
@mock.patch('cv2.imwrite')
|
||||||
|
@mock.patch('pathlib.Path.mkdir')
|
||||||
|
def test_save_image_success_explicit_bgr_false_png(mock_mkdir, mock_cv2_imwrite):
|
||||||
|
mock_cv2_imwrite.return_value = True
|
||||||
|
img_data_rgb = np.zeros((10,10,3), dtype=np.uint8) # RGB
|
||||||
|
save_path = "output/test.png"
|
||||||
|
|
||||||
|
# If convert_to_bgr_before_save is False, it should save RGB as is.
|
||||||
|
# However, OpenCV's imwrite for PNG might still expect BGR.
|
||||||
|
# The function's docstring says: "If True and image is 3-channel, converts RGB to BGR."
|
||||||
|
# So if False, it passes the data as is.
|
||||||
|
success = ipu.save_image(save_path, img_data_rgb, convert_to_bgr_before_save=False)
|
||||||
|
|
||||||
|
assert success is True
|
||||||
|
mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
|
||||||
|
args, kwargs = mock_cv2_imwrite.call_args
|
||||||
|
assert args[0] == str(Path(save_path))
|
||||||
|
assert np.array_equal(args[1], img_data_rgb)
|
||||||
|
|
||||||
|
|
||||||
|
@mock.patch('cv2.imwrite')
|
||||||
|
@mock.patch('pathlib.Path.mkdir')
|
||||||
|
def test_save_image_failure(mock_mkdir, mock_cv2_imwrite):
|
||||||
|
mock_cv2_imwrite.return_value = False
|
||||||
|
img_data = np.zeros((10,10,3), dtype=np.uint8)
|
||||||
|
save_path = "output/fail.png"
|
||||||
|
|
||||||
|
success = ipu.save_image(save_path, img_data)
|
||||||
|
|
||||||
|
assert success is False
|
||||||
|
mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
|
||||||
|
mock_cv2_imwrite.assert_called_once() # Check it was called
|
||||||
|
|
||||||
|
def test_save_image_none_data():
|
||||||
|
assert ipu.save_image("output/none.png", None) is False
|
||||||
|
|
||||||
|
@mock.patch('cv2.imwrite', side_effect=Exception("CV2 Write Error"))
|
||||||
|
@mock.patch('pathlib.Path.mkdir')
|
||||||
|
def test_save_image_exception(mock_mkdir, mock_cv2_imwrite_exception):
|
||||||
|
img_data = np.zeros((10,10,3), dtype=np.uint8)
|
||||||
|
save_path = "output/exception.png"
|
||||||
|
|
||||||
|
success = ipu.save_image(save_path, img_data)
|
||||||
|
|
||||||
|
assert success is False
|
||||||
|
mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
|
||||||
|
mock_cv2_imwrite_exception.assert_called_once()
|
||||||
|
|
||||||
|
# Test data type conversions in save_image
@pytest.mark.parametrize(
    "input_dtype, input_data_producer, output_dtype_target, expected_conversion_dtype, check_scaling",
    [
        (np.uint16, lambda: np.random.randint(0, 65535, (10, 10, 3), dtype=np.uint16), np.uint8, np.uint8, True),
        (np.float32, lambda: np.random.rand(10, 10, 3).astype(np.float32), np.uint8, np.uint8, True),
        (np.uint8, lambda: np.random.randint(0, 255, (10, 10, 3), dtype=np.uint8), np.uint16, np.uint16, True),
        (np.float32, lambda: np.random.rand(10, 10, 3).astype(np.float32), np.uint16, np.uint16, True),
        (np.uint8, lambda: np.random.randint(0, 255, (10, 10, 3), dtype=np.uint8), np.float16, np.float16, True),
        (np.uint16, lambda: np.random.randint(0, 65535, (10, 10, 3), dtype=np.uint16), np.float32, np.float32, True),
    ]
)
@mock.patch('cv2.imwrite')
@mock.patch('pathlib.Path.mkdir')
def test_save_image_dtype_conversion(mock_mkdir, mock_cv2_imwrite, input_dtype, input_data_producer, output_dtype_target, expected_conversion_dtype, check_scaling):
    mock_cv2_imwrite.return_value = True
    img_data = input_data_producer()
    original_img_data_copy = img_data.copy()  # For checking scaling if needed

    ipu.save_image("output/dtype_test.png", img_data, output_dtype_target=output_dtype_target)

    mock_cv2_imwrite.assert_called_once()
    saved_img_data = mock_cv2_imwrite.call_args[0][1]  # The image data passed to imwrite

    assert saved_img_data.dtype == expected_conversion_dtype

    if check_scaling:
        # Basic checks only; more precise checks would require known input/output values.
        if output_dtype_target == np.uint8:
            if input_dtype == np.uint16:
                expected_scaled_data = (original_img_data_copy.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)
                assert np.allclose(saved_img_data, cv2.cvtColor(expected_scaled_data, cv2.COLOR_RGB2BGR), atol=1)  # Allow small diff due to float precision
            elif input_dtype in [np.float16, np.float32, np.float64]:
                expected_scaled_data = (np.clip(original_img_data_copy, 0.0, 1.0) * 255.0).astype(np.uint8)
                assert np.allclose(saved_img_data, cv2.cvtColor(expected_scaled_data, cv2.COLOR_RGB2BGR), atol=1)
        elif output_dtype_target == np.uint16:
            if input_dtype == np.uint8:
                expected_scaled_data = (original_img_data_copy.astype(np.float32) / 255.0 * 65535.0).astype(np.uint16)
                assert np.allclose(saved_img_data, cv2.cvtColor(expected_scaled_data, cv2.COLOR_RGB2BGR), atol=1)
            elif input_dtype in [np.float16, np.float32, np.float64]:
                expected_scaled_data = (np.clip(original_img_data_copy, 0.0, 1.0) * 65535.0).astype(np.uint16)
                assert np.allclose(saved_img_data, cv2.cvtColor(expected_scaled_data, cv2.COLOR_RGB2BGR), atol=1)
        # Add more scaling checks for float16/float32 targets if necessary


# --- Tests for calculate_image_stats ---

def test_calculate_image_stats_grayscale_uint8():
    img_data = np.array([[0, 128], [255, 10]], dtype=np.uint8)
    # Expected normalized: [[0, 0.50196], [1.0, 0.03921]] approx
    stats = ipu.calculate_image_stats(img_data)
    assert stats is not None
    assert np.isclose(stats["min"], 0 / 255.0)
    assert np.isclose(stats["max"], 255 / 255.0)
    assert np.isclose(stats["mean"], np.mean(img_data.astype(np.float64) / 255.0))


def test_calculate_image_stats_color_uint8():
    img_data = np.array([
        [[0, 50, 100], [10, 60, 110]],
        [[255, 128, 200], [20, 70, 120]]
    ], dtype=np.uint8)
    stats = ipu.calculate_image_stats(img_data)
    assert stats is not None
    # Min per channel (normalized)
    assert np.allclose(stats["min"], [0 / 255.0, 50 / 255.0, 100 / 255.0])
    # Max per channel (normalized)
    assert np.allclose(stats["max"], [255 / 255.0, 128 / 255.0, 200 / 255.0])
    # Mean per channel (normalized)
    expected_mean = np.mean(img_data.astype(np.float64) / 255.0, axis=(0, 1))
    assert np.allclose(stats["mean"], expected_mean)


def test_calculate_image_stats_grayscale_uint16():
    img_data = np.array([[0, 32768], [65535, 1000]], dtype=np.uint16)
    stats = ipu.calculate_image_stats(img_data)
    assert stats is not None
    assert np.isclose(stats["min"], 0 / 65535.0)
    assert np.isclose(stats["max"], 65535 / 65535.0)
    assert np.isclose(stats["mean"], np.mean(img_data.astype(np.float64) / 65535.0))


def test_calculate_image_stats_color_float32():
    # Floats are assumed to be in the 0-1 range already by the function's normalization logic
    img_data = np.array([
        [[0.0, 0.2, 0.4], [0.1, 0.3, 0.5]],
        [[1.0, 0.5, 0.8], [0.05, 0.25, 0.6]]
    ], dtype=np.float32)
    stats = ipu.calculate_image_stats(img_data)
    assert stats is not None
    assert np.allclose(stats["min"], [0.0, 0.2, 0.4])
    assert np.allclose(stats["max"], [1.0, 0.5, 0.8])
    expected_mean = np.mean(img_data.astype(np.float64), axis=(0, 1))
    assert np.allclose(stats["mean"], expected_mean)


def test_calculate_image_stats_none_input():
    assert ipu.calculate_image_stats(None) is None


def test_calculate_image_stats_unsupported_shape():
    img_data = np.zeros((2, 2, 2, 2), dtype=np.uint8)  # 4D array
    assert ipu.calculate_image_stats(img_data) is None


@mock.patch('numpy.mean', side_effect=Exception("Numpy error"))
def test_calculate_image_stats_exception_during_calculation(mock_np_mean):
    img_data = np.array([[0, 128], [255, 10]], dtype=np.uint8)
    stats = ipu.calculate_image_stats(img_data)
    assert stats == {"error": "Error calculating image stats"}


# Example of mocking ipu.load_image for a function that uses it. The current
# calculate_image_stats takes image_data directly, so this is not needed for it;
# it is kept only as a conceptual example for a hypothetical wrapper.
@mock.patch('processing.utils.image_processing_utils.load_image')
def test_hypothetical_function_using_load_image(mock_load_image):
    # This test is for a function that would call ipu.load_image internally, e.g.:
    # def process_image_from_path(path):
    #     img_data = ipu.load_image(path)
    #     return ipu.calculate_image_stats(img_data)

    mock_img_data = np.array([[[0.5]]], dtype=np.float32)
    mock_load_image.return_value = mock_img_data

    # result = ipu.hypothetical_process_image_from_path("dummy.png")
    # mock_load_image.assert_called_once_with("dummy.png")
    # assert result["mean"] == 0.5
    pass  # This is a conceptual example
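
Note: the dtype-conversion cases above encode an assumed rescaling policy for save_image: integer input is normalized by its own type's maximum, float input is clipped to [0, 1], and the normalized value is then expanded to the target type's range. A minimal standalone sketch of that policy follows; the helper name convert_dtype_for_save is an illustration for this note, not part of the diff.

import numpy as np


def convert_dtype_for_save(img: np.ndarray, target_dtype) -> np.ndarray:
    """Rescale image data to the target dtype the way the tests above expect."""
    # Normalize the input to float in [0, 1]: integers by their type maximum, floats by clipping.
    if np.issubdtype(img.dtype, np.integer):
        normalized = img.astype(np.float32) / np.iinfo(img.dtype).max
    else:
        normalized = np.clip(img.astype(np.float32), 0.0, 1.0)
    # Expand to the target range: integer targets get the full integer range, float targets stay in 0-1.
    if np.issubdtype(target_dtype, np.integer):
        return (normalized * np.iinfo(target_dtype).max).astype(target_dtype)
    return normalized.astype(target_dtype)


# The uint16 -> uint8 parametrize case above reduces to original / 65535 * 255:
u16 = np.random.randint(0, 65535, (4, 4, 3), dtype=np.uint16)
assert convert_dtype_for_save(u16, np.uint8).dtype == np.uint8

This is why the uint16 -> uint8 case compares against original / 65535.0 * 255.0 with atol=1: the round-trip through float32 can shift individual values by at most one step.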
1
tests/utils/__init__.py
Normal file
@@ -0,0 +1 @@
# This file makes the 'tests/utils' directory a Python package.
252
tests/utils/test_path_utils.py
Normal file
@@ -0,0 +1,252 @@
import pytest
from pathlib import Path
from utils.path_utils import sanitize_filename, generate_path_from_pattern


# Tests for sanitize_filename
def test_sanitize_filename_valid():
    assert sanitize_filename("valid_filename.txt") == "valid_filename.txt"


def test_sanitize_filename_with_spaces():
    assert sanitize_filename("file name with spaces.txt") == "file_name_with_spaces.txt"


def test_sanitize_filename_with_special_characters():
    assert sanitize_filename("file!@#$%^&*()[]{};:'\",.<>/?\\|.txt") == "file____________________.txt"


def test_sanitize_filename_with_leading_trailing_whitespace():
    assert sanitize_filename(" filename_with_spaces .txt") == "filename_with_spaces.txt"


def test_sanitize_filename_empty_string():
    assert sanitize_filename("") == ""


def test_sanitize_filename_with_none():
    with pytest.raises(TypeError):
        sanitize_filename(None)


def test_sanitize_filename_mixed_case():
    assert sanitize_filename("MixedCaseFileName.PNG") == "MixedCaseFileName.PNG"


def test_sanitize_filename_long_filename():
    long_name = "a" * 255 + ".txt"
    # Assuming the function doesn't truncate, but sanitizes.
    # If it's meant to handle OS limits, this test might need adjustment
    # based on the function's specific behavior for long names.
    assert sanitize_filename(long_name) == long_name


def test_sanitize_filename_unicode_characters():
    assert sanitize_filename("文件名前缀_文件名_后缀.jpg") == "文件名前缀_文件名_后缀.jpg"


def test_sanitize_filename_multiple_extensions():
    assert sanitize_filename("archive.tar.gz") == "archive.tar.gz"


def test_sanitize_filename_no_extension():
    assert sanitize_filename("filename") == "filename"


def test_sanitize_filename_only_special_chars():
    assert sanitize_filename("!@#$%^") == "______"


def test_sanitize_filename_with_hyphens_and_underscores():
    assert sanitize_filename("file-name_with-hyphens_and_underscores.zip") == "file-name_with-hyphens_and_underscores.zip"
# Tests for generate_path_from_pattern
def test_generate_path_basic():
    result = generate_path_from_pattern(
        base_path="output",
        pattern="{asset_name}/{map_type}/{filename}",
        asset_name="MyAsset",
        map_type="Diffuse",
        filename="MyAsset_Diffuse.png",
        source_rule_name="TestRule",
        incrementing_value=None,
        sha5_value=None
    )
    expected = Path("output/MyAsset/Diffuse/MyAsset_Diffuse.png")
    assert Path(result) == expected


def test_generate_path_all_placeholders():
    result = generate_path_from_pattern(
        base_path="project_files",
        pattern="{source_rule_name}/{asset_name}/{map_type}_{incrementing_value}_{sha5_value}/{filename}",
        asset_name="AnotherAsset",
        map_type="Normal",
        filename="NormalMap.tif",
        source_rule_name="ComplexRule",
        incrementing_value="001",
        sha5_value="abcde"
    )
    expected = Path("project_files/ComplexRule/AnotherAsset/Normal_001_abcde/NormalMap.tif")
    assert Path(result) == expected


def test_generate_path_optional_placeholders_none():
    result = generate_path_from_pattern(
        base_path="data",
        pattern="{asset_name}/{filename}",
        asset_name="SimpleAsset",
        map_type="Albedo",  # map_type is passed but unused because it is not in the pattern
        filename="texture.jpg",
        source_rule_name="Basic",
        incrementing_value=None,
        sha5_value=None
    )
    expected = Path("data/SimpleAsset/texture.jpg")
    assert Path(result) == expected


def test_generate_path_optional_incrementing_value_present():
    result = generate_path_from_pattern(
        base_path="assets",
        pattern="{asset_name}/{map_type}/v{incrementing_value}/{filename}",
        asset_name="VersionedAsset",
        map_type="Specular",
        filename="spec.png",
        source_rule_name="VersioningRule",
        incrementing_value="3",
        sha5_value=None
    )
    expected = Path("assets/VersionedAsset/Specular/v3/spec.png")
    assert Path(result) == expected


def test_generate_path_optional_sha5_value_present():
    result = generate_path_from_pattern(
        base_path="cache",
        pattern="{asset_name}/{sha5_value}/{filename}",
        asset_name="HashedAsset",
        map_type="Roughness",
        filename="rough.exr",
        source_rule_name="HashingRule",
        incrementing_value=None,
        sha5_value="f1234"
    )
    expected = Path("cache/HashedAsset/f1234/rough.exr")
    assert Path(result) == expected


def test_generate_path_base_path_is_path_object():
    result = generate_path_from_pattern(
        base_path=Path("output_path"),
        pattern="{asset_name}/{filename}",
        asset_name="ObjectAsset",
        map_type="AO",
        filename="ao.png",
        source_rule_name="PathObjectRule",
        incrementing_value=None,
        sha5_value=None
    )
    expected = Path("output_path/ObjectAsset/ao.png")
    assert Path(result) == expected


def test_generate_path_empty_pattern():
    result = generate_path_from_pattern(
        base_path="output",
        pattern="",  # Empty pattern should just use base_path and filename
        asset_name="MyAsset",
        map_type="Diffuse",
        filename="MyAsset_Diffuse.png",
        source_rule_name="TestRule",
        incrementing_value=None,
        sha5_value=None
    )
    expected = Path("output/MyAsset_Diffuse.png")
    assert Path(result) == expected


def test_generate_path_pattern_with_no_placeholders():
    result = generate_path_from_pattern(
        base_path="fixed_output",
        pattern="some/static/path",  # Pattern has no placeholders
        asset_name="MyAsset",
        map_type="Diffuse",
        filename="MyAsset_Diffuse.png",
        source_rule_name="TestRule",
        incrementing_value=None,
        sha5_value=None
    )
    expected = Path("fixed_output/some/static/path/MyAsset_Diffuse.png")
    assert Path(result) == expected


def test_generate_path_filename_with_subdirs_in_pattern():
    result = generate_path_from_pattern(
        base_path="output",
        pattern="{asset_name}",  # Filename itself will be appended
        asset_name="AssetWithSubdirFile",
        map_type="Color",
        filename="textures/variant1/color.png",  # Filename contains subdirectories
        source_rule_name="SubdirRule",
        incrementing_value=None,
        sha5_value=None
    )
    # The function is expected to join the pattern result with the filename
    expected = Path("output/AssetWithSubdirFile/textures/variant1/color.png")
    assert Path(result) == expected


def test_generate_path_no_filename_provided():
    # This test assumes a missing filename is an error rather than something the function
    # silently tolerates. Adjust the expected exception type (TypeError vs ValueError)
    # if the actual implementation behaves differently.
    with pytest.raises(TypeError):
        generate_path_from_pattern(
            base_path="output",
            pattern="{asset_name}/{map_type}",
            asset_name="MyAsset",
            map_type="Diffuse",
            filename=None,  # No filename
            source_rule_name="TestRule",
            incrementing_value=None,
            sha5_value=None
        )


def test_generate_path_all_values_are_empty_strings_or_none_where_applicable():
    result = generate_path_from_pattern(
        base_path="",  # Empty base_path
        pattern="{asset_name}/{map_type}/{incrementing_value}/{sha5_value}",
        asset_name="",  # Empty asset_name
        map_type="",  # Empty map_type
        filename="empty_test.file",
        source_rule_name="",  # Empty source_rule_name
        incrementing_value="",  # Empty incrementing_value
        sha5_value=""  # Empty sha5_value
    )
    # With every segment empty, the expanded pattern is just slashes ("///"). Joining such
    # segments literally would give "////empty_test.file", which is platform dependent
    # (POSIX keeps a leading "/", Windows may treat "//" as a network path), so the function
    # is expected to drop empty segments, the way os.path.join("", "", "", "", "",
    # "empty_test.file") collapses to "empty_test.file" on both Windows and Linux.
    assert Path(result) == Path("empty_test.file")


def test_generate_path_with_dots_in_placeholders():
    result = generate_path_from_pattern(
        base_path="output",
        pattern="{asset_name}/{map_type}",
        asset_name="My.Asset.V1",
        map_type="Diffuse.Main",
        filename="texture.png",
        source_rule_name="DotsRule",
        incrementing_value=None,
        sha5_value=None
    )
    expected = Path("output/My.Asset.V1/Diffuse.Main/texture.png")
    assert Path(result) == expected
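
The tests above pin down a calling convention rather than a specific implementation: generate_path_from_pattern expands the {asset_name}, {map_type}, {source_rule_name}, {incrementing_value}, {sha5_value} and optional {filename} placeholders, appends the filename when the pattern does not already contain it, and collapses empty segments. A rough sketch that satisfies the simple cases above follows; generate_path_sketch is an illustration for this note, not the code under test, and edge cases such as filename=None are not handled.

from pathlib import Path
from typing import Optional


def generate_path_sketch(base_path, pattern: str, asset_name: str, map_type: str,
                         filename: str, source_rule_name: str,
                         incrementing_value: Optional[str], sha5_value: Optional[str]) -> str:
    # Expand the placeholders; optional None values become empty segments.
    expanded = pattern.format(
        asset_name=asset_name,
        map_type=map_type,
        filename=filename,
        source_rule_name=source_rule_name,
        incrementing_value=incrementing_value or "",
        sha5_value=sha5_value or "",
    )
    # Append the filename if the pattern did not already place it.
    if "{filename}" not in pattern:
        expanded = f"{expanded}/{filename}" if expanded else filename
    # Drop empty segments so empty base paths or placeholders collapse cleanly.
    parts = [p for p in str(base_path).split("/") + expanded.split("/") if p]
    return str(Path(*parts))


# The "basic" case from the tests above:
print(generate_path_sketch("output", "{asset_name}/{map_type}/{filename}",
                           "MyAsset", "Diffuse", "MyAsset_Diffuse.png",
                           "TestRule", None, None))
# -> output/MyAsset/Diffuse/MyAsset_Diffuse.png (with the platform's separator)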
@@ -154,6 +154,48 @@ def get_next_incrementing_value(output_base_path: Path, output_directory_pattern
    logger.info(f"Determined next incrementing value: {next_value_str} (Max found: {max_value})")
    return next_value_str


def sanitize_filename(name: str) -> str:
    """Removes or replaces characters invalid for filenames/directory names."""
    if not isinstance(name, str): name = str(name)
    name = re.sub(r'[^\w.\-]+', '_', name)  # Allow alphanumeric, underscore, hyphen, dot
    name = re.sub(r'_+', '_', name)
    name = name.strip('_')
    if not name: name = "invalid_name"
    return name


def get_filename_friendly_map_type(internal_map_type: str, file_type_definitions: Optional[Dict[str, Dict]]) -> str:
    """Derives a filename-friendly map type from the internal map type."""
    filename_friendly_map_type = internal_map_type  # Fallback
    if not file_type_definitions or not isinstance(file_type_definitions, dict):
        logger.warning(f"Filename-friendly lookup: FILE_TYPE_DEFINITIONS not available or invalid. Falling back to internal type: {internal_map_type}")
        return filename_friendly_map_type

    base_map_key_val = None
    suffix_part = ""
    # Sort keys by length descending to match the longest prefix first (e.g., MAP_ROUGHNESS before MAP_ROUGH)
    sorted_known_base_keys = sorted(list(file_type_definitions.keys()), key=len, reverse=True)

    for known_key in sorted_known_base_keys:
        if internal_map_type.startswith(known_key):
            base_map_key_val = known_key
            suffix_part = internal_map_type[len(known_key):]
            break

    if base_map_key_val:
        definition = file_type_definitions.get(base_map_key_val)
        if definition and isinstance(definition, dict):
            standard_type_alias = definition.get("standard_type")
            if standard_type_alias and isinstance(standard_type_alias, str) and standard_type_alias.strip():
                filename_friendly_map_type = standard_type_alias.strip() + suffix_part
                logger.debug(f"Filename-friendly lookup: Transformed '{internal_map_type}' -> '{filename_friendly_map_type}'")
            else:
                logger.warning(f"Filename-friendly lookup: Standard type alias for '{base_map_key_val}' is missing or invalid. Falling back.")
        else:
            logger.warning(f"Filename-friendly lookup: No valid definition for '{base_map_key_val}'. Falling back.")
    else:
        logger.warning(f"Filename-friendly lookup: Could not parse base key from '{internal_map_type}'. Falling back.")

    return filename_friendly_map_type

# --- Basic Unit Tests ---
if __name__ == "__main__":
    print("Running basic tests for path_utils.generate_path_from_pattern...")
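
For reference, a short usage sketch of the two helpers added in this hunk. The FILE_TYPE_DEFINITIONS content below is invented for illustration; the real definitions come from the project's configuration, and the import assumes the helpers live in utils.path_utils alongside generate_path_from_pattern.

from utils.path_utils import sanitize_filename, get_filename_friendly_map_type

# Invented definitions for illustration only.
file_type_definitions = {
    "MAP_ROUGHNESS": {"standard_type": "Roughness"},
    "MAP_ROUGH": {"standard_type": "Rough"},
}

# Longest-prefix match wins and the remaining suffix is kept.
print(get_filename_friendly_map_type("MAP_ROUGHNESS_2K", file_type_definitions))  # Roughness_2K
print(sanitize_filename("My Asset (final)!.png"))  # My_Asset_final_.png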