Compare commits

1 Commit

Author SHA1 Message Date
3927f8e6c0 Merge pull request 'Prototype > PreAlpha' (#67) from Dev into Stable
Reviewed-on: #67
2025-05-15 09:10:53 +02:00
39 changed files with 571 additions and 1303 deletions

View File

@@ -1,46 +0,0 @@
{
"mcpServers": {
"conport": {
"command": "C:\\Users\\theis\\context-portal\\.venv\\Scripts\\python.exe",
"args": [
"C:\\Users\\theis\\context-portal\\src\\context_portal_mcp\\main.py",
"--mode",
"stdio",
"--workspace_id",
"${workspaceFolder}"
],
"alwaysAllow": [
"get_product_context",
"update_product_context",
"get_active_context",
"update_active_context",
"log_decision",
"get_decisions",
"search_decisions_fts",
"log_progress",
"get_progress",
"update_progress",
"delete_progress_by_id",
"log_system_pattern",
"get_system_patterns",
"log_custom_data",
"get_custom_data",
"delete_custom_data",
"search_project_glossary_fts",
"export_conport_to_markdown",
"import_markdown_to_conport",
"link_conport_items",
"search_custom_data_value_fts",
"get_linked_items",
"batch_log_items",
"get_item_history",
"delete_decision_by_id",
"delete_system_pattern_by_id",
"get_conport_schema",
"get_recent_activity_summary",
"semantic_search_conport",
"search_system_patterns_fts"
]
}
}
}

View File

@@ -1,15 +0,0 @@
{
"customModes": [
{
"slug": "Task-Initiator",
"name": "Task Initiator",
"roleDefinition": "You are Task Initiator. Your exclusive function is comprehensive initial context gathering, focusing solely on ConPort data. Do NOT perform other tasks or use direct file system tools for context gathering.",
"customInstructions": "1. First, execute standard initial context setup procedures (as per global ConPort strategy).\n2. Next, if a specific user request is pending, YOU, as Task Initiator, should analyze it and proactively gather relevant information, strictly by querying ConPort. Your process for this is:\n a. Identify the key subject(s) of the request.\n b. Loosely search relevant ConPort data for information or summaries related to these identified subject(s).\n3. After completing both standard setup AND any ConPort-based task-specific gathering, briefly report the overall context status. This report must cover ConPort initialization and summarize any specific information found (or explicitly not found) within ConPort relevant to the user's request.\n4. Then, output `[TASK_INITIATOR_COMPLETE]`.\n5. Finally, to address the user's main request with the context you've gathered (or confirmed is missing from ConPort), use the `switch_mode` tool to transition to the determined most appropriate mode by analysing the initial request. you should ALWAYS finish context-gathering before switching modes.",
"groups": [
"mcp",
"read"
],
"source": "project"
}
]
}

View File

@@ -16,7 +16,6 @@ This document outlines the key features of the Asset Processor Tool.
* Saves maps in appropriate formats (JPG, PNG, EXR) based on complex rules involving map type (`FORCE_LOSSLESS_MAP_TYPES`), resolution (`RESOLUTION_THRESHOLD_FOR_JPG`), bit depth, and source format.
* Calculates basic image statistics (Min/Max/Mean) for a reference resolution.
* Calculates and stores the relative aspect ratio change string in metadata (e.g., `EVEN`, `X150`, `Y125`).
* **Low-Resolution Fallback:** If enabled (`ENABLE_LOW_RESOLUTION_FALLBACK`), automatically saves an additional "LOWRES" variant of source images if their largest dimension is below a configurable threshold (`LOW_RESOLUTION_THRESHOLD`). This "LOWRES" variant uses the original image dimensions and is saved in addition to any standard resolution outputs.
* **Channel Merging:** Combines channels from different maps into packed textures (e.g., NRMRGH) based on preset rules (`MAP_MERGE_RULES` in `config.py`).
* **Metadata Generation:** Creates a `metadata.json` file for each asset containing details about maps, category, archetype, aspect ratio change, processing settings, etc.
* **Output Organization:** Creates a clean, structured output directory (`<output_base>/<supplier>/<asset_name>/`).
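For illustration only, a minimal sketch of the kind of format decision described in the bullets above; `FORCE_LOSSLESS_MAP_TYPES` and `RESOLUTION_THRESHOLD_FOR_JPG` are real config names, but the example values and the exact rule ordering here are assumptions:

```python
# Hypothetical sketch of the JPG/PNG/EXR decision; not the tool's actual logic.
FORCE_LOSSLESS_MAP_TYPES = {"MAP_NRM", "MAP_DISP"}   # assumed example values
RESOLUTION_THRESHOLD_FOR_JPG = 2048                  # assumed example value

def choose_output_format(map_type: str, largest_dim: int, bit_depth: int) -> str:
    """Pick an output format for one processed map (illustrative only)."""
    if bit_depth > 8:
        return "EXR"  # high bit-depth data stays in a lossless/float container
    if map_type in FORCE_LOSSLESS_MAP_TYPES:
        return "PNG"  # these map types are never saved lossy
    if largest_dim >= RESOLUTION_THRESHOLD_FOR_JPG:
        return "JPG"  # large 8-bit maps may use lossy compression
    return "PNG"
```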

View File

@@ -13,18 +13,6 @@ The `app_settings.json` file is structured into several key sections, including:
* `ASSET_TYPE_DEFINITIONS`: Defines known asset types (like Surface, Model, Decal) and their properties.
* `MAP_MERGE_RULES`: Defines how multiple input maps can be merged into a single output map (e.g., combining Normal and Roughness into one).
### Low-Resolution Fallback Settings
These settings control the generation of low-resolution "fallback" variants for source images:
* `ENABLE_LOW_RESOLUTION_FALLBACK` (boolean, default: `true`):
* If `true`, the tool will generate an additional "LOWRES" variant for source images whose largest dimension is smaller than the `LOW_RESOLUTION_THRESHOLD`.
* This "LOWRES" variant uses the original dimensions of the source image and is saved in addition to any other standard resolution outputs (e.g., 1K, PREVIEW).
* If `false`, this feature is disabled.
* `LOW_RESOLUTION_THRESHOLD` (integer, default: `512`):
* Defines the pixel dimension (for the largest side of an image) below which the "LOWRES" fallback variant will be generated (if enabled).
* For example, if set to `512`, any source image smaller than 512x512 (e.g., 256x512, 128x128) will have a "LOWRES" variant created.
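A minimal sketch of how these two settings interact, assuming the threshold is compared against the larger image dimension as described above:

```python
def needs_lowres_variant(width: int, height: int, settings: dict) -> bool:
    """Return True if a "LOWRES" variant should be generated (illustrative only)."""
    if not settings.get("ENABLE_LOW_RESOLUTION_FALLBACK", True):
        return False
    return max(width, height) < settings.get("LOW_RESOLUTION_THRESHOLD", 512)

# Example: a 256x256 source image falls below the default 512 threshold.
print(needs_lowres_variant(256, 256, {}))  # True
```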
### LLM Predictor Settings
For users who wish to utilize the experimental LLM Predictor feature, the following settings are available in `config/llm_settings.json`:

View File

@@ -58,7 +58,6 @@ The `<output_base_directory>` (the root folder where processing output starts) i
Each asset directory contains the following:
* Processed texture maps (e.g., `WoodFloor_Albedo_4k.png`, `MetalPanel_Normal_2k.exr`). The exact filenames depend on the `OUTPUT_FILENAME_PATTERN`. These are the resized, format-converted, and bit-depth adjusted texture files.
* **LOWRES Variants:** If the "Low-Resolution Fallback" feature is enabled and a source image's dimensions are below the configured threshold, an additional variant with "LOWRES" as its resolution token (e.g., `MyTexture_COL_LOWRES.png`) will be saved. This variant uses the original dimensions of the source image.
* Merged texture maps (e.g., `WoodFloor_Combined_4k.png`). The exact filenames depend on the `OUTPUT_FILENAME_PATTERN`. These are maps created by combining channels from different source maps based on the configured merge rules.
* Model files (if present in the source asset).
* `metadata.json`: A JSON file containing detailed information about the asset and the processing that was performed. This includes details about the maps (resolutions, formats, bit depths, and for roughness maps, a `derived_from_gloss_filename: true` flag if it was inverted from an original gloss map), merged map details, calculated image statistics, aspect ratio change information, asset category and archetype, the source preset used, and a list of ignored source files. This file is intended for use by downstream tools or scripts (like the Blender integration scripts).
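For orientation, a hypothetical and heavily abbreviated example of the fields listed above, written as a Python dict that could be dumped to `metadata.json`; the actual key names and nesting are defined by the tool and may differ:

```python
# Hypothetical shape only; real key names/nesting come from the metadata generator.
example_metadata = {
    "asset_name": "WoodFloor",
    "category": "Surface",                 # asset category
    "archetype": "Wood",                   # asset archetype
    "aspect_ratio_change": "EVEN",
    "source_preset": "Dinesen",
    "maps": {
        "ROUGH": {
            "resolutions": ["4K", "1K", "PREVIEW"],
            "format": "png",
            "bit_depth": 8,
            "derived_from_gloss_filename": True,   # set when inverted from a gloss map
            "stats": {"min": 0.05, "max": 0.97, "mean": 0.43},
        }
    },
    "merged_maps": {"NRMRGH": {"inputs": ["NRM", "ROUGH"]}},
    "ignored_source_files": ["Thumbs.db"],
}
```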

View File

@@ -34,7 +34,7 @@ The script accepts several command-line arguments to configure the test run. If
* A string to search for within the application logs generated during the test run. If found, matching log lines (with context) will be highlighted.
* Default: None
* `--additional-lines NUM_LINES` (optional):
* When using `--search`, this specifies how many lines of context before and after each matching log line should be displayed. A good non-zero value is 1-2. * When using `--search`, this specifies how many lines of context before and after each matching log line should be displayed.
* Default: `0`
**Example Usage:**
@@ -81,5 +81,3 @@ When executed, `autotest.py` performs the following steps:
* **Output Directory:** Inspect the contents of the specified output directory to manually verify the processed assets if needed.
This automated test helps ensure the stability of the core processing logic when driven by GUI-equivalent actions.
Note: Under some conditions, the autotest will exit with error code "3221226505". This has no consequence and can therefore be ignored.

View File

@@ -12,9 +12,6 @@ The tool's configuration is loaded from several JSON files, providing a layered
1. **Application Settings (`config/app_settings.json`):** This JSON file defines the core global default settings, constants, and rules that apply generally across different asset sources (e.g., the global `OUTPUT_DIRECTORY_PATTERN` and `OUTPUT_FILENAME_PATTERN`, standard image resolutions, map merge rules, output format rules, Blender paths, temporary directory prefix, initial scaling mode, merge dimension mismatch strategy). See the [User Guide: Output Structure](../01_User_Guide/09_Output_Structure.md#available-tokens) for a list of available tokens for these patterns.
* *Note:* `ASSET_TYPE_DEFINITIONS` and `FILE_TYPE_DEFINITIONS` are no longer stored here; they have been moved to dedicated files.
* It also includes settings for new features like the "Low-Resolution Fallback":
* `ENABLE_LOW_RESOLUTION_FALLBACK` (boolean): Enables or disables the generation of "LOWRES" variants for small source images. Defaults to `true`.
* `LOW_RESOLUTION_THRESHOLD` (integer): The pixel dimension threshold (largest side) below which a "LOWRES" variant is created if the feature is enabled. Defaults to `512`.
2. **User Settings (`config/user_settings.json`):** This optional JSON file allows users to override specific settings defined in `config/app_settings.json`. If this file exists, its values for corresponding keys will take precedence over the base application settings. This file is primarily managed through the GUI's Application Preferences Editor.
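A rough sketch of the app/user layering described in points 1 and 2 above; the real `Configuration` class in `configuration.py` additionally layers preset, LLM, and definition files and validates the result, so this is illustrative only:

```python
import json
from pathlib import Path

def load_effective_settings(config_dir: Path) -> dict:
    """Illustrative only: user_settings.json (if present) overrides app_settings.json."""
    settings = json.loads((config_dir / "app_settings.json").read_text())
    user_path = config_dir / "user_settings.json"
    if user_path.exists():
        settings.update(json.loads(user_path.read_text()))  # key-by-key override
    return settings
```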

View File

@@ -50,44 +50,27 @@ These stages are executed sequentially once for each asset before the core item
### Core Item Processing Loop
The [`PipelineOrchestrator`](processing/pipeline/orchestrator.py:36) iterates through the `context.processing_items` list (populated by the [`PrepareProcessingItemsStage`](processing/pipeline/stages/prepare_processing_items.py:10)). Each `item` in this list is now either a [`ProcessingItem`](rule_structure.py:0) (representing a specific variant of a source map, e.g., Color at 1K, or Color at LOWRES) or a [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16). The [`PipelineOrchestrator`](processing/pipeline/orchestrator.py:36) iterates through the `context.processing_items` list (populated by the [`PrepareProcessingItemsStage`](processing/pipeline/stages/prepare_processing_items.py:10)). For each item (either a [`FileRule`](rule_structure.py:5) for a regular map or a [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16) for a merged map), the following stages are executed sequentially:
1. **[`PrepareProcessingItemsStage`](processing/pipeline/stages/prepare_processing_items.py:10)** (`processing/pipeline/stages/prepare_processing_items.py`):
* **Responsibility**: (Executed once before the loop) This stage is now responsible for "exploding" each relevant [`FileRule`](rule_structure.py:5) into one or more [`ProcessingItem`](rule_structure.py:0) objects. * **Responsibility**: (Executed once before the loop) Creates the `context.processing_items` list by combining [`FileRule`](rule_structure.py:5)s from `context.files_to_process` and [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16)s derived from the global `map_merge_rules` configuration. It correctly accesses `map_merge_rules` from `context.config_obj` and validates each merge rule for the presence of `output_map_type` and a dictionary for `inputs`. Initializes `context.intermediate_results`.
* For each [`FileRule`](rule_structure.py:5) that represents an image map: * **Context Interaction**: Reads from `context.files_to_process` and `context.config_obj` (accessing `map_merge_rules`). Populates `context.processing_items` and initializes `context.intermediate_results`.
* It loads the source image data and determines its original dimensions and bit depth.
* It creates standard [`ProcessingItem`](rule_structure.py:0)s for each required output resolution (e.g., "1K", "PREVIEW"), populating them with a copy of the source image data and the respective `resolution_key`.
* If the "Low-Resolution Fallback" feature is enabled (`ENABLE_LOW_RESOLUTION_FALLBACK` in config) and the source image's largest dimension is below `LOW_RESOLUTION_THRESHOLD`, it creates an additional [`ProcessingItem`](rule_structure.py:0) with `resolution_key="LOWRES"`, using the original image data and dimensions.
* It also adds [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16)s derived from global `map_merge_rules`.
* **Context Interaction**: Reads `context.files_to_process` and `context.config_obj`. Populates `context.processing_items` with a list of [`ProcessingItem`](rule_structure.py:0) and [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16) objects. Initializes `context.intermediate_results`.
For each `item` in `context.processing_items`: 2. **[`RegularMapProcessorStage`](processing/pipeline/stages/regular_map_processor.py:18)** (`processing/pipeline/stages/regular_map_processor.py`):
* **Responsibility**: (Executed per [`FileRule`](rule_structure.py:5) item) Checks if the `FileRule.item_type` starts with "MAP_". If not, the item is skipped. Otherwise, it loads the image data for the file, determines its potentially suffixed internal map type (e.g., "MAP_COL-1"), applies in-memory transformations (Gloss-to-Rough, Normal Green Invert) using the shared utility function [`apply_common_map_transformations`](processing/utils/image_processing_utils.py), and returns the processed image data and details in a [`ProcessedRegularMapData`](processing/pipeline/asset_context.py:23) object. The `internal_map_type` in the output reflects any transformations (e.g., "MAP_GLOSS" becomes "MAP_ROUGH").
2. **Transformations (Implicit or via a dedicated stage - formerly `RegularMapProcessorStage` logic):** * **Context Interaction**: Reads from the input [`FileRule`](rule_structure.py:5) (checking `item_type`) and [`Configuration`](configuration.py:68). Returns a [`ProcessedRegularMapData`](processing/pipeline/asset_context.py:23) object which is stored in `context.intermediate_results`.
* **Responsibility**: If the `item` is a [`ProcessingItem`](rule_structure.py:0), its `image_data` (loaded by `PrepareProcessingItemsStage`) may need transformations (Gloss-to-Rough, Normal Green Invert). This logic, previously in `RegularMapProcessorStage`, might be integrated into `PrepareProcessingItemsStage` before `ProcessingItem` creation, or handled by a new dedicated transformation stage that operates on `ProcessingItem.image_data`. The `item.map_type_identifier` would be updated if a transformation like Gloss-to-Rough occurs.
* **Context Interaction**: Modifies `item.image_data` and `item.map_type_identifier` within the [`ProcessingItem`](rule_structure.py:0) object.
3. **[`MergedTaskProcessorStage`](processing/pipeline/stages/merged_task_processor.py:68)** (`processing/pipeline/stages/merged_task_processor.py`):
* **Responsibility**: (Executed if `item` is a [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16)) Same as before: validates inputs, loads source map data (likely from `ProcessingItem`s in `context.processing_items` or a cache populated from them), applies transformations, merges channels, and returns [`ProcessedMergedMapData`](processing/pipeline/asset_context.py:35). * **Responsibility**: (Executed per [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16) item) Validates that all input map types specified in the merge rule start with "MAP_". If not, the task is failed. It dynamically loads input images by looking up the required input map types (e.g., "MAP_NRM") in `context.processed_maps_details` and using the temporary file paths from their `saved_files_info`. It applies in-memory transformations to inputs using [`apply_common_map_transformations`](processing/utils/image_processing_utils.py), handles dimension mismatches (with fallback creation if configured and `source_dimensions` are available), performs the channel merging operation, and returns the merged image data and details in a [`ProcessedMergedMapData`](processing/pipeline/asset_context.py:35) object. The `output_map_type` of the merged map must also be "MAP_" prefixed in the configuration.
* **Context Interaction**: Reads [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16), potentially `context.processing_items` (or a cache derived from it) for input image data. Returns [`ProcessedMergedMapData`](processing/pipeline/asset_context.py:35). * **Context Interaction**: Reads from the input [`MergeTaskDefinition`](processing/pipeline/asset_context.py:16) (checking input map types), `context.workspace_path`, `context.processed_maps_details` (for input image data), and [`Configuration`](configuration.py:68). Returns a [`ProcessedMergedMapData`](processing/pipeline/asset_context.py:35) object which is stored in `context.intermediate_results`.
4. **[`InitialScalingStage`](processing/pipeline/stages/initial_scaling.py:14)** (`processing/pipeline/stages/initial_scaling.py`):
* **Responsibility**: (Executed per item) * **Responsibility**: (Executed per item) Applies initial scaling (e.g., Power-of-Two downscaling) to the image data from the previous processing stage based on the `initial_scaling_mode` configuration.
* If `item` is a [`ProcessingItem`](rule_structure.py:0): Takes `item.image_data`, `item.current_dimensions`, and `item.resolution_key` as input. If `item.resolution_key` is "LOWRES", POT scaling is skipped. Otherwise, applies POT scaling if configured. * **Context Interaction**: Takes a [`InitialScalingInput`](processing/pipeline/asset_context.py:46) (containing image data and config) and returns an [`InitialScalingOutput`](processing/pipeline/asset_context.py:54) object, which updates the item's entry in `context.intermediate_results`.
* If `item` is from a `MergeTaskDefinition` (i.e., `processed_data` from `MergedTaskProcessorStage`): Applies POT scaling as before.
* **Context Interaction**: Takes [`InitialScalingInput`](processing/pipeline/asset_context.py:46) (now including `resolution_key`). Returns [`InitialScalingOutput`](processing/pipeline/asset_context.py:54) (also including `resolution_key`), which updates `context.intermediate_results`. The `current_image_data` and `current_dimensions` for saving are taken from this output.
5. **[`SaveVariantsStage`](processing/pipeline/stages/save_variants.py:15)** (`processing/pipeline/stages/save_variants.py`):
* **Responsibility**: (Executed per item) Saves the (potentially scaled) `current_image_data`. * **Responsibility**: (Executed per item) Takes the final processed image data (potentially scaled) and configuration, and calls a utility to save the image to temporary files in various resolutions and formats as defined by the configuration.
* **Context Interaction**: * **Context Interaction**: Takes a [`SaveVariantsInput`](processing/pipeline/asset_context.py:61) object (which includes the "MAP_" prefixed `internal_map_type`). It uses the `get_filename_friendly_map_type` utility to convert this to a "standard type" (e.g., "COL") for output naming. Returns a [`SaveVariantsOutput`](processing/pipeline/asset_context.py:79) object containing details about the saved temporary files. The orchestrator stores these details, including the original "MAP_" prefixed `internal_map_type`, in `context.processed_maps_details` for the item.
* Takes [`SaveVariantsInput`](processing/pipeline/asset_context.py:61).
* `internal_map_type` is set from `item.map_type_identifier` (for `ProcessingItem`) or `processed_data.output_map_type` (for merged).
* `output_filename_pattern_tokens['resolution']` is set to the `resolution_key` obtained from `scaled_data_output.resolution_key` (which originates from `item.resolution_key` for `ProcessingItem`s, or is `None` for merged items that get all standard resolutions).
* `image_resolutions` argument for `SaveVariantsInput`:
* If `resolution_key == "LOWRES"`: Set to `{"LOWRES": width_of_lowres_data}`.
* If `resolution_key` is a standard key (e.g., "1K"): Set to `{resolution_key: configured_dimension}`.
* For merged items (where `resolution_key` from scaling is likely `None`): Set to the full `config.image_resolutions` map to generate all applicable standard sizes.
* Returns [`SaveVariantsOutput`](processing/pipeline/asset_context.py:79). Orchestrator stores details in `context.processed_maps_details`.
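Putting the per-item stages together, a hypothetical outline of the loop; the stage and context names follow this document, but the stub types and placeholder stage results below are simplified assumptions, not the real API:

```python
from dataclasses import dataclass, field

# Simplified stand-ins for the real types in rule_structure.py / asset_context.py.
@dataclass
class FileRule:
    file_path: str
    item_type: str = "MAP_COL"

@dataclass
class MergeTaskDefinition:
    output_map_type: str = "MAP_NRMRGH"

@dataclass
class AssetContext:
    processing_items: list = field(default_factory=list)
    intermediate_results: dict = field(default_factory=dict)
    processed_maps_details: dict = field(default_factory=dict)

def run_item_loop(context: AssetContext) -> None:
    """Dispatch each queued item through the per-item stages, in order."""
    for item in context.processing_items:
        if isinstance(item, MergeTaskDefinition):
            processed = {"stage": "MergedTaskProcessorStage", "map": item.output_map_type}
        else:
            processed = {"stage": "RegularMapProcessorStage", "map": item.item_type}
        scaled = {**processed, "initial_scaling": "POT_DOWNSCALE"}   # InitialScalingStage
        saved = {**scaled, "saved_files_info": []}                   # SaveVariantsStage
        key = getattr(item, "file_path", getattr(item, "output_map_type", None))
        context.intermediate_results[key] = scaled
        context.processed_maps_details[key] = saved

ctx = AssetContext(processing_items=[FileRule("Wood_COL_4K.png"), MergeTaskDefinition()])
run_item_loop(ctx)
```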
### Post-Item Stages

View File

@@ -1,5 +1,5 @@
{ {
"preset_name": "Dinesen", "preset_name": "Dinesen Custom",
"supplier_name": "Dinesen", "supplier_name": "Dinesen",
"notes": "Preset for standard Poliigon downloads. Prioritizes _xxx16 files. Moves previews etc. to Extra/. Assumes Metal/Rough workflow.", "notes": "Preset for standard Poliigon downloads. Prioritizes _xxx16 files. Moves previews etc. to Extra/. Assumes Metal/Rough workflow.",
"source_naming": { "source_naming": {
@@ -10,7 +10,11 @@
}, },
"glossiness_keywords": [ "glossiness_keywords": [
"GLOSS" "GLOSS"
] ],
"bit_depth_variants": {
"NRM": "*_NRM16*",
"DISP": "*_DISP16*"
}
}, },
"move_to_extra_patterns": [ "move_to_extra_patterns": [
"*_Preview*", "*_Preview*",
@@ -21,8 +25,7 @@
"*.pdf", "*.pdf",
"*.url", "*.url",
"*.htm*", "*.htm*",
"*_Fabric.*", "*_Fabric.*"
"*_DISP_*METALNESS*"
], ],
"map_type_mapping": [ "map_type_mapping": [
{ {
@@ -43,11 +46,6 @@
"NORM*", "NORM*",
"NRM*", "NRM*",
"N" "N"
],
"priority_keywords": [
"*_NRM16*",
"*_NM16*",
"*Normal16*"
] ]
}, },
{ {
@@ -77,14 +75,6 @@
"DISP", "DISP",
"HEIGHT", "HEIGHT",
"BUMP" "BUMP"
],
"priority_keywords": [
"*_DISP16*",
"*_DSP16*",
"*DSP16*",
"*DISP16*",
"*Displacement16*",
"*Height16*"
] ]
}, },
{ {

View File

@@ -10,7 +10,11 @@
}, },
"glossiness_keywords": [ "glossiness_keywords": [
"GLOSS" "GLOSS"
] ],
"bit_depth_variants": {
"NRM": "*_NRM16*",
"DISP": "*_DISP16*"
}
}, },
"move_to_extra_patterns": [ "move_to_extra_patterns": [
"*_Preview*", "*_Preview*",
@@ -24,114 +28,7 @@
"*_Fabric.*", "*_Fabric.*",
"*_Albedo*" "*_Albedo*"
], ],
"map_type_mapping": [ "map_type_mapping": [],
{
"target_type": "MAP_COL",
"keywords": [
"COLOR*",
"COL",
"COL-*",
"DIFFUSE",
"DIF",
"ALBEDO"
]
},
{
"target_type": "MAP_NRM",
"keywords": [
"NORMAL*",
"NORM*",
"NRM*",
"N"
],
"priority_keywords": [
"*_NRM16*",
"*_NM16*",
"*Normal16*"
]
},
{
"target_type": "MAP_ROUGH",
"keywords": [
"ROUGHNESS",
"ROUGH"
]
},
{
"target_type": "MAP_GLOSS",
"keywords": [
"GLOSS"
]
},
{
"target_type": "MAP_AO",
"keywords": [
"AMBIENTOCCLUSION",
"AO"
]
},
{
"target_type": "MAP_DISP",
"keywords": [
"DISPLACEMENT",
"DISP",
"HEIGHT",
"BUMP"
],
"priority_keywords": [
"*_DISP16*",
"*_DSP16*",
"*DSP16*",
"*DISP16*",
"*Displacement16*",
"*Height16*"
]
},
{
"target_type": "MAP_REFL",
"keywords": [
"REFLECTION",
"REFL",
"SPECULAR",
"SPEC"
]
},
{
"target_type": "MAP_SSS",
"keywords": [
"SSS",
"SUBSURFACE*"
]
},
{
"target_type": "MAP_FUZZ",
"keywords": [
"FUZZ"
]
},
{
"target_type": "MAP_IDMAP",
"keywords": [
"IDMAP"
]
},
{
"target_type": "MAP_MASK",
"keywords": [
"OPAC*",
"TRANSP*",
"MASK*",
"ALPHA*"
]
},
{
"target_type": "MAP_METAL",
"keywords": [
"METAL*",
"METALLIC"
]
}
],
"asset_category_rules": { "asset_category_rules": {
"model_patterns": [ "model_patterns": [
"*.fbx", "*.fbx",

View File

@@ -1,9 +1,9 @@
{ {
"source_rules": [ "source_rules": [
{ {
"input_path": "BoucleChunky001.zip", "input_path": "BoucleChunky001.zip",
"supplier_identifier": "Dinesen", "supplier_identifier": "Dinesen",
"preset_name": "Dinesen", "preset_name": null,
"assets": [ "assets": [
{ {
"asset_name": "BoucleChunky001", "asset_name": "BoucleChunky001",
@@ -26,7 +26,7 @@
}, },
{ {
"file_path": "BoucleChunky001_DISP_1K_METALNESS.png", "file_path": "BoucleChunky001_DISP_1K_METALNESS.png",
"item_type": "EXTRA", "item_type": "MAP_DISP",
"target_asset_name_override": "BoucleChunky001" "target_asset_name_override": "BoucleChunky001"
}, },
{ {

View File

@@ -96,8 +96,6 @@ class InfoSummaryFilter(logging.Filter):
"verify: processingengine.process called", "verify: processingengine.process called",
": effective supplier set to", ": effective supplier set to",
": metadata initialized.", ": metadata initialized.",
"path",
"\\asset_processor",
": file rules queued for processing", ": file rules queued for processing",
"successfully loaded base application settings", "successfully loaded base application settings",
"successfully loaded and merged asset_type_definitions", "successfully loaded and merged asset_type_definitions",
@@ -110,6 +108,12 @@ class InfoSummaryFilter(logging.Filter):
"worker thread: finished processing for rule:", "worker thread: finished processing for rule:",
"task finished signal received for", "task finished signal received for",
# Autotest step markers (not global summaries) # Autotest step markers (not global summaries)
"step 1: loading zip file:",
"step 2: selecting preset:",
"step 4: retrieving and comparing rules...",
"step 5: starting processing...",
"step 7: checking output path:",
"output path check completed.",
] ]
def filter(self, record): def filter(self, record):
@@ -298,32 +302,15 @@ class AutoTester(QObject):
def run_test(self) -> None: def run_test(self) -> None:
"""Orchestrates the test steps.""" """Orchestrates the test steps."""
# Load expected rules first to potentially get the preset name logger.info("Starting test run...")
self._load_expected_rules() # Moved here
if not self.expected_rules_data: # Ensure rules were loaded if not self.expected_rules_data: # Ensure rules were loaded
logger.error("Expected rules not loaded. Aborting test.") logger.error("Expected rules not loaded. Aborting test.")
self.cleanup_and_exit(success=False) self.cleanup_and_exit(success=False)
return return
# Determine preset to use: from expected rules if available, else from CLI args
preset_to_use = self.cli_args.preset # Default
if self.expected_rules_data.get("source_rules") and \
isinstance(self.expected_rules_data["source_rules"], list) and \
len(self.expected_rules_data["source_rules"]) > 0 and \
isinstance(self.expected_rules_data["source_rules"][0], dict) and \
self.expected_rules_data["source_rules"][0].get("preset_name"):
preset_to_use = self.expected_rules_data["source_rules"][0]["preset_name"]
logger.info(f"Overriding preset with value from expected_rules.json: '{preset_to_use}'")
else:
logger.info(f"Using preset from CLI arguments: '{preset_to_use}' (this was self.cli_args.preset)")
# If preset_to_use is still self.cli_args.preset, ensure it's logged correctly
# The variable preset_to_use will hold the correct value to be used throughout.
logger.info("Starting test run...") # Moved after preset_to_use definition
# Add a specific summary log for essential context # Add a specific summary log for essential context
# This now correctly uses preset_to_use logger.info(f"Autotest Context: Input='{self.cli_args.zipfile.name}', Preset='{self.cli_args.preset}', Output='{self.cli_args.outputdir}'")
logger.info(f"Autotest Context: Input='{self.cli_args.zipfile.name}', Preset='{preset_to_use}', Output='{self.cli_args.outputdir}'")
# Step 1: Load ZIP # Step 1: Load ZIP
self.test_step = "LOADING_ZIP" self.test_step = "LOADING_ZIP"
@@ -343,25 +330,20 @@ class AutoTester(QObject):
# Step 2: Select Preset # Step 2: Select Preset
self.test_step = "SELECTING_PRESET" self.test_step = "SELECTING_PRESET"
# Use preset_to_use (which is now correctly defined earlier) logger.info(f"Step 2: Selecting preset: {self.cli_args.preset}") # KEEP INFO - Passes filter
logger.info(f"Step 2: Selecting preset: {preset_to_use}") # KEEP INFO - Passes filter
# The print statement below already uses preset_to_use, which is good.
print(f"DEBUG: Attempting to select preset: '{preset_to_use}' (derived from expected: {preset_to_use == self.expected_rules_data.get('source_rules',[{}])[0].get('preset_name') if self.expected_rules_data.get('source_rules') else 'N/A'}, cli_arg: {self.cli_args.preset})")
preset_found = False preset_found = False
preset_list_widget = self.main_window.preset_editor_widget.editor_preset_list preset_list_widget = self.main_window.preset_editor_widget.editor_preset_list
for i in range(preset_list_widget.count()): for i in range(preset_list_widget.count()):
item = preset_list_widget.item(i) item = preset_list_widget.item(i)
if item and item.text() == preset_to_use: # Use preset_to_use if item and item.text() == self.cli_args.preset:
preset_list_widget.setCurrentItem(item) preset_list_widget.setCurrentItem(item)
logger.debug(f"Preset '{preset_to_use}' selected.") logger.debug(f"Preset '{self.cli_args.preset}' selected.")
print(f"DEBUG: Successfully selected preset '{item.text()}' in GUI.")
preset_found = True preset_found = True
break break
if not preset_found: if not preset_found:
logger.error(f"Preset '{preset_to_use}' not found in the list.") logger.error(f"Preset '{self.cli_args.preset}' not found in the list.")
available_presets = [preset_list_widget.item(i).text() for i in range(preset_list_widget.count())] available_presets = [preset_list_widget.item(i).text() for i in range(preset_list_widget.count())]
logger.debug(f"Available presets: {available_presets}") logger.debug(f"Available presets: {available_presets}")
print(f"DEBUG: Failed to find preset '{preset_to_use}'. Available: {available_presets}")
self.cleanup_and_exit(success=False) self.cleanup_and_exit(success=False)
return return
@@ -471,6 +453,8 @@ class AutoTester(QObject):
else: else:
logger.warning("Log console or output widget not found. Cannot retrieve logs.") logger.warning("Log console or output widget not found. Cannot retrieve logs.")
self._process_and_display_logs(all_logs_text)
logger.info("Log analysis completed.")
# Final Step # Final Step
logger.info("Test run completed successfully.") # KEEP INFO - Passes filter logger.info("Test run completed successfully.") # KEEP INFO - Passes filter
@@ -543,7 +527,7 @@ class AutoTester(QObject):
comparable_sources_list.append({ comparable_sources_list.append({
"input_path": Path(source_rule_obj.input_path).name, # Use only the filename "input_path": Path(source_rule_obj.input_path).name, # Use only the filename
"supplier_identifier": source_rule_obj.supplier_identifier, "supplier_identifier": source_rule_obj.supplier_identifier,
"preset_name": source_rule_obj.preset_name, # This is the actual preset name from the SourceRule object "preset_name": source_rule_obj.preset_name,
"assets": comparable_asset_list "assets": comparable_asset_list
}) })
logger.debug("Conversion to comparable dictionary finished.") logger.debug("Conversion to comparable dictionary finished.")
@@ -589,8 +573,6 @@ class AutoTester(QObject):
if not self._compare_list_of_rules(actual_value, expected_value, "FileRule", current_context, "file_path"): if not self._compare_list_of_rules(actual_value, expected_value, "FileRule", current_context, "file_path"):
item_match = False item_match = False
else: # Regular field comparison else: # Regular field comparison
if key == "preset_name":
print(f"DEBUG: Comparing preset_name: Actual='{actual_value}', Expected='{expected_value}' for {item_type_name} ({current_context})")
if actual_value != expected_value: if actual_value != expected_value:
# Handle None vs "None" string for preset_name specifically if it's a common issue # Handle None vs "None" string for preset_name specifically if it's a common issue
if key == "preset_name" and actual_value is None and expected_value == "None": if key == "preset_name" and actual_value is None and expected_value == "None":
@@ -606,64 +588,63 @@ class AutoTester(QObject):
logger.error(f"Value mismatch for field '{key}' in {item_type_name} ({current_context}): Actual='{actual_value}', Expected='{expected_value}'.") logger.error(f"Value mismatch for field '{key}' in {item_type_name} ({current_context}): Actual='{actual_value}', Expected='{expected_value}'.")
item_match = False item_match = False
return item_match return item_match
def _compare_list_of_rules(self, actual_list: List[Dict[str, Any]], expected_list: List[Dict[str, Any]], item_type_name: str, parent_context: str, item_key_field: str) -> bool: def _compare_list_of_rules(self, actual_list: List[Dict[str, Any]], expected_list: List[Dict[str, Any]], item_type_name: str, parent_context: str, item_key_field: str) -> bool:
""" """
Compares a list of actual rule items against a list of expected rule items. Compares a list of actual rule items against a list of expected rule items.
Items are matched by a key field (e.g., 'asset_name' or 'file_path'). Items are matched by a key field (e.g., 'asset_name' or 'file_path').
Order independent for matching, but logs count mismatches. Order independent for matching, but logs count mismatches.
""" """
list_match = True list_match = True # Corrected indentation
if not isinstance(actual_list, list) or not isinstance(expected_list, list): if not isinstance(actual_list, list) or not isinstance(expected_list, list):
logger.error(f"Type mismatch for list of {item_type_name}s in {parent_context}. Expected lists.") logger.error(f"Type mismatch for list of {item_type_name}s in {parent_context}. Expected lists.")
return False return False
if len(actual_list) != len(expected_list): if len(actual_list) != len(expected_list):
logger.error(f"Mismatch in number of {item_type_name}s for {parent_context}. Actual: {len(actual_list)}, Expected: {len(expected_list)}.") logger.error(f"Mismatch in number of {item_type_name}s for {parent_context}. Actual: {len(actual_list)}, Expected: {len(expected_list)}.")
list_match = False # Count mismatch is an error list_match = False # Count mismatch is an error
# If counts differ, we still try to match what we can to provide more detailed feedback, # If counts differ, we still try to match what we can to provide more detailed feedback,
# but the overall list_match will remain False. # but the overall list_match will remain False.
if item_type_name == "FileRule":
print(f"DEBUG: FileRule count mismatch for {parent_context}. Actual: {len(actual_list)}, Expected: {len(expected_list)}")
print(f"DEBUG: Actual FileRule paths: {[item.get(item_key_field) for item in actual_list]}")
print(f"DEBUG: Expected FileRule paths: {[item.get(item_key_field) for item in expected_list]}")
actual_items_map = {item.get(item_key_field): item for item in actual_list if item.get(item_key_field) is not None}
actual_items_map = {item.get(item_key_field): item for item in actual_list if item.get(item_key_field) is not None} # Keep track of expected items that found a match to identify missing ones more easily
matched_expected_keys = set()
# Keep track of expected items that found a match to identify missing ones more easily for expected_item in expected_list:
matched_expected_keys = set() expected_key_value = expected_item.get(item_key_field)
if expected_key_value is None:
logger.error(f"Expected {item_type_name} in {parent_context} is missing key field '{item_key_field}'. Cannot compare this item: {expected_item}")
list_match = False # This specific expected item cannot be processed
continue
for expected_item in expected_list: actual_item = actual_items_map.get(expected_key_value)
expected_key_value = expected_item.get(item_key_field) if actual_item:
if expected_key_value is None: matched_expected_keys.add(expected_key_value)
logger.error(f"Expected {item_type_name} in {parent_context} is missing key field '{item_key_field}'. Cannot compare this item: {expected_item}") if not self._compare_rule_item(actual_item, expected_item, item_type_name, parent_context):
list_match = False # This specific expected item cannot be processed list_match = False # Individual item comparison failed
continue else:
logger.error(f"Expected {item_type_name} with {item_key_field} '{expected_key_value}' not found in actual items for {parent_context}.")
actual_item = actual_items_map.get(expected_key_value)
if actual_item:
matched_expected_keys.add(expected_key_value)
if not self._compare_rule_item(actual_item, expected_item, item_type_name, parent_context):
list_match = False # Individual item comparison failed
else:
logger.error(f"Expected {item_type_name} with {item_key_field} '{expected_key_value}' not found in actual items for {parent_context}.")
list_match = False
# Identify actual items that were not matched by any expected item
# This is useful if len(actual_list) >= len(expected_list) but some actual items are "extra"
for actual_key_value, actual_item_data in actual_items_map.items():
if actual_key_value not in matched_expected_keys:
logger.debug(f"Extra actual {item_type_name} with {item_key_field} '{actual_key_value}' found in {parent_context} (not in expected list or already matched).")
if len(actual_list) != len(expected_list): # If counts already flagged a mismatch, this is just detail
pass
else: # Counts matched, but content didn't align perfectly by key
list_match = False list_match = False
# Identify actual items that were not matched by any expected item
# This is useful if len(actual_list) >= len(expected_list) but some actual items are "extra"
for actual_key_value, actual_item_data in actual_items_map.items():
if actual_key_value not in matched_expected_keys:
logger.debug(f"Extra actual {item_type_name} with {item_key_field} '{actual_key_value}' found in {parent_context} (not in expected list or already matched).")
if len(actual_list) != len(expected_list): # If counts already flagged a mismatch, this is just detail
pass
else: # Counts matched, but content didn't align perfectly by key
list_match = False
return list_match
return list_match # Corrected indentation
def _compare_rules(self, actual_rules_data: Dict[str, Any], expected_rules_data: Dict[str, Any]) -> bool: # Corrected structure: moved out
item_match = False
return item_match
def _compare_rules(self, actual_rules_data: Dict[str, Any], expected_rules_data: Dict[str, Any]) -> bool: def _compare_rules(self, actual_rules_data: Dict[str, Any], expected_rules_data: Dict[str, Any]) -> bool:
""" """
@@ -792,10 +773,6 @@ class AutoTester(QObject):
def cleanup_and_exit(self, success: bool = True) -> None: def cleanup_and_exit(self, success: bool = True) -> None:
"""Cleans up and exits the application.""" """Cleans up and exits the application."""
# Retrieve logs before clearing the handler
all_logs_text = "" # This variable is not used by _process_and_display_logs anymore, but kept for signature compatibility if needed elsewhere.
self._process_and_display_logs(all_logs_text) # Process and display logs BEFORE clearing the buffer
global autotest_memory_handler global autotest_memory_handler
if autotest_memory_handler: if autotest_memory_handler:
logger.debug("Clearing memory log handler buffer and removing handler.") logger.debug("Clearing memory log handler buffer and removing handler.")

View File

@@ -46,10 +46,7 @@
"TEMP_DIR_PREFIX": "_PROCESS_ASSET_", "TEMP_DIR_PREFIX": "_PROCESS_ASSET_",
"INITIAL_SCALING_MODE": "POT_DOWNSCALE", "INITIAL_SCALING_MODE": "POT_DOWNSCALE",
"MERGE_DIMENSION_MISMATCH_STRATEGY": "USE_LARGEST", "MERGE_DIMENSION_MISMATCH_STRATEGY": "USE_LARGEST",
"ENABLE_LOW_RESOLUTION_FALLBACK": true,
"LOW_RESOLUTION_THRESHOLD": 512,
"general_settings": { "general_settings": {
"invert_normal_map_green_channel_globally": false, "invert_normal_map_green_channel_globally": false
"app_version": "Pre-Alpha"
} }
} }

View File

@@ -190,21 +190,19 @@
], ],
"is_grayscale": false, "is_grayscale": false,
"keybind": "E", "keybind": "E",
"standard_type": "EXTRA" "standard_type": ""
}, },
"FILE_IGNORE": { "FILE_IGNORE": {
"bit_depth_rule": "", "bit_depth_rule": "",
"color": "#673d35", "color": "#673d35",
"description": "File identified to be ignored due to prioritization rules (e.g., a lower bit-depth version when a higher one is present).", "description": "File to be ignored",
"category": "Ignored",
"examples": [ "examples": [
"Thumbs.db", "Thumbs.db",
".DS_Store" ".DS_Store"
], ],
"is_grayscale": false, "is_grayscale": false,
"keybind": "X", "keybind": "X",
"standard_type": "", "standard_type": ""
"details": {}
} }
} }
} }

View File

@@ -4,7 +4,6 @@ from pathlib import Path
import logging import logging
import re import re
import collections.abc import collections.abc
from typing import Optional
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@@ -13,7 +12,7 @@ APP_SETTINGS_PATH = BASE_DIR / "config" / "app_settings.json"
LLM_SETTINGS_PATH = BASE_DIR / "config" / "llm_settings.json" LLM_SETTINGS_PATH = BASE_DIR / "config" / "llm_settings.json"
ASSET_TYPE_DEFINITIONS_PATH = BASE_DIR / "config" / "asset_type_definitions.json" ASSET_TYPE_DEFINITIONS_PATH = BASE_DIR / "config" / "asset_type_definitions.json"
FILE_TYPE_DEFINITIONS_PATH = BASE_DIR / "config" / "file_type_definitions.json" FILE_TYPE_DEFINITIONS_PATH = BASE_DIR / "config" / "file_type_definitions.json"
USER_SETTINGS_PATH = BASE_DIR / "config" / "user_settings.json" USER_SETTINGS_PATH = BASE_DIR / "config" / "user_settings.json" # New path for user settings
SUPPLIERS_CONFIG_PATH = BASE_DIR / "config" / "suppliers.json" SUPPLIERS_CONFIG_PATH = BASE_DIR / "config" / "suppliers.json"
PRESETS_DIR = BASE_DIR / "Presets" PRESETS_DIR = BASE_DIR / "Presets"
@@ -104,8 +103,8 @@ class Configuration:
Raises: Raises:
ConfigurationError: If core config or preset cannot be loaded/validated. ConfigurationError: If core config or preset cannot be loaded/validated.
""" """
log.debug(f"Initializing Configuration with preset filename stem: '{preset_name}'") log.debug(f"Initializing Configuration with preset: '{preset_name}'")
self._preset_filename_stem = preset_name # Store the stem used for loading self.preset_name = preset_name
# 1. Load core settings # 1. Load core settings
self._core_settings: dict = self._load_core_config() self._core_settings: dict = self._load_core_config()
@@ -129,16 +128,12 @@ class Configuration:
self._llm_settings: dict = self._load_llm_config() self._llm_settings: dict = self._load_llm_config()
# 7. Load preset settings (conceptually overrides combined base + user for shared keys) # 7. Load preset settings (conceptually overrides combined base + user for shared keys)
self._preset_settings: dict = self._load_preset(self._preset_filename_stem) # Use the stored stem self._preset_settings: dict = self._load_preset(preset_name)
# Store the actual preset name read from the file content
self.actual_internal_preset_name = self._preset_settings.get("preset_name", self._preset_filename_stem)
log.info(f"Configuration instance: Loaded preset file '{self._preset_filename_stem}.json', internal preset_name is '{self.actual_internal_preset_name}'")
# 8. Validate and compile (after all base/user/preset settings are established) # 8. Validate and compile (after all base/user/preset settings are established)
self._validate_configs() self._validate_configs()
self._compile_regex_patterns() self._compile_regex_patterns()
log.info(f"Configuration loaded successfully using preset: '{self.actual_internal_preset_name}'") # Changed self.preset_name to self.actual_internal_preset_name log.info(f"Configuration loaded successfully using preset: '{self.preset_name}'")
def _compile_regex_patterns(self): def _compile_regex_patterns(self):
@@ -147,8 +142,8 @@ class Configuration:
self.compiled_extra_regex: list[re.Pattern] = [] self.compiled_extra_regex: list[re.Pattern] = []
self.compiled_model_regex: list[re.Pattern] = [] self.compiled_model_regex: list[re.Pattern] = []
self.compiled_bit_depth_regex_map: dict[str, re.Pattern] = {} self.compiled_bit_depth_regex_map: dict[str, re.Pattern] = {}
# Map: base_map_type -> list of tuples: (compiled_regex, original_keyword, rule_index, is_priority) # Map: base_map_type -> list of tuples: (compiled_regex, original_keyword, rule_index)
self.compiled_map_keyword_regex: dict[str, list[tuple[re.Pattern, str, int, bool]]] = {} self.compiled_map_keyword_regex: dict[str, list[tuple[re.Pattern, str, int]]] = {}
for pattern in self.move_to_extra_patterns: for pattern in self.move_to_extra_patterns:
try: try:
@@ -183,53 +178,28 @@ class Configuration:
for rule_index, mapping_rule in enumerate(self.map_type_mapping): for rule_index, mapping_rule in enumerate(self.map_type_mapping):
if not isinstance(mapping_rule, dict) or \ if not isinstance(mapping_rule, dict) or \
'target_type' not in mapping_rule: # Removed 'keywords' check here as it's handled below 'target_type' not in mapping_rule or \
log.warning(f"Skipping invalid map_type_mapping rule at index {rule_index}: {mapping_rule}. Expected dict with 'target_type'.") 'keywords' not in mapping_rule or \
not isinstance(mapping_rule['keywords'], list):
log.warning(f"Skipping invalid map_type_mapping rule at index {rule_index}: {mapping_rule}. Expected dict with 'target_type' and 'keywords' list.")
continue continue
target_type = mapping_rule['target_type'].upper() target_type = mapping_rule['target_type'].upper()
source_keywords = mapping_rule['keywords']
# Ensure 'keywords' exists and is a list, default to empty list if not found or not a list
regular_keywords = mapping_rule.get('keywords', [])
if not isinstance(regular_keywords, list):
log.warning(f"Rule {rule_index} for target '{target_type}' has 'keywords' but it's not a list. Treating as empty.")
regular_keywords = []
priority_keywords = mapping_rule.get('priority_keywords', []) # Optional, defaults to empty list for keyword in source_keywords:
if not isinstance(priority_keywords, list):
log.warning(f"Rule {rule_index} for target '{target_type}' has 'priority_keywords' but it's not a list. Treating as empty.")
priority_keywords = []
# Process regular keywords
for keyword in regular_keywords:
if not isinstance(keyword, str): if not isinstance(keyword, str):
log.warning(f"Skipping non-string regular keyword '{keyword}' in rule {rule_index} for target '{target_type}'.") log.warning(f"Skipping non-string keyword '{keyword}' in rule {rule_index} for target '{target_type}'.")
continue continue
try:
kw_regex_part = _fnmatch_to_regex(keyword)
# Ensure the keyword is treated as a whole word or is at the start/end of a segment
regex_str = rf"(?:^|{separator})({kw_regex_part})(?:$|{separator})"
compiled_regex = re.compile(regex_str, re.IGNORECASE)
# Add False for is_priority
temp_compiled_map_regex[target_type].append((compiled_regex, keyword, rule_index, False))
log.debug(f" Compiled regular keyword '{keyword}' (rule {rule_index}) for target '{target_type}' as regex: {regex_str}")
except re.error as e:
log.warning(f"Failed to compile regular map keyword regex '{keyword}' for target type '{target_type}': {e}. Skipping keyword.")
# Process priority keywords
for keyword in priority_keywords:
if not isinstance(keyword, str):
log.warning(f"Skipping non-string priority keyword '{keyword}' in rule {rule_index} for target '{target_type}'.")
continue
try: try:
kw_regex_part = _fnmatch_to_regex(keyword) kw_regex_part = _fnmatch_to_regex(keyword)
regex_str = rf"(?:^|{separator})({kw_regex_part})(?:$|{separator})" regex_str = rf"(?:^|{separator})({kw_regex_part})(?:$|{separator})"
compiled_regex = re.compile(regex_str, re.IGNORECASE) compiled_regex = re.compile(regex_str, re.IGNORECASE)
# Add True for is_priority temp_compiled_map_regex[target_type].append((compiled_regex, keyword, rule_index))
temp_compiled_map_regex[target_type].append((compiled_regex, keyword, rule_index, True)) log.debug(f" Compiled keyword '{keyword}' (rule {rule_index}) for target '{target_type}' as regex: {regex_str}")
log.debug(f" Compiled priority keyword '{keyword}' (rule {rule_index}) for target '{target_type}' as regex: {regex_str}")
except re.error as e: except re.error as e:
log.warning(f"Failed to compile priority map keyword regex '{keyword}' for target type '{target_type}': {e}. Skipping keyword.") log.warning(f"Failed to compile map keyword regex '{keyword}' for target type '{target_type}': {e}. Skipping keyword.")
self.compiled_map_keyword_regex = dict(temp_compiled_map_regex) self.compiled_map_keyword_regex = dict(temp_compiled_map_regex)
log.debug(f"Compiled map keyword regex keys: {list(self.compiled_map_keyword_regex.keys())}") log.debug(f"Compiled map keyword regex keys: {list(self.compiled_map_keyword_regex.keys())}")
@@ -372,43 +342,31 @@ class Configuration:
] ]
for key in required_preset_keys: for key in required_preset_keys:
if key not in self._preset_settings: if key not in self._preset_settings:
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json' (internal name: '{self.actual_internal_preset_name}') is missing required key: '{key}'.") raise ConfigurationError(f"Preset '{self.preset_name}' is missing required key: '{key}'.")
# Validate map_type_mapping structure (new format) # Validate map_type_mapping structure (new format)
if not isinstance(self._preset_settings['map_type_mapping'], list): if not isinstance(self._preset_settings['map_type_mapping'], list):
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json': 'map_type_mapping' must be a list.") raise ConfigurationError(f"Preset '{self.preset_name}': 'map_type_mapping' must be a list.")
for index, rule in enumerate(self._preset_settings['map_type_mapping']): for index, rule in enumerate(self._preset_settings['map_type_mapping']):
if not isinstance(rule, dict): if not isinstance(rule, dict):
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json': Rule at index {index} in 'map_type_mapping' must be a dictionary.") raise ConfigurationError(f"Preset '{self.preset_name}': Rule at index {index} in 'map_type_mapping' must be a dictionary.")
if 'target_type' not in rule or not isinstance(rule['target_type'], str): if 'target_type' not in rule or not isinstance(rule['target_type'], str):
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json': Rule at index {index} in 'map_type_mapping' is missing 'target_type' string.") raise ConfigurationError(f"Preset '{self.preset_name}': Rule at index {index} in 'map_type_mapping' is missing 'target_type' string.")
valid_file_type_keys = self._file_type_definitions.keys() valid_file_type_keys = self._file_type_definitions.keys()
if rule['target_type'] not in valid_file_type_keys: if rule['target_type'] not in valid_file_type_keys:
raise ConfigurationError( raise ConfigurationError(
f"Preset file '{self._preset_filename_stem}.json': Rule at index {index} in 'map_type_mapping' " f"Preset '{self.preset_name}': Rule at index {index} in 'map_type_mapping' "
f"has an invalid 'target_type': '{rule['target_type']}'. " f"has an invalid 'target_type': '{rule['target_type']}'. "
f"Must be one of {list(valid_file_type_keys)}." f"Must be one of {list(valid_file_type_keys)}."
) )
# 'keywords' is optional if 'priority_keywords' is present and not empty, if 'keywords' not in rule or not isinstance(rule['keywords'], list):
# but if 'keywords' IS present, it must be a list of strings. raise ConfigurationError(f"Preset '{self.preset_name}': Rule at index {index} in 'map_type_mapping' is missing 'keywords' list.")
if 'keywords' in rule: for kw_index, keyword in enumerate(rule['keywords']):
if not isinstance(rule['keywords'], list): if not isinstance(keyword, str):
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json': Rule at index {index} in 'map_type_mapping' has 'keywords' but it's not a list.") raise ConfigurationError(f"Preset '{self.preset_name}': Keyword at index {kw_index} in rule {index} ('{rule['target_type']}') must be a string.")
for kw_index, keyword in enumerate(rule['keywords']):
if not isinstance(keyword, str):
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json': Keyword at index {kw_index} in rule {index} ('{rule['target_type']}') must be a string.")
elif not ('priority_keywords' in rule and rule['priority_keywords']): # if 'keywords' is not present, 'priority_keywords' must be
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json': Rule at index {index} in 'map_type_mapping' must have 'keywords' or non-empty 'priority_keywords'.")
# Validate priority_keywords if present
if 'priority_keywords' in rule:
if not isinstance(rule['priority_keywords'], list):
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json': Rule at index {index} in 'map_type_mapping' has 'priority_keywords' but it's not a list.")
for prio_kw_index, prio_keyword in enumerate(rule['priority_keywords']):
if not isinstance(prio_keyword, str):
raise ConfigurationError(f"Preset file '{self._preset_filename_stem}.json': Priority keyword at index {prio_kw_index} in rule {index} ('{rule['target_type']}') must be a string.")
if not isinstance(self._core_settings.get('TARGET_FILENAME_PATTERN'), str): if not isinstance(self._core_settings.get('TARGET_FILENAME_PATTERN'), str):
raise ConfigurationError("Core config 'TARGET_FILENAME_PATTERN' must be a string.") raise ConfigurationError("Core config 'TARGET_FILENAME_PATTERN' must be a string.")
@@ -448,12 +406,6 @@ class Configuration:
def supplier_name(self) -> str: def supplier_name(self) -> str:
return self._preset_settings.get('supplier_name', 'DefaultSupplier') return self._preset_settings.get('supplier_name', 'DefaultSupplier')
@property
def internal_display_preset_name(self) -> str:
"""Returns the 'preset_name' field from within the loaded preset JSON,
or falls back to the filename stem if not present."""
return self.actual_internal_preset_name
@property @property
def default_asset_category(self) -> str: def default_asset_category(self) -> str:
"""Gets the default asset category from core settings.""" """Gets the default asset category from core settings."""
@@ -697,24 +649,6 @@ class Configuration:
"""Returns the LLM request timeout in seconds from LLM settings.""" """Returns the LLM request timeout in seconds from LLM settings."""
return self._llm_settings.get('llm_request_timeout', 120) return self._llm_settings.get('llm_request_timeout', 120)
@property
def app_version(self) -> Optional[str]:
"""Returns the application version from general_settings."""
gs = self._core_settings.get('general_settings')
if isinstance(gs, dict):
return gs.get('app_version')
return None
@property
def enable_low_resolution_fallback(self) -> bool:
"""Gets the setting for enabling low-resolution fallback."""
return self._core_settings.get('ENABLE_LOW_RESOLUTION_FALLBACK', True)
@property
def low_resolution_threshold(self) -> int:
"""Gets the pixel dimension threshold for low-resolution fallback."""
return self._core_settings.get('LOW_RESOLUTION_THRESHOLD', 512)
@property @property
def FILE_TYPE_DEFINITIONS(self) -> dict: def FILE_TYPE_DEFINITIONS(self) -> dict:
return self._file_type_definitions return self._file_type_definitions
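The configuration getters above all follow the same pattern: read one key from the loaded settings dict and fall back to a hard-coded default. A minimal sketch of that behaviour, with a stand-in class name and the 512 default taken from the low-resolution property:

class _ConfigSketch:
    def __init__(self, core_settings: dict):
        self._core_settings = core_settings

    @property
    def low_resolution_threshold(self) -> int:
        # A missing key silently falls back to the hard-coded default.
        return self._core_settings.get('LOW_RESOLUTION_THRESHOLD', 512)

cfg = _ConfigSketch({})                    # no key present in core settings
assert cfg.low_resolution_threshold == 512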

Binary file not shown.

View File

@@ -126,15 +126,12 @@ class SupplierSearchDelegate(QStyledItemDelegate):
"""Loads the list of known suppliers from the JSON config file.""" """Loads the list of known suppliers from the JSON config file."""
try: try:
with open(SUPPLIERS_CONFIG_PATH, 'r') as f: with open(SUPPLIERS_CONFIG_PATH, 'r') as f:
suppliers_data = json.load(f) # Renamed variable for clarity suppliers = json.load(f)
if isinstance(suppliers_data, list): if isinstance(suppliers, list):
# Ensure all items are strings # Ensure all items are strings
return sorted([str(s) for s in suppliers_data if isinstance(s, str)]) return sorted([str(s) for s in suppliers if isinstance(s, str)])
elif isinstance(suppliers_data, dict): # ADDED: Handle dictionary case else:
# If it's a dictionary, extract keys as supplier names log.warning(f"'{SUPPLIERS_CONFIG_PATH}' does not contain a valid list. Starting fresh.")
return sorted([str(key) for key in suppliers_data.keys() if isinstance(key, str)])
else: # MODIFIED: Updated warning message
log.warning(f"'{SUPPLIERS_CONFIG_PATH}' does not contain a valid list or dictionary of suppliers. Starting fresh.")
return [] return []
except FileNotFoundError: except FileNotFoundError:
log.info(f"'{SUPPLIERS_CONFIG_PATH}' not found. Starting with an empty supplier list.") log.info(f"'{SUPPLIERS_CONFIG_PATH}' not found. Starting with an empty supplier list.")

View File

@@ -311,7 +311,7 @@ class MainWindow(QMainWindow):
log.info(f"Added {added_count} new asset paths: {newly_added_paths}") log.info(f"Added {added_count} new asset paths: {newly_added_paths}")
self.statusBar().showMessage(f"Added {added_count} asset(s). Updating preview...", 3000) self.statusBar().showMessage(f"Added {added_count} asset(s). Updating preview...", 3000)
mode, selected_display_name, preset_file_path = self.preset_editor_widget.get_selected_preset_mode() mode, selected_preset_text = self.preset_editor_widget.get_selected_preset_mode()
if mode == "llm": if mode == "llm":
log.info(f"LLM Interpretation selected. Preparing LLM prediction for {len(newly_added_paths)} new paths.") log.info(f"LLM Interpretation selected. Preparing LLM prediction for {len(newly_added_paths)} new paths.")
@@ -330,9 +330,8 @@ class MainWindow(QMainWindow):
log.info(f"Delegating {len(llm_requests_to_queue)} LLM requests to the handler.") log.info(f"Delegating {len(llm_requests_to_queue)} LLM requests to the handler.")
self.llm_interaction_handler.queue_llm_requests_batch(llm_requests_to_queue) self.llm_interaction_handler.queue_llm_requests_batch(llm_requests_to_queue)
# The handler manages starting its own processing internally. # The handler manages starting its own processing internally.
elif mode == "preset" and selected_display_name and preset_file_path: elif mode == "preset" and selected_preset_text:
preset_name_for_loading = preset_file_path.stem log.info(f"Preset '{selected_preset_text}' selected. Triggering prediction for {len(newly_added_paths)} new paths.")
log.info(f"Preset '{selected_display_name}' (file: {preset_name_for_loading}.json) selected. Triggering prediction for {len(newly_added_paths)} new paths.")
if self.prediction_thread and not self.prediction_thread.isRunning(): if self.prediction_thread and not self.prediction_thread.isRunning():
log.debug("Starting prediction thread from add_input_paths.") log.debug("Starting prediction thread from add_input_paths.")
self.prediction_thread.start() self.prediction_thread.start()
@@ -344,8 +343,7 @@ class MainWindow(QMainWindow):
self._source_file_lists[input_path_str] = file_list self._source_file_lists[input_path_str] = file_list
self._pending_predictions.add(input_path_str) self._pending_predictions.add(input_path_str)
log.debug(f"Added '{input_path_str}' to pending predictions. Current pending: {self._pending_predictions}") log.debug(f"Added '{input_path_str}' to pending predictions. Current pending: {self._pending_predictions}")
# Pass the filename stem for loading, not the display name self.start_prediction_signal.emit(input_path_str, file_list, selected_preset_text)
self.start_prediction_signal.emit(input_path_str, file_list, preset_name_for_loading)
else: else:
log.warning(f"Skipping prediction for {input_path_str} due to extraction error.") log.warning(f"Skipping prediction for {input_path_str} due to extraction error.")
elif mode == "placeholder": elif mode == "placeholder":
@@ -448,12 +446,7 @@ class MainWindow(QMainWindow):
self.statusBar().showMessage("No assets added to process.", 3000) self.statusBar().showMessage("No assets added to process.", 3000)
return return
# mode, selected_preset_name, preset_file_path are relevant here if processing depends on the *loaded* preset's config mode, selected_preset_name = self.preset_editor_widget.get_selected_preset_mode()
# For now, _on_process_requested uses the rules already in unified_model, which should have been generated
# using the correct preset context. The preset name itself isn't directly used by the processing engine,
# as the SourceRule object already contains the necessary preset-derived information or the preset name string.
# We'll rely on the SourceRule objects in unified_model.get_all_source_rules() to be correct.
# mode, selected_display_name, preset_file_path = self.preset_editor_widget.get_selected_preset_mode()
output_dir_str = settings.get("output_dir") output_dir_str = settings.get("output_dir")
@@ -701,7 +694,7 @@ class MainWindow(QMainWindow):
log.error("RuleBasedPredictionHandler not loaded. Cannot update preview.") log.error("RuleBasedPredictionHandler not loaded. Cannot update preview.")
self.statusBar().showMessage("Error: Prediction components not loaded.", 5000) self.statusBar().showMessage("Error: Prediction components not loaded.", 5000)
return return
mode, selected_display_name, preset_file_path = self.preset_editor_widget.get_selected_preset_mode() mode, selected_preset_name = self.preset_editor_widget.get_selected_preset_mode()
if mode == "placeholder": if mode == "placeholder":
log.debug("Update preview called with placeholder preset selected. Showing existing raw inputs (detailed view).") log.debug("Update preview called with placeholder preset selected. Showing existing raw inputs (detailed view).")
@@ -756,10 +749,9 @@ class MainWindow(QMainWindow):
# Do not return here; let the function exit normally after handling LLM case. # Do not return here; let the function exit normally after handling LLM case.
# The standard prediction path below will be skipped because mode is 'llm'. # The standard prediction path below will be skipped because mode is 'llm'.
elif mode == "preset" and selected_display_name and preset_file_path: elif mode == "preset" and selected_preset_name:
preset_name_for_loading = preset_file_path.stem log.info(f"[{time.time():.4f}] Requesting background preview update for {len(input_paths)} items using Preset='{selected_preset_name}'")
log.info(f"[{time.time():.4f}] Requesting background preview update for {len(input_paths)} items using Preset Display='{selected_display_name}' (File Stem='{preset_name_for_loading}')") self.statusBar().showMessage(f"Updating preview for '{selected_preset_name}'...", 0)
self.statusBar().showMessage(f"Updating preview for '{selected_display_name}'...", 0)
log.debug("Clearing accumulated rules for new standard preview batch.") log.debug("Clearing accumulated rules for new standard preview batch.")
self._accumulated_rules.clear() self._accumulated_rules.clear()
@@ -772,8 +764,8 @@ class MainWindow(QMainWindow):
for input_path_str in input_paths: for input_path_str in input_paths:
file_list = self._extract_file_list(input_path_str) file_list = self._extract_file_list(input_path_str)
if file_list is not None: if file_list is not None:
log.debug(f"[{time.time():.4f}] Emitting start_prediction_signal for: {input_path_str} with {len(file_list)} files, using preset file stem: {preset_name_for_loading}.") log.debug(f"[{time.time():.4f}] Emitting start_prediction_signal for: {input_path_str} with {len(file_list)} files.")
self.start_prediction_signal.emit(input_path_str, file_list, preset_name_for_loading) # Pass stem for loading self.start_prediction_signal.emit(input_path_str, file_list, selected_preset_name)
else: else:
log.warning(f"[{time.time():.4f}] Skipping standard prediction signal for {input_path_str} due to extraction error.") log.warning(f"[{time.time():.4f}] Skipping standard prediction signal for {input_path_str} due to extraction error.")
else: else:
@@ -1074,13 +1066,13 @@ class MainWindow(QMainWindow):
log.debug(f"<-- Exiting _handle_prediction_completion for '{input_path}'") log.debug(f"<-- Exiting _handle_prediction_completion for '{input_path}'")
@Slot(str, str, Path) # mode, display_name, file_path (Path can be None) @Slot(str, str)
def _on_preset_selection_changed(self, mode: str, display_name: str | None, file_path: Path | None ): def _on_preset_selection_changed(self, mode: str, preset_name: str | None):
""" """
Handles changes in the preset editor selection (preset, LLM, placeholder). Handles changes in the preset editor selection (preset, LLM, placeholder).
Switches between PresetEditorWidget and LLMEditorWidget. Switches between PresetEditorWidget and LLMEditorWidget.
""" """
log.info(f"Preset selection changed: mode='{mode}', display_name='{display_name}', file_path='{file_path}'") log.info(f"Preset selection changed: mode='{mode}', preset_name='{preset_name}'")
if mode == "llm": if mode == "llm":
log.debug("Switching editor stack to LLM Editor Widget.") log.debug("Switching editor stack to LLM Editor Widget.")
@@ -1102,11 +1094,11 @@ class MainWindow(QMainWindow):
self.editor_stack.setCurrentWidget(self.preset_editor_widget.json_editor_container) self.editor_stack.setCurrentWidget(self.preset_editor_widget.json_editor_container)
# The PresetEditorWidget's internal logic handles disabling/clearing the editor fields. # The PresetEditorWidget's internal logic handles disabling/clearing the editor fields.
if mode == "preset" and display_name: # Use display_name for window title if mode == "preset" and preset_name:
# This might be redundant if the editor handles its own title updates on save/load # This might be redundant if the editor handles its own title updates on save/load
# but good for consistency. # but good for consistency.
unsaved = self.preset_editor_widget.editor_unsaved_changes unsaved = self.preset_editor_widget.editor_unsaved_changes
self.setWindowTitle(f"Asset Processor Tool - {display_name}{'*' if unsaved else ''}") self.setWindowTitle(f"Asset Processor Tool - {preset_name}{'*' if unsaved else ''}")
elif mode == "llm": elif mode == "llm":
self.setWindowTitle("Asset Processor Tool - LLM Interpretation") self.setWindowTitle("Asset Processor Tool - LLM Interpretation")
else: else:
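The three-argument form of the selection signal used above can be exercised in isolation. A hedged PySide6 sketch with stand-in class names; only the signal and slot signature comes from the code:

from pathlib import Path
from PySide6.QtCore import QObject, Signal, Slot

class SelectionEmitter(QObject):
    # mode ("preset" | "llm" | "placeholder"), display name, preset file path
    preset_selection_changed_signal = Signal(str, str, Path)

class SelectionListener(QObject):
    @Slot(str, str, Path)
    def on_selection_changed(self, mode, display_name, file_path):
        print(f"mode={mode} display_name={display_name} file={file_path}")

emitter, listener = SelectionEmitter(), SelectionListener()
emitter.preset_selection_changed_signal.connect(listener.on_selection_changed)
emitter.preset_selection_changed_signal.emit("preset", "My Preset", Path("Presets/my_preset.json"))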

View File

@@ -39,9 +39,10 @@ if not log.hasHandlers():
def classify_files(file_list: List[str], config: Configuration) -> Dict[str, List[Dict[str, Any]]]: def classify_files(file_list: List[str], config: Configuration) -> Dict[str, List[Dict[str, Any]]]:
""" """
Analyzes a list of files based on configuration rules to group them by asset Analyzes a list of files based on configuration rules using a two-pass approach
and determine initial file properties, applying prioritization based on to group them by asset and determine initial file properties.
'priority_keywords' in map_type_mapping. Pass 1: Identifies and classifies prioritized bit depth variants.
Pass 2: Classifies extras, general maps (downgrading if primary exists), and ignores.
Args: Args:
file_list: List of absolute file paths. file_list: List of absolute file paths.
@@ -52,21 +53,19 @@ def classify_files(file_list: List[str], config: Configuration) -> Dict[str, Lis
Example: Example:
{ {
'AssetName1': [ 'AssetName1': [
{'file_path': '/path/to/AssetName1_DISP16.png', 'item_type': 'MAP_DISP', 'asset_name': 'AssetName1'}, {'file_path': '/path/to/AssetName1_DISP16.png', 'item_type': 'DISP', 'asset_name': 'AssetName1'},
{'file_path': '/path/to/AssetName1_Color.png', 'item_type': 'MAP_COL', 'asset_name': 'AssetName1'} {'file_path': '/path/to/AssetName1_DISP.png', 'item_type': 'EXTRA', 'asset_name': 'AssetName1'},
{'file_path': '/path/to/AssetName1_Color.png', 'item_type': 'COL', 'asset_name': 'AssetName1'}
], ],
# ... other assets # ... other assets
} }
Files marked as "FILE_IGNORE" will also be included in the output.
Returns an empty dict if classification fails or no files are provided. Returns an empty dict if classification fails or no files are provided.
""" """
classified_files_info: Dict[str, List[Dict[str, Any]]] = defaultdict(list) temp_grouped_files = defaultdict(list)
file_matches: Dict[str, List[Tuple[str, int, bool]]] = defaultdict(list) # {file_path: [(target_type, rule_index, is_priority), ...]} extra_files_to_associate = []
files_to_ignore: Set[str] = set() primary_asset_names = set()
primary_assignments = set()
# --- DEBUG: Log the input file_list --- processed_in_pass1 = set()
log.info(f"DEBUG_ROO_CLASSIFY_INPUT: classify_files received file_list (len={len(file_list)}): {file_list}")
# --- END DEBUG ---
# --- Validation --- # --- Validation ---
if not file_list or not config: if not file_list or not config:
@@ -74,20 +73,20 @@ def classify_files(file_list: List[str], config: Configuration) -> Dict[str, Lis
return {} return {}
if not hasattr(config, 'compiled_map_keyword_regex') or not config.compiled_map_keyword_regex: if not hasattr(config, 'compiled_map_keyword_regex') or not config.compiled_map_keyword_regex:
log.warning("Classification skipped: Missing compiled map keyword regex in config.") log.warning("Classification skipped: Missing compiled map keyword regex in config.")
# Proceeding might still classify EXTRA/FILE_IGNORE if those rules exist
if not hasattr(config, 'compiled_extra_regex'): if not hasattr(config, 'compiled_extra_regex'):
log.warning("Configuration object missing 'compiled_extra_regex'. Cannot classify extra files.") log.warning("Configuration object missing 'compiled_extra_regex'. Cannot classify extra files.")
compiled_extra_regex = [] # Provide default to avoid errors if not hasattr(config, 'compiled_bit_depth_regex_map'):
else: log.warning("Configuration object missing 'compiled_bit_depth_regex_map'. Cannot prioritize bit depth variants.")
compiled_extra_regex = getattr(config, 'compiled_extra_regex', [])
compiled_map_regex = getattr(config, 'compiled_map_keyword_regex', {}) compiled_map_regex = getattr(config, 'compiled_map_keyword_regex', {})
# Note: compiled_bit_depth_regex_map is no longer used for primary classification logic here compiled_extra_regex = getattr(config, 'compiled_extra_regex', [])
compiled_bit_depth_regex_map = getattr(config, 'compiled_bit_depth_regex_map', {})
num_map_rules = sum(len(patterns) for patterns in compiled_map_regex.values()) num_map_rules = sum(len(patterns) for patterns in compiled_map_regex.values())
num_extra_rules = len(compiled_extra_regex) num_extra_rules = len(compiled_extra_regex)
num_bit_depth_rules = len(compiled_bit_depth_regex_map)
log.debug(f"Starting classification for {len(file_list)} files using {num_map_rules} map keyword patterns and {num_extra_rules} extra patterns.") log.debug(f"Starting classification for {len(file_list)} files using {num_map_rules} map keyword patterns, {num_bit_depth_rules} bit depth patterns, and {num_extra_rules} extra patterns.")
# --- Asset Name Extraction Helper --- # --- Asset Name Extraction Helper ---
def get_asset_name(f_path: Path, cfg: Configuration) -> str: def get_asset_name(f_path: Path, cfg: Configuration) -> str:
@@ -121,179 +120,155 @@ def classify_files(file_list: List[str], config: Configuration) -> Dict[str, Lis
log.warning(f"Asset name extraction resulted in empty string for '{filename}'. Using stem: '{asset_name}'.") log.warning(f"Asset name extraction resulted in empty string for '{filename}'. Using stem: '{asset_name}'.")
return asset_name return asset_name
# --- Pass 1: Collect all potential matches for each file --- # --- Pass 1: Prioritized Bit Depth Variants ---
# For each file, find all map_type_mapping rules it matches (both regular and priority keywords). log.debug("--- Starting Classification Pass 1: Prioritized Variants ---")
# Store the target_type, original rule_index, and whether it was a priority match.
log.debug("--- Starting Classification Pass 1: Collect Potential Matches ---")
file_matches: Dict[str, List[Tuple[str, int, bool]]] = defaultdict(list) # {file_path: [(target_type, rule_index, is_priority), ...]}
files_classified_as_extra: Set[str] = set() # Files already classified as EXTRA
compiled_map_regex = getattr(config, 'compiled_map_keyword_regex', {})
compiled_extra_regex = getattr(config, 'compiled_extra_regex', [])
for file_path_str in file_list: for file_path_str in file_list:
file_path = Path(file_path_str) file_path = Path(file_path_str)
filename = file_path.name filename = file_path.name
asset_name = get_asset_name(file_path, config) asset_name = get_asset_name(file_path, config)
processed = False
if "BoucleChunky001" in file_path_str: for target_type, variant_regex in compiled_bit_depth_regex_map.items():
log.info(f"DEBUG_ROO: Processing file: {file_path_str}") match = variant_regex.search(filename)
if match:
log.debug(f"PASS 1: File '{filename}' matched PRIORITIZED bit depth variant for type '{target_type}'.")
matched_item_type = target_type
# Check for EXTRA files first if (asset_name, matched_item_type) in primary_assignments:
log.warning(f"PASS 1: Primary assignment ({asset_name}, {matched_item_type}) already exists. File '{filename}' will be handled in Pass 2.")
else:
primary_assignments.add((asset_name, matched_item_type))
log.debug(f" PASS 1: Added primary assignment: ({asset_name}, {matched_item_type})")
primary_asset_names.add(asset_name)
temp_grouped_files[asset_name].append({
'file_path': file_path_str,
'item_type': matched_item_type,
'asset_name': asset_name
})
processed_in_pass1.add(file_path_str)
processed = True
break # Stop checking other variant patterns for this file
log.debug(f"--- Finished Pass 1. Primary assignments made: {primary_assignments} ---")
# --- Pass 2: Extras, General Maps, Ignores ---
log.debug("--- Starting Classification Pass 2: Extras, General Maps, Ignores ---")
for file_path_str in file_list:
if file_path_str in processed_in_pass1:
log.debug(f"PASS 2: Skipping '{Path(file_path_str).name}' (processed in Pass 1).")
continue
file_path = Path(file_path_str)
filename = file_path.name
asset_name = get_asset_name(file_path, config)
is_extra = False is_extra = False
is_map = False
# 1. Check for Extra Files FIRST in Pass 2
for extra_pattern in compiled_extra_regex: for extra_pattern in compiled_extra_regex:
if extra_pattern.search(filename): if extra_pattern.search(filename):
if "BoucleChunky001_DISP_1K_METALNESS.png" in filename and extra_pattern.search(filename): log.debug(f"PASS 2: File '{filename}' matched EXTRA pattern: {extra_pattern.pattern}")
log.info(f"DEBUG_ROO: EXTRA MATCH: File '{filename}' matched EXTRA pattern: {extra_pattern.pattern}") extra_files_to_associate.append((file_path_str, filename))
log.debug(f"PASS 1: File '{filename}' matched EXTRA pattern: {extra_pattern.pattern}")
# For EXTRA, we assign it directly and don't check map rules for this file
classified_files_info[asset_name].append({
'file_path': file_path_str,
'item_type': "EXTRA",
'asset_name': asset_name
})
files_classified_as_extra.add(file_path_str)
is_extra = True is_extra = True
break break
if "BoucleChunky001_DISP_1K_METALNESS.png" in filename and not is_extra: # after the extra loop
log.info(f"DEBUG_ROO: EXTRA CHECK FAILED for {filename}. is_extra: {is_extra}")
if "BoucleChunky001_DISP_1K_METALNESS.png" in filename and not is_extra:
log.info(f"DEBUG_ROO: EXTRA CHECK FAILED for {filename}. is_extra: {is_extra}")
if is_extra: if is_extra:
continue # Move to the next file
# If not EXTRA, check for MAP matches (collect all potential matches)
for target_type, patterns_list in compiled_map_regex.items():
for compiled_regex, original_keyword, rule_index, is_priority in patterns_list:
match = compiled_regex.search(filename)
if match:
if "BoucleChunky001" in file_path_str:
log.info(f"DEBUG_ROO: PASS 1 MAP MATCH: File '{filename}' matched keyword '{original_keyword}' (priority: {is_priority}) for target type '{target_type}' (Rule Index: {rule_index}).")
log.debug(f" PASS 1: File '{filename}' matched keyword '{original_keyword}' (priority: {is_priority}) for target type '{target_type}' (Rule Index: {rule_index}).")
file_matches[file_path_str].append((target_type, rule_index, is_priority))
log.debug(f"--- Finished Pass 1. Collected matches for {len(file_matches)} files. ---")
# --- Pass 2: Determine Trumped Regular Matches ---
# Identify which regular matches are trumped by a priority match for the same rule_index within the asset.
log.debug("--- Starting Classification Pass 2: Determine Trumped Regular Matches ---")
trumped_regular_matches: Set[Tuple[str, int]] = set() # Set of (file_path_str, rule_index) pairs that are trumped
# First, determine which rule_indices have *any* priority match across the entire asset
rule_index_has_priority_match_in_asset: Set[int] = set()
for file_path_str, matches in file_matches.items():
for match_target, match_rule_index, match_is_priority in matches:
if match_is_priority:
rule_index_has_priority_match_in_asset.add(match_rule_index)
log.debug(f" Rule indices with priority matches in asset: {sorted(list(rule_index_has_priority_match_in_asset))}")
# Then, for each file, check its matches against the rules that had priority matches
for file_path_str in file_list:
if file_path_str in files_classified_as_extra:
continue continue
matches_for_this_file = file_matches.get(file_path_str, []) # 2. Check for General Map Files in Pass 2
for target_type, patterns_list in compiled_map_regex.items():
for compiled_regex, original_keyword, rule_index in patterns_list:
match = compiled_regex.search(filename)
if match:
try:
# map_type_mapping_list = config.map_type_mapping # Old gloss logic source
# matched_rule_details = map_type_mapping_list[rule_index] # Old gloss logic source
# is_gloss_flag = matched_rule_details.get('is_gloss_source', False) # Old gloss logic
log.debug(f" PASS 2: Match found! Rule Index: {rule_index}, Keyword: '{original_keyword}', Target: '{target_type}'") # Removed Gloss from log
except Exception as e:
log.exception(f" PASS 2: Error accessing rule details for index {rule_index}: {e}")
# Determine if this file has any priority match for a given rule_index # *** Crucial Check: Has a prioritized variant claimed this type? ***
file_has_priority_match_for_rule: Dict[int, bool] = defaultdict(bool) if (asset_name, target_type) in primary_assignments:
for match_target, match_rule_index, match_is_priority in matches_for_this_file: log.debug(f"PASS 2: File '{filename}' matched '{original_keyword}' for type '{target_type}', but primary already assigned via Pass 1. Classifying as EXTRA.")
if match_is_priority: matched_item_type = "EXTRA"
file_has_priority_match_for_rule[match_rule_index] = True # is_gloss_flag = False # Old gloss logic
else:
log.debug(f"PASS 2: File '{filename}' matched '{original_keyword}' for item_type '{target_type}'.")
matched_item_type = target_type
# Determine if this file has any regular match for a given rule_index temp_grouped_files[asset_name].append({
file_has_regular_match_for_rule: Dict[int, bool] = defaultdict(bool) 'file_path': file_path_str,
for match_target, match_rule_index, match_is_priority in matches_for_this_file: 'item_type': matched_item_type,
if not match_is_priority: 'asset_name': asset_name
file_has_regular_match_for_rule[match_rule_index] = True })
is_map = True
break
if is_map:
break
# Identify trumped regular matches for this file # 3. Handle Unmatched Files in Pass 2 (Not Extra, Not Map)
for match_target, match_rule_index, match_is_priority in matches_for_this_file: if not is_extra and not is_map:
if not match_is_priority: # Only consider regular matches log.debug(f"PASS 2: File '{filename}' did not match any map/extra pattern. Grouping under asset '{asset_name}' as FILE_IGNORE.")
if match_rule_index in rule_index_has_priority_match_in_asset: temp_grouped_files[asset_name].append({
# This regular match is for a rule_index that had a priority match somewhere in the asset 'file_path': file_path_str,
if not file_has_priority_match_for_rule[match_rule_index]: 'item_type': "FILE_IGNORE",
# And this specific file did NOT have a priority match for this rule_index 'asset_name': asset_name
trumped_regular_matches.add((file_path_str, match_rule_index)) })
log.debug(f" File '{Path(file_path_str).name}': Regular match for Rule Index {match_rule_index} is trumped.")
if "BoucleChunky001" in file_path_str:
log.info(f"DEBUG_ROO: TRUMPED: File '{Path(file_path_str).name}': Regular match for Rule Index {match_rule_index} (target {match_target}) is trumped.")
if "BoucleChunky001" in file_path_str: # Check if it was actually added by checking the set, or just log if the condition was met
if (file_path_str, match_rule_index) in trumped_regular_matches:
log.info(f"DEBUG_ROO: TRUMPED: File '{Path(file_path_str).name}': Regular match for Rule Index {match_rule_index} (target {match_target}) is trumped.")
log.debug("--- Finished Pass 2 ---")
log.debug(f"--- Finished Pass 2. Identified {len(trumped_regular_matches)} trumped regular matches. ---") # --- Determine Primary Asset Name for Extra Association (using Pass 1 results) ---
final_primary_asset_name = None
# --- Pass 3: Final Assignment & Inter-Entry Resolution --- if primary_asset_names:
# Iterate through files, apply ignore rules, and then apply earliest rule wins for remaining valid matches. primary_map_asset_names_pass1 = [
log.debug("--- Starting Classification Pass 3: Final Assignment ---") f_info['asset_name']
for asset_files in temp_grouped_files.values()
final_file_assignments: Dict[str, str] = {} # {file_path: final_item_type} for f_info in asset_files
if f_info['asset_name'] in primary_asset_names and (f_info['asset_name'], f_info['item_type']) in primary_assignments
]
for file_path_str in file_list: if primary_map_asset_names_pass1:
# Check if the file was already classified as EXTRA in Pass 1 and added to classified_files_info name_counts = Counter(primary_map_asset_names_pass1)
if file_path_str in files_classified_as_extra: most_common_names = name_counts.most_common()
log.debug(f" Final Assignment: Skipping '{Path(file_path_str).name}' as it was already classified as EXTRA in Pass 1.") final_primary_asset_name = most_common_names[0][0]
continue # Skip this file in Pass 3 as it's already handled if len(most_common_names) > 1 and most_common_names[0][1] == most_common_names[1][1]:
tied_names = sorted([name for name, count in most_common_names if count == most_common_names[0][1]])
asset_name = get_asset_name(Path(file_path_str), config) # Need asset name for the final output structure final_primary_asset_name = tied_names[0]
log.warning(f"Multiple primary asset names tied for most common based on Pass 1: {tied_names}. Using '{final_primary_asset_name}' for associating extra files.")
# Get valid matches for this file after considering intra-entry priority trumps regular log.debug(f"Determined primary asset name for extras based on Pass 1 primary maps: '{final_primary_asset_name}'")
valid_matches = []
for match_target, match_rule_index, match_is_priority in file_matches.get(file_path_str, []):
if (file_path_str, match_rule_index) not in trumped_regular_matches:
valid_matches.append((match_target, match_rule_index, match_is_priority))
log.debug(f" File '{Path(file_path_str).name}': Valid match - Target: '{match_target}', Rule Index: {match_rule_index}, Priority: {match_is_priority}")
else:
log.debug(f" File '{Path(file_path_str).name}': Invalid match (trumped by priority) - Target: '{match_target}', Rule Index: {match_rule_index}, Priority: {match_is_priority}")
if "BoucleChunky001" in file_path_str:
log.info(f"DEBUG_ROO: PASS 3 PRE-ASSIGN: File '{Path(file_path_str).name}'. Valid matches: {valid_matches}")
if "BoucleChunky001" in file_path_str:
log.info(f"DEBUG_ROO: PASS 3 PRE-ASSIGN: File '{Path(file_path_str).name}'. Valid matches: {valid_matches}")
final_item_type = "FILE_IGNORE" # Default to ignore if no valid matches
if valid_matches:
# Apply earliest rule wins among valid matches
best_match = min(valid_matches, key=lambda x: x[1]) # Find match with lowest rule_index
final_item_type = best_match[0] # Assign the target_type of the best match
log.debug(f" File '{Path(file_path_str).name}': Best valid match -> Target: '{best_match[0]}', Rule Index: {best_match[1]}. Final type: '{final_item_type}'.")
else: else:
log.debug(f" File '{Path(file_path_str).name}'': No valid matches after filtering. Final type: '{final_item_type}'.") log.warning("Primary asset names set (from Pass 1) was populated, but no corresponding groups found. Falling back.")
if "BoucleChunky001" in file_path_str: if not final_primary_asset_name:
log.info(f"DEBUG_ROO: PASS 3 FINAL ASSIGN: File '{Path(file_path_str).name}' -> Final Type: '{final_item_type}'") if temp_grouped_files and extra_files_to_associate:
final_file_assignments[file_path_str] = final_item_type fallback_name = sorted(temp_grouped_files.keys())[0]
final_primary_asset_name = fallback_name
if "BoucleChunky001" in file_path_str: log.warning(f"No primary map files found in Pass 1. Associating extras with first group found alphabetically: '{final_primary_asset_name}'.")
log.info(f"DEBUG_ROO: PASS 3 FINAL ASSIGN: File '{Path(file_path_str).name}' -> Final Type: '{final_item_type}'") elif extra_files_to_associate:
log.warning(f"Could not determine any asset name to associate {len(extra_files_to_associate)} extra file(s) with. They will be ignored.")
# Add the file info to the classified_files_info structure else:
log.info(f"DEBUG_ROO: PASS 3 APPEND: Appending file '{Path(file_path_str).name}' with type '{final_item_type}' to classified_files_info['{asset_name}']") log.debug("No primary asset name determined (no maps or extras found).")
classified_files_info[asset_name].append({
'file_path': file_path_str,
'item_type': final_item_type,
'asset_name': asset_name
})
log.debug(f" Final Grouping: '{Path(file_path_str).name}' -> '{final_item_type}' (Asset: '{asset_name}')")
log.debug(f"Classification complete. Found {len(classified_files_info)} potential assets.") # --- Associate Extra Files (collected in Pass 2) ---
# Enhanced logging for the content of classified_files_info if final_primary_asset_name and extra_files_to_associate:
boucle_chunky_data = { log.debug(f"Associating {len(extra_files_to_associate)} extra file(s) with primary asset '{final_primary_asset_name}'")
key: val for key, val in classified_files_info.items() for file_path_str, filename in extra_files_to_associate:
if 'BoucleChunky001' in key or any('BoucleChunky001' in (f_info.get('file_path','')) for f_info in val) if not any(f['file_path'] == file_path_str for f in temp_grouped_files[final_primary_asset_name]):
} temp_grouped_files[final_primary_asset_name].append({
import json # Make sure json is imported if not already at top of file 'file_path': file_path_str,
log.info(f"DEBUG_ROO: Final classified_files_info for BoucleChunky001 (content): \n{json.dumps(boucle_chunky_data, indent=2)}") 'item_type': "EXTRA",
return dict(classified_files_info) 'asset_name': final_primary_asset_name
})
else:
log.debug(f"Skipping duplicate association of extra file: {filename}")
elif extra_files_to_associate:
pass
log.debug(f"Classification complete. Found {len(temp_grouped_files)} potential assets.")
return dict(temp_grouped_files)
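Whichever variant of the classifier runs, callers consume the same shape: a dict keyed by asset name whose values are per-file dicts carrying 'file_path', 'item_type' and 'asset_name'. A hedged usage sketch, with illustrative paths and an already-loaded Configuration instance assumed:

grouped = classify_files(
    ["/in/Wood01_COL.png", "/in/Wood01_DISP16.png", "/in/Wood01_preview.jpg"],
    config,
)
for asset_name, entries in grouped.items():
    for entry in entries:
        # These keys feed the FileRule construction further down.
        print(asset_name, entry["item_type"], entry["file_path"])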
class RuleBasedPredictionHandler(BasePredictionHandler): class RuleBasedPredictionHandler(BasePredictionHandler):
@@ -392,8 +367,7 @@ class RuleBasedPredictionHandler(BasePredictionHandler):
source_rule = SourceRule( source_rule = SourceRule(
input_path=input_source_identifier, input_path=input_source_identifier,
supplier_identifier=supplier_identifier, supplier_identifier=supplier_identifier,
# Use the internal display name from the config object preset_name=preset_name
preset_name=config.internal_display_preset_name
) )
asset_rules = [] asset_rules = []
file_type_definitions = config._core_settings.get('FILE_TYPE_DEFINITIONS', {}) file_type_definitions = config._core_settings.get('FILE_TYPE_DEFINITIONS', {})
@@ -489,22 +463,23 @@ class RuleBasedPredictionHandler(BasePredictionHandler):
base_item_type = file_info['item_type'] base_item_type = file_info['item_type']
target_asset_name_override = file_info['asset_name'] target_asset_name_override = file_info['asset_name']
final_item_type = base_item_type final_item_type = base_item_type
# The classification logic now returns the final item_type directly, if not base_item_type.startswith("MAP_") and base_item_type not in ["FILE_IGNORE", "EXTRA", "MODEL"]:
# including "FILE_IGNORE" and correctly prioritized MAP_ types. final_item_type = f"MAP_{base_item_type}"
# No need for the old MAP_ prefixing logic here.
# Validate the final_item_type against definitions, unless it's EXTRA or FILE_IGNORE if file_type_definitions and final_item_type not in file_type_definitions and base_item_type not in ["FILE_IGNORE", "EXTRA"]:
if final_item_type not in ["EXTRA", "FILE_IGNORE"] and file_type_definitions and final_item_type not in file_type_definitions: log.warning(f"Predicted ItemType '{base_item_type}' (checked as '{final_item_type}') for file '{file_info['file_path']}' is not in FILE_TYPE_DEFINITIONS. Setting to FILE_IGNORE.")
log.warning(f"Predicted ItemType '{final_item_type}' for file '{file_info['file_path']}' is not in FILE_TYPE_DEFINITIONS. Setting to FILE_IGNORE.")
final_item_type = "FILE_IGNORE" final_item_type = "FILE_IGNORE"
# is_gloss_source_value = file_info.get('is_gloss_source', False) # Removed
file_rule = FileRule( file_rule = FileRule(
file_path=file_info['file_path'], file_path=file_info['file_path'],
item_type=final_item_type, item_type=final_item_type,
item_type_override=final_item_type, # item_type_override defaults to item_type item_type_override=final_item_type,
target_asset_name_override=target_asset_name_override, target_asset_name_override=target_asset_name_override,
output_format_override=None, output_format_override=None,
# is_gloss_source=is_gloss_source_value if isinstance(is_gloss_source_value, bool) else False, # Removed
resolution_override=None, resolution_override=None,
channel_merge_instructions={}, channel_merge_instructions={},
) )
@@ -514,18 +489,6 @@ class RuleBasedPredictionHandler(BasePredictionHandler):
source_rule.assets = asset_rules source_rule.assets = asset_rules
source_rules_list.append(source_rule) source_rules_list.append(source_rule)
# DEBUG: Log the structure of the source_rule being emitted
if source_rule and source_rule.assets:
for asset_r_idx, asset_r in enumerate(source_rule.assets):
log.info(f"DEBUG_ROO_EMIT: Source '{input_source_identifier}', Asset {asset_r_idx} ('{asset_r.asset_name}') has {len(asset_r.files)} FileRules.")
for fr_idx, fr in enumerate(asset_r.files):
log.info(f"DEBUG_ROO_EMIT: FR {fr_idx}: Path='{fr.file_path}', Type='{fr.item_type}', TargetAsset='{fr.target_asset_name_override}'")
elif source_rule:
log.info(f"DEBUG_ROO_EMIT: Emitting SourceRule for {input_source_identifier} but it has no assets.")
else:
log.info(f"DEBUG_ROO_EMIT: Attempting to emit for {input_source_identifier}, but source_rule object is None.")
# END DEBUG
except Exception as e: except Exception as e:
log.exception(f"Error building rule hierarchy for source '{input_source_identifier}': {e}") log.exception(f"Error building rule hierarchy for source '{input_source_identifier}': {e}")
raise RuntimeError(f"Error building rule hierarchy: {e}") from e raise RuntimeError(f"Error building rule hierarchy: {e}") from e

View File

@@ -20,8 +20,7 @@ script_dir = Path(__file__).parent
project_root = script_dir.parent project_root = script_dir.parent
PRESETS_DIR = project_root / "Presets" PRESETS_DIR = project_root / "Presets"
TEMPLATE_PATH = PRESETS_DIR / "_template.json" TEMPLATE_PATH = PRESETS_DIR / "_template.json"
APP_SETTINGS_PATH_LOCAL = project_root / "config" / "app_settings.json" # Retain for other settings if used elsewhere APP_SETTINGS_PATH_LOCAL = project_root / "config" / "app_settings.json"
FILE_TYPE_DEFINITIONS_PATH = project_root / "config" / "file_type_definitions.json"
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@@ -36,8 +35,8 @@ class PresetEditorWidget(QWidget):
# Signal emitted when presets list changes (saved, deleted, new) # Signal emitted when presets list changes (saved, deleted, new)
presets_changed_signal = Signal() presets_changed_signal = Signal()
# Signal emitted when the selected preset (or LLM/Placeholder) changes # Signal emitted when the selected preset (or LLM/Placeholder) changes
# Emits: mode ("preset", "llm", "placeholder"), display_name (str or None), file_path (Path or None) # Emits: mode ("preset", "llm", "placeholder"), preset_name (str or None)
preset_selection_changed_signal = Signal(str, str, Path) preset_selection_changed_signal = Signal(str, str)
def __init__(self, parent=None): def __init__(self, parent=None):
super().__init__(parent) super().__init__(parent)
@@ -64,19 +63,18 @@ class PresetEditorWidget(QWidget):
"""Loads FILE_TYPE_DEFINITIONS keys from app_settings.json.""" """Loads FILE_TYPE_DEFINITIONS keys from app_settings.json."""
keys = [] keys = []
try: try:
if FILE_TYPE_DEFINITIONS_PATH.is_file(): if APP_SETTINGS_PATH_LOCAL.is_file():
with open(FILE_TYPE_DEFINITIONS_PATH, 'r', encoding='utf-8') as f: with open(APP_SETTINGS_PATH_LOCAL, 'r', encoding='utf-8') as f:
settings = json.load(f) settings = json.load(f)
# The FILE_TYPE_DEFINITIONS key is at the root of file_type_definitions.json
ftd = settings.get("FILE_TYPE_DEFINITIONS", {}) ftd = settings.get("FILE_TYPE_DEFINITIONS", {})
keys = list(ftd.keys()) keys = list(ftd.keys())
log.debug(f"Successfully loaded {len(keys)} FILE_TYPE_DEFINITIONS keys from {FILE_TYPE_DEFINITIONS_PATH}.") log.debug(f"Successfully loaded {len(keys)} FILE_TYPE_DEFINITIONS keys.")
else: else:
log.error(f"file_type_definitions.json not found at {FILE_TYPE_DEFINITIONS_PATH} for PresetEditorWidget.") log.error(f"app_settings.json not found at {APP_SETTINGS_PATH_LOCAL} for PresetEditorWidget.")
except json.JSONDecodeError as e: except json.JSONDecodeError as e:
log.error(f"Failed to parse file_type_definitions.json in PresetEditorWidget: {e}") log.error(f"Failed to parse app_settings.json in PresetEditorWidget: {e}")
except Exception as e: except Exception as e:
log.error(f"Error loading FILE_TYPE_DEFINITIONS keys from {FILE_TYPE_DEFINITIONS_PATH} in PresetEditorWidget: {e}") log.error(f"Error loading FILE_TYPE_DEFINITIONS keys in PresetEditorWidget: {e}")
return keys return keys
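A self-contained sketch of the key-loading helper above, assuming the definitions sit under a top-level FILE_TYPE_DEFINITIONS object in whichever JSON file is consulted:

import json
import logging
from pathlib import Path

log = logging.getLogger(__name__)

def load_file_type_keys(definitions_path: Path) -> list[str]:
    # Returns the FILE_TYPE_DEFINITIONS keys, or an empty list on any failure.
    try:
        with open(definitions_path, "r", encoding="utf-8") as f:
            settings = json.load(f)
        return list(settings.get("FILE_TYPE_DEFINITIONS", {}).keys())
    except (OSError, json.JSONDecodeError) as e:
        log.error(f"Could not load FILE_TYPE_DEFINITIONS from {definitions_path}: {e}")
        return []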
def _init_ui(self): def _init_ui(self):
@@ -296,22 +294,8 @@ class PresetEditorWidget(QWidget):
log.warning(msg) log.warning(msg)
else: else:
for preset_path in presets: for preset_path in presets:
preset_display_name = preset_path.stem # Fallback item = QListWidgetItem(preset_path.stem)
try: item.setData(Qt.ItemDataRole.UserRole, preset_path)
with open(preset_path, 'r', encoding='utf-8') as f:
preset_content = json.load(f)
internal_name = preset_content.get("preset_name")
if internal_name and isinstance(internal_name, str) and internal_name.strip():
preset_display_name = internal_name.strip()
else:
log.warning(f"Preset file {preset_path.name} is missing 'preset_name' or it's empty. Using filename stem '{preset_path.stem}' as display name.")
except json.JSONDecodeError:
log.error(f"Failed to parse JSON from {preset_path.name}. Using filename stem '{preset_path.stem}' as display name.")
except Exception as e:
log.error(f"Error reading {preset_path.name}: {e}. Using filename stem '{preset_path.stem}' as display name.")
item = QListWidgetItem(preset_display_name)
item.setData(Qt.ItemDataRole.UserRole, preset_path) # Store the path for loading
self.editor_preset_list.addItem(item) self.editor_preset_list.addItem(item)
log.info(f"Loaded {len(presets)} presets into editor list.") log.info(f"Loaded {len(presets)} presets into editor list.")
@@ -539,8 +523,7 @@ class PresetEditorWidget(QWidget):
log.debug(f"PresetEditor: currentItemChanged signal triggered. current: {current_item.text() if current_item else 'None'}") log.debug(f"PresetEditor: currentItemChanged signal triggered. current: {current_item.text() if current_item else 'None'}")
mode = "placeholder" mode = "placeholder"
display_name_to_emit = None # Changed from preset_name preset_name = None
file_path_to_emit = None # New variable for Path
# Check for unsaved changes before proceeding # Check for unsaved changes before proceeding
if self.check_unsaved_changes(): if self.check_unsaved_changes():
@@ -555,53 +538,41 @@ class PresetEditorWidget(QWidget):
# Determine mode and preset name based on selection # Determine mode and preset name based on selection
if current_item: if current_item:
item_data = current_item.data(Qt.ItemDataRole.UserRole) item_data = current_item.data(Qt.ItemDataRole.UserRole)
current_display_text = current_item.text() # This is the internal name from populate_presets
if item_data == "__PLACEHOLDER__": if item_data == "__PLACEHOLDER__":
log.debug("Placeholder item selected.") log.debug("Placeholder item selected.")
self._clear_editor() self._clear_editor()
self._set_editor_enabled(False) self._set_editor_enabled(False)
mode = "placeholder" mode = "placeholder"
display_name_to_emit = None
file_path_to_emit = None
self._last_valid_preset_name = None # Clear last valid name self._last_valid_preset_name = None # Clear last valid name
elif item_data == "__LLM__": elif item_data == "__LLM__":
log.debug("LLM Interpretation item selected.") log.debug("LLM Interpretation item selected.")
self._clear_editor() self._clear_editor()
self._set_editor_enabled(False) self._set_editor_enabled(False)
mode = "llm" mode = "llm"
display_name_to_emit = None # LLM mode has no specific preset display name # Keep _last_valid_preset_name as it was
file_path_to_emit = None elif isinstance(item_data, Path):
# Keep _last_valid_preset_name as it was (it should be the display name) log.debug(f"Loading preset for editing: {current_item.text()}")
elif isinstance(item_data, Path): # item_data is the Path object for a preset preset_path = item_data
log.debug(f"Loading preset for editing: {current_display_text}") self._load_preset_for_editing(preset_path)
preset_file_path_obj = item_data self._last_valid_preset_name = preset_path.stem
self._load_preset_for_editing(preset_file_path_obj)
# _last_valid_preset_name should store the display name for delegate use
self._last_valid_preset_name = current_display_text
mode = "preset" mode = "preset"
display_name_to_emit = current_display_text preset_name = self._last_valid_preset_name
file_path_to_emit = preset_file_path_obj else:
else: # Should not happen if list is populated correctly
log.error(f"Invalid data type for preset path: {type(item_data)}. Clearing editor.") log.error(f"Invalid data type for preset path: {type(item_data)}. Clearing editor.")
self._clear_editor() self._clear_editor()
self._set_editor_enabled(False) self._set_editor_enabled(False)
mode = "placeholder" mode = "placeholder" # Treat as placeholder on error
display_name_to_emit = None
file_path_to_emit = None
self._last_valid_preset_name = None self._last_valid_preset_name = None
else: # No current_item (e.g., list cleared) else:
log.debug("No preset selected. Clearing editor.") log.debug("No preset selected. Clearing editor.")
self._clear_editor() self._clear_editor()
self._set_editor_enabled(False) self._set_editor_enabled(False)
mode = "placeholder" mode = "placeholder"
display_name_to_emit = None
file_path_to_emit = None
self._last_valid_preset_name = None self._last_valid_preset_name = None
# Emit the signal with all three arguments # Emit the signal regardless of what was selected
log.debug(f"Emitting preset_selection_changed_signal: mode='{mode}', display_name='{display_name_to_emit}', file_path='{file_path_to_emit}'") log.debug(f"Emitting preset_selection_changed_signal: mode='{mode}', preset_name='{preset_name}'")
self.preset_selection_changed_signal.emit(mode, display_name_to_emit, file_path_to_emit) self.preset_selection_changed_signal.emit(mode, preset_name)
def _gather_editor_data(self) -> dict: def _gather_editor_data(self) -> dict:
"""Gathers data from all editor UI widgets and returns a dictionary.""" """Gathers data from all editor UI widgets and returns a dictionary."""
@@ -784,25 +755,22 @@ class PresetEditorWidget(QWidget):
# --- Public Access Methods for MainWindow --- # --- Public Access Methods for MainWindow ---
def get_selected_preset_mode(self) -> tuple[str, str | None, Path | None]: def get_selected_preset_mode(self) -> tuple[str, str | None]:
""" """
Returns the current selection mode, display name, and file path for loading. Returns the current selection mode and preset name (if applicable).
Returns: tuple(mode_string, display_name_string_or_None, file_path_or_None) Returns: tuple(mode_string, preset_name_string_or_None)
mode_string can be "preset", "llm", "placeholder" mode_string can be "preset", "llm", "placeholder"
""" """
current_item = self.editor_preset_list.currentItem() current_item = self.editor_preset_list.currentItem()
if current_item: if current_item:
item_data = current_item.data(Qt.ItemDataRole.UserRole) item_data = current_item.data(Qt.ItemDataRole.UserRole)
display_text = current_item.text() # This is now the internal name
if item_data == "__PLACEHOLDER__": if item_data == "__PLACEHOLDER__":
return "placeholder", None, None return "placeholder", None
elif item_data == "__LLM__": elif item_data == "__LLM__":
return "llm", None, None # LLM mode doesn't have a specific preset file path return "llm", None
elif isinstance(item_data, Path): elif isinstance(item_data, Path):
# For a preset, display_text is the internal name, item_data is the Path return "preset", item_data.stem
return "preset", display_text, item_data # Return internal name and path return "placeholder", None # Default or if no item selected
return "placeholder", None, None # Default or if no item selected
def get_last_valid_preset_name(self) -> str | None: def get_last_valid_preset_name(self) -> str | None:
""" """

View File

@@ -552,13 +552,6 @@ class UnifiedViewModel(QAbstractItemModel):
supplier_col_index = self.createIndex(existing_source_row, self.COL_SUPPLIER, existing_source_rule) supplier_col_index = self.createIndex(existing_source_row, self.COL_SUPPLIER, existing_source_rule)
self.dataChanged.emit(supplier_col_index, supplier_col_index, [Qt.DisplayRole, Qt.EditRole]) self.dataChanged.emit(supplier_col_index, supplier_col_index, [Qt.DisplayRole, Qt.EditRole])
# Always update the preset_name from the new_source_rule, as this reflects the latest prediction context
if existing_source_rule.preset_name != new_source_rule.preset_name:
log.debug(f" Updating preset_name for SourceRule '{source_path}' from '{existing_source_rule.preset_name}' to '{new_source_rule.preset_name}'")
existing_source_rule.preset_name = new_source_rule.preset_name
# Note: preset_name is not directly displayed in the view, so no dataChanged needed for a specific column,
# but if it influenced other display elements, dataChanged would be emitted for those.
# --- Merge AssetRules --- # --- Merge AssetRules ---
existing_assets_dict = {asset.asset_name: asset for asset in existing_source_rule.assets} existing_assets_dict = {asset.asset_name: asset for asset in existing_source_rule.assets}

12 main.py
View File

@@ -4,7 +4,6 @@ import time
import os import os
import logging import logging
from pathlib import Path from pathlib import Path
import re # Added for checking incrementing token
from concurrent.futures import ProcessPoolExecutor, as_completed from concurrent.futures import ProcessPoolExecutor, as_completed
import subprocess import subprocess
import shutil import shutil
@@ -239,14 +238,9 @@ class ProcessingTask(QRunnable):
# output_dir should already be a Path object # output_dir should already be a Path object
pattern = getattr(config, 'output_directory_pattern', None) pattern = getattr(config, 'output_directory_pattern', None)
if pattern: if pattern:
# Only call get_next_incrementing_value if the pattern contains an incrementing token log.debug(f"Calculating next incrementing value for dir: {output_dir} using pattern: {pattern}")
if re.search(r"\[IncrementingValue\]|#+", pattern): next_increment_str = get_next_incrementing_value(output_dir, pattern)
log.debug(f"Incrementing token found in pattern '{pattern}'. Calculating next value for dir: {output_dir}") log.info(f"Calculated next incrementing value for {output_dir}: {next_increment_str}")
next_increment_str = get_next_incrementing_value(output_dir, pattern)
log.info(f"Calculated next incrementing value for {output_dir}: {next_increment_str}")
else:
log.debug(f"No incrementing token found in pattern '{pattern}'. Skipping increment calculation.")
next_increment_str = None # Or a default like "00" if downstream expects a string, but None is cleaner if handled.
else: else:
log.warning(f"Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration for preset {config.preset_name}") log.warning(f"Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration for preset {config.preset_name}")
except Exception as e: except Exception as e:
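The guard above only computes the next incrementing value when the output pattern actually contains an incrementing token, either "[IncrementingValue]" or a run of "#". A minimal sketch of that check; the project's own get_next_incrementing_value helper is passed in rather than reimplemented:

import re

def maybe_next_increment(output_dir, pattern, get_next_incrementing_value):
    # Skip the directory scan entirely when the pattern has no incrementing token.
    if pattern and re.search(r"\[IncrementingValue\]|#+", pattern):
        return get_next_incrementing_value(output_dir, pattern)
    return None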

View File

@@ -195,25 +195,17 @@ def _process_archive_task(archive_path: Path, output_dir: Path, processed_dir: P
# Assuming config object has 'output_directory_pattern' attribute/key # Assuming config object has 'output_directory_pattern' attribute/key
pattern = getattr(config, 'output_directory_pattern', None) # Use getattr for safety pattern = getattr(config, 'output_directory_pattern', None) # Use getattr for safety
if pattern: if pattern:
if re.search(r"\[IncrementingValue\]|#+", pattern): log.debug(f"[Task:{archive_path.name}] Calculating next incrementing value for dir: {output_dir} using pattern: {pattern}")
log.debug(f"[Task:{archive_path.name}] Incrementing token found in pattern '{pattern}'. Calculating next value for dir: {output_dir}") next_increment_str = get_next_incrementing_value(output_dir, pattern)
next_increment_str = get_next_incrementing_value(output_dir, pattern) log.info(f"[Task:{archive_path.name}] Calculated next incrementing value: {next_increment_str}")
log.info(f"[Task:{archive_path.name}] Calculated next incrementing value: {next_increment_str}")
else:
log.debug(f"[Task:{archive_path.name}] No incrementing token found in pattern '{pattern}'. Skipping increment calculation.")
next_increment_str = None
else: else:
# Check if config is a dict as fallback (depends on load_config implementation) # Check if config is a dict as fallback (depends on load_config implementation)
if isinstance(config, dict): if isinstance(config, dict):
pattern = config.get('output_directory_pattern') pattern = config.get('output_directory_pattern')
if pattern: if pattern:
if re.search(r"\[IncrementingValue\]|#+", pattern): log.debug(f"[Task:{archive_path.name}] Calculating next incrementing value for dir: {output_dir} using pattern (from dict): {pattern}")
log.debug(f"[Task:{archive_path.name}] Incrementing token found in pattern '{pattern}' (from dict). Calculating next value for dir: {output_dir}") next_increment_str = get_next_incrementing_value(output_dir, pattern)
next_increment_str = get_next_incrementing_value(output_dir, pattern) log.info(f"[Task:{archive_path.name}] Calculated next incrementing value (from dict): {next_increment_str}")
log.info(f"[Task:{archive_path.name}] Calculated next incrementing value (from dict): {next_increment_str}")
else:
log.debug(f"[Task:{archive_path.name}] No incrementing token found in pattern '{pattern}' (from dict). Skipping increment calculation.")
next_increment_str = None
else: else:
log.warning(f"[Task:{archive_path.name}] Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration dictionary.") log.warning(f"[Task:{archive_path.name}] Cannot calculate incrementing value: 'output_directory_pattern' not found in configuration dictionary.")
else: else:

View File

@@ -1,4 +1,3 @@
import dataclasses # Added import
from dataclasses import dataclass from dataclasses import dataclass
from pathlib import Path from pathlib import Path
from typing import Dict, List, Optional from typing import Dict, List, Optional
@@ -28,7 +27,6 @@ class ProcessedRegularMapData:
original_bit_depth: Optional[int] original_bit_depth: Optional[int]
original_dimensions: Optional[Tuple[int, int]] # (width, height) original_dimensions: Optional[Tuple[int, int]] # (width, height)
transformations_applied: List[str] transformations_applied: List[str]
resolution_key: Optional[str] = None # Added field
status: str = "Processed" status: str = "Processed"
error_message: Optional[str] = None error_message: Optional[str] = None
@@ -47,10 +45,9 @@ class ProcessedMergedMapData:
@dataclass @dataclass
class InitialScalingInput: class InitialScalingInput:
image_data: np.ndarray image_data: np.ndarray
initial_scaling_mode: str # Moved before fields with defaults
original_dimensions: Optional[Tuple[int, int]] # (width, height) original_dimensions: Optional[Tuple[int, int]] # (width, height)
resolution_key: Optional[str] = None # Added field
# Configuration needed # Configuration needed
initial_scaling_mode: str
# Output for InitialScalingStage # Output for InitialScalingStage
@dataclass @dataclass
@@ -58,7 +55,6 @@ class InitialScalingOutput:
scaled_image_data: np.ndarray scaled_image_data: np.ndarray
scaling_applied: bool scaling_applied: bool
final_dimensions: Tuple[int, int] # (width, height) final_dimensions: Tuple[int, int] # (width, height)
resolution_key: Optional[str] = None # Added field
# Input for SaveVariantsStage # Input for SaveVariantsStage
@dataclass @dataclass
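The reordering of 'initial_scaling_mode' above matters because Python dataclasses require every non-default field to precede any field with a default. A minimal illustration of the constraint, with field names borrowed from InitialScalingInput and types simplified:

from dataclasses import dataclass
from typing import Optional, Tuple

@dataclass
class ScalingInputSketch:
    image_data: object                              # stand-in for np.ndarray
    initial_scaling_mode: str                       # non-default, so it must come before...
    original_dimensions: Optional[Tuple[int, int]]  # ...any defaulted field
    resolution_key: Optional[str] = None            # defaulted fields go last

# Placing initial_scaling_mode after resolution_key would raise
# "non-default argument follows default argument" at class creation time.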

View File

@@ -8,7 +8,7 @@ from typing import List, Dict, Optional, Any, Union # Added Any, Union
import numpy as np # Added numpy import numpy as np # Added numpy
from configuration import Configuration from configuration import Configuration
from rule_structure import SourceRule, AssetRule, FileRule, ProcessingItem # Added ProcessingItem from rule_structure import SourceRule, AssetRule, FileRule # Added FileRule
# Import new context classes and stages # Import new context classes and stages
from .asset_context import ( from .asset_context import (
@@ -200,224 +200,145 @@ class PipelineOrchestrator:
current_image_data: Optional[np.ndarray] = None # Track current image data ref
try:
- # The 'item' is now expected to be a ProcessingItem or MergeTaskDefinition
+ # 1. Process (Load/Merge + Transform)
+ if isinstance(item, FileRule):
- if isinstance(item, ProcessingItem):
+ if item.item_type == 'EXTRA':
- item_key = f"{item.source_file_info_ref}_{item.map_type_identifier}_{item.resolution_key}"
+ log.debug(f"{item_log_prefix}: Skipping image processing for EXTRA FileRule '{item.file_path}'.")
- item_log_prefix = f"Asset '{asset_name}', ProcItem '{item_key}'"
+ # Add a basic entry to processed_maps_details to acknowledge it was seen
- log.info(f"{item_log_prefix}: Starting processing.")
+ context.processed_maps_details[item.file_path] = {
+ "status": "Skipped (EXTRA file)",
- # Data for ProcessingItem is already loaded by PrepareProcessingItemsStage
+ "internal_map_type": "EXTRA",
- current_image_data = item.image_data
+ "source_file": str(item.file_path)
current_dimensions = item.current_dimensions
item_resolution_key = item.resolution_key
# Transformations (like gloss to rough, normal invert) are assumed to be applied
# by RegularMapProcessorStage if it's still used, or directly in PrepareProcessingItemsStage
# before creating the ProcessingItem, or a new dedicated transformation stage.
# For now, assume item.image_data is ready for scaling/saving.
# Store initial ProcessingItem data as "processed_data" for consistency if RegularMapProcessor is bypassed
# This is a simplification; a dedicated transformation stage would be cleaner.
# For now, we assume transformations happened before or within PrepareProcessingItemsStage.
# The 'processed_data' variable here is more of a placeholder for what would feed into scaling.
# Create a simple ProcessedRegularMapData-like structure for logging/details if needed,
# or adapt the final_details population later.
# For now, we'll directly use 'item' fields.
# 2. Scale (Optional)
scaling_mode = getattr(context.config_obj, "INITIAL_SCALING_MODE", "NONE")
# Pass the item's resolution_key to InitialScalingInput
scale_input = InitialScalingInput(
image_data=current_image_data,
original_dimensions=current_dimensions,
initial_scaling_mode=scaling_mode,
resolution_key=item_resolution_key # Pass the key
)
# Add _source_file_path for logging within InitialScalingStage if available
setattr(scale_input, '_source_file_path', item.source_file_info_ref)
log.debug(f"{item_log_prefix}: Calling InitialScalingStage. Input res_key: {scale_input.resolution_key}")
scaled_data_output = self._scaling_stage.execute(scale_input)
current_image_data = scaled_data_output.scaled_image_data
current_dimensions = scaled_data_output.final_dimensions # Dimensions after scaling
# The resolution_key from item is passed through by InitialScalingOutput
output_resolution_key = scaled_data_output.resolution_key
log.debug(f"{item_log_prefix}: InitialScalingStage output. Scaled: {scaled_data_output.scaling_applied}, New Dims: {current_dimensions}, Output ResKey: {output_resolution_key}")
context.intermediate_results[item_key] = scaled_data_output
# 3. Save Variants
if current_image_data is None or current_image_data.size == 0:
log.warning(f"{item_log_prefix}: Skipping save stage because image data is empty.")
context.processed_maps_details[item_key] = {"status": "Skipped", "notes": "No image data to save", "stage": "SaveVariantsStage"}
continue
log.debug(f"{item_log_prefix}: Preparing to save variant with resolution key '{output_resolution_key}'...")
output_filename_tokens = {
'asset_name': asset_name,
'output_base_directory': context.engine_temp_dir,
'supplier': context.effective_supplier or 'UnknownSupplier',
'resolution': output_resolution_key # Use the key from the item/scaling stage
}
# Determine image_resolutions argument for save_image_variants
save_specific_resolutions = {}
if output_resolution_key == "LOWRES":
# For LOWRES, the "resolution value" is its actual dimension.
# image_saving_utils needs a dict like {"LOWRES": 64} if current_dim is 64x64
# Assuming current_dimensions[0] is width.
save_specific_resolutions = {"LOWRES": current_dimensions[0] if current_dimensions else 0}
log.debug(f"{item_log_prefix}: Preparing to save LOWRES variant. Dimensions: {current_dimensions}. Save resolutions arg: {save_specific_resolutions}")
elif output_resolution_key in context.config_obj.image_resolutions:
save_specific_resolutions = {output_resolution_key: context.config_obj.image_resolutions[output_resolution_key]}
else:
log.warning(f"{item_log_prefix}: Resolution key '{output_resolution_key}' not found in config.image_resolutions and not LOWRES. Saving might fail or use full res.")
# Fallback: pass all configured resolutions, image_saving_utils will try to match by size.
# This might not be ideal if the key is truly unknown.
# Or, more strictly, fail here if key is unknown and not LOWRES.
# For now, let image_saving_utils handle it by passing all.
save_specific_resolutions = context.config_obj.image_resolutions
save_input = SaveVariantsInput(
image_data=current_image_data,
internal_map_type=item.map_type_identifier,
source_bit_depth_info=[item.bit_depth] if item.bit_depth is not None else [8], # Default to 8 if not set
output_filename_pattern_tokens=output_filename_tokens,
image_resolutions=save_specific_resolutions, # Pass the specific resolution(s)
file_type_defs=getattr(context.config_obj, "FILE_TYPE_DEFINITIONS", {}),
output_format_8bit=context.config_obj.get_8bit_output_format(),
output_format_16bit_primary=context.config_obj.get_16bit_output_formats()[0],
output_format_16bit_fallback=context.config_obj.get_16bit_output_formats()[1],
png_compression_level=context.config_obj.png_compression_level,
jpg_quality=context.config_obj.jpg_quality,
output_filename_pattern=context.config_obj.output_filename_pattern,
resolution_threshold_for_jpg=getattr(context.config_obj, "resolution_threshold_for_jpg", None)
)
saved_data = self._save_stage.execute(save_input)
if saved_data and saved_data.status.startswith("Processed"):
item_status = saved_data.status
log.info(f"{item_log_prefix}: Item successfully processed and saved. Status: {item_status}")
context.processed_maps_details[item_key] = {
"status": item_status,
"saved_files_info": saved_data.saved_files_details,
"internal_map_type": item.map_type_identifier,
"resolution_key": output_resolution_key,
"original_dimensions": item.original_dimensions,
"final_dimensions": current_dimensions, # Dimensions after scaling
"source_file": item.source_file_info_ref,
} }
else: continue # Skip to the next item
error_msg = saved_data.error_message if saved_data else "Save stage returned None" item_key = item.file_path # Use file_path string as key
log.error(f"{item_log_prefix}: Failed during save stage. Error: {error_msg}") log.debug(f"{item_log_prefix}: Processing FileRule '{item.file_path}'...")
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Save Error: {error_msg}", "stage": "SaveVariantsStage"} processed_data = self._regular_processor_stage.execute(context, item)
asset_had_item_errors = True
item_status = "Failed"
elif isinstance(item, MergeTaskDefinition): elif isinstance(item, MergeTaskDefinition):
# --- This part needs similar refactoring for resolution_key if merged outputs can be LOWRES --- item_key = item.task_key # Use task_key string as key
# --- For now, assume merged tasks always produce standard resolutions --- log.info(f"{item_log_prefix}: Executing MergedTaskProcessorStage for MergeTask '{item_key}'...") # Log call
item_key = item.task_key
item_log_prefix = f"Asset '{asset_name}', MergeTask '{item_key}'"
log.info(f"{item_log_prefix}: Processing MergeTask.")
# 1. Process Merge Task
processed_data = self._merged_processor_stage.execute(context, item) processed_data = self._merged_processor_stage.execute(context, item)
if not processed_data or processed_data.status != "Processed": # Log status/error from merge processor
error_msg = processed_data.error_message if processed_data else "Merge processor returned None" if processed_data:
log.error(f"{item_log_prefix}: Failed during merge processing. Error: {error_msg}") log.info(f"{item_log_prefix}: MergedTaskProcessorStage result - Status: {processed_data.status}, Error: {processed_data.error_message}")
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Merge Error: {error_msg}", "stage": "MergedTaskProcessorStage"} else:
asset_had_item_errors = True log.warning(f"{item_log_prefix}: MergedTaskProcessorStage returned None for MergeTask '{item_key}'.")
continue else:
log.warning(f"{item_log_prefix}: Unknown item type '{type(item)}'. Skipping.")
item_key = f"unknown_item_{item_index}"
context.processed_maps_details[item_key] = {"status": "Skipped", "notes": f"Unknown item type {type(item)}"}
asset_had_item_errors = True
continue # Next item
context.intermediate_results[item_key] = processed_data # Check for processing failure
current_image_data = processed_data.merged_image_data if not processed_data or processed_data.status != "Processed":
current_dimensions = processed_data.final_dimensions error_msg = processed_data.error_message if processed_data else "Processor returned None"
log.error(f"{item_log_prefix}: Failed during processing stage. Error: {error_msg}")
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Processing Error: {error_msg}", "stage": processed_data.__class__.__name__ if processed_data else "UnknownProcessor"}
asset_had_item_errors = True
continue # Next item
# 2. Scale Merged Output (Optional) # Store intermediate result & get current image data
# Merged tasks typically don't have a single "resolution_key" like LOWRES from source. context.intermediate_results[item_key] = processed_data
# They produce an image that then gets downscaled to 1K, PREVIEW etc. current_image_data = processed_data.processed_image_data if isinstance(processed_data, ProcessedRegularMapData) else processed_data.merged_image_data
# So, resolution_key for InitialScalingInput here would be None or a default. current_dimensions = processed_data.original_dimensions if isinstance(processed_data, ProcessedRegularMapData) else processed_data.final_dimensions
scaling_mode = getattr(context.config_obj, "INITIAL_SCALING_MODE", "NONE")
# 2. Scale (Optional)
scaling_mode = getattr(context.config_obj, "INITIAL_SCALING_MODE", "NONE")
if scaling_mode != "NONE" and current_image_data is not None and current_image_data.size > 0:
if isinstance(item, MergeTaskDefinition): # Log scaling call for merge tasks
log.info(f"{item_log_prefix}: Calling InitialScalingStage for MergeTask '{item_key}' (Mode: {scaling_mode})...")
log.debug(f"{item_log_prefix}: Applying initial scaling (Mode: {scaling_mode})...")
scale_input = InitialScalingInput( scale_input = InitialScalingInput(
image_data=current_image_data, image_data=current_image_data,
original_dimensions=current_dimensions, original_dimensions=current_dimensions, # Pass original/merged dims
initial_scaling_mode=scaling_mode, initial_scaling_mode=scaling_mode
resolution_key=None # Merged outputs are not "LOWRES" themselves before this scaling
) )
setattr(scale_input, '_source_file_path', f"MergeTask_{item_key}") # For logging
log.debug(f"{item_log_prefix}: Calling InitialScalingStage for merged data.")
scaled_data_output = self._scaling_stage.execute(scale_input) scaled_data_output = self._scaling_stage.execute(scale_input)
current_image_data = scaled_data_output.scaled_image_data # Update intermediate result and current image data reference
current_dimensions = scaled_data_output.final_dimensions context.intermediate_results[item_key] = scaled_data_output # Overwrite previous intermediate
# Merged items don't have a specific output_resolution_key from source, current_image_data = scaled_data_output.scaled_image_data # Use scaled data for saving
# they will be saved to all applicable resolutions from config. log.debug(f"{item_log_prefix}: Scaling applied: {scaled_data_output.scaling_applied}. New Dims: {scaled_data_output.final_dimensions}")
# So scaled_data_output.resolution_key will be None here.
context.intermediate_results[item_key] = scaled_data_output
# 3. Save Merged Variants
if current_image_data is None or current_image_data.size == 0:
log.warning(f"{item_log_prefix}: Skipping save for merged task, image data is empty.")
context.processed_maps_details[item_key] = {"status": "Skipped", "notes": "No merged image data to save", "stage": "SaveVariantsStage"}
continue
output_filename_tokens = {
'asset_name': asset_name,
'output_base_directory': context.engine_temp_dir,
'supplier': context.effective_supplier or 'UnknownSupplier',
# 'resolution' token will be filled by image_saving_utils for each variant
}
# For merged tasks, we usually want to generate all standard resolutions.
# The `resolution_key` from the item itself is not applicable here for the `resolution` token.
# The `image_saving_utils.save_image_variants` will iterate through `context.config_obj.image_resolutions`.
save_input = SaveVariantsInput(
image_data=current_image_data,
internal_map_type=processed_data.output_map_type,
source_bit_depth_info=processed_data.source_bit_depths,
output_filename_pattern_tokens=output_filename_tokens,
image_resolutions=context.config_obj.image_resolutions, # Pass all configured resolutions
file_type_defs=getattr(context.config_obj, "FILE_TYPE_DEFINITIONS", {}),
output_format_8bit=context.config_obj.get_8bit_output_format(),
output_format_16bit_primary=context.config_obj.get_16bit_output_formats()[0],
output_format_16bit_fallback=context.config_obj.get_16bit_output_formats()[1],
png_compression_level=context.config_obj.png_compression_level,
jpg_quality=context.config_obj.jpg_quality,
output_filename_pattern=context.config_obj.output_filename_pattern,
resolution_threshold_for_jpg=getattr(context.config_obj, "resolution_threshold_for_jpg", None)
)
saved_data = self._save_stage.execute(save_input)
if saved_data and saved_data.status.startswith("Processed"):
item_status = saved_data.status
log.info(f"{item_log_prefix}: Merged task successfully processed and saved. Status: {item_status}")
context.processed_maps_details[item_key] = {
"status": item_status,
"saved_files_info": saved_data.saved_files_details,
"internal_map_type": processed_data.output_map_type,
"final_dimensions": current_dimensions,
}
else:
error_msg = saved_data.error_message if saved_data else "Save stage for merged task returned None"
log.error(f"{item_log_prefix}: Failed during save stage for merged task. Error: {error_msg}")
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Save Error (Merged): {error_msg}", "stage": "SaveVariantsStage"}
asset_had_item_errors = True
item_status = "Failed"
else: else:
log.warning(f"{item_log_prefix}: Unknown item type in loop: {type(item)}. Skipping.") log.debug(f"{item_log_prefix}: Initial scaling skipped (Mode: NONE or empty image).")
# Ensure some key exists to prevent KeyError if item_key was not set # Create dummy output if scaling skipped, using current dims
unknown_item_key = f"unknown_item_at_index_{item_index}" final_dims = current_dimensions if current_dimensions else (current_image_data.shape[1], current_image_data.shape[0]) if current_image_data is not None else (0,0)
context.processed_maps_details[unknown_item_key] = {"status": "Skipped", "notes": f"Unknown item type {type(item)}"} scaled_data_output = InitialScalingOutput(scaled_image_data=current_image_data, scaling_applied=False, final_dimensions=final_dims)
# 3. Save Variants
if current_image_data is None or current_image_data.size == 0:
log.warning(f"{item_log_prefix}: Skipping save stage because image data is empty.")
context.processed_maps_details[item_key] = {"status": "Skipped", "notes": "No image data to save", "stage": "SaveVariantsStage"}
# Don't mark as asset error, just skip this item's saving
continue # Next item
if isinstance(item, MergeTaskDefinition): # Log save call for merge tasks
log.info(f"{item_log_prefix}: Calling SaveVariantsStage for MergeTask '{item_key}'...")
log.debug(f"{item_log_prefix}: Saving variants...")
# Prepare input for save stage
internal_map_type = processed_data.final_internal_map_type if isinstance(processed_data, ProcessedRegularMapData) else processed_data.output_map_type
source_bit_depth = [processed_data.original_bit_depth] if isinstance(processed_data, ProcessedRegularMapData) and processed_data.original_bit_depth is not None else processed_data.source_bit_depths if isinstance(processed_data, ProcessedMergedMapData) else [8] # Default bit depth if unknown
# Construct filename tokens (ensure temp dir is used)
output_filename_tokens = {
'asset_name': asset_name,
'output_base_directory': context.engine_temp_dir, # Save variants to temp dir
# Add other tokens from context/config as needed by the pattern
'supplier': context.effective_supplier or 'UnknownSupplier',
}
# Log the value being read for the threshold before creating the input object
log.info(f"ORCHESTRATOR_DEBUG: Reading RESOLUTION_THRESHOLD_FOR_JPG from config for SaveVariantsInput: {getattr(context.config_obj, 'RESOLUTION_THRESHOLD_FOR_JPG', None)}")
save_input = SaveVariantsInput(
image_data=current_image_data, # Use potentially scaled data
internal_map_type=internal_map_type,
source_bit_depth_info=source_bit_depth,
output_filename_pattern_tokens=output_filename_tokens,
# Pass config values needed by save stage
image_resolutions=context.config_obj.image_resolutions,
file_type_defs=getattr(context.config_obj, "FILE_TYPE_DEFINITIONS", {}),
output_format_8bit=context.config_obj.get_8bit_output_format(),
output_format_16bit_primary=context.config_obj.get_16bit_output_formats()[0],
output_format_16bit_fallback=context.config_obj.get_16bit_output_formats()[1],
png_compression_level=context.config_obj.png_compression_level,
jpg_quality=context.config_obj.jpg_quality,
output_filename_pattern=context.config_obj.output_filename_pattern,
resolution_threshold_for_jpg=getattr(context.config_obj, "resolution_threshold_for_jpg", None) # Corrected case
)
saved_data = self._save_stage.execute(save_input)
# Log saved_data for merge tasks
if isinstance(item, MergeTaskDefinition):
log.info(f"{item_log_prefix}: SaveVariantsStage result for MergeTask '{item_key}' - Status: {saved_data.status if saved_data else 'N/A'}, Saved Files: {len(saved_data.saved_files_details) if saved_data else 0}")
# Check save status and finalize item result
if saved_data and saved_data.status.startswith("Processed"):
item_status = saved_data.status # e.g., "Processed" or "Processed (No Output)"
log.info(f"{item_log_prefix}: Item successfully processed and saved. Status: {item_status}")
# Populate final details for this item
final_details = {
"status": item_status,
"saved_files_info": saved_data.saved_files_details, # List of dicts from save util
"internal_map_type": internal_map_type,
"original_dimensions": processed_data.original_dimensions if isinstance(processed_data, ProcessedRegularMapData) else None,
"final_dimensions": scaled_data_output.final_dimensions if scaled_data_output else current_dimensions,
"transformations": processed_data.transformations_applied if isinstance(processed_data, ProcessedRegularMapData) else processed_data.transformations_applied_to_inputs,
# Add source file if regular map
"source_file": str(processed_data.source_file_path) if isinstance(processed_data, ProcessedRegularMapData) else None,
}
# Log final details addition for merge tasks
if isinstance(item, MergeTaskDefinition):
log.info(f"{item_log_prefix}: Adding final details to context.processed_maps_details for MergeTask '{item_key}'. Details: {final_details}")
context.processed_maps_details[item_key] = final_details
else:
error_msg = saved_data.error_message if saved_data else "Save stage returned None"
log.error(f"{item_log_prefix}: Failed during save stage. Error: {error_msg}")
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Save Error: {error_msg}", "stage": "SaveVariantsStage"}
asset_had_item_errors = True
- continue
+ item_status = "Failed" # Ensure item status reflects failure
except Exception as e:
- log.exception(f"Asset '{asset_name}', Item Loop Index {item_index}: Unhandled exception: {e}")
+ log.exception(f"{item_log_prefix}: Unhandled exception during item processing loop: {e}")
# Ensure details are recorded even on unhandled exception
if item_key is not None:
context.processed_maps_details[item_key] = {"status": "Failed", "notes": f"Unhandled Loop Error: {e}", "stage": "OrchestratorLoop"}
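Whichever side of this diff is kept, the per-item loop has the same shape: process the item, optionally apply initial scaling, save the resolution variants, and record the outcome in `context.processed_maps_details`. A heavily simplified sketch of that control flow, using stand-in stage objects rather than the project's actual classes:

```python
# Minimal sketch of the per-item flow (process -> scale -> save); names are illustrative.
def process_items_sketch(items, stages, context):
    for index, item in enumerate(items):
        key = getattr(item, "task_key", None) or getattr(item, "file_path", f"item_{index}")
        try:
            processed = stages["process"].execute(context, item)
            if processed is None or processed.status != "Processed":
                context.processed_maps_details[key] = {"status": "Failed", "stage": "process"}
                continue
            scaled = stages["scale"].execute(processed)    # optional POT downscale
            saved = stages["save"].execute(scaled)         # writes the resolution variants
            context.processed_maps_details[key] = {
                "status": saved.status,
                "saved_files_info": saved.saved_files_details,
            }
        except Exception as exc:  # one bad item should not abort the whole asset
            context.processed_maps_details[key] = {"status": "Failed", "notes": str(exc)}
```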

View File

@@ -1,5 +1,5 @@
import logging
- from typing import Tuple, Optional # Added Optional
+ from typing import Tuple
import cv2 # Assuming cv2 is available for interpolation flags
import numpy as np
@@ -7,93 +7,77 @@ import numpy as np
from .base_stage import ProcessingStage
# Import necessary context classes and utils
from ..asset_context import InitialScalingInput, InitialScalingOutput
- # ProcessingItem is no longer created here, so its import can be removed if not used otherwise.
- # For now, keep rule_structure import if other elements from it might be needed,
- # but ProcessingItem itself is not directly instantiated by this stage anymore.
- # from rule_structure import ProcessingItem
from ...utils import image_processing_utils as ipu
- import numpy as np
- import cv2 # Added cv2 for interpolation flags (already used implicitly by ipu.resize_image)
log = logging.getLogger(__name__)
class InitialScalingStage(ProcessingStage):
"""
- Applies initial Power-of-Two (POT) downscaling to image data if configured
+ Applies initial scaling (e.g., Power-of-Two downscaling) to image data
- and if the item is not already a 'LOWRES' variant.
+ if configured via the InitialScalingInput.
"""
def execute(self, input_data: InitialScalingInput) -> InitialScalingOutput:
"""
- Applies POT scaling based on input_data.initial_scaling_mode,
+ Applies scaling based on input_data.initial_scaling_mode.
- unless input_data.resolution_key is 'LOWRES'.
- Passes through the resolution_key.
"""
# Safely access source_file_path for logging, if provided by orchestrator via underscore attribute log.debug(f"Initial Scaling Stage: Mode '{input_data.initial_scaling_mode}'.")
source_file_path = getattr(input_data, '_source_file_path', "UnknownSourcePath")
log_prefix = f"InitialScalingStage (Source: {source_file_path}, ResKey: {input_data.resolution_key})"
log.debug(f"{log_prefix}: Mode '{input_data.initial_scaling_mode}'. Received resolution_key: '{input_data.resolution_key}'")
image_to_scale = input_data.image_data image_to_scale = input_data.image_data
current_dimensions_wh = input_data.original_dimensions # Dimensions of the image_to_scale original_dims_wh = input_data.original_dimensions
scaling_mode = input_data.initial_scaling_mode scaling_mode = input_data.initial_scaling_mode
scaling_applied = False
output_resolution_key = input_data.resolution_key # Pass through the resolution key final_image_data = image_to_scale # Default to original if no scaling happens
if image_to_scale is None or image_to_scale.size == 0: if image_to_scale is None or image_to_scale.size == 0:
log.warning(f"{log_prefix}: Input image data is None or empty. Skipping POT scaling.") log.warning("Initial Scaling Stage: Input image data is None or empty. Skipping.")
# Return original (empty) data and indicate no scaling
return InitialScalingOutput( return InitialScalingOutput(
scaled_image_data=np.array([]), scaled_image_data=np.array([]),
scaling_applied=False, scaling_applied=False,
final_dimensions=(0, 0), final_dimensions=(0, 0)
resolution_key=output_resolution_key
) )
if not current_dimensions_wh: if original_dims_wh is None:
log.warning(f"{log_prefix}: Original dimensions not provided for POT scaling. Using current image shape.") log.warning("Initial Scaling Stage: Original dimensions not provided. Using current image shape.")
h_pre_pot_scale, w_pre_pot_scale = image_to_scale.shape[:2] h_pre_scale, w_pre_scale = image_to_scale.shape[:2]
original_dims_wh = (w_pre_scale, h_pre_scale)
else: else:
w_pre_pot_scale, h_pre_pot_scale = current_dimensions_wh w_pre_scale, h_pre_scale = original_dims_wh
final_image_data = image_to_scale # Default to original if no scaling happens
scaling_applied = False
# Skip POT scaling if the item is already a LOWRES variant or scaling mode is NONE if scaling_mode == "POT_DOWNSCALE":
if output_resolution_key == "LOWRES": pot_w = ipu.get_nearest_power_of_two_downscale(w_pre_scale)
log.info(f"{log_prefix}: Item is a 'LOWRES' variant. Skipping POT downscaling.") pot_h = ipu.get_nearest_power_of_two_downscale(h_pre_scale)
elif scaling_mode == "NONE":
log.info(f"{log_prefix}: Mode is NONE. No POT scaling applied.")
elif scaling_mode == "POT_DOWNSCALE":
pot_w = ipu.get_nearest_power_of_two_downscale(w_pre_pot_scale)
pot_h = ipu.get_nearest_power_of_two_downscale(h_pre_pot_scale)
if (pot_w, pot_h) != (w_pre_pot_scale, h_pre_pot_scale): if (pot_w, pot_h) != (w_pre_scale, h_pre_scale):
log.info(f"{log_prefix}: Applying POT Downscale from ({w_pre_pot_scale},{h_pre_pot_scale}) to ({pot_w},{pot_h}).") log.info(f"Initial Scaling: Applying POT Downscale from ({w_pre_scale},{h_pre_scale}) to ({pot_w},{pot_h}).")
# Use INTER_AREA for downscaling generally
resized_img = ipu.resize_image(image_to_scale, pot_w, pot_h, interpolation=cv2.INTER_AREA) resized_img = ipu.resize_image(image_to_scale, pot_w, pot_h, interpolation=cv2.INTER_AREA)
if resized_img is not None: if resized_img is not None:
final_image_data = resized_img final_image_data = resized_img
scaling_applied = True scaling_applied = True
log.debug(f"{log_prefix}: POT Downscale applied successfully.") log.debug("Initial Scaling: POT Downscale applied successfully.")
else: else:
log.warning(f"{log_prefix}: POT Downscale resize failed. Using pre-POT-scaled data.") log.warning("Initial Scaling: POT Downscale resize failed. Using original data.")
# final_image_data remains image_to_scale
else: else:
log.info(f"{log_prefix}: Image already POT or smaller. No POT scaling needed.") log.info("Initial Scaling: POT Downscale - Image already POT or smaller. No scaling needed.")
# final_image_data remains image_to_scale
elif scaling_mode == "NONE":
log.info("Initial Scaling: Mode is NONE. No scaling applied.")
# final_image_data remains image_to_scale
else: else:
log.warning(f"{log_prefix}: Unknown INITIAL_SCALING_MODE '{scaling_mode}'. Defaulting to NONE (no scaling).") log.warning(f"Initial Scaling: Unknown INITIAL_SCALING_MODE '{scaling_mode}'. Defaulting to NONE.")
# final_image_data remains image_to_scale
# Determine final dimensions # Determine final dimensions
if final_image_data is not None and final_image_data.size > 0: final_h, final_w = final_image_data.shape[:2]
final_h, final_w = final_image_data.shape[:2] final_dims_wh = (final_w, final_h)
final_dims_wh = (final_w, final_h)
else:
final_dims_wh = (0,0)
if final_image_data is None: # Ensure it's an empty array for consistency if None
final_image_data = np.array([])
return InitialScalingOutput( return InitialScalingOutput(
scaled_image_data=final_image_data, scaled_image_data=final_image_data,
scaling_applied=scaling_applied, scaling_applied=scaling_applied,
final_dimensions=final_dims_wh, final_dimensions=final_dims_wh
resolution_key=output_resolution_key # Pass through the resolution key
) )
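`ipu.get_nearest_power_of_two_downscale()` and `ipu.resize_image()` are used by both versions of this stage but are not shown in the diff. A self-contained sketch, assuming "nearest power of two downscale" means the largest power of two that does not exceed the dimension (an assumption, not confirmed by this diff):

```python
import cv2
import numpy as np

def nearest_power_of_two_downscale(value: int) -> int:
    """Largest power of two <= value (assumed behaviour; the real helper may differ)."""
    if value < 1:
        return 1
    return 1 << (value.bit_length() - 1)

def pot_downscale(image: np.ndarray) -> np.ndarray:
    h, w = image.shape[:2]
    pot_w, pot_h = nearest_power_of_two_downscale(w), nearest_power_of_two_downscale(h)
    if (pot_w, pot_h) == (w, h):
        return image  # already power-of-two sized, nothing to do
    # INTER_AREA is the usual choice for shrinking, matching the stage above.
    return cv2.resize(image, (pot_w, pot_h), interpolation=cv2.INTER_AREA)

# e.g. a 3000x1500 source would come out 2048x1024 under this definition.
```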

View File

@@ -148,15 +148,12 @@ class MetadataInitializationStage(ProcessingStage):
context.asset_metadata['processing_start_time'] = datetime.datetime.now().isoformat()
context.asset_metadata['status'] = "Pending"
- app_version_value = None
+ if context.config_obj and hasattr(context.config_obj, 'general_settings') and \
- if context.config_obj and hasattr(context.config_obj, 'app_version'):
+ hasattr(context.config_obj.general_settings, 'app_version'):
- app_version_value = context.config_obj.app_version
+ context.asset_metadata['version'] = context.config_obj.general_settings.app_version
- if app_version_value:
- context.asset_metadata['version'] = app_version_value
else:
- logger.warning("App version not found using config_obj.app_version. Setting version to 'N/A'.")
+ logger.warning("App version not found in config_obj.general_settings. Setting version to 'N/A'.")
- context.asset_metadata['version'] = "N/A"
+ context.asset_metadata['version'] = "N/A" # Default or placeholder
if context.incrementing_value is not None:
context.asset_metadata['incrementing_value'] = context.incrementing_value
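The two sides of this hunk differ only in where the application version is looked up on the configuration object. Purely as an illustration, the same lookup-with-fallback can be written compactly using the attribute names from both variants above:

```python
# Illustrative only; 'config_obj' and its attributes mirror the two variants in the hunk.
def resolve_app_version(config_obj) -> str:
    general = getattr(config_obj, "general_settings", None)
    version = getattr(general, "app_version", None) or getattr(config_obj, "app_version", None)
    return version if version else "N/A"
```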

View File

@@ -1,69 +1,21 @@
import logging
- from typing import List, Union, Optional, Tuple, Dict # Added Dict
+ from typing import List, Union, Optional
- from pathlib import Path # Added Path
from .base_stage import ProcessingStage
from ..asset_context import AssetProcessingContext, MergeTaskDefinition
- from rule_structure import FileRule, ProcessingItem # Added ProcessingItem
+ from rule_structure import FileRule # Assuming FileRule is imported correctly
- from processing.utils import image_processing_utils as ipu # Added ipu
log = logging.getLogger(__name__)
class PrepareProcessingItemsStage(ProcessingStage):
"""
- Identifies and prepares a unified list of ProcessingItem and MergeTaskDefinition objects
+ Identifies and prepares a unified list of items (FileRule, MergeTaskDefinition)
- to be processed in subsequent stages. Performs initial validation and explodes
+ to be processed in subsequent stages. Performs initial validation.
- FileRules into specific ProcessingItems for each required output variant.
"""
def _get_target_resolutions(self, source_w: int, source_h: int, config_resolutions: dict, file_rule: FileRule) -> Dict[str, int]:
"""
Determines the target output resolutions for a given source image.
Placeholder logic: Uses all config resolutions smaller than or equal to source, plus PREVIEW if smaller.
Needs to be refined to consider FileRule.resolution_override and actual project requirements.
"""
# For now, very basic logic:
# If FileRule has a resolution_override (e.g., (1024,1024)), that might be the *only* target.
# This needs to be clarified. Assuming override means *only* that size.
if file_rule.resolution_override and isinstance(file_rule.resolution_override, tuple) and len(file_rule.resolution_override) == 2:
# How to get a "key" for an arbitrary override? For now, skip if overridden.
# This part of the design (how overrides interact with standard resolutions) is unclear.
# Let's assume for now that if resolution_override is set, we don't generate standard named resolutions.
# This is likely incorrect for a full implementation.
log.warning(f"FileRule '{file_rule.file_path}' has resolution_override. Standard resolution key generation skipped (needs design refinement).")
return {}
target_res = {}
max_source_dim = max(source_w, source_h)
for key, res_val in config_resolutions.items():
if key == "PREVIEW": # Always consider PREVIEW if its value is smaller
if res_val < max_source_dim : # Or just always include PREVIEW? For now, if smaller.
target_res[key] = res_val
elif res_val <= max_source_dim:
target_res[key] = res_val
# Ensure PREVIEW is included if it's defined and smaller than the smallest other target, or if no other targets.
# This logic is still a bit naive.
if "PREVIEW" in config_resolutions and config_resolutions["PREVIEW"] < max_source_dim:
if not target_res or config_resolutions["PREVIEW"] < min(v for k,v in target_res.items() if k != "PREVIEW" and isinstance(v,int)):
target_res["PREVIEW"] = config_resolutions["PREVIEW"]
elif "PREVIEW" in config_resolutions and not target_res : # if only preview is applicable
if config_resolutions["PREVIEW"] <= max_source_dim:
target_res["PREVIEW"] = config_resolutions["PREVIEW"]
if not target_res and max_source_dim > 0 : # If no standard res is smaller, but image exists
log.debug(f"No standard resolutions from config are <= source dimension {max_source_dim}. Only LOWRES (if applicable) or PREVIEW (if smaller) might be generated.")
log.debug(f"Determined target resolutions for source {source_w}x{source_h}: {target_res}")
return target_res
def execute(self, context: AssetProcessingContext) -> AssetProcessingContext:
"""
- Populates context.processing_items with ProcessingItem and MergeTaskDefinition objects.
+ Populates context.processing_items with FileRule and MergeTaskDefinition objects.
"""
asset_name_for_log = context.asset_rule.asset_name if context.asset_rule else "Unknown Asset"
log.info(f"Asset '{asset_name_for_log}': Preparing processing items...")
@@ -73,135 +25,72 @@ class PrepareProcessingItemsStage(ProcessingStage):
context.processing_items = [] context.processing_items = []
return context return context
# Output list will now be List[Union[ProcessingItem, MergeTaskDefinition]] items_to_process: List[Union[FileRule, MergeTaskDefinition]] = []
items_to_process: List[Union[ProcessingItem, MergeTaskDefinition]] = []
preparation_failed = False preparation_failed = False
config = context.config_obj
# --- Process FileRules into ProcessingItems --- # --- Add regular files ---
if context.files_to_process: if context.files_to_process:
# Validate source path early for regular files
source_path_valid = True source_path_valid = True
if not context.source_rule or not context.source_rule.input_path: if not context.source_rule or not context.source_rule.input_path:
log.error(f"Asset '{asset_name_for_log}': SourceRule or SourceRule.input_path is not set.") log.error(f"Asset '{asset_name_for_log}': SourceRule or SourceRule.input_path is not set. Cannot process regular files.")
source_path_valid = False source_path_valid = False
preparation_failed = True preparation_failed = True # Mark as failed if source path is missing
context.status_flags['prepare_items_failed_reason'] = "SourceRule.input_path missing" context.status_flags['prepare_items_failed_reason'] = "SourceRule.input_path missing"
elif not context.workspace_path or not context.workspace_path.is_dir(): elif not context.workspace_path or not context.workspace_path.is_dir():
log.error(f"Asset '{asset_name_for_log}': Workspace path '{context.workspace_path}' is invalid.") log.error(f"Asset '{asset_name_for_log}': Workspace path '{context.workspace_path}' is not a valid directory. Cannot process regular files.")
source_path_valid = False source_path_valid = False
preparation_failed = True preparation_failed = True # Mark as failed if workspace path is bad
context.status_flags['prepare_items_failed_reason'] = "Workspace path invalid" context.status_flags['prepare_items_failed_reason'] = "Workspace path invalid"
if source_path_valid: if source_path_valid:
for file_rule in context.files_to_process: for file_rule in context.files_to_process:
log_prefix_fr = f"Asset '{asset_name_for_log}', FileRule '{file_rule.file_path}'" # Basic validation for FileRule itself
if not file_rule.file_path: if not file_rule.file_path:
log.warning(f"{log_prefix_fr}: Skipping FileRule with empty file_path.") log.warning(f"Asset '{asset_name_for_log}': Skipping FileRule with empty file_path.")
continue continue # Skip this specific rule, but don't fail the whole stage
items_to_process.append(file_rule)
item_type = file_rule.item_type_override or file_rule.item_type log.debug(f"Asset '{asset_name_for_log}': Added {len(context.files_to_process)} potential FileRule items.")
if not item_type or item_type == "EXTRA" or not item_type.startswith("MAP_"): else:
log.debug(f"{log_prefix_fr}: Item type is '{item_type}'. Not creating map ProcessingItems.") log.warning(f"Asset '{asset_name_for_log}': Skipping addition of all FileRule items due to invalid source/workspace path.")
# Optionally, create a different kind of ProcessingItem for EXTRAs if they need pipeline processing
continue
source_image_path = context.workspace_path / file_rule.file_path
if not source_image_path.is_file():
log.error(f"{log_prefix_fr}: Source image file not found at '{source_image_path}'. Skipping this FileRule.")
preparation_failed = True # Individual file error can contribute to overall stage failure
context.status_flags.setdefault('prepare_items_file_errors', []).append(str(source_image_path))
continue
# Load image data to get dimensions and for LOWRES variant
# This data will be passed to subsequent stages via ProcessingItem.
# Consider caching this load if RegularMapProcessorStage also loads.
# For now, load here as dimensions are needed for LOWRES decision.
log.debug(f"{log_prefix_fr}: Loading image from '{source_image_path}' to determine dimensions and prepare items.")
source_image_data = ipu.load_image(str(source_image_path))
if source_image_data is None:
log.error(f"{log_prefix_fr}: Failed to load image from '{source_image_path}'. Skipping this FileRule.")
preparation_failed = True
context.status_flags.setdefault('prepare_items_file_errors', []).append(f"Failed to load {source_image_path}")
continue
orig_h, orig_w = source_image_data.shape[:2]
original_dimensions_wh = (orig_w, orig_h)
source_bit_depth = ipu.get_image_bit_depth(str(source_image_path)) # Get bit depth from file
source_channels = ipu.get_image_channels(source_image_data)
# Determine standard resolutions to generate # --- Add merged tasks ---
# This logic needs to be robust and consider file_rule.resolution_override, etc. # --- Add merged tasks from global configuration ---
# Using a placeholder _get_target_resolutions for now. # merged_image_tasks are expected to be loaded into context.config_obj
target_resolutions = self._get_target_resolutions(orig_w, orig_h, config.image_resolutions, file_rule) # by the Configuration class from app_settings.json.
for res_key, _res_val in target_resolutions.items(): merged_tasks_list = getattr(context.config_obj, 'map_merge_rules', None)
pi = ProcessingItem(
source_file_info_ref=str(source_image_path), # Using full path as ref
map_type_identifier=item_type,
resolution_key=res_key,
image_data=source_image_data.copy(), # Give each PI its own copy
original_dimensions=original_dimensions_wh,
current_dimensions=original_dimensions_wh,
bit_depth=source_bit_depth,
channels=source_channels,
status="Pending"
)
items_to_process.append(pi)
log.debug(f"{log_prefix_fr}: Created standard ProcessingItem: {pi.map_type_identifier}_{pi.resolution_key}")
# Create LOWRES variant if applicable
if config.enable_low_resolution_fallback and max(orig_w, orig_h) < config.low_resolution_threshold:
# Check if a LOWRES item for this source_file_info_ref already exists (e.g. if target_resolutions was empty)
# This check is important if _get_target_resolutions might return empty for small images.
# A more robust way is to ensure LOWRES is distinct from standard resolutions.
# Avoid duplicate LOWRES if _get_target_resolutions somehow already made one (unlikely with current placeholder)
is_lowres_already_added = any(p.resolution_key == "LOWRES" and p.source_file_info_ref == str(source_image_path) for p in items_to_process if isinstance(p, ProcessingItem))
if not is_lowres_already_added:
pi_lowres = ProcessingItem(
source_file_info_ref=str(source_image_path),
map_type_identifier=item_type,
resolution_key="LOWRES",
image_data=source_image_data.copy(), # Fresh copy for LOWRES
original_dimensions=original_dimensions_wh,
current_dimensions=original_dimensions_wh,
bit_depth=source_bit_depth,
channels=source_channels,
status="Pending"
)
items_to_process.append(pi_lowres)
log.info(f"{log_prefix_fr}: Created LOWRES ProcessingItem because {orig_w}x{orig_h} < {config.low_resolution_threshold}px threshold.")
else:
log.debug(f"{log_prefix_fr}: LOWRES item for this source already added by target resolution logic. Skipping duplicate LOWRES creation.")
elif config.enable_low_resolution_fallback:
log.debug(f"{log_prefix_fr}: Image {orig_w}x{orig_h} not below LOWRES threshold {config.low_resolution_threshold}px.")
else: # Source path not valid
log.warning(f"Asset '{asset_name_for_log}': Skipping creation of ProcessingItems from FileRules due to invalid source/workspace path.")
# --- Add MergeTaskDefinitions --- (This part remains largely the same)
merged_tasks_list = getattr(config, 'map_merge_rules', None)
if merged_tasks_list and isinstance(merged_tasks_list, list):
log.debug(f"Asset '{asset_name_for_log}': Found {len(merged_tasks_list)} merge tasks in global config.")
for task_idx, task_data in enumerate(merged_tasks_list):
if isinstance(task_data, dict):
task_key = f"merged_task_{task_idx}"
+ # Basic validation for merge task data: requires output_map_type and an inputs dictionary
if not task_data.get('output_map_type') or not isinstance(task_data.get('inputs'), dict):
- log.warning(f"Asset '{asset_name_for_log}', Task Index {task_idx}: Skipping merge task due to missing 'output_map_type' or valid 'inputs'. Task data: {task_data}")
+ log.warning(f"Asset '{asset_name_for_log}', Task Index {task_idx}: Skipping merge task due to missing 'output_map_type' or valid 'inputs' dictionary. Task data: {task_data}")
- continue
+ continue # Skip this specific task
- log.debug(f"Asset '{asset_name_for_log}', Preparing Merge Task Index {task_idx}: Raw task_data: {task_data}")
merge_def = MergeTaskDefinition(task_data=task_data, task_key=task_key)
- log.debug(f"Asset '{asset_name_for_log}': Created MergeTaskDefinition object: {merge_def}")
- log.info(f"Asset '{asset_name_for_log}': Successfully CREATED MergeTaskDefinition: Key='{merge_def.task_key}', OutputType='{merge_def.task_data.get('output_map_type', 'N/A')}'")
items_to_process.append(merge_def)
+ log.info(f"Asset '{asset_name_for_log}': Added MergeTaskDefinition: Key='{merge_def.task_key}', OutputType='{merge_def.task_data.get('output_map_type', 'N/A')}'")
else:
- log.warning(f"Asset '{asset_name_for_log}': Item at index {task_idx} in config.map_merge_rules is not a dict. Skipping. Item: {task_data}")
+ log.warning(f"Asset '{asset_name_for_log}': Item at index {task_idx} in config_obj.merged_image_tasks is not a dictionary. Skipping. Item: {task_data}")
- # ... (rest of merge task handling) ...
+ # The log for "Added X potential MergeTaskDefinition items" will be covered by the final log.
+ elif merged_tasks_list is None:
+ log.debug(f"Asset '{asset_name_for_log}': 'merged_image_tasks' not found in config_obj. No global merge tasks to add.")
+ elif not isinstance(merged_tasks_list, list):
+ log.warning(f"Asset '{asset_name_for_log}': 'merged_image_tasks' in config_obj is not a list. Skipping global merge tasks. Type: {type(merged_tasks_list)}")
+ else: # Empty list
+ log.debug(f"Asset '{asset_name_for_log}': 'merged_image_tasks' in config_obj is empty. No global merge tasks to add.")
- if not items_to_process and not preparation_failed: # Check preparation_failed too
- log.info(f"Asset '{asset_name_for_log}': No valid items (ProcessingItem or MergeTaskDefinition) found to process.")
+ if not items_to_process:
+ log.info(f"Asset '{asset_name_for_log}': No valid items found to process after preparation.")
- log.debug(f"Asset '{asset_name_for_log}': Final items_to_process before assigning to context: {items_to_process}")
context.processing_items = items_to_process
context.intermediate_results = {} # Initialize intermediate results storage
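The removed `_get_target_resolutions()` helper selected which named resolutions to generate for a source image. Its placeholder logic, restated as a standalone sketch (the resolution keys and pixel values here are examples only):

```python
from typing import Dict

def select_target_resolutions(source_w: int, source_h: int,
                              config_resolutions: Dict[str, int]) -> Dict[str, int]:
    """Sketch of the removed placeholder logic: keep every configured resolution that
    does not upscale the source, plus PREVIEW whenever it is smaller than the source."""
    max_source_dim = max(source_w, source_h)
    targets = {key: value for key, value in config_resolutions.items()
               if key != "PREVIEW" and value <= max_source_dim}
    preview = config_resolutions.get("PREVIEW")
    if preview is not None and preview < max_source_dim:
        targets["PREVIEW"] = preview
    return targets

# select_target_resolutions(3000, 3000, {"1K": 1024, "2K": 2048, "4K": 4096, "PREVIEW": 256})
# -> {"1K": 1024, "2K": 2048, "PREVIEW": 256}   (4K would upscale, so it is skipped)
```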

View File

@@ -37,7 +37,7 @@ class RegularMapProcessorStage(ProcessingStage):
""" """
final_internal_map_type = initial_internal_map_type # Default final_internal_map_type = initial_internal_map_type # Default
base_map_type_match = re.match(r"(MAP_[A-Z]+)", initial_internal_map_type) base_map_type_match = re.match(r"(MAP_[A-Z]{3})", initial_internal_map_type)
if not base_map_type_match or not asset_rule or not asset_rule.files: if not base_map_type_match or not asset_rule or not asset_rule.files:
return final_internal_map_type # Cannot determine suffix without base type or asset rule files return final_internal_map_type # Cannot determine suffix without base type or asset rule files
@@ -47,7 +47,7 @@ class RegularMapProcessorStage(ProcessingStage):
peers_of_same_base_type = [] peers_of_same_base_type = []
for fr_asset in asset_rule.files: for fr_asset in asset_rule.files:
fr_asset_item_type = fr_asset.item_type_override or fr_asset.item_type or "UnknownMapType" fr_asset_item_type = fr_asset.item_type_override or fr_asset.item_type or "UnknownMapType"
fr_asset_base_match = re.match(r"(MAP_[A-Z]+)", fr_asset_item_type) fr_asset_base_match = re.match(r"(MAP_[A-Z]{3})", fr_asset_item_type)
if fr_asset_base_match and fr_asset_base_match.group(1) == true_base_map_type: if fr_asset_base_match and fr_asset_base_match.group(1) == true_base_map_type:
peers_of_same_base_type.append(fr_asset) peers_of_same_base_type.append(fr_asset)
@@ -197,17 +197,10 @@ class RegularMapProcessorStage(ProcessingStage):
result.final_internal_map_type = final_map_type # Update if Gloss->Rough changed it result.final_internal_map_type = final_map_type # Update if Gloss->Rough changed it
result.transformations_applied = transform_notes result.transformations_applied = transform_notes
# --- Determine Resolution Key for LOWRES ---
if config.enable_low_resolution_fallback and result.original_dimensions:
w, h = result.original_dimensions
if max(w, h) < config.low_resolution_threshold:
result.resolution_key = "LOWRES"
log.info(f"{log_prefix}: Image dimensions ({w}x{h}) are below threshold ({config.low_resolution_threshold}px). Flagging as LOWRES.")
# --- Success --- # --- Success ---
result.status = "Processed" result.status = "Processed"
result.error_message = None result.error_message = None
log.info(f"{log_prefix}: Successfully processed regular map. Final type: '{result.final_internal_map_type}', ResolutionKey: {result.resolution_key}.") log.info(f"{log_prefix}: Successfully processed regular map. Final type: '{result.final_internal_map_type}'.")
except Exception as e: except Exception as e:
log.exception(f"{log_prefix}: Unhandled exception during processing: {e}") log.exception(f"{log_prefix}: Unhandled exception during processing: {e}")

View File

@@ -23,17 +23,8 @@ class SaveVariantsStage(ProcessingStage):
Calls isu.save_image_variants with data from input_data.
"""
internal_map_type = input_data.internal_map_type
- # The input_data for SaveVariantsStage doesn't directly contain the ProcessingItem.
+ log_prefix = f"Save Variants Stage (Type: {internal_map_type})"
- # It receives data *derived* from a ProcessingItem by previous stages.
- # For debugging, we'd need to pass more context or rely on what's in output_filename_pattern_tokens.
- resolution_key_from_tokens = input_data.output_filename_pattern_tokens.get('resolution', 'UnknownResKey')
- log_prefix = f"Save Variants Stage (Type: {internal_map_type}, ResKey: {resolution_key_from_tokens})"
log.info(f"{log_prefix}: Starting.")
- log.debug(f"{log_prefix}: Input image_data shape: {input_data.image_data.shape if input_data.image_data is not None else 'None'}")
- log.debug(f"{log_prefix}: Input source_bit_depth_info: {input_data.source_bit_depth_info}")
- log.debug(f"{log_prefix}: Configured image_resolutions for saving: {input_data.image_resolutions}")
- log.debug(f"{log_prefix}: Output filename pattern tokens: {input_data.output_filename_pattern_tokens}")
# Initialize output object with default failure state
result = SaveVariantsOutput(
@@ -73,11 +64,11 @@
"resolution_threshold_for_jpg": input_data.resolution_threshold_for_jpg, # Added
}
- log.debug(f"{log_prefix}: Calling save_image_variants utility with args: {save_args}")
+ log.debug(f"{log_prefix}: Calling save_image_variants utility.")
saved_files_details: List[Dict] = isu.save_image_variants(**save_args)
if saved_files_details:
- log.info(f"{log_prefix}: Save utility completed successfully. Saved {len(saved_files_details)} variants: {[details.get('filepath') for details in saved_files_details]}")
+ log.info(f"{log_prefix}: Save utility completed successfully. Saved {len(saved_files_details)} variants.")
result.saved_files_details = saved_files_details
result.status = "Processed"
result.error_message = None
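`isu.save_image_variants()` itself is not shown in this diff; the stage only hands it the 8-bit/16-bit output formats and `resolution_threshold_for_jpg` via `SaveVariantsInput`. A hedged sketch of what a format decision along those lines could look like (this is not the utility's actual rule set):

```python
from typing import Optional

def pick_output_format(width: int, bit_depth: int,
                       output_format_8bit: str = "JPG",
                       output_format_16bit_primary: str = "EXR",
                       resolution_threshold_for_jpg: Optional[int] = None) -> str:
    """Illustrative only; the shipped save_image_variants() applies more rules than this."""
    if bit_depth > 8:
        return output_format_16bit_primary           # keep high bit depth lossless
    if resolution_threshold_for_jpg and width < resolution_threshold_for_jpg:
        return "PNG"                                  # assume small 8-bit variants stay lossless
    return output_format_8bit
```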

View File

@@ -194,16 +194,6 @@ def get_image_bit_depth(image_path_str: str) -> Optional[int]:
print(f"Error getting bit depth for {image_path_str}: {e}") print(f"Error getting bit depth for {image_path_str}: {e}")
return None return None
def get_image_channels(image_data: np.ndarray) -> Optional[int]:
"""Determines the number of channels in an image."""
if image_data is None:
return None
if len(image_data.shape) == 2: # Grayscale
return 1
elif len(image_data.shape) == 3: # Color
return image_data.shape[2]
return None # Unknown shape
def calculate_image_stats(image_data: np.ndarray) -> Optional[Dict]: def calculate_image_stats(image_data: np.ndarray) -> Optional[Dict]:
""" """
Calculates min, max, mean for a given numpy image array. Calculates min, max, mean for a given numpy image array.
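The body of `calculate_image_stats()` sits outside this hunk; going only by its docstring, a minimal NumPy version would look roughly like this (a sketch, not the project's implementation):

```python
from typing import Dict, Optional
import numpy as np

def calculate_image_stats_sketch(image_data: np.ndarray) -> Optional[Dict]:
    """Minimal min/max/mean per the docstring above; the shipped helper may return more."""
    if image_data is None or image_data.size == 0:
        return None
    return {
        "min": float(np.min(image_data)),
        "max": float(np.max(image_data)),
        "mean": float(np.mean(image_data)),
    }
```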

View File

@@ -1,44 +0,0 @@
# Project Brief: Asset Processor Tool
## 1. Main Goal & Purpose
The primary goal of the Asset Processor Tool is to provide **CG artists and 3D content teams with a friendly, fast, and flexible interface to process and organize 3D asset source files into a standardized library format.** It automates repetitive and complex tasks involved in preparing assets from various suppliers for use in production pipelines.
## 2. Key Features & Components
* **Automated Asset Processing:** Ingests 3D asset source files (texture sets, models, etc.) from `.zip`, `.rar`, `.7z` archives, or folders.
* **Preset-Driven Workflow:** Utilizes configurable JSON presets to interpret different asset sources (e.g., from various online vendors or internal standards), defining rules for file classification and processing.
* **Comprehensive File Operations:**
* **Classification:** Automatically identifies map types (Color, Normal, Roughness, etc.), models, and other file categories based on preset rules.
* **Image Processing:** Performs tasks like image resizing (to standard resolutions like 1K, 2K, 4K, avoiding upscaling), glossiness-to-roughness conversion, normal map green channel inversion (OpenGL/DirectX handling), alpha channel extraction, bit-depth adjustments, and low-resolution fallback generation for small source images.
* **Channel Merging:** Combines channels from different source maps into packed textures (e.g., Normal + Roughness + Metallic into a single NRMRGH map). A minimal packing sketch follows this feature list.
* **Metadata Generation:** Creates a detailed `metadata.json` file for each processed asset, containing information about maps, categories, processing settings, and more, for downstream tool integration.
* **Flexible Output Organization:** Generates a clean, structured output directory based on user-configurable naming patterns and tokens.
* **Multiple User Interfaces:**
* **Graphical User Interface (GUI):** The primary interface, designed to be user-friendly, offering drag-and-drop functionality, an integrated preset editor, a live preview table for rule validation and overrides, and clear processing controls.
* **Directory Monitor:** An automated script that watches a specified folder for new asset archives and processes them based on preset names embedded in the archive filename.
* **Command-Line Interface (CLI):** Intended for batch processing and scripting (currently with limited core functionality).
* **Optional Blender Integration:** Can automatically run Blender scripts post-processing to create PBR node groups and materials in specified `.blend` files, linking to the newly processed textures.
* **Hierarchical Rule System:** Allows for dynamic, granular overrides of preset configurations at the source, asset, or individual file level via the GUI.
* **Experimental LLM Prediction:** Includes an option to use a Large Language Model for file interpretation and rule prediction.
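A minimal NumPy sketch of the channel-merging idea described above, assuming one possible layout (normal X/Y into R/G, roughness into B, metallic into A); the actual packing is defined by the merge rules in the presets, not by this example:

```python
import numpy as np

def pack_nrm_rgh_mtl(normal: np.ndarray, roughness: np.ndarray, metallic: np.ndarray) -> np.ndarray:
    """Example channel packing only; the real layout comes from the configured merge rules."""
    # normal: HxWx3, roughness/metallic: HxW grayscale, all at the same resolution and dtype.
    packed = np.zeros((*roughness.shape, 4), dtype=normal.dtype)
    packed[..., 0] = normal[..., 0]   # R <- normal X
    packed[..., 1] = normal[..., 1]   # G <- normal Y
    packed[..., 2] = roughness        # B <- roughness
    packed[..., 3] = metallic         # A <- metallic
    return packed
```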
## 3. Target Audience
* **CG Artists:** Individual artists looking for an efficient way to manage and prepare their personal or downloaded asset libraries.
* **3D Content Creation Teams:** Studios or groups needing a standardized pipeline for processing and organizing assets from multiple sources.
* **Technical Artists/Pipeline Developers:** Who may extend or integrate the tool into broader production workflows.
## 4. Overall Architectural Style & Key Technologies
* **Core Language:** Python
* **GUI Framework:** PySide6
* **Configuration:** Primarily JSON-based (application settings, user overrides, type definitions, supplier settings, presets, LLM settings).
* **Processing Architecture:** A modular, staged processing pipeline orchestrated by a central engine. Each stage performs a discrete task on an `AssetProcessingContext` object.
* **Key Libraries:** OpenCV (image processing), NumPy (numerical operations), py7zr/rarfile (archive handling), watchdog (directory monitoring).
* **Design Principles:** Modularity, configurability, and user-friendliness (especially for the GUI).
## 5. Foundational Information
* The tool aims to significantly reduce manual effort and ensure consistency in asset preparation.
* It is designed to be adaptable to various asset sources and pipeline requirements through its extensive configuration options and preset system.
* The output `metadata.json` is key for enabling further automation and integration with other tools or digital content creation (DCC) applications.

View File

@@ -1,7 +1,6 @@
import dataclasses
import json
from typing import List, Dict, Any, Tuple, Optional
- import numpy as np # Added for ProcessingItem
@dataclasses.dataclass
class FileRule:
file_path: str = None
@@ -11,12 +10,8 @@ class FileRule:
resolution_override: Tuple[int, int] = None
channel_merge_instructions: Dict[str, Any] = dataclasses.field(default_factory=dict)
output_format_override: str = None
- processing_items: List['ProcessingItem'] = dataclasses.field(default_factory=list) # Added field
def to_json(self) -> str:
- # Need to handle ProcessingItem serialization if it contains non-serializable types like np.ndarray
- # For now, assume asdict handles it or it's handled before calling to_json for persistence.
- # A custom asdict_factory might be needed for robust serialization.
return json.dumps(dataclasses.asdict(self), indent=4)
@classmethod
@@ -59,43 +54,4 @@ class SourceRule:
data = json.loads(json_string)
# Manually deserialize nested AssetRule objects
data['assets'] = [AssetRule.from_json(json.dumps(asset_data)) for asset_data in data.get('assets', [])]
- # Need to handle ProcessingItem deserialization if it was serialized
- # For now, from_json for FileRule doesn't explicitly handle processing_items from JSON.
return cls(**data)
@dataclasses.dataclass
class ProcessingItem:
"""
Represents a specific version of an image map to be processed and saved.
This could be a standard resolution (1K, 2K), a preview, or a special
variant like 'LOWRES'.
"""
source_file_info_ref: str # Reference to the original SourceFileInfo or unique ID of the source image
map_type_identifier: str # The internal map type (e.g., "MAP_COL", "MAP_ROUGH")
resolution_key: str # The resolution identifier (e.g., "1K", "PREVIEW", "LOWRES")
image_data: np.ndarray # The actual image data for this item
original_dimensions: Tuple[int, int] # (width, height) of the source image for this item
current_dimensions: Tuple[int, int] # (width, height) of the image_data in this item
target_filename: str = "" # Will be populated by SaveVariantsStage
is_extra: bool = False # If this item should be treated as an 'extra' file
bit_depth: Optional[int] = None
channels: Optional[int] = None
file_extension: Optional[str] = None # Determined during saving based on format
processing_applied_log: List[str] = dataclasses.field(default_factory=list)
status: str = "Pending" # e.g., Pending, Processed, Failed
error_message: Optional[str] = None
# __getstate__ and __setstate__ might be needed if we pickle these objects
# and np.ndarray causes issues. For JSON, image_data would typically not be serialized.
def __getstate__(self):
state = self.__dict__.copy()
# Don't pickle image_data if it's large or not needed for state
if 'image_data' in state: # Or a more sophisticated check
del state['image_data'] # Example: remove it
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Potentially re-initialize or handle missing 'image_data'
if 'image_data' not in self.__dict__:
self.image_data = None # Or load it if a path was stored instead
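The removed `__getstate__`/`__setstate__` pair keeps the potentially large `image_data` array out of the pickled state. The same pattern, shown in a self-contained toy class rather than the project's `ProcessingItem`:

```python
import pickle
import numpy as np
from dataclasses import dataclass, field

@dataclass
class PickleSafeItem:
    """Toy version of the pickling pattern used by the removed ProcessingItem."""
    name: str
    image_data: np.ndarray = field(default=None)

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop("image_data", None)   # keep large pixel buffers out of the pickle
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.image_data = None          # caller must reload pixels if they are needed again

item = PickleSafeItem("MAP_COL_1K", np.zeros((4, 4, 3)))
restored = pickle.loads(pickle.dumps(item))
assert restored.image_data is None and restored.name == "MAP_COL_1K"
```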