From 6971b8189fcf7f2ab62a53d231139a3c25397af6 Mon Sep 17 00:00:00 2001 From: Rusfort Date: Thu, 1 May 2025 09:13:20 +0200 Subject: [PATCH] Data Flow Overhaul Known regressions in current commit: - No "extra" files - GLOSS map does not look corrected - "override" flag is not respected --- ProjectNotes/Data_Flow_Refinement_Plan.md | 124 ++ .../GUI_Overhaul_Plan_Unified_View.md | 65 + __pycache__/config.cpython-313.pyc | Bin 1865 -> 2134 bytes __pycache__/main.cpython-313.pyc | Bin 28039 -> 13193 bytes config.py | 8 + gui/__pycache__/main_window.cpython-313.pyc | Bin 120662 -> 103565 bytes .../prediction_handler.cpython-313.pyc | Bin 13903 -> 19082 bytes gui/delegates.py | 89 ++ gui/main_window.py | 849 ++++------ gui/prediction_handler.py | 522 +++--- gui/unified_view_model.py | 319 ++++ main.py | 1391 +++++++++++----- processing_engine.py | 1424 +++++++++++++++++ rule_structure.py | 10 +- 14 files changed, 3670 insertions(+), 1131 deletions(-) create mode 100644 ProjectNotes/Data_Flow_Refinement_Plan.md create mode 100644 ProjectNotes/GUI_Overhaul_Plan_Unified_View.md create mode 100644 gui/delegates.py create mode 100644 gui/unified_view_model.py create mode 100644 processing_engine.py diff --git a/ProjectNotes/Data_Flow_Refinement_Plan.md b/ProjectNotes/Data_Flow_Refinement_Plan.md new file mode 100644 index 0000000..256f687 --- /dev/null +++ b/ProjectNotes/Data_Flow_Refinement_Plan.md @@ -0,0 +1,124 @@ +# Architectural Plan: Data Flow Refinement (v3) + +**Date:** 2025-04-30 + +**Author:** Roo (Architect Mode) + +**Status:** Approved + +## 1. Goal + +Refine the application's data flow to establish the GUI as the single source of truth for processing rules. This involves moving prediction/preset logic upstream from the backend processor and ensuring the backend receives a *complete* `SourceRule` object for processing, thereby simplifying the processor itself. 
This version of the plan involves creating a new processing module (`processing_engine.py`) instead of refactoring the existing `asset_processor.py`. + +## 2. Proposed Data Flow + +The refined data flow centralizes rule generation and modification within the GUI components before passing a complete, explicit rule set to the backend. The `SourceRule` object structure serves as a consistent data contract throughout the pipeline. + +```mermaid +sequenceDiagram + participant User + participant GUI_MainWindow as GUI (main_window.py) + participant GUI_Predictor as Predictor (prediction_handler.py) + participant GUI_UnifiedView as Unified View (unified_view_model.py) + participant Main as main.py + participant ProcessingEngine as New Backend (processing_engine.py) + participant Config as config.py + + User->>+GUI_MainWindow: Selects Input & Preset + Note over GUI_MainWindow: Scans input, gets file list + GUI_MainWindow->>+GUI_Predictor: Request Prediction(File List, Preset Name, Input ID) + GUI_Predictor->>+Config: Load Preset Rules & Canonical Types + Config-->>-GUI_Predictor: Return Rules & Types + %% Prediction Logic (Internal to Predictor) + Note over GUI_Predictor: Perform file analysis (based on list), apply preset rules, generate COMPLETE SourceRule hierarchy (only overridable fields populated) + GUI_Predictor-->>-GUI_MainWindow: Return List[SourceRule] (Initial Rules) + GUI_MainWindow->>+GUI_UnifiedView: Populate View(List[SourceRule]) + GUI_UnifiedView->>+Config: Read Allowed Asset/File Types for Dropdowns + Config-->>-GUI_UnifiedView: Return Allowed Types + Note over GUI_UnifiedView: Display rules, allow user edits + User->>GUI_UnifiedView: Modifies Rules (Overrides) + GUI_UnifiedView-->>GUI_MainWindow: Update SourceRule Objects in Memory + User->>+GUI_MainWindow: Trigger Processing + GUI_MainWindow->>+Main: Send Final List[SourceRule] + Main->>+ProcessingEngine: Queue Task(SourceRule) for each input + Note over ProcessingEngine: Execute processing based 
*solely* on the provided SourceRule and static config. No internal prediction/fallback. + ProcessingEngine-->>-Main: Processing Result + Main-->>-GUI_MainWindow: Update Status + GUI_MainWindow-->>User: Show Result/Status +``` + +## 3. Module-Specific Changes + +* **`config.py`:** + * **Add Canonical Lists:** Introduce `ALLOWED_ASSET_TYPES` (e.g., `["Surface", "Model", "Decal", "Atlas", "UtilityMap"]`) and `ALLOWED_FILE_TYPES` (e.g., `["MAP_COL", "MAP_NRM", ..., "MODEL", "EXTRA", "FILE_IGNORE"]`). + * **Purpose:** Single source of truth for GUI dropdowns and validation. + * **Existing Config:** Retains static definitions like `IMAGE_RESOLUTIONS`, `MAP_MERGE_RULES`, `JPG_QUALITY`, etc. + +* **`rule_structure.py`:** + * **Remove Enums:** Remove `AssetType` and `ItemType` Enums. Update `AssetRule.asset_type`, `FileRule.item_type_override`, etc., to use string types validated against `config.py` lists. + * **Field Retention:** Keep `FileRule.resolution_override` and `FileRule.channel_merge_instructions` fields for structural consistency, but they will not be populated or used for overrides in this flow. + +* **`gui/prediction_handler.py` (or equivalent):** + * **Enhance Prediction Logic:** Modify `run_prediction` method. + * **Input:** Accept `input_source_identifier` (string), `file_list` (List[str] of relative paths), and `preset_name` (string) when called from GUI. + * **Load Config:** Read `ALLOWED_ASSET_TYPES`, `ALLOWED_FILE_TYPES`, and preset rules. + * **Relocate Classification:** Integrate classification/naming logic (previously in `asset_processor.py`) to operate on the provided `file_list`. + * **Generate Complete Rules:** Populate `SourceRule`, `AssetRule`, and `FileRule` objects. + * Set initial values only for *overridable* fields (e.g., `asset_type`, `item_type_override`, `target_asset_name_override`, `supplier_identifier`, `output_format_override`) based on preset rules/defaults. 
+ * Explicitly **do not** populate static config fields like `FileRule.resolution_override` or `FileRule.channel_merge_instructions`. + * **Temporary Files (If needed for non-GUI):** May need logic later to handle direct path inputs (CLI/Docker) involving temporary extraction/cleanup, but the primary GUI flow uses the provided list. + * **Output:** Emit `rule_hierarchy_ready` signal with the `List[SourceRule]`. + +* **NEW: `processing_engine.py` (New Module):** + * **Purpose:** Contains a new class (e.g., `ProcessingEngine`) for executing the processing pipeline based solely on a complete `SourceRule` and static configuration. Replaces `asset_processor.py` in the main workflow. + * **Initialization (`__init__`):** Takes the static `Configuration` object as input. + * **Core Method (`process`):** Accepts a single, complete `SourceRule` object. Orchestrates processing steps (workspace setup, extraction, map processing, merging, metadata, organization, cleanup). + * **Helper Methods (Refactored Logic):** Implement simplified versions of processing helpers (e.g., `_process_individual_maps`, `_merge_maps_from_source`, `_generate_metadata_file`, `_organize_output_files`, `_load_and_transform_source`, `_save_image`). + * Retrieve *overridable* parameters directly from the input `SourceRule`. + * Retrieve *static configuration* parameters (resolutions, merge rules) **only** from the stored `Configuration` object. + * Contain **no** prediction, classification, or fallback logic. + * **Dependencies:** `rule_structure.py`, `configuration.py`, `config.py`, cv2, numpy, etc. + +* **`asset_processor.py` (Old Module):** + * **Status:** Remains in the codebase **unchanged** for reference. + * **Usage:** No longer called by `main.py` or GUI for standard processing. + +* **`gui/main_window.py`:** + * **Scan Input:** Perform initial directory/archive scan to get the file list for each directory/archieve. 
+ * **Initiate Prediction:** Call `PredictionHandler` with the file list, preset, and input identifier. + * **Receive/Pass Rules:** Handle `rule_hierarchy_ready`, pass `SourceRule` list to `UnifiedViewModel`. + * **Send Final Rules:** Send the final `SourceRule` list to `main.py`. + +* **`gui/unified_view_model.py` / `gui/delegates.py`:** + * **Load Dropdown Options:** Source dropdowns (`AssetType`, `ItemType`) from `config.py`. + * **Data Handling:** Read/write user modifications to overridable fields in `SourceRule` objects. + * **No UI for Static Config:** Do not provide UI editing for resolution or merge instructions. + +* **`main.py`:** + * **Receive Rule List:** Accept `List[SourceRule]` from GUI. + * **Instantiate New Engine:** Import and instantiate the new `ProcessingEngine` from `processing_engine.py`. + * **Queue Tasks:** Iterate `SourceRule` list, queue tasks. + * **Call New Engine:** Pass the individual `SourceRule` object to `ProcessingEngine.process` for each task. + +## 4. Rationale / Benefits + +* **Single Source of Truth:** GUI holds the final `SourceRule` objects. +* **Backend Simplification:** New `processing_engine.py` is focused solely on execution based on explicit rules and static config. +* **Decoupling:** Reduced coupling between GUI/prediction and backend processing. +* **Clarity:** Clearer data flow and component responsibilities. +* **Maintainability:** Easier maintenance and debugging. +* **Centralized Definitions:** `config.py` centralizes allowed types. +* **Preserves Reference:** Keeps `asset_processor.py` available for comparison. +* **Consistent Data Contract:** `SourceRule` structure is consistent from predictor output to engine input, enabling potential GUI bypass. + +## 5. Potential Issues / Considerations + +* **`PredictionHandler` Complexity:** Will require careful implementation of classification/rule population logic. +* **Performance:** Prediction logic needs to remain performant (threading). 
+* **Rule Structure Completeness:** Ensure `SourceRule` dataclasses hold all necessary *overridable* fields. +* **Preset Loading:** Robust preset loading/interpretation needed in `PredictionHandler`. +* **Static Config Loading:** Ensure the new `ProcessingEngine` correctly loads and uses the static `Configuration` object. + +## 6. Documentation + +This document (`ProjectNotes/Data_Flow_Refinement_Plan.md`) serves as the architectural plan. Relevant sections of the Developer Guide will need updating upon implementation. \ No newline at end of file diff --git a/ProjectNotes/GUI_Overhaul_Plan_Unified_View.md b/ProjectNotes/GUI_Overhaul_Plan_Unified_View.md new file mode 100644 index 0000000..4168af4 --- /dev/null +++ b/ProjectNotes/GUI_Overhaul_Plan_Unified_View.md @@ -0,0 +1,65 @@ +# GUI Overhaul Plan: Unified Hierarchical View + +**Task:** Implement a UI overhaul for the Asset Processor Tool GUI to address usability issues and streamline the workflow for viewing and editing processing rules. + +**Context:** + +* A hierarchical rule system (`SourceRule`, `AssetRule`, `FileRule` in `rule_structure.py`) is used by the core engine (`asset_processor.py`). +* The current GUI (`gui/main_window.py`, `gui/rule_hierarchy_model.py`, `gui/rule_editor_widget.py`) uses a `QTreeView` for hierarchy, a separate `RuleEditorWidget` for editing selected items, and a `QTableView` (`PreviewTableModel`) for previewing file classifications. +* Relevant files analyzed: `gui/main_window.py`, `gui/rule_editor_widget.py`, `gui/rule_hierarchy_model.py`. + +**Identified Issues with Current UI:** + +1. **Window Resizing:** Selecting Source/Asset items causes window expansion because `RuleEditorWidget` displays large child lists (`assets`, `files`) as simple labels. +2. **GUI Not Updating on Add:** Potential regression where adding new inputs doesn't reliably update the preview/hierarchy. +3. 
**Incorrect Source Display:** Tree view shows "Source: None" instead of the input path (likely `SourceRule.input_path` is None when model receives it). +4. **Preview Table Stale:** Changes made in `RuleEditorWidget` (e.g., overrides) are not reflected in the `PreviewTableModel` because the `_on_rule_updated` slot in `main_window.py` doesn't trigger a refresh. + +**Agreed-Upon Overhaul Plan:** + +The goal is to create a more unified and streamlined experience by merging the hierarchy, editing overrides, and preview aspects into a single view, reducing redundancy. + +1. **UI Structure Redesign:** + * **Left Panel:** Retain the existing Preset Editor panel (`main_window.py`'s `editor_panel`) for managing preset files (`.json`) and their complex rules (naming patterns, map type mappings, archetype rules, etc.). + * **Right Panel:** Replace the current three-part splitter (Hierarchy Tree, Rule Editor, Preview Table) with a **single Unified Hierarchical View**. + * Implementation: Use a `QTreeView` with a custom `QAbstractItemModel` and custom `QStyledItemDelegate`s for inline editing. + * Hierarchy Display: Show Input Source(s) -> Assets -> Files. + * Visual Cues: Use distinct background colors for rows representing Inputs, Assets, and Files. + +2. **Unified View Columns & Functionality:** + * **Column 1: Name/Hierarchy:** Displays input path, asset name, or file name with indentation. + * **Column 2+: Editable Attributes (Context-Dependent):** Implement inline editors using delegates: + * **Input Row:** Optional editable field for `Supplier` override. + * **Asset Row:** `QComboBox` delegate for `Asset-Type` override (e.g., `GENERIC`, `DECAL`, `MODEL`). + * **File Row:** + * `QLineEdit` delegate for `Target Asset Name` override. + * `QComboBox` delegate for `Item-Type` override (e.g., `MAP-COL`, `MAP-NRM`, `EXTRA`, `MODEL_FILE`). 
+ * **Column X: Status (Optional, Post-Processing):** Non-editable column showing processing status icon/text (Pending, Success, Warning, Error). + * **Column Y: Output Path (Optional, Post-Processing):** Non-editable column showing the final output path after successful processing. + +3. **Data Flow and Initialization:** + * When inputs are added and a preset selected, `PredictionHandler` runs. + * `PredictionHandler` generates the `SourceRule` hierarchy *and* predicts initial `Asset-Type`, `Item-Type`, and `Target Asset Name`. + * The Unified View's model is populated with this `SourceRule`. + * *Initial values* in inline editors are set based on these *predicted* values. + * User edits in the Unified View directly modify attributes on the `SourceRule`, `AssetRule`, or `FileRule` objects held by the model. + +4. **Dropdown Options Source:** + * Available options in dropdowns (`Asset-Type`, `Item-Type`) should be sourced from globally defined lists or Enums (e.g., in `rule_structure.py` or `config.py`). + +5. **Addressing Original Issues (How the Plan Fixes Them):** + * **Window Resizing:** Resolved by removing `RuleEditorWidget`. + * **GUI Not Updating on Add:** Fix requires ensuring `add_input_paths` triggers `PredictionHandler` and updates the new Unified View model correctly. + * **Incorrect Source Display:** Fix requires ensuring `PredictionHandler` correctly populates `SourceRule.input_path`. + * **Preview Table Stale:** Resolved by merging preview/editing; edits are live in the main view. + +**Implementation Tasks:** + +* Modify `gui/main_window.py`: Remove the right-side splitter, `RuleEditorWidget`, `PreviewTableModel`/`View`. Instantiate the new Unified View. Adapt `add_input_paths`, `start_processing`, `_on_rule_hierarchy_ready`, etc., to interact with the new view/model. 
+* Create/Modify Model (`gui/rule_hierarchy_model.py` or new file): Implement a `QAbstractItemModel` supporting multiple columns, hierarchical data, and providing data/flags for inline editing. +* Create Delegates (`gui/delegates.py`?): Implement `QStyledItemDelegate` subclasses for `QComboBox` and `QLineEdit` editors in the tree view. +* Modify `gui/prediction_handler.py`: Ensure it predicts initial override values (`Asset-Type`, `Item-Type`, `Target Asset Name`) and includes them in the data passed back to the main window (likely within the `SourceRule` structure or alongside it). Ensure `SourceRule.input_path` is correctly set. +* Modify `gui/processing_handler.py`: Update it to potentially signal back status/output path updates that can be reflected in the new Unified View model's optional columns. +* Define Dropdown Sources: Add necessary Enums or lists to `rule_structure.py` or `config.py`. + +This plan provides a clear path forward for implementing the UI overhaul. \ No newline at end of file diff --git a/__pycache__/config.cpython-313.pyc b/__pycache__/config.cpython-313.pyc index ab1c078d059f8ea0db6a8db0db58ea1bbc367112..34ebff9ec7285e1518d4fddd2fa8f2e171e5f505 100644 GIT binary patch delta 580 zcmXw#&raJw5XQZB9B7K2-~=axe}Ss4atQ~H6_u=bO)c!$cpSBGz)ES_B1IG}>7gf{ zA&PjB@)TSUCk}h)Gf;Z%p))bv(f6BgKke*VzA^7B<{!f-DY!~!*4Mw*#B99!iMuOh zTLI;O3F-_x168Q#HS`)3w%NpOU5W)TgaIZPP=*RDb_&7-OO&AsH5641mS)Pm-B}P7 zsEb9Eg{ndWma&?!($QdbR^--Ty@PLv8f=IqXbBrOMg7ZG`$b!qbMRe!9-IuNei?rI zG#p9Y9i9xZIpfjbLYDgD)6wbpM>sfdm*rgO#922Aa??vf`HYf~r%sUDBuh=V|D`zO2x&AI3cKcsKQv=ZrP5F|69}v4Z<>^NmwDQ64nUogbhM#`c`We^O5PXmU*u! 
zRS~kB#YxoV0GR{8)2!ES$f^?r(R=PbMd@M8L0bRA*+tEVO^(Ejn40SP&e9#zU ck0qtP&+e4!qpI8Lonk$h)3o@zaDb=&0gOzQcK`qY delta 352 zcmXYsyG{Z@6oz+pM^=GdZVTcKp`=+VJ7Yo=wLnyUEUYGr0c;@TpoJ3SGZ12B=L=ZS z`35$Y7W)(!3tP{i^XL0=PI5AH<}>;3y5EiyHTb+fN3JKSiJLFHaQ9@uAfsYHoC^{# zD}qc|WI;~q#Ye>8z(vBB6i1RAq>yHG(#S0Mt35wXF0zzhl1vIYta9>L^DV3|qN@#T z`jX#BV~a8`kei$(b4c6>GDw0F>T4^&ZE0F(`$XkPSf9(6t zeUOx?WCeO5U7p8xp7(qFzVDn(kH+U4k64Fo)FvRUWyC)0ppIcDb+Wu^#6?|<)=k~)m!~}ZawDGM zDq1z{rQTs5^$q)}pV3=Ks)uW6&2TNP9j>Ew!}YX&xPdmXI@?I&a1(73NE?o1A88(L zp)JF$w3XF5Ms^Ih(KeQLj_e#3sKC;uk#@QZ=v^bbhdXEol=-H6j+jM%GkKAt!D%k^ zg1!aC-(6(-;8ByBr-#JqWH4FvNBGdgth5eF>!)js(j(Ic#fIMJ z2#GEcA~sH&LNBWHrSGpm<|$LRRuuOV0&{EHo|Zm79fWy3uFp##(@l`C*tbqYpO}6k zbft7uH7z#l6wu3&UF4`nLXS-!3uPar*Xx1W6oPeaePA9>gsyEZB}Tu+(PmAvrgk#j zX(!Pb>|fWk4chL}>q8Td_7M^vEz{>i_fGGFui~EWsv?KYdM_|bEakW4J=1$DP*s&s zZL>DfTcp_uazVRJcAYkjb*iZu(+qjavXji10xcioRL24)g`0?G<^pD=W`d?;k}OZ8 z)5(GBQY@QEQ-zPpkyv_uAt_~~cz{#5XA^Qp;Z7xD8O1u8g>uC)zK}_zQ_*C=s@Ttt zUwTo3I!;WcGm87{J^znu-g1 zkS-HhP##;_}qeO46LFxBMnCMa&^${|~NbnUh z8%ZNE?k%b3jAA2^GKr>?Sv03CC3H5U+DL?O4qG14B3d|7Lxy${lH$Iky?aYz$cDKh z-;Q<=;sy;<(hL7+i(Mpa3Y){FbFHdGH3j>kKA~zoRs7XQJzx^KV_eyowM8mDwTn#E z>uJ4Z${MzYn)K4*uV^iqe{->1PvQLaKFVc%))KJg_UN21$wD$c3tT=c%%qW-Bq3T9 znPBd4XLnSNWfJpJKo+JtyOYvYDVd5ww&zLtd7-nLrGd9QyXTSJXMrx~@4V2{Ip5P6 z7dnS}J4bsv#W4_WD{HIRV4V4AMzP1E8EIxdqd4Y(_mdKpbM?<^Q(=?>$O6byuMntG z?Ajz1>#2eM@M(oRGd4J`a5SCH&=y#;;-S)f`l>XfcBQy4Mdd_HU8Ld>GoU8s^*W1u zLso2ZCZ5h_6z8mz!BHiuI3f{TOeCUkiPTJ5;js#rJBG`(QT#TF6DAT-7vjRCwiq`q zM$g!_1t}Jwkdeg&_9$mh_f8*tOn_-+q*P}5(7~QV$BMbb!lZOP1L{E*Ceqg=Dx_zG z$!qE9zH+4y$$+|>o(ByC>Ot|qCS?~QFc5x2dH@FNf`9pE@LD9lZ}0vG`zxOBdVV(h z&&fBEi)YsSgNws!)t!qY4;`eo<(1zWqJE{l~k1viC=OSNNU{3s>#> z)akU^9@Z0gb-~s0o~z{tLo2S%#gl8C{aZs{8_Ih-e>_|0>V2=P_s+4Eu7P~#VBT~3 zJ~y-p^Ce&hi;okMOcf1-vH=)oN&(y{VfqRxsA#@Zrmz7hq5vqSbWjKVgiWHk2U;K1 zdPtEeLji;;3c1LYP#6MVoT_L&W=@&I=1@(sOizh6(LQAeyXX*|qU)GdAlp#8MZSdE zBUXvtFQE2`{u1hHu|}*F>y7~c-KO_?v7xA9!T`v`rZ4EdS!@wo#T{ZBs;+H%+zAu{ 
zP*}F5XfK&zjkpU&+`hNnWwaeSZ3k&5Q--Mkz|@Ekamp|hp$aNrwCL(jAUny}rv^0X z%pDb_j4Wgqn9ZdiKQtvX%?D6q9u!DV5{$kOrL)<25L00xN@a-#X&rphkCA6`M^2`* z$+(b8XM{K)msmzPIT03O>1-;qA4+DT*<=Pa>a{ez40;Vg!J%O8yoM_TX61z$zzed_ zeeukMEP&>nNhjk#8vuh1Rcyt zS5TLtV#BeZvmzMNmLqga!P($`Vb4N3nMlm0Q+onnpa8(BPzIar(H6d^h=zpjYl+Mp z=yM_XqMS|%phu;YjF>MNu+U@h%@hZfu4Dl{#pze@6PkO{IliX{n!wid0IV7u%bgep zNJJBHiOo*?jsjQ`m@%U(84zlLNf^9RmjTKc_JWN?kK=%Ltpk0$YdL36kH!(X)1%Sr ziTUijkjlO=P6p=wKR$;g+{xw|1JjP3AIL6%4aQNhxxm`XK|%kdvc_%jnRE$d zV*wJ8Xd;@FwXwo~+E_;OFCHntE9SJUIAaT0@OMB>D3*BN{U*))=q|Kq6Jq$!K%|uh1Y)#HZp5=m_)&>OqQ*>1p_vm%v3>BpWu; z(6~6hVRl-dH33fW*Sx}imw&lx$@*KjZ@sPk_4e1pKaKwA;!4}WrSXS6sj4q{I^Oei ztkur1HwuNur**KL?dRiAmo$*8PA#g|c~7*+k6ul9@jx{Or5ook+R+jew$&D*l(IlWc!E|9?B-d@7yFN*yMGG`Rpe)uAPz3&tu?~t9Rc3IwOH^I9MPjNtj&t?c-F94#NCK$|uK$Z!DSY`-d zS*EQbH*Gt@g~<_|WL<{)(T*}-A@q>^7(uyNif@_ug)gc_j4Xi2Cm(0yp zxsS?g7VTjZ%(^5lFv|hRG#mi{nS&5(DE0rrjoA8#jz4KkXV_9cChpV}iY|M?24qc{ zA-cw;EPa+)PW>&3t+{d?Y5+~eDXLKHk#$rc02-T1$Ah_^^9+6pYLKi~5HnHA3}lI+ zMv+5lg4onvh6Z{CjBx?f0H>2hv$k+B&8B(<@ZlMN7*m{DUsC*$;Rb=#sG2C-HU&OZsY>I775rsCx6Rv z(~++^akuu@P4}7#ec|Q4aQ?)(y#M^F<2lsZtoo@t@n0r>k+?IJ_n%pH46pf{mTYUy zZA;z_v&-2D26gA|A71|cR$!&=_|o`Vov<{x&UkIcT6b3FV4_wHfx|qKh&r6r{@npVk{yv}j>?3dECb2j>vBBug(R^#)-Hu-e?ga|{ z=a&1=<@?U(&tJ%2oXTIA&Nn~5%D=Eyw*z`dgZd?a!u8)e}l$ay1?nFN(H z=v}4KP+*Tl;^`PDSd5%e^qra3r0C7j8SJbM)GB7FPb=tFEl|i>eA1?Nq)nL8@$EZl;3$^gTU{bxj*2~Cp@vazH#mo zv(MVOX(2ZLe=x-qFzX@)t&K<5wVQD$+G<3Z*)*fsL}LmkC16hBZ9`M4n$c;euF*u> zv=h!=?0EK~nx{#aDoJNhOL+~Txoq)P-|F?DI+`*GPIVlHyBzWAwz=}%PWON^xn%Y2ehv% zw$oEOErb|Is6j6+{+88Oh?N%W!Ni-#a?QH)rJ@U&`4I>)K)-nCdFWWR-Y(U%i=;2T zsQ8OoOoJ_o8)I;hYwQ?+;<6RD+F=CyD(Wev3b4MagduJK*q?`RT{I4k0J^`BW0*k0 zHcmA+n@xalp^{R08v#ohvfzO!3^?&KsFt%pvIS0nPT{`oU2E@L;#M7PYb`tRw{g9t z>$RuUl&+C_ei|R~Wxi0MPp99p4CK*>XFc1wK+k zS1d}Qa+G$u70fmFW9Xu+5Mn7&I28)!_6|lfQDFGuu@%;+!H%*Py#fn-jpYt#Ey5}3 z`U1qfz_K%A$+|?pdrPx`i~8UzP>L^fYE`3+dPA|YmUK6!IS3DAXcP%9!Rrl;?IlRd zjO|quOs^rakFkA;_}oF(&p{Pg*w(f05bJ&Qb 
zXKtR!H}>B>ba!{YesGmP{Sn&Kza6@L_SVSFkw31Z;WVq1%y5pp+xPC_eDnA!Kf(Gm zC^xJ?_PH#;@L%uPLw@4w4_Mv`_H|h9b$B3o?@+M6!}4~A71EV;_Z*WC&}z`b)~8!5 zh#n#s_%`%)Woq57lz^>aO_UUFAx0Uz)E2FHn-~QTKCnw z;&{oCZ+h~+?-&GfJU%8z_0O&H7YwoZ&d6sEJ*0LAh#GVOEj%xI^1|4=SKf`~+d`{8 z@gwZzyN*}=uee`wBgpi$elDX;fB8V)0dntvtN)PY-mzf+e#_hYt&lF08;ess3pDkAEs%2>dyay2fAX5gjNE~u9vFieDzdr5}D6uUOA(vVK+*Cfg) zO5}pc09qI(65u2tnqp3ZW>Apl6%R8gOdJ4n6;KwYp`6J=!QBpj>pKIleRb*7D&P7c z?a_Ztqv5h0tl-m#Za8qo2;r8e++Uh%(W@(ev9MVmIR#3MdFcRG9~m=i*97b|hKJQih;LI^D9W>GM_aRLhPv64ub z3|(q%FWnHf(K-;qda|h51l5&e$HUR95Gcb-X+c3V{@P^qnFe#aS+uLS1U(2U7-1b? z`1?jMu>@>2mg^}Q#88S1GJtXxr1^!+4Rsw!IB*N*YRA$n3%g_-Dh3mGz(>k8p3O>G z2~m3nBK6Dx*1~`t;^hRy8=xkUx(ZMUjyN;YJY2{|YcI$@|V>g^JUK>>}4$(niBZ%k0;WiMYViF4pDb~1jDLYFeP|4yk zG>c?bAfXv{+^R+M=n_(ZV1Y})CW5vd;_;fc{~N4mL2bXyNBlkN{zCvP%0hXE|Mlb@ ze&xtO{_x=4%Xxoj)gc;jyn(yPyD#MZldF#KnzyFl?Swzwy{Mm9<fd@>unjmK`Nq?${Los_$ZTmd^6um+AA%;EK2q1dbZXt( z^lHbCT?OyHW$!-3TF#f8`PL(EPX0XdRwNHoZXR6qonCL~d_7+1I={7!tO;pA-& z0*wB;f=^iX2?bx*vajp)%=^Cm2+RPI<=gt-Ir1*|tKPf&^R46W`zAi|!(b0}4r_nSs1wM~;Ru>S4R(C!j<_E-eLj zO9?BrFZ1oI{BDLq5TBg5&-WSlWaK{oEc=EaNzBbaDOH6QWi3#}BD-`BOB49QJKSoh z7M*g6eg#D|#Ui%~huVjtys;oIO=1en2%#x>m4;~-un=Du-IP)MAx!(uPM@?)otYf^? 
zho1$4%unNB&%jHu4Z;N)xEpeup2o7>SjOQMN7|2hw32X&0Cx`X+6}U?QVqCMyNVH< z$+A0o6x9pGNSESRNJcYok}?msEwYz11rLV{b8wKERBUi52DpRTkRBP8I`D;T2q&gD zM7VGAc<^lIB%HxhbSTvDH#!d#y<>{bui`Hfo=?Q#VxmmZsZcyHvEm&XHQ<6aTK#(H zD=1Z{*ddHgvoV~ws#I-iJ6sst4pB2&++Ohl5Dv0U(_)*5$`vAY9=tVPy0GPOuYSUHD?I>;$Gz zOQ@fXg9`^&)f<<#E15`iVL@?#aTYT-;C?p`SqPJAMuLd}1d*iJ8$|u^LFh(t99NGa zo~Hi-Sy1alz5;JJFE^PsZN%hzKwJ-q`vGx2AXOib#@~?>4~X{x@joD*-;v-73H|}7 z&Y13#p%2L34O^AT_dD*H|Kc9|fD1g}_I$wY-Dqrp`d5!UB>21WB(YZ&Yz^<(8eVm* z*xDB@pSlK2rs{_wbDOF2_JxN8ejZ-+T``$PO-p+o5|-GQvk;D7%B@(MKrMS~ZpU7k zdueXDs;y9U?Dc(b*8IHjt;XeD#}-fjhV$Rvv&uDpVk1?}ukQX~_xHP3Ji8YMKjd8B z8vok(?V(j}2h`OzynJ-=*)`5r;A)q-+EuRpL%ewT^`WJ6x2A4R{oV8Txt5Kcglqn6 z<4hxQ)NB&F$@d@ry4!Ovw-x+d%l@u{Ke+4G|a literal 28039 zcmdUYX>c1?dSK(eiGu(M5L7Je6e-@M&}!7CnfUO-MW% zQ4+*=2#TN-!vw?sDi{TRRx(Qbt7266SIwy5S2?U1(lS~S!K#LJLwZI(WMB;VSut#6 zj2PC$nDDQeF~hHV*fLbYlnhxJ>yVAH4cQqxNz|YkG{cUeQl@ms$vANh+F{pF8B;b? z&Xfx^x|s^fJYC3UQTRXPnh@DK7d0N$y>cQfV0wRlyoN{&!9>M;uYr5_1YK`FE(um{-B6CxVGfUL{vL!Ng`l@%Th67U{bh znpsN37|tAw2WDami;++wH0xD!>XC3f!KqJ%XA+!datZP|{rF-c9E%1cP=}>A7M%;v zFEK%s#W@Nd`xqw108mL!JRV92tw9@3`ExirkKvMSeSZJAznkn2h9jX_G7%%g3yU!( zK}JIe%OG=&oQVXXrg%4*9O{e4mzWUQJR@)4w|K3Y3`S?kW(1cAEb;`#82lo16-Xh& zQ8IBUM9zkp&hFq2C{>R!gpji4)8OPrCC4-3SNsXG33Pg$${~)iJr*; z-`^p@=5()zn2WJ^Xhslo+%+ItgbMy1QIMQbtuWIPH^geuTrei&9Z4HBm|o)I&Y$=ae^XD-4gcIxlRajRavLNI5m; zCri{5w1QUBa$HM_K~0HMAK`aA`Ce>wr-D*H)h3%oi4VoeNNj!{#F?CnAz=!U!NtW$cm|IQ-{*UqT7vPJ zM0g?OjgwPNEs@acp-40cj~xf&7s#d-oCeu$YFR+yJ`Zq7d(-)jriG5CS+Z%MyJ@t$ zi5i13sljDAEwr-`OmMo{U?McPkl^%}K=vacCY~%C5m3R9B#Z?ZPu--qgwqK~a+;HU zC+Jh0dT^|NoKrKgSb}i@&76e^EyP|A4e(WQri;ONc!npEvrq{p6kHJBsp8k-oHm}A zjV&cO!+a=#S`9IrJ`g~}1Ol8o9G#1CX7mb?JBG;BGFH@%0T2oBgcwm$AjX7<5g&8{ zL7WO8;z-0vd~v5wbx-fwLjqcfP&6^UdsoNquKd$pax!!^0n=v?W#A^1eM?+P0$G)LU)vblUhBK9X>-SxO6@TI+F;OV zAD0m(N7m^1(CB%4AZ=_~?#-!nUmy6|0Bdb}Z$7)D`@d_g2FMAAbTfhawfJEO|(PKTyQCpKr`x<7;_osHKY{m_9f2>ssxF@^5h)o z3vsgLrNN0f2{Z3pEHVqQUie@Gc#w=I0Fxjy8&1MUJQBXhAWedwZOElKs(le=3LwM> 
zs)}hg6rW+jSi#|}JX9jY1hD48nZnVWYExCHY#>3cOEi;^&phN5v$?Yus(irEW&#_5^RYfGR#%`!;O1-xQOb4Ldh z0oHVYTGc<6JU$L85fh#bVeEu{B;iW}7-KkDgYXuF1glp(888_GFK}DTe$=2&s6cG* zO488LAxK2>)M)T(cwuRQj4mx)xohw>%u9UalwY-k=(SnL2KqO&k?Kt@FY_2E1$ofy{{__U+ScP=f$HyE{o zij%l}+((LC8W*Gux=9?NxGm5`LQFUqi3@GfU#=~w__JH`nmJ`G&KYJFm%vPcX@XNN z1g~a1=Ea$=Sq-4qm0wTSi%z!m?Of3G=b+Mu3CIP$xk@(0T^%4 z7l0Cw1QyZkYyb-5IgC@q6AY(1%lMXw(6~P%v>NSJi44ZP@+tzcyS9`>Nsn^NfT1)1WiEhQzzDQxe|Uj24M2TFAdU&<+S`)aYp*)LhX`1_6F!u-+8VkMk%J$Pzqmuc|Ag2 zURRTwCsD0T!;`-6LJ|1!<29)KGfE*jMb~Tt=y`k+?tV$9ld=dfu%w4#njWR3R9#BY zWVC+7&utg27F*h83>Pu{3XI_*I%Qrrk=5E!fPXRSi(n`WnQ8VrHkB*30_~v@nlYE^lx~4YJmx6{M(fs#=6SckSFCfYz*znrM?B z87$i_T0?2O6j3v6_SbED1&@?&XI^`^JOxHCgDH6mj6nutr3_IUWsKTstJKeJ&=$%y zA*v}eW$}ZIZNF%%$WP#m8p><^P1|7zg0_oLBr*Oq+9I`a7||28(Irw2aP%qMFTr?N zX$xQjC7?a6tkyHnEM-Tf_R1DQ#*fO{ge3$@4_FlMb-rB(Zc$!CHNd0q)wYY`;Y;_* zba2c^C&u5kR|Q-mXd7+sRAJp-B%PRQ6|3r0f}TGKvgm*|itv*v!*!KU!KmRc+9C47 z?-TR#zo`FcMu=DF>n+&tbG z7a1=RE>s21RTghSvS2EMKLj(xc$R^c8qgi*mZDfU!|IS+2_uo20Qw%U(qCc>YpCROZPAWUhb~BB=CUBj}o7fP~BLeTG;vHb7)3s#kzZBKuVlgcr)XP%iVo%wu7y{3Byx;KKX3YV8`0_vUL>A^ zk7PTTUT_mvi&55v3p@;h>)_dmtts@`>?S7x!x$5W^_O5o&TFA7LbE>H0ACnU4@x|g zL}Z`}ppUxA9dt~&n*E@eEdK_ zOaBL(#fr!lV3#=fpg{qtZp`q%%1K&IR?qT-e z!iR=vPF=!Vfd8TVZQVPT?^=E||0j_@jAYckpXf@Y)#>V#zWVL?cNX4S$mrW2x++$E zX_t3-EN8FH+8a~$#^s?c^-j&OBByuUwBE3?l|A=b?^Utxv5bCv(_XWBJ#BAW(QbNb z*Gkf!ovhyd&{@BBlsz<@at2onpO)2T%bHVV&FdkyteGwATrq)_Th;u|vA2$`AKFl+ zJ%^x3uA=ds+P7-gEAF01R~%R=$+;WevA$(p*WayAyZ5bFJ|$aNeRa-VwPN|D$(?I# zXU%oFn!R_2?jGJ??lu0*`(y9@(e#Oz*q$l2YI{${@NByn~FXq{PZg)(N7<&K*LY{8Rb6`gyXF8 z)52os*z;lb+^cNe)abF1tB?V|;af?bWquc6XS4^(q^_##SXCn6CrT zCGM=HHf5<@YhG_%uY%h5gPBbbbtjaM38JJ|nQQ7`E%mwDvHQvU5%#4iwt9Ny0+h4Y zvgE$IUtz6%tiJEJ56v|hvXeFKdqk)-{fbZZj;!9D(z{vD!Hvod^S$1`Qr|!QUv$}l zbE$!I?5XqY`6+h#0z36GTM@|UU&+~O*7QG^yj%DEv)PU#sg5IT`_YYy_w4snZ0kvA z;(@;Ji%pXgRX)sDUX{{Uv9-hZhwdL{&%VS?O|vguV5?uw=mWSew!QCO@E*ms4zkvv z2l}BewsffI=kYxt$$!54Waq#R;{FcX&>_|R{XK>ujq2ye6!7qu8VkBRYKHc!{&E-2 
z*{^{d&ir~X5}pm<(H-a9GP@e)$-L)=Gr?+~xav}1TcbG&O4g3VlDTcg}<7g*L{ zf%8}1mIM7(Nh{#4qSbI$@70OgFiGqrqFPE5)xn=0{tQz(u{@;(*C>o!bVb$RUqMzr zzGyY2>ryC*sBsq&HOZ{ZVr%XCr(k4|TL4pNe&y?yEKqRizCsX7fWhK_eNG!j+DvJY zvJcdmp32OgZ=`pM0KOr;mJYS zr8j!vjec|mzi~)!48j}3=ywWML!_01JR%gd2J|>FRj$1n8Akb9Hu&D5D$qZ%4*+K! zt)~sNma6ipu?1Qq)(h5KFW~HyNn^1Uh%(%2IqfX8sliq)#w`ow0J{&tf^Am}Pp~H| zqRt-YDa9!0#(X*-1ye_~=U2I4)pbsZd|D*@8w>GqGT+Akm(GBPg=<+w*ya_K9qvfdDcz;U((r zsb?GT7sv*(g;&2(hae*#8Q|@_l^a-y&wDX8Uoj?46TF>=h)TM7fkeJfxz`npMoTkq zDuN#w`X*@+%pQRB`FsrerecpVn5ri5^@yMSq={O(xDZZYyDVuS6T$f9I21_QWcetQ zTxI>@PH?FCY>mcaQKj$cB?W8OvvI5i1?%xToNAKfUpC2e%{ax``& zDv%7QTaiUM6AHf$6`&qZx_q#P18ylqvdmbl>IDxUgaSpjn{=oU1B-J+(k&n)NEbQ7 z1QA{^aFSkW4MAKl;2DM%GDL7RjKS#BY{Qu|%X^qJBCr59TfoIyNFI{_G`tS%Hp5mQ zMBRj<46LdW401qln%U6BrFqT}f?)z{FR-u#Un_z+kKSqHxHGU7!=P_5@_Zmu8~mNH zjuM^at;3g5fdIO}r-AU6F;SGpCSVMuunD+`Qb_&47;%;PbpTNV49yD8KZ|I$@xDN_ zV7Bp3`j8mZsGD2D;Xs@sESio;c42)3=W|AE2J+^lz_0)W13|2#-0uSfp=c*0?VJvH zh-96K0G^zlXY19OI5N4n8GphfZv&PvOHq*ukP+rhfX~8T{4U5jSbYZxjUI*ftXzM4 z@9GkqK6TxJ6iHgb)%Bs1#kxl-5iNUpA)RoCR4uI15OsdIS* zJcp%aw}Pvlv;%^Zwu}cg#!U;kKJ}rcYvbIfW$u+zn`Z0H!5f3CruTZ+&FRK{Y{UM$ zVYcje#@zF;u66zJM|B5pTW%>fZKZb%w+(Dn`}*ni*WRbr`|jF*wD;~C>Afe}J$?7i z{MAYJ^i+CeIy(|fjRe`@i)`gg#x|R)s6*D=rgK-$324>kob5Shb<4U5Y`Mo#po=P_#-UxnFef+lJmLgZ(_*USSakIK^tu_q@(0ImjpUqU*4dnLHm{pA&V9N4CsNM6x5n1`*H3|o<*r%tq|4iL?mc&dDffX7 z-FtEswQF7J3dm|&x2N2#-?gBegXxO4oV#&dk#c*#YkuhVykmOH#O~OC_w~D%He&Z* zy+6&KkFt%ijC(N;_E|Gg)1&xJ6H(H=l~536yK<#u&QSvZpBXe}?G^z}*55vEBHVS4 z2(^93LudJ&(c7bJ$RSgv+2&r7h49X==0xmaZ=zzXUE%*CWCRk^ml0%MA>MH zSzOtfD{st|)xS5h-k#oZ;7=PiBI)ko)Q;g)*+{OcWwWw+_0qrV_{^lW8UK;c8jR2c zRQ7rN8z8xVW*Dd>eyr==IbcxzFe$gzLb_=a+!2I46L#6+XkUcn#uPq5(e2htDhFhd+veL_XXZ zG~0=n9ujDpdt`)J?MoSyl0sO_X$$JMQl<<_5vIjq<<6$OVJRywnSsRuxI_~~R4E}$ zPclwrb>wjmoGs)KlrIuFEn)B)7&k@f zpsd|jAR>Nu0fxm4R9o>DUL3>mT{0uCh?|_U|DU$-R0;dE=PuwWq>P?T>rw2dOZvceBXVMU^u zDEOtM&{Oo8k;Ol@V2>`|?}CWs*#`!6p{+OxVTVG1pU7bWuUYVIVs9k&laVcaz!BJ! 
z5WBa_+YZ}lA}H(<>SnHlk+q$y@8L}yWTOCmppi-}=3B@14+MWgZ0AKBbBcQwo;u)< zN_zURUX6;2T_eQ%c7v@&PWD4+T**vu5vkk(-av(h<4UXTgCg1IIfJx{cT-EXW3JyxW+R(19%pkVCfc<~CSlm=^$1C$^lI zB^-P_heQ%gKxpj{7+Xtu7aX>g$h_Tk2{Z}BG90-p)+W-yoPm)y2A(anp2>U8Bohf+ zJe&@Uh*$)zOdyD9xItj)(nUd5I2F{)8Tl?m)H7%;iqoJ)k43!G0?=VDprS|73wu9c z1EG)o2>m|f_hTdr%d2CMddB=7y8T;p`+ZbICv+8)L|MpsiK8{_0wcwNjFgM~$Q2YN z=4}8v0V6(6@`g&KV5qRx-9Kt%4-BUF3}yF>r}m7qyC>L*$qWq&YB){5nx$hYI>t^e zvX<8}Y6h$mx99CnmfV&0?9O_+Ql2hw0;S6jvufvNS;gCptN$upxiedNAXRxFUD=f` z>s}t;tf+c>lCAGdSMSSKA4ydoNmn0BR~%>6Wt%Sd+s@T!x}qanu`gAz58Ov-*TLm6 zurUl3IlVcjhxkVa>)e6UWpLlJ>aE)4Q;(ep{lTvdvbMJM-VX-eAGmuq-F}2^JIaJ=9~H(dbYITy_2l>P`dGOwy`JG z*uyrQU{73NU!Kjr980|%V=pYS_SZ6cCTFtWJaY4hV0N)?@A{4pI^OTdwjN2f9!a+z zW4*`OvYw3jgaG)d)s?l9DJv-SX={^YhIOpZeX#KULe|%l^7W*By==!xwzBVmt^Xm| zMW)*(w&vJ9?awSfwq%b}spC}oIL#hA!&aTm*v`pnOj(;)eba9rnyMZVN{tH{1cn<1 z*13<}-_Pzl#X1Hu`oX`kI5!KI+XdpcUAku~OQLJV6 z!;(K}*&t~ACF-3~*g@wnqD@Kf<*pn>F)doue&M@|bb5UGrIeThUO#C&XjC}`OSAmt zFQzw18Z&kbffiaU6cXGNKZNhJdde==mDe-zo;2h$-gb#pNg5Q)7RB`oKk#e&C6`z&Z9015Z)*LKJt)CNoa%0mDc1iWcgG0i1UA4!ZZgKn4SVlmANyV>M= z&ggfaTSkpsa|LC7#JeYGl|)rrK`nLLX#+bz=IF@lRsO~&Az_oPv|LXT(9b)bgh3&o zQrn>M%Bc1}2~EkIieO&=uKYFCE~d~1r+tFN7u5ZHx@4P%zYJY6jEhOTiPjbJJRhj% zNABKjmtPbfSw1aMEpjRd+?I_D={>ks^HNg`8w+hH#Adwnf2*DnL=|PF4gLc%M1`bC zLZMa+uD)kNLwN49aktUNLfmPizw61?DLedgpfKtwG~n<>OMMHjWNAj3LW{Kc5@$I- z(hs&>R9S}!W+L;sAN!AQdj*fch4Q^h3}K-yFESg<1`DLT*a&muP+6G`BjZdl_zD@4o-S>)mN8E~H`17UNqK%0->H^ta(_JXO$svF~iVh%KmTeTllFWzllFjH*Fc%z}153TtCBT8h=xk)M0=u4?~CoAQG;nl)VK>UC??Z8^VA?djF&$L<6VXD)b6Mo{wg4tz7qaC z@K^Pe@9-Ykcet9amU{fp07q#CrmMyJUu5PC!hD{e(~H*fjJZy1DOx3?xfqZ3(p81B zHvUstiDLcQNmoOU?JF45V`5+WC&X0#7c`^&d3@+9i94Vv%GxT1-GL{tdtFwd7B$(wRh!Vr~dgUaLKTG4h4B$&AJMBRjgcho|=5QQ_A2QFlH2fw`LsXEo3a^O6p=yog*^%D7mpZ;#NG+vNH` z$x0OC-%+{-i|1j_6FjVV4i8Vza6nNZ4?*@O zpOl{{Ns|)a3T02yME<{(J&%kF#bmFS-gcPN`154%Bu#GT+f!t(PbPbHbe+V*LfMma zw12TY?0$Q%~1ZgE-XysUe(dq)pK#x>Dl* zAHn?B@Iv49e*<_tTL&H%XAbgtkRvk4v*IYDtLRGEeA*zw&`s1R_#SuAJ4CFveN~ES 
zq+?>~=i)N{!jLlh#@#Yr6l;HiZYZ3`X^)6~5b?b6pPlEYWpv7U?ve4lQG^kA-b^=( zSibKp?G8lv?mr!um#3 zFKerqJebjjPWh_dm$Bo)S80gqd-SIaW21a3@2%X13uxcZ#ijHttkFP7G{Q&?>o6|7 z4)Yui$X5v^eDq@vo`X-*3*d8TJ8zK1BDJ9zGjDIhdnP$BfexC8O9${kkS5-*27#8s zAtmj|l_hOp1CJiRYZQ(kfemkQVb|m}A4DLcoo?d3J&0MP@F6)Q-fRf5k1f3;gXAbU zth`=TQteBI7vT;E?Icyc1IeU+5N{8J&7|m{pP){~T5d&tyzCz^o6wdu1=a|?6irj_6chIRdoOPU@ zL}vn>=%M-oCkOpw$=T=BjGdhwu#*tnkJ}2%9FxE%w0{$yU6XH4A|kny#^)G`XbC~l zQatoFJb`;VAxKp?cL~w-MAL&biV;3!i5<}q`*k=T%)4`5LeEd^VgO5B)mQUbk{$LzQv;aSs zaOPxCcmT6jek^x` z#5BS%m}IUYNr(6g2z&rB65Q?0NPf_fVm?kMWF_mddtVA}khLwqLc1;b0sUlsa7-iY z|K_Vrjy#1#j3S!kC8S>f$>f+wuM8_`%%EQ5>4)x~_3<)H!kvMl3EmJ5=NQ4Bc zrBkzBi0Y!yMXAoyIodPX=O64Dp`Jl!(R!alXVEfG(8-kugMgP5n7jBXJ|M9v4jk@k&D|HiWoYDPEHdwojyM131rlVPN(|@nz9Q+Ym#lESR>^4 zbJGfAwa`q%?@;h1JNm_VC$ZGC+DumCxA-hJu}*22fnaw$LBXhS99uE`lU}h+kzrz; z(nyr>YMWvhZGy|dE4=kXs}5IsjcY9wKzV874HRs z2r@W2R~A^c6ZHW2FJPw$uN|O0D06e+t8hFnI`oy(hT|xjo72HBivMP=qY}t3%12P4 zm`hF#WfPo{S2z$j4$R_oaJT_%2x2$~+DZqTO^}KgScuKSQ5k%QRW~k;1eVi;o^>2PzBvV$}lg`oMc#I32&IEY7?RC73C6JCDk0!Qa}?$sm%)EdzdjXRELqL05euR0}T<$IfM{47)1dn4Lg58 zL7NqH5C+z%3Fa&4_EmICpsL^~h;RZ}3sG*rgEGI4ZY$^w-XtSY9ET%`As$U4m~%?6 zc-r|Oey0Q=&ov422ig(9Ib;t)pK_(!D+AtfrQ*SUKn!LI1mJO{+Z#ZTP7xAb=0T0T z_+u`)@(l>NSZG}N+T_`GKB83U5ELqk;sdcV8-PO$O=22K$I+^T5V!Z-AAa|d8Xb1b z{0)@*T?K^lg$!uV(L+mh+ETmR57B2?Q&q}Tl{VEZpM)?sjpJ71o%Y-98FlSLyX#KH zH!Hs7fs-eM^M@@5GwLqbJ>x8cJxF#})?Syf*R5za&9+;vZw#*}awhZ5gEtP|?7q>R zbCumWcl+F(mv6tEb~UZ^=d8AyS8rUsnY@u)4Zah8EBwyow=S>Gu?NS~zHvCMFl%c_ z*&5dRKC-n+v2Ba&Yp=75SJ8EO}q0>#qEkMf~dj=H8^^eD9im#_iEj@Jt(4Y;Kl%3-hc0_Z2uY7bv9!@ z_qc@68^3<#YgfMhm9Kr}*2`I4V@lV!Sy{9C)pVtA#ftGk!GoQ518ip>Ywv%cKLyP8 z)IJIDGTV8QJ^2b7T+9Zqrh-?~!6f_2b=LkyM*o#9twH1bw5B6lvoBS%FI{th)qAA6 zA((HF-8aPAhco(-oZgew*QNAz>x~;u7Nw3pY}mmzcc&ZR^bHbDl5~`P`_i{+vi2P* z`;PUBlzk66*}Cd`ZSS_NRl~_0wVf$P=kjpQRF*Z>q)at%;z$Y(9T6i3+t!EIkFbue zjJ{jS^R733(Dr`YdNsQrZOzJ_4X4hA(`PTUXCkZvjv9$#RQYZJ;wxKnuDYDleP{Ug zaIUT^=P0}5yX`|6BezH3u*o~-+vd+;`JdzWP$i+PplFtJ`~b 
zI8_G$p|Ia<$J(`&b2r)x*R*yiRk{;|6H?Js>AvNWhrH;noXbGys3^vD8Fjtzu=2+0 zz#6rBbluHb_GHw1MHp*Cey1F3Ign8wl-_vJ!5Q8@*3zF*pOSLh*DtJ(vzEgd^%1e^ z#%t-i+Y0w_hxPVd!1|xlGkiQZ>pg;-O!h=?Z4&0L-DqotvYe<(7g_}a^ivg zG$7+}-SOP^u(c=HUYb2|hCP#HtFLG5Z*0~!z4OL*-&h|^*Y3aN`NUZZ0mpYP-M+-u z_1+u4cZ97y{h-YM5b`eHzRcE-JSZE5%79ul#9cEP^Xz6N`A++H+t;q8D|fF9e`2ka z5IDMVePe;G9C=_LeF*adYQFkZ#y;>%fPoq?Z^9YNtG|KCq4d+5`fSafRLve3vY(i0 zHf>JU)snV(S9;LIc=c{jhHC?;&?!k2Jq0Q#j_1zzIz2Eiy zL+R!NtgTVl56OD^*#4K;K4Aa!1KR~@#++bJ&$1ICHuM@>#XPXZAJ%&_t%up8)9lL^ z)Aci~x%Lxl{idq|c9^DJzFT(KLV9QL_8=OAYdzl^cz57?WABco8#_1j>Bi%m9lP$@ z|D@s%EB?rnhGS#OS|#3{X8m*Q>3O#H(u1id)Bm*<)hFR83eqwFFRKrO}JOA%f9b;cwWEbCH>%WpI`|5AB(B073+b!RE zIqPaoxmsB`cE@#O#h4=@*_$GJ(aA{tq*MVNN(_U-!8U$ob8%mD^F)^{ujgqS5~vDC?>UzTPB1_ zgM`Uo#yKRzq(5UjRTPs0?7>m?z!+OOp0Q0p0e)*VYuTAm@51xy?+mP*f1ob^?PE9t z$W{RX?eG!jH#f8HQ}-43;%wP)#yo<*({B#kI(>8a&0!RxZg79`xE=3_Rcf5l?0b|w z=4X#mECrvebL^S(tm7p%6kai;^sh>vtDB}bO*qKi`NiYoDxnJe;l)w(hc`}d^sf%v(-M-}lBnInCUh26y8Hy9&3RDa*$i`1!pRi}aU zue`1qonE|4pl}ys{iTHKpL4ANR| zkYR#PenShTiA(6fdu%0vB_ke!?i04d<8A%?ZhieCtlGk11Pjb}Q2GA|H%<%7@Sx5! zD4dZ&n?x8rx`EON|CbPPT?Zds$)68rLNBbm0xMVpupz~a${^1YGlOnFL_j7us~cAQ z`5h|}l>Nu(_9y6u%qS)eH?UV=%_YHnfYR!Vu#U)EgDAd(A@&u5};;em3Ix z2NMC0?84$>l>O^|(lj&X&Tw_=wOcZ2#Tv zx+UqJS-US~_oeN-mWTdEZD7p@HWYW`tnpAreHcHV$*9jkV&%k2awWnVJsEWs$~X38 z)F<%&76s;m(_bI{8jNu()ctuJ!U%~!b)Gmx{O3d3lMSjrSM<0~)~J3`qk)J21x(H* A>i_@% diff --git a/config.py b/config.py index 2a4b20c..9abe1db 100644 --- a/config.py +++ b/config.py @@ -1,6 +1,14 @@ # config.py # Core settings defining the pipeline standards and output format. 
+# --- Core Definitions --- +ALLOWED_ASSET_TYPES = ["Surface", "Model", "Decal", "Atlas", "UtilityMap"] +ALLOWED_FILE_TYPES = [ + "MAP_COL", "MAP_NRM", "MAP_METAL", "MAP_ROUGH", "MAP_AO", "MAP_DISP", + "MAP_REFL", "MAP_SSS", "MAP_FUZZ", "MAP_IDMAP", "MAP_MASK", + "MAP_IMPERFECTION", # Added for imperfection maps + "MODEL", "EXTRA", "FILE_IGNORE" +] # --- Target Output Standards --- TARGET_FILENAME_PATTERN = "{base_name}_{map_type}_{resolution}.{ext}" STANDARD_MAP_TYPES = [ diff --git a/gui/__pycache__/main_window.cpython-313.pyc b/gui/__pycache__/main_window.cpython-313.pyc index b7f40713288cd6e6549a4f502379371d5e01a38a..3fc5144e35773694ff125247c42bc5f728168ace 100644 GIT binary patch delta 24876 zcmb7s31C#!)$qHsW!7vn*|*7_Nx~##VF?5XSs(#I$YfZOz%V3}Fp$i|H;G8vI#^l@ zS_Qn;hvI@-KXIvIX@!2R6|D<)^NkFIj#Q%=r+kI!Bio5MGRs6>WgO-EmpSLG(y?^*Xd^&mSKxuUzS)t^WHqItnq3Vd^cv-G$Xb2{eob35ko^E&49^I4pFMMK8|enH1V zeql!=-^k)LD;9Mu<`=Vf?TRHGO?*?wQhq74DGplG9BEDW9@Z%O`FewbZ|!fTyVjYK z__qFKkf|+_i8Yj(Wjdj$ftbhvf#`|R#a*7MUdAwSW-b?Lpj*-2(ZP3gtl(F$Ht1KZ z>{!LG>gePR-#RfWOW>4njT6J)J+0euu~q*C@~9hKMEm zSgs;lHplL954l}~Yu&CLtGt7*5sNI8xYF%$wGO(+TU;ZqVduCjl-TSY-Qr#9y&)1q z?@-x}eWtP!(t6CIK1i~gq4lAZHRCJ2!^@qX!4Vg4bvWD}_qfBc^4RY+3kg|3Gj;Rl zwqQOL?;7y(gCY5-Z`g-qF`LbN8~pMsFgfWmG_Fzico)4z=ORn!w60Uvf%&jHeg$pU zXJxFx*X0NphDLlt&`Q6dFVV3C3=SyhE|clli~3KLq>Y|PE>`Y@*iFa&kbIDk<;T8e z+9$8wh-C*5=n-fT3?XnK$U^{w#qq-kwjyxT#`Ff~2)>R2u;_RXzIzdj0SLtnct<8i zJwEh|F|b zP7LZITW)_bsY8EjJ=+P7KB4{GW&2?7TV?l`bOJ4fQxMN0WP*R09sAGMJ@ zSziX#KeE&SDT(3Ckdo9)Qfb2J6#7oSjs_Y{YGXK!zTTKhSLErb?yQM!&r20PZy-i` zw7?*28Xzo&{xvU0=vn~f#R3C0K5AsYr}L!W=Ec>5-$df6T*wkO&m&6e7NGoR39vg| zcp;B8BBT$SS#II$TL5Q5zR9Sv#Z|>@yu+MDP3ILAWQUa9F6ZD*_kJLu+YB3-B|oI} zO^msCzL`!J)zh`btLQI_YgO25zKFh7vYxnTL1`d|ZG#$zV_?MT^En(oJoL=Fq#dl8 zCYGhg{TIaee1uk&&6iDU={L$U6k4vJgg#!DN#c(^XpzINMVFk+IQH}MLmG0B_SW5> zh}%?1>vN5-b^F|~(NI!vC2m?>-%GaATkFfoFnzZEA8OpvEOC4b{lT0Day|XaoO!A? 
zh~n>|C3B0(AC9e@+b;*?yI#sT_F}`JiahynX%itY(6XgJ0t~-j>eb=a$>V;+pP}oT z*BjXW$k3_)?a!L~B!n-~oR)HueQbG)M;1mXw%w>8Ptw&ZKJI)misT}`y@%k>2yiCw ze?jn91n(nYn=h-D)%6L+u?iJD(vtsx?q6w@YJC}M&7%wE=wMs_$I3^-bdtT3dhBRt zy0XNUWJ_LA-cjLEn>o9BWj{f8}u7Cm8tdEMPln{GA= ziB&|S>vP8D$T4^{ebIdx%jC#CTB+>Go|N+5Nak*A`W25|6WKeVU937NLVBsSH``#> zUOVmm9!R@ZxtOZ(aJu5T3Z1Om%lIAoKu9*MQnH5;I<-(wXC3C|HdwQo+RwhgQ%;pg8c8k)FT zL!aMV0J~6wqqH2E8us)hMc!;_F{oEYV(s+8h7@{ZuZg~OL{Fs9n7xAB8<}ug`fq1C zx$CkZ{Ug+Tv_MDn!ZY^~ld!psXsKd#iag#P4+4UTS{*u)feo3pI#SVBG^8t%4wL*a z8%Tez5!-^Zo<#`nybc@g+b^(n@KCfJu%n8b(&#h2xzZasJd-3fO9qYU*pgqHY12e_ z6Hh`Pu4?x5nZqxfEh{ERBGh{qYe>a5Bh?S&Llw~3Eh+hlW#Ix&*gEZrs|l0}4<{_2 zNU1e_MtX7}fsv@I&&1vlL--wPZsPhdds^s43_}=8`?Oyci^FBe8u?v)P^n0)q6| z6rSWpLFF1?RBQW}XE!ifkaBWctp=H1bnvo$M=& zr+?gNqEByv1$!;rv}GuXwsjCWOdcQe>Ic9zq#iU0!+nDs?nDj05pN`zf;L=|>7SXqzxpJ=iMI4Uk z)TPN=^68h36beg=KIY5~H*Iut_>H`HWvj{NT>1ldD!p|`M}O=7m0Z*L8+z}yW|B?c z*;YkP(8TR|q=Z&)UrdhBvF!zLzR~UP(?>_1BK>sj=;uoQpU^S7# zKI`dLZN*nNjrSI)*vU3b=Xe_m)n9`;&v8A-cziWR86r`mDDs8Wz%nr5 zkDraWYcNYX#I=vRMk5kmo0o59qD)BNyn3a>X7B1++1}b6iieQj(PXo=_BeVrbhU!4 z2+2K7-OC_?MTZO+)85m%ie=zY$_kkPy$!_6fpNFjv(&ZKdA%FDX2ztXtKvgiDXQDM z!^dxeJiG(JW&}7Viuf%6L#nO7jk)+x3gRB;UE>2=S2}%A4XcE%;p248n6;pXMwI@*9Bd#3^s)w(Yx+YM& zMzjqC)q~T}rD)Z&1M1mFi-PL8F=ea)wKZ6^WU8t=;IHbQs@fQ++89)CipgOKs4c#yxOQ2u43gSqLzsj=EHoj7 z*iXd-X4G6So2hA*+8j`uPpb>2)bj%Bc_&hW>c!JqPGg!<=LFO_2M4Ees{*-IN0Wl; z+DqDF)~_8`$odrY&;E_F>t(cU*ODdM33p!6R6bJ0{Xt(bn#X-Wv?G=B4|GkbBc<}c zl`0|dw<_&uw)}7N(?`?fAEqhc{lh%%=q&k%m1K0*u~&A@RU6`APK1<$t}PS8{7$w5`}!EN(R$ST3EaXpllBn+6Abl&Y&)lxVRe3kUZ{aN%ox2KTh!gU2C z@NZ(%g+5b*+TJd&)CGc1M@h1<2>qq#wU1B=KwSz`kxB(lx|Km(M8*|^w75qS%Ai_ z>yoX?mAdPZdvvxq{8uld`(G#%ux?*rxCmY*w%VE7^h+OXRtuFquE_$GkPBW-##=QKvYtW4C zW7wbtRjdVGo_6g?@efY$ZfLGcA|>!DkRiPl`LG7FR1LOs2s__k=Kl>tJDIuEDYcl< z*yS03#Bmqzf((-x(V{aQ!)%b0)8*nv-98`GVIFjOKqqf@4)_OL5aSt!^xSaKtFr-r z27{zF#FDLhBGOUatyLOHSvn89CLr?f^#19aUD!w1V0stY!4(G`B zkS;Ql9OK@Q+UawQd4291LMrFjn9DQBL)gtnqvzHfiS6^dJ%~&_04E 
zVTMvt0mejYTsI83#*pi@7^LIuHdb)vmIA$9X&_LQ0JGg%tP$c_v0i z*r^sWL`i|`iS^LIyM}md5Z?j7X8>wMl=%W$jaC-GQ?1(`7ve4d>GHr_##;>19W9&Zf`(IF8FN`AE){HeAwjNhYlA zVB7a=5AS?r;-W-Nrh8M#et;aBjr=&3j)aa zH3iC>PM0^Iy#93gs%X0x`%lJ;i&u-8UBSdP=hJeh(kySJS)fo_eITv=yeUmsabSg5 z(0sB}Y_^H!o}kHoE-UYRO19v*!*RIxs4TO9M4agEcLu&5k!}T8PiMezKbYgCw z*uPEe+b(901QSQkrDgrkMGcp~nJ{E$QtCgyl*lFKf6U1>>F4zc``Y)mi|NZw#f!_< ziK**@`VHswiTgVDcAnN}PwC47`ZBSi`DDkdt6x|xHus1Yzg=9{7u5Iv&X{&SA$2Mt zKag+*u9~BZ#G0jIYI9KEay71&lfADxUT}ylcG0p{T;CtmZ)B))FKbl+`4-&?tkLS5hh<^;MFNFjEOJujXl&&5C=~5{EI>O$n=1^4CaH zI{f@Ht0^9SepR7CSl6UpMdZIBN(fxWjiPD>_rH((>asD zv_U5+CoxRL=NS8^53_&$9JhXnaGf0(QL{`*YK4y;24x;-sOWUOfyn9pGT(7I{lDF*2&SvFVgASZyTA#Up839 z$V3gHc>3=zO!UwiH2UbOPqC znX%Hus(YkS8kX|VU3bRGvufq^p1YdYWY@}Vh8ebam;(z|PUmFVtXYf9trBlyb`B1L zy*?tU`OJ9gN(>_KHMao)cof)pG^k_d>bP)Lpy_1X^?HWJ#&n$Fx7LL94lkcMlJ{7@su;Y?7O)Yk;Q$HdJT!xOPa* zuufLn#-02)vSTsX#<%jI^$9bvm-oRssSR_Se&C4!7dyZu#K~53D>xP~Y(Dp}$2kIg z?%LMw_O=ZT=5Q$}ge4^A8Jkl&sV%jdn~|SG9x4V!OoJlW(rw$_V`EZlq5jxjWtayi zT;_3al>Y*jkFjjl$)#p62)f40e3nY{7GPpM-Y^RcH6ZC`sMa+@Dy00lL`0zHWK|T0 z)hsE_=#qO3{$ZEL#e+8oQV&&uR!ja2Zx@Q&>hw9s$9ev{ILvIoaSsfsTsHtK<^x_3 zqLHKH4io^kA1_{RD z2E8RH9)KwwaSph)dPh(zm=GbILn0-Qd>{XP09X54ApSk{kwbZTJCWgP07~rQ&Z~9% zGoY4fk$pS}U$wKecXSVBJP>?Iy9- zAv#^b)S=+k?NeKQfvvu2j@v=j%iw2&Y!m*hmpSlff^0?=mYX=~g?n#FD=eKVm>(#Z zKUL5gC}=%huuRnFoy)1F|M{rU|JK6QXV(vjxwEHo=LB-+1as$!BV&73|2ZKSSts=Y z^}%_ESBr&pVqI^rZIw3;aJ?G*z=aP_ulhraMs6 zJyp{asOdqrs{KH_nA;UJtvP2(pE8vPOyyIi>VT;_XsUU$WUhaI=Q(rHlzDEzJoiNW zY4fuE?dQ_Wfwao==~)kG?$#Xcc+z)t{u4Kf)nu4 zTTX5gTYAKvO+m*NabV=MV^rMa0V;UON+d?XDpcKNbjr@9XHTWq1=8!L(&q=#=bx~O z_Ce>_bl1CpNO!kRELn8oMsZQ6*f}f~Z9SXrzL>z3&byQxX3q`|9qv24N}TNzv&Vyp z6Xz05Li~aF(~0J(#L7To<&o=;Za=zFTzZ{2ixs2-1~|hX)(bTzFcVf6YUrjHO>}XtK><6sM%er~DG|PNh!|w~a2}iB)Q-tQ zhK1^Ml1{y!f*BgrQYkXK?5eX@hHQpO=(-b5=`lMydQA#;^ny+%a+o^jT#j9@w8^6f z1i)r+A zyOFj2f3`@`@o_Py{S^&Jh?~((fzl@xl|G<=(q|8YKVV-45S~5usM`ut)E&5mq$Fr< zTAL2N_3Ub!zRya5<)BMm)1o#-##DM5yGE#fj^q(dguBqn`%*dT+?H)J*yAqemcj|v 
zq`n2wN~6k1IDC__fX#^KwUmwyi+1`3pq zEuAXfV+a>4&-sd4a_#Xzhba3B#H7!BkxFf|jr8zlgG{QH5L$<=ukBRH;E*QQ3_wH`;o?lb+(2f&Qj9J-$uXwbiV2- z+yr~+{ZHSgWGb*@3y$iDe{yc8*X;97@B=P0KLM%}Nlj)3)gb8O%*^y?HG@uPD>6yI z4UO?$RCiffL8(N9XBkAUl5s|$%^Vy=uJKw07sq(B`k6)3Jm`grkwF3zqsv?hewPj> zXl|r-Ian=amQwSmYkaF0c&9R^Drzt{J3Uwkj1{~a3@DKfpsG}A_@tTTGy_7$M2349 z&CuYFC<-G;u$L?s@Q&KI7+C5I&ldaf1kV8oX}s6F_#Hej&@^-`cUbG47)Oy1#JZ3M znCda6iDXL6kj8~ta0j?>hmwcfAV6KVra}swh9NCNrY80Iz%hkm+RqsvIXt$|701nq zKSNG?Qh}WIB@>sJcI?gL=ZSw>ZO|Cc&L3Qs1&>E(+GOeNDz0(WY!aZ6sr4_{iKqeX&{mdse+fOfE zD=u0mt{WF8{EV4zLN+#eDYHN~%iclbettlmKcy}Ss7u7s`9XC<1VvI$P;H;q$5_#s z1HkN{`i}oj^ovEUL3JCLu=mC9jX$kUpHdeE)J0iXFBG)fF*u*rdZ%L3Is_%M7S9r_{v(b@AcBGwO;@FM+X}9{uTU zU^Gl*DnnyLhnc!As9#UXPcrYJ|C|23a%I$0}P zmYp&M^_}PS$xQQS{NkdCOKl@QbnkzaYqEA_aj%)0Qo9#%zba3$Co04&ZTAAXSlqa# zR30cbz>}z>?5j1}Wf*;~oHIglo_T`?-5` zY{WGRW_6Ybnga<R?J3&^D=7Qsj$1~ERkVFO8X!2rY=En$Im_EqCg&jv)>5k{~ zYf$&&D}`9l_QPz`?bV5_Qxr2%g>8SjY-y|84 z&9bm$p#*+XUMqBINP%#xn@GOU!nSmf7ZyDN>OkF zR|#XHSZuOQWmC@Za>H@p7rpz%xiVl2{_^4iKkM^kP7^9L%y2}3&I9av&Jk4D>@>q} zGQQK=Y0<{)+EGDD+CUvZ5uWX$+JO<5lXo!H!HllqPmmr`=`d`0@Pi#gK z?q?*t6wm36^pjsK1A)EqrKQ=+A$9;tL?2Y&qLBCTRs+-f(&t|)roY~;6;>Q0Mo@mz z&7wuANZ@Qrdf{d;JwPm7GD|PSpCCHn{X-afO)K*d;h8NUWU%Zy%pS+=x@tg|K`UO> z(|2FWB^^w0cvU71ovug|^0tAL@YbvG!nXM!8=gFsAna~}p<%w4Fr137p(3zpZBDpN z=`)pRFOx?Ee-lWFUoOA_)e4_STVE`aO@8fGh6`RQh+V%DZGF;`(%M5Hfg7uMHwjO0+@py_@8G111blYgY{ z0^p|8W9kM?aPHd>qo`%J47&e6`B!1iy%KXDy2`M}4oY0Q z=*=`r7o##jqo|IybJ62^de48-7`Dyl4ce$K+D;EFPDStYIGINkGu5m}@9e(CQQWGC zxCo0q$Bs)rw>By^n}zB7K#T*Tr`YmXqPbRX%a0O(zEGV;k3ObT=%MfQ!l_*HIXzjO zF6(ZFD-zi_mvphqq82MJ_=?I4V<)8@oo^ll(F80Iucy*?QnVTr;kV$NVj~ouKGle^ zhzVH5F?iK6ohpgx&WxODTz`XI8KX4^gHr5bDL}lX3~yO%yH-6c>CabpNfT3AnyHvu zTECW<+)?7GY~?s((TGwim0-glm8gg*AqlCOk--5WbrH}&7F$=#OkH5kf#EX5_Kyjq zvL@LwMd#BXn!+21q~M7oJ%A4;2;f8eK$wE_XYj=%zS`)72+Ma8wo4Ig5ed&yNw>dT z+Zsw4_YMyu_a>=}9pFj=Ct-;6eI~co4NVqoG#3^YnpdtysRWG(DBnzs!4@4YGMPy| zFK22e6&w3{8YXKtnju(CYyMz4w)+pONqt=m2F7V42KNw11yObRR_QJwo*TTR5T-x) 
zQ*CciOv!eS54>K@rJ(K&XDKxgAm2V&*A+z+zLUrV!4WTbiGlzb7V)GeW2LARr*HkK zQul8lDzjKjcG3^t%c9lqm8ioWvu3*Cy%r|$PBuYWE`8;_LM0P9=wIKfrYrwE%aCG5 z{j<+JfsPyWraxCNHElGv1A%~9KtXpa@0}RlYM!)6c8wT2K-fS5eTLJ~4a$CIm;iye zj9UIu;77{^h+-;9&YFyqvIFZ>dc5FatTVE4lV2zH2#<~Ao7 zKL)XRl5K(T^xJ>g3|r`z?`He0mf{(;r*wid7ZDK-RK6JK7czvE@;%Hz!+(GYzeaEe zrlR{tc(^2ML3~(>bx3R%e+IKM7mt-V8caZ%i*M}s_oHO@f05~sap3=gfT?epSokXp zM(qszvlx5=!Epr4yr7Y!B#mTPa<~VXxO0V|RKx!c3(S}h&}JN!YnWIQiobj@(EoWk zE6$GW$SEbwdL`c~2^?Ti*xz*Lpy0m4O}8H|6jMsXlI3D@dr;jGF*qb!gX$`(|KNvs z5iH?v-!-SBq;jo-y<#%*hxGYhq|w`t$I)4TOK21;`^EH)LH(vM>wb{jTl9eC9?PMM z!vkVQxmeyQrmha^yO2G~3FwL<&KJz*f}Z*)ht4~(*bwFWhl2WHy6~kUHAowyLA~c4 zdjE-x#J9lm5cY0JMXNvNu!VD+LjfUm|G1b_GnG&qNT@wgH?_b9|Dz>ZgZeg*6Q=aJ z0e$Y2-V)GTg8B+9mzsI7ZYrxbkX3s+wQi5@U4bN!6u(&961oh>Y{A$jSh@Ko*krG6~lM7CR!^)yAv1_N8IvLb| z?ahK&`{U1Lo2RmC1KG7lJ5Ogf@7J6&W(SOAVN1(l^8KPmEe}~9t~fFv=7ZNvuh{1n zd$)<(#)8*@JbB~k>rkY;jxdq3N#OZW30*nXY(8spzAJgh z9M1oK%cJcNwLiS#sGM1Zz&m45w7US|5V0eT^4H4XenQGR*+m0aXa&>BH9L2t=*Z3^ z+r{}`6H{}(9@Ky1D!IR&HP1^NE+_@rc z?`-)wYwdFt;nkM`br5W?T@~O8{iWnXVN4)cG7D3-W z%kFK_j@>!J-&I7P30hRBTY92Q9%xbf3h4BfY(Z=wde91k_$cq3oYN(&RFj-E6mTZS zaI4$`=2aK(1C0|%hu?dQv{smSwr!zZlWmOB{-+-WO-qkAyg9Pn$IS^#1R3w8~g^!G+ zSeRB3Z5;Gp);YOMXi}4!+2%6a2*|vHZXf8OJj?~rVztO(ntw&hg)d})=m9mU&i*^L zGKsY^x*@_Yc1GRLFFzhqlSQQFKae`9EFLR1m+^PNwi_arNqKcuwM7w)-Z`(8FR|9vyB};?DV;!PyD}$FqQ&hT4EAO(yL$s`_OU(qC4BPgr!N~ z^s^jdq%RR1-<3qnqz_q*$((N2fD2q(!~O=K3LT#S{(b93}$uGnCK^^Nb**8ie&mawCvn%1Hh}<_WLD zx>^OpDQWF{mNtVIM_1#dqS9<*u&uGDjhACZaWRBBhg3rqqoi?c!Z%bqvZ1i1kOj;h z8cG%XDI`_kQ%K>`G^EcMqfdFIw4IOSwNN_ zZ%rdVl(n#ipTgqV*o7nnxd^}^h~usJ#yFU2e4CA+7C{|=NWFQ&f=p5<*?-RogPEj= z6bXAX$ui;HOqd%0?oi+)5Z0uT)Z_cJKx~-9(6MSS$N88{VnyN@I}4g8=o2s*lG1dp z@L@J7gCR@JAse9w<2huuEOW+C{S3B@8LGh!1lDTd`8<-DhNChwa2B|&jfthgbRNk| zLN8YzT)==j@ZsA8vl-Uk62W06cLAkw6A%*e$>M?~kO|gzB{YaO8CG`wP_%1o(z?(OCJIw`N zQOM0psGt!Hl{5s~g%^rp2sR7v7n4@DwE3Hk*O!n6BKv|9t}7+^D?>Wz`yb3_C=>_# zGYA5sJgUN_W{0c139Hob-W{OH2k)Yg9E$laukA+RY$>#RSoorp4R~)Ed57dLfr1vo 
zHvnbe%Plf%bw~+cXK>vRohw3LIcXzf!b9a`f$>Sm-3d!~JCcp@!rv99%SlShA}9~_ zyYN(kTXt8s%@8FAG}6cFyP|!5q(&YcP1g{Q0DJP+fwr)*PLPWIP%FO=!7B*ZOurr9 zqB|OY3j|?8wFvuWk#uSGj0!)P1*>O=@Ww2%Jv@buud|ZfvYs~~cStu5-qXz>AP$4M z!;R>04Egg2evjY+f6zswlJ`#-f90%uSg_89dEgf|&L+QN-Wr+5SJaR@WMrT4 zRvjs{K81+lTHuIt@`Jnr3HTaBVWTlc$-BlzcFs_}s-CPQdxfvplV3pFFFXk=XTu!w zWz9**EKz{bf>8yo_(bsVc`y)BESUGOaLZg$NbVG#noGLFWRMNYk7v&#Yvsbqg(Qi@ z37Zy@T0a}yHvT(^EsbYHM?&#Ti+T;RNJl24MiBFz0T9x`6om=E<|{vi(H9YXi~!h8 zj{gJ!8@($=Gz-U&KaAi?;yxhU)<|+xhhRec+K(S?Buj}NF5MND?NOXj=fOGOxtJW) z#r5FwhLxjSL25~#Fnz z*ZF_wF;WIDFANUiqVzEW@Plk6xI7c1%vlbW6Qucl{9%a}`i#T=>B%QeDVjKolChrQ^UG6Co`yU*P`+IvO93ZfpFcKsre-fo;4K z?oU3eIKH)$Jg>;EfV6A-J9jCLH`~b9WrBY#$u@RA9lebONBc@k4avfv*OQji#E5%phi!EyIvwy7({^tl`}}E=C*uRqshQJI;;j^3 zxWEFV=L1!j0#!x)0mOS4vtDc3vssU8O zckk~C#7Q2e7an1L=|af>Feb)k5Vba+*9k|r5VLSAta;%)m>?9xYKva@-T>J{dWGCU zU@#U68wW{MRKq6h8w8%|;vnhvTau;WVX7s@+g!_d??B?AeLtk%GU6TBZj;nzJdgBb zV?3h@YQi{n0zH7GK89d~b$AdX9zyUl%(Rnm$yYHCKNa$a$m}=`@D#ou5n;m+`In+6 zsJ9C}!=zAnV3ACnMNr>7OG;BWyDg;Abz>s&wkPn;PBU{OD^qp+8+lsZh z5o`wlXUQnOdk~Bu@FIXeaJfb>hQVzJt^*Jvl~4i?3w~TUIYGYT$E^JI2zDTtK!7_Z zWRo@ojJN@SC4*;210(5gvA5CP63$g`*9d&&sRu77UBGJb4CQ}=x!7T`1hL?W5mGW{ zg|SAsd3M8xsxWP4Z&domM&Jg$fMuDCa*eGJ#u!gfHu3r0SpH@NxX$@KLi!Gpo5K2d zE5?o^xDCPW!ipWFP#5i&FBvZ0r^tY5-M<6mf|}#6?I3^Ck{;pNZ-T(PRe0~4q)`4> z1t;itlU)6sh~j$yI{9v4&Tg_$g`2}J^o z;@WsN=bnXE{y2gYg7Y@wkv&a>z-`2q`csUHr4EEAi-ZNcfq}8z4yVtLgzwxA6OWOD zEz7T>1dk3KyjwUA6!oveelm7%#(P|c-bxPXdLaJ{5|vS#k+&bphU6RIx}B`kE4h#( zMfv9tT)}v-9r)*%@RcOZ)z8eaPy$>j2>XDugOh(=p!>+|EOyCHgH&b?EBq!z$@b(9 z;q84SMPf^TDSWmMM4B6pXYMBt$LZKKmFOj$63BWsk9a{SISAs>Oh72BzB zM+zC>a~NGKn|fLuT}?g9rQ?XrF4j$)N&F!M?C@c0m5FYwuQT(7%{`nn(h0*B@nIx& zEQxAhPKCt`>`ll33=KHOgPDez)7fN#<0M9~`MJ<_H_0OR93Q!xyrGg&A}l#fW*gX= z$OvoQg&Pl(RR6WM<1rjcw$O1D`Iiv9jDXQ$W(mC!9dfou|L-FhmQ>ju4LdV@6caIK zu`yyjh&>~b0?!N92T7^y$I9dD9weVBjqLj#5r3{Zh`}vve3VqFKEU^n=`+VNg?k<) z*^Rgvg*0dkae#-%#AVF*y_lA9mBJ14pQw zb>K(h2phHdCgH{>NK0i0HWvk$kkKJspLJlaQBYPn;aW_TYN%om{{BQnyYP^Z_9Q8m 
z{ZT0_dXhXO2^k-dqrz`~K*}nQVyBKG;u8pFW?&JJ?XoVNmkIe#ky)~jl|t831g^3P zAP{F9f95GtEz|rFa!<})=^ds`KTa29&yb$JrvNkDoMja-w2aGX!saWPk0SqP9GSl$ zcn<;MU@JJJ2ey1AI8%;v0=qh+y^2iEV&R!*NO4>nHu!Cs@cU;-iR?3_p!^{@F7+Ih)6p<}O5_at`1PVg4ovL<WFadNz;Af$u@BdaWd~Ss+Z;79pgpf;6iN4oEr#(i{+Qt7IXN%vE^B6{KjzmO);fk=N%8U(dQV>I$~kGdtYr8br{Ean zb5BYp!|=HUUZ23Z_&M zanV||izK^(>Y;P$;puqhj3@cSF?YM-Iq*%Js6}pNP`wKDmZpq@rm2jIKt{z>#@s;0 z+|wEJ&l(%{G&5sXPC%bCrLPO<>yFya>gW66nzU&F`f*{+$&Eqvq8LBC(tx`32sxuJ zXZkHmwItG7p#x04C}|txFv#?*OYn_ZG})P%gu&;KtUp7*)E`V|iCnS7>b>g6*H;i+ zK){$f#?PI{;0p*|L~s%TJ3rXcVw(tC!*^o>Q`wzEa#v%WN(AhR47$uR(*}P4^HpH* zE_`DgcSr`4K(Yb9g@qYIjswllM}YpdmgRgm=C>i}L10IK?r%Ih8yFA4lvd1Kauy3V zV=CNygkuK5%Loo3co4yE%yKJ&CkWQR2Vd0)S`f4%Xhwiy0FTxbz65~`o2WvdMeuJ- zHQ`$(f-fN$Qf%>hM|h_Ao{P!pSdQsKYcN=k0E|Y!t|QnEz|Y6y>x~H5STJ?f!x&_H z&tZIHC*dxvg&q8iV`73zHm3d*0pqt&^~C=Y!Osyqj{v7O{}6)nh~RAm?<4pE!RLN# z(t1b^8N%Nk1;@t`hXb^Sk3g&slDQ?4V)I*w1i}57@HiCz^k(itPT`^Tq9XG`ZqZ@e zk)osW)46q`BI`m{{^6oS3q?ixCyIH>iiCAn$(jsh+4P(_%F3gImpJ&D z9wQmb{G&yeIQW^KQ=%+5;hpB-N8B`e34f=%h39@lZZFP8+(o;4r)7!CiW5a2bMS-M z6aR@3lK%Qz!c)H`8EQ3<7C(1H6Q48b`*5_`10RI>Z4F!*AM4f#=GRFYGdjZeAa8mk z1@BpD)B6mOtZ<762M!(&TvNaN%9rrK>m)&VaT6$H@D`ZhzZX2^;OdrmM+%%B8d<%J z-g{TBa54){qgUdMa`3McuEX!>g0~>O@hu(Q{RY#;D1_u@IH%xLGRl<}mGHbs%I8mJ z%$haJ-0gCLabggia~#7H?lOxRVneDn@O|-mCgTcgo%rvs9%x@D2ztk$t%587r|!3e zu>i>dmB8%*l3$8vekd-CgwFs%wBTm_G(^K@tn^GtE zgCu2bJ-*Mu+^L9l#>pwl56;Cz6`Y(y?qObwt<<%T;siBA#@Xb^B3{6AQez?zUKi>!)Jxbo1FGd zPV*+Gdy|Wo;P1Kg-*Y+VguQ3THuWd*9I1PoTPS>ahO8!k61vZl>&myJf}S51F#cX# zIm<{grpq~ne$V7-dB*P)x^K6GC0UXA>9l>e@Q)%eanl1+9u*-bX@*XBt!B%3#cK!A|fysN$>h@EWo1dq4o!Om7hzG@ z6?RoeWV@ErS`@=7TED?;%ux(SR}+k+P;k0o9lvoWNl>bXsj)GEe<+W{L_s7?M@)Mx zclejrdKry~4qJOESK402m9>|1h=}fiuRS<%Jv$rhWTkb zR<+l1we739)$MDzHOw!nqpp1|x0by}cdTn)&#iB-=jxeT*Ri3!fop(#MnD!D16f?F z=(~+&brIm2hnuhJ-!j|^kz0c0V}+vWN*#@BG!sfUcEx)Yi&3hTGG&V*6KhM~(ca#{ zb+mVKoh(;|j;{7@uDiX5>tXJgj^6f-+(z~u+tJr<=j`mAbo6rr!!7o>5*<|Q*5R%8 zc=k3ltm=yk&pYke5D~Yu4f?)Euc?mbjcbT8GED(3)%Sh+@PC-q@bVPPwAr4pN@-Cf 
zD{>U}#8ibn2|}I@Co8`XS5~X#H*SYLIaR?q>?r_uvUsTwFKyUZtqNd_+S4J-IUH@z zs2x-&99l@&nWLx~L72RGWtfA$jBQ<`_k#GFZjXUg&FDaN7yj z0e{`_w-NsA!&|sc`0L41fNOKZ=I(FFzq`mTMV%^3v5QzUk4LN1m4kh=6Rsxb_{Mq=QwOePfmY&=Ln^J~ zF3!P??v{P&aD+W(YSiWNjJwBL9d74@>-f=#YC<*~|5fDuB)tiW>`&M@(>WCgd9k&(9J_e8BBq>jEEy=rAM;ze+-(J9X9S511xJXn_&i-l{2U#<&-O^e95CdX&G z=sKN?Y@lD&b?Z7250m4%=s$I7DRz8qN5Cko!#4yQXobEYf(5KQzFi*^L0ai#T)yTy z2pc-i$K6jz`|&dKU8>3vjN(F|M-YWzH-a$)SqPwE72G(2JqY&FOG&j3H@;2*uGp6Qbm?cX>PPmo7U5;5N>Ynd+G-VDo4Vc$eKO9j0~l}D12o$W02Ayn02A%8 z0F&%S22B8yhf@}MtUWF@)O)$qMIrHFA!!&goIac$9Qt;1Sg1FH1w-$Uvu95jj;K=N zpxdJk4h=4I5%Hw3=vgdwLUA$B#iKa^~Bfcy&PS`%=sWUe}U4Uk`JUj=ZTy%ykl z`)YvoOKM|HSZ!=r5>^)$)*#n-pccIf``WO;M*BJ--NaCuLlwP#xEbL44DXh)@=q|^&d-J&4Iko?-2cR?3XU--n zdn0IF#?5r9EmLV%&^I#D`qcx9iRj>g0%ox zsX^ZXdW|)a{_FX8q@vs7;`!diC^=&ztEZ79jFfJ^y+gNus~7nCZc-F)RSy z`LdlPhW~XPx_iC!b&;SOBUv3z7S+}joI7z=mE*&4@a@zHi&Y1rC3rm*ViKP)-C zWrC!5XQnK;R$6wAPq-}8oXa^rIx{}y_RJM->~!pOO<1n8?3!}Vlz7JHT(y?+71IY+ zTVSrA;%Y5poa@@vmh1cH(l@fDb`#RM&ax9$Iu``Hr`#?G_G|iF4(GLR-3^oDxCQ1f zYqekFnVoiVxTw*(?8NWmXB&4s*nvjv-;%&zg4BW(ke42vlf`Uv9p92 zZ@}TWK+1A#d09h3ek~x%D9Eb$Q-Ubd6OK{W?x_i=f{_WRKr%rc&a!j^(w(_>!e!s> za?SXo!MSPNGY+dH7ft_?JxIoAPmV~MX?AX61Y6geXkBh{rTWgV0Z=uN#i~ZB zs#mP)^=UVTg;$B%DxrGBr`-W1i84sqEILzRTn^chbLr;|!k#YLR z()YB;2w5Y!X8P;0T5^D9l-J1Z@dW?I0g`Bh0Rp_{$h5=lni!cK|I_h9<-;mS<jn)h2P~NgNakYQket97`q$F5rV%V zVB8$b0ZTRlW3aT<9I~d|-|6$sHaP>ofD9Co{^NNqUk%io@-yl5PkFnKceNc;Ck_n& zK6{3+gR-Y@6Hb401N^~Es)L!dBoIRH?n)(F*|g-5^vBXczb|h*1`Z z0Siyx4mO*PvS@?7F$CSq$S%6AC!Kz;$4rk|buhomSd(ndbqD5L=0~Udi^z1qXDia; zFRUVF76qqs4^HQX+OPy?SpJ{r1wqO{%sB5JxLWj?a8Gg_=Fb>!H-}HF1Jt-NLD}bJ zv+%*MXf;s-QGJJ49&co&`Ye5;H(qyhF!8=ynD1>2|5Q!1YBBch|10b}|5w;||F5v` zxhnR(!Frd zH;HyDx^`xz$X*8>kCCJ=T{@zcZB4J9HvQ(4G>c;*K7IQ7=COAw7;;H}t)r zuRS%03*&KH&=&^7_gQm1vNfKbd)`2F^vS*?@_i^}20wKTiK0CNQ7Zj_9=y$TYCxw- zuO{^R>UhA`0iq#6FhJBvGZqnrQGS|b7bXFE^R`U-r~YL6(_#6yFhsg` zKu5*)B!k5s6_lpjG1RmJz%QqVLCu zMz?LwhLXUHLw?RmiT(BVVe^8E{CIIFVVMmJXLU_pq7M&SiNNaQqrpUAr%hWj@{M6_ 
z6_VYctx!T|59H&OP+P@5ZXib1RS`cicc+s3L&tA9_8A1oA_iJ)s0DC)n@ZCdkfNR@~pP%0R|dcK?=5 zCRa&-zP8mATM*17t5`Y4aU8mIc)JcKXXZtrKgQUXh2hGD^o>FA57sy#{+Acke7|;Xkkz!8PemEIW$6w?jS`0zNDb zTemKSW&aOh?{9~_zk~1eSbHKAbKCYaDt-3@w0fk0-H)V4%8H|L`q*e~&KC4mO-zmX)#L76Q(QN?HNfpp zaoo3%#4#G-95Qnc<2%Y-+#?7c1wik1=6aDe<4`!|z6QXr-Q?nC#z!3!{-g!bfJq9T z>51_f(By%>?^^q~)3w63aYohwdN?DL#~;5CVTNK6xw##f zlm&qUfZrR_J%#$jer|kh%*A=QFJsVQ3`!lDa)-$$<1?l;A&~hvFTOs2)S?@DI!EmN_5B0(C7hi<9)o)O zni|_0`kNX@>h1QXe!mVq8=Ly;+dA!j6FPeO+FIJW>jAaCztwL>ulmNuw*Iyr^lWbH zY_c;Z(T|nVz!Z3`<*Zg@&-|lIn6C7@wS-Q0fz)N2mBo{<9?@yIH8}f ze1lZpEtYpn<=e#aZ9eVxFa#?|R@S3=Qc;6g)Zo)Lh6SNCC23`8jkMqX;)p)E8-F*Q=w=oluWBc)2eyX>RTetYhxsBuBgpD(kSJv z5c5_@dG%sm{f|~l4O_&9EmFfCu>oT5J$ofyyO`JReX&F8+$nbMlsY|Pr^lzAxeDS2 zF>k|0?AiaFO_wPWnEI)uvj+i({5vxD@(*! z?Y`<}sd`YX9+av_#p+R?))|Jd654;|Ney(YMTA2V?s*i#Wm?#fNBgCcbz;dnsia*j zX@7CU%bh>zlsd=6&M}{M_XV>eDpnTj?_VLMm56C2N1c*wy=Yr6**ZmA=X_e%X{eEn z7qknA-YYQsL|fl{nq3&!yC(5%jrhwkh|hX%4r3{k~Ym@8jx=_SY%iO4^- zzogVB?ypvTP^|&S2X)a0ic}v)CLhRGeVDJo_mb!Xt5qL@+`jtwBiu@@Asg2LjnlPr zc8vQe{oUMGbn+4eQv#Pu@4IeI@(nnfpw_2BRx`E1tPa~7&GfD7nxi&iAlb#$(wff% z)BwlnzRy@l?{WS!r7GB<-wgn+}ok%gSLoUz~w2XTW?IGqX!e% z*377q_Wsj+{GNj=N!$j&1x>^KkVozd2);;PJyclo0K8f?98%@Jgx(JW@T+FI3BP9A zF|*s_!CepwK9Ek?jd|pDI&kA5C6Ipj`)1`w5p?6DIT23e(NxIZWnrG!V^_MBr3$xd zfW#|WNNbNm;Z_et(y7Bq^iq8qzurco3>Op%kU>-%i92FIL+cyOwCs=;gR~J&P}70m z8#jggqUb*lN6?*(W?oo9(sdE5AsNT8LNU0WZuFQVS3|}fl?s~mr)2&C8;Peo{%jyB z`m6z1%0C~DE@&>~WQ5OrFDWzXFwZtISOQ|DL;zglLkpVQ~Ae^g<&{r z83wf4x_cW|Cd*Xi4hZ|h?4LEIo4XslIP98M)tuTkH$Dvz_Fv~zwv}_-)AZ2&`AxPq z_ddtOxYIK3o}Qht6uPHoEY5MxGR0XyfjkYogv)8cE!**ZE^DoYi-G<&7rQ7@t%d&l z{w>~Sf5Obv*cj-SgL+yA<6+)HMsv_DW_}30cw0F>hUqB0cy56@9OR; z^b99xvvdQs8JW%f#MmWa`INaoiz7%4wh!bi2h+4!GyIXe9UjNb49DGtZ43?VRdAsA zhkbC@fpO1_hx-p`DDD;j{s_ndZjbGh7aO;3UpHhQ3kbo`&5nWXYM1J@8&5bJVD_AAx3Z_|+4T5ALTJ?EprL zkqZ&*9o8s$`u!^9A6Ra`2QlJ8J#IwKvdiO-4b=09+_#+m_~2eeKn6e&!4n0k;AG7n 
znFW26W;~dsyBC*-KLa?ZphFMVMf&sC2*#YV^a~GWM+nV>^A&^inFqJGjS5beYT#w3=cS5EU}o>bc}*7i%a+r`@LQtb}0cE>rBnNK;KBBbr`nH*Rxn9n?%Id96AOeLbJyAwd4ci6V zh~RMfOuNqK6i7L%#GF-9PLr6^G@sKV=(BzmKa}%}Wwqyw@qE76Ep7xvQg0%w;Z5U>pL)32~c1eH0V ziQfdc{6!nVl(ZUWFluU~T@}Q|`F#lGE%pSkVQn-`M)ohPC&8G;B{BQ?XU9n*o!%DD zm+dBceH{sR|smz-L($Lu$TkFUeS@Us%D%z*KCgJXA9w4I(gOD*8s2*=RSwzZf*w z8c^9|hpMQ%IGvU~6CHyb5t@a$)dT9jm7vdKJITrcH6@=<=U==7R_{HZk5}7`{ZVL& zmx8VmyLqTSlvrdi8^CEK^hW_qtPJh8TcA;6w~$1AQVG z1RB-s30ZuwTIvVt1&oj4U%CSn;Lw0{+hcmURLgTdrX_QMu}WF!bC_Y)>!|VZV1?xW zKUPS^q6*1Y46O;)K%~Oa0$oyLH?jT{H=u!Ts&+?ULzEi_BEl1v^Oa6}V)WHu&j+#U zHDiI_U1VtO6*S}jS2Vp>(hS-NLXNkFQ3qY8MUkawQFc!SF{}mMbLI9K2M64rtjR03 z>;zVG!IZ!F)39y!hO{n;+ z1^zB*n*eO)6^t;yY_8Dj&s*I=b5o_iI~f{0koa8$mSG%n8>m@ z(=sq8;ey4%${Y>05dNa65w0Karvrht5wNsrcvA;4-}5K4zF*O=?pOA!`bj?sfgZCG z!o+Hlz~6O~Jb$ByMF$htF+ZEn!A%{F;ZB}(5;K42O<)xS^S#$jRz-u^l(O5Z@oTUa zJboRt@d%V1G>@F$>;~R_jGLOBmc3xte`dGeJPE2?Zrm~93HOcM=i+uwd0fCc@=ySO z3=&$XXzm{HYlamBBSR4 zXW`io#6CQzxL++Lmx{@yQnGign7sC^IqOK)G21EY>7nzfnNn({m|A((k|$Z}MN7S8 z=@2a)^Oml&))L9uDq345YnN#4nz!~`>2p5aBBieo(^p98>&5i-^XVHdm=r1NE-Io@ z;+R3}TjK z^P4pwv(CD>6m)1n${3#pRnvFBo$p14&mY+XHhZ`ig8L#&aA8{Ykx?+R;-;Waxn#{b zw;jp;5|Osy8{->W@U0ceL_rq-@f5lu_eI3{JOFI(&H*XbFbzW=N~?wl@N)a9KunC{ws z>-Kp=hGejc2CGoiFKpWCGYp+I#8cr+;_-do`3cEl>ey^in|&l7jj4jBQIa-a)aD;? 
z(g#nZ)6x@}-Xl#uZBZx~ET{{tE?lOnzA=nwCkXQqfc@nQBE- zt_491w54 zwN*$c_vtH^w!_iY-|?JWJ-@O=SkZbqO0f0`{R3z8o8B}S@7i+f79nY~&oFpp(`*%n zclZnr2#^c~qM_i(jL%TaTv?(aOEP2@iH4%1)jmTx^UM?tnKGiVY}FY@qS~NS{8NL|);)tR}fi$HVmek(7{Bb#p+fGJxK+3}h+;{q>O~Ty~I) zGtkWiOd%T2AN(wdvGLLeaq4=Cs>C2d3hN#bw^!cJb|H#xC{R$i|9k+G}$t^y8 z>y_l|PR^Xss~S0GQ>e^S@<-GpN9kA--Z!Z6D=SKgT^YuitA5u7Zy9TUWQrZh5K*ETqRM9(rnaxz_tb_3{Tmu{o%E_#|O*~c> z8@V_|O1@fY(k}M1M$x9HR@&zBpy81h90eYdvp{#6by@cBmd$^IYp2|2KuJq)cxp9$ z`l+nVi`sfYAGfexF0?<>#?4`$koU1!LQL<*T?AiGSR;?n#w9m_G8{RDd^8$$zF9%7k77+kWwb;%0*rIIhg7OZygkp*PQH^>NbmYo25F3 zSm&6p8x__#g+$jG!>$V|g}ws%(;GiXQEp13=P&1>v5nGA=*F8Ybo~#l`N~+>jTs=& z4b(%R1&|-;Mj>?r2woBVOE-~3^MYTfYqhh93h3w;=tK-q@#zHK`x%&)pjLE5jvl=b zoxcBeV=x28g@};@N=S_VmW^a%Z>8%_Mey(2fXX;bZmj=;K^ut8vG=4a?F4h5ybxJi z8K!c`@k&*ba_@b|0O~? z|BXD7DdT1>;KnjsV`WGa0$LUYy_lFlYdcU>h>BG&=#yHMjzdsw*%YnpE+v}j=Rngj zQv;@?n}|8a9v2+HAsT^jN{pWv{K37S7_7VR6N67bXI5j{v?MEwZr#TO9C}SnVrC!c zpq4&tBWenCUUDG=8C3SIK>dv&KCA?4TE0Jl-kk}St1HknOIsVD#b>92E>78Lrl)G6 z)IA9x#fvp0oemw1Wt9SFEO14GewVsnLK*6JZJ2i72GIDVserGh|9l}4B`#Dc8SRNf z5G+3o3Cu9f(3DA_&`4g$T8||>R)jqT1H-yPSl>zw?^|QE7upUwU&=!Jr-dUdu9QHl zhS$lWN|Rgl01?ah>HslXw}%Teu=GxqcFw}2AOh2LuU;&H7jz| z(BhW58pO4{xV3VgcWt)P{R}1r;ZOJ^B zja|(Wt_TD!5B8bm2es_sec5c!w&yIQ3VWxh90d&{t$3WxMoF3hO}O$K^;QF#zAZRF z>9L+@!t{|~p$8);$DYd+;t>N8q0x5~vlmRg&%uZo5O_q7#{7`s~BJBBWbP1pt zSz|8<<}E}JbbX5!pJi8Iz$65va5C-f7)uap5GM*a*aCA-R@i1Xlvfj?jEZ?p-*`Yz z9BDz-ygM>%^SSrRvXDZY@~>W!6>>@B!n5s&@I^CZ>m5oTHZ=khRjSchc9pcOi_!dorXZes|g zFmaXzTLgNFqHh9K?HW+6uVy_r33qG?z-fg7!m=Pg!BdFtsHL4{dHKS$@oDgr!ts}M zJb5inz+zT}4-l41?DKPQ_5zO$GA4kq&ML>zV4O<6w}2#tdoxm1OGt6p@k&Cry1^oX zCCYyegv`7xSB#3V%vFa|?CA|GcL6>o39}+|VJpIylJM4uu&=~X9kxQTwv}VmgvT0F zfMPm1m`6~hg9_ZOTAX6jLW~`|7%%V!iuCj_igf{srcsh1 z7qVf2pdJZUMl7^(7`Y2Uav=@*s%}3S`bcG~+Vv6r+s{^M7{^SjUbN;GY_s6g=mK@< zc0);-)75T}-9bHn$wJ)G+1VH9j^A1_EQ2js0F!X3N@XP(9n*x_UdCWGjvS=DBLb zw+9S7K)s8H8{r^3m|1}BsRh(smZ@EEq;b~m^f1je8eL4-WJNuwD`h(iP`5F2SDVGY zdul(XIu{c#S^-6VDQNNo+X1j~ur$UJWj~aFZWGhJmcl@$x*$O)S81RQ7nN*L!DQ3l 
zrbfAFbM^l>1p`6Q<%YcgXFL?bwqPgD#i6|O#?LG1^h-rLw6l>nBHHM|m(%E}mkLy% zm8O4ssZnvW6hA6_o2v^Bt<3)=UDsUO3)669S3u(M!xJhr*4Z5BH5h1BKPck;HOJxw4870Dqz zvL&F9Kz(vP-74hNB7V*`S<8&7^*}7>l4EYigat2SfLz$-)^D@OdS=vrFhwc-yRd_l z1R4tlfoyssHr#LL&Q3YW;3U%;wkx|G{7^a21dlLkAQai}(z ztToQX5N1*z8gYk@q%f=UH5h9pf*J(12#Ou5dq@b{x1SfMGnx9H>p8Qv~lh1 zYML8IfC_EyZ3Gem)KU9QVZDpl<$FBQKpcmT{K&c}l=is~0Bl8RKjIYN98}QM`ETce z1#4Ep5vNexFrV2dWlo(oOKqcK+o;qwDYi|{w@nG!G+H>H8M{>&atm9h1kNrJ|o-UtH%ike5W`rHH!tAwzY0js;E~H?Vv_+y8)VXVY+I2uC%PWk{3A?Tn zuDf0^-Qd%DL7jXRbL`i3nIYZm2A{rxX?SIYSf!}1JO&4Do5iYTscNHGwNbEd_UQ+i zHtcFBTg57-l+`;B#T(G8EhaN4Rt*YUxBK)XA4_IPtQr!wIehw^AB)l_R)Lau(5K&W zJ~m#8%@$*`rPvZNwq!oG6s><6#fnC+RM9V1^v}okGmW=whF&9@YEGJ^RUP804r$ep zxC&C*=`)RZ^~reEK!e&S=5p6*4NM*h(?B@}yd->4pE0k42V9LRtHZm8WNgjlrN?{kaol_BwLS~kB%Q55^6?; z1gB5$b%Fj1^74tOV?x4ipMLz`v^rMw$F_ys>Q^?GH>~r>tv=$@@Az0UpSsnz`1D&p zzSS2R-HT>nAK&VZGp3!`m}!U8gbXwz!^Vu4^vgv3vZHFLuvRRrl?t20!loZxC$(%7 zTee9pZn4Gf(@#OYF2(B>3%h;2LsIW9v3Hl$yI<_x@Ac^qTm`dPENp&JC$(-DTenNC zQ)26sPd|MXyiT#O^ToYVw^QtPO5L+!_pDF9?<#mbV&T$m3IxHn-;1R|MfUysq^xo= z3y#3c14F9l6)Sq@vtT&)`}6~Wa$g;DlUT87J`2t=Z1w4fJ`r<^Sg~b33&#I8pME=H zdSMeNY-B)>Tzw&$?PwK9u?4utv_V|HL0aAg|Iw08cUX77<16a_ih3mKQQgr7A*)O% z+aaVoe8!z;jfv-CEwB%^)W}XKVaF_t+W}*#6<;(nnnBgNN=@pV61b~;b!bxIpt;s)2* zgcK>EQcS3n64r_dYfsMj5*nFbv6xUSC9D(^@VG|8+Ou9`;$I>Zb-hH$ESEA@h?y&V znKi<|mRov$6`vEz*ft-Q1*{A|UB<9to&vx5WzN|v7Hh0E}@us zE;aN187XU}n6+{~wdR(NU&UvJVs-h9-B-&-BJO%zIq?YAe&4{Lu*Eq);1c?G3A^@P zP$&+N7PKkdPg?P()vIj7-^6z1Ww71oP{Ks8cr$i+{i#x+zE9}eBv=C-jt%pCDK=k- z&BsGe_3&RPs(&-j_GqVI%noKZz1?T*gqbeC^s3%4$iafsdw>0d&M%LDar}Y3M{|V? zn_%k_QoDV|9#+l*(O4kcZTgJ10NpDh^a-hUpRpfOl1W~HU>8#ReZ~Ps5p6&vW4UN7 zKein9PkjZ(ogYWHS4iD>#@NT`(tk*4mg|Mo8+=AD&{D%^_N`DKDJGn{vzx! 
z-KQ7=)NO(Pem;g@&N^tWN#2n-0nqsbkvv-7kxr-2>r;Eit}L1p>;i`Xr6z-ac|XxvhL#5l z54$5Fi_~C6I9Bjq-U-|~oJ8iovJ>_SZrN_;U!Eqql%a}XydW-Y0KnlJym}}>-J%?= zfvQV^%f$lEP$j!WA}#0!MFwcj!Hg8lkjxl|71h$2h_y%8S>x#otIW!wHT17gm`g=w z)Uwi_-j_h_ooJer$k%TPR4{rTIn6xb`XM~%i$1@V(dg$&GMW1psAY`Cnjjp(V>Q#4 zp?&arq!;gt4@JZ*1S|Tj=fhe6BRsWVZrtzy(hcgkj6n5|d^JWnR6(a+O+XK*Y`9oS z7tc1*V4FIB!6t{EdNtKxo2w7=`S8_c-N7BjfEZ?D)-26)RRPPg4Gyr?hm#G{SGAAZ zCCgMGaiNtQifsS=Y99ULt4-cQ4z?9!vlk|5$tFH}JVe?*B}-d=BM@b_cq6iPzf(4I zi4WQO2SRr`IDb6av5bIRH#6%AqWj|lm|6d<=LNA@m z^ZM1-%#P3ab)X{vtw<*tiutucEPw6@Gu;Za#0uP91pd2RaLEx|Hz>=9Odevg9+MH7 z1rW0Z>Oe_^2~^iiPxMCuYFz3GEmCX>j=XV*_D`o=c#Q zpKGD9ujj>Gu~Yj+y7IM*m}JoIc{YI%bWrhoT{?BXwr+hG^H(M6t0eszQO|Zf2gEvg z!#1=Vzf#n%l=SOF{kl_XY5it#JxU(*(rfFzVMG=qZ4%dSlGYE2>xX>$;g7{aQquZS zas8-I?_^lAJg_U-(s$HtuBvx(m={GOJ+8d5GELIKs^xIH^LerVVx-D+CBOHYj2vtk`1e?=ma(xg%9j{%J9aa*o zE=k`g>Kjk>OHD4Z3B-+EXX*TFBmci^#q#fJh1UNng|t)Strc1+;IIjZK#B#GA4A+rn(e zKE)tg58e3vEFgJM@upUHSJbUhLL8o-K5GiUBkPPd3%2H$lrHyEl#V|BtBO^N^S>}! zm*zinY5uA4^;}&uBt*5=C^~VKQb^gj6z`OLDdJg)cb!Y3#JIl zpe2Ao)*$1qJD4I+@xKx@8dOS$X}rTwA}TfX$+xrlgJ5PrkG-wqr7n^stD*z@`$#Af z_2OX!ybEb8G8h<(vPRD}^iECBg1wW=1B(f}td1RWW0$fqrDx!jUHCD$pb{`pTpRaS zoIpQ9@Hec{s%@5%lC_p5H_X(aZ1jG@42i~9qb@jV=tS)#tw#%) zV!Py%6x&`|y^NZ?K#aMfZI(49C6=ZG<7~WqQU<%NT6r=22bM1z0ot5$OxeiPa}Pl4 za^D3%Ja~lfcx?-(1|VNc8xy>jHU+%=1vKk7DH)5_kj>C9J@XLrpaNLigmjuQZ_VEA z8=nv+XXeLeh21D6>?chq7Q{89SP<2se3M$|Gt#yRrtLoMNSJPm-53Dlt{8dWNaTHe z+EILu+Af$z&S-bQ@ay8aSHzTdO?S^iXef=(kyL_1ks7y#Z4D=(q2Y<@D{}En5$V0P5z{#TGk* z&2|0F%uF6qUTCS`KpI!Gl4B!!`|nrFiNs(c-~D}V`8C)Yc=d)nk7;k!3h`@CdWE$; zr)PwIhhX0+?A#@cd4ydvf@#*L-A7aZP$Qp12h*stW;*eQFO`Ri8SE1^*!IzAGYs1w zV9&ew#$uzA*1nYkH{)%4Yc*Ub{Drr^t!!N2Pl~Ysc?cfD>U<0V8vZU=y>ngY>_N~A zAed+az3ZJ^`54@9=*f5T$o2HKcUofQYY$KfdUT~vTS>FtO`)CdLd72c*}Ln>icTcM z7^B5hzpj+E!;z!%VbIXIe9VuX*1uOs4%0pFZ38-|-z!&onXZ|2fNvnhEr<%gm*6l4mL20ewiR- zNrCmls)w7T2_F`c@6oj%Hp%x}-*o&|xa2`;AoTgaW@mE*G7rY@u9-G4N#|mbba*m1 z(b$hNqi;t{wlJC2jWs7I!ZK2>}}b6yHK~7 
zxtFhqB)PUzNHek#jxablCzphk?gy`6X*8T`dg9tpIry6*;Z)DBc_ETK2ZeY#Duw@s zmi((i#B8vdVRgi6>+AvrWR()bRnru=v z50!!!dqBYu8MLn_6GY;o} zY#=5ug*a;~IOkTG0DjMEo59zYaPN^k=p~$o&DmRoEzM1R4Yp2v`SP(kZbQ za^FV4n=g%hV^$ObYDe26|C z?QW|iUA3sIernmNJfU@1s@*2mE=X~`KK(`}b;)NO*v1a3ufrrUo2bXr6#DA(c!w_I z{!b(t$uV8ze&{Ehf$%?1Bb8(yPtr*-d78JSlfNq?316H+GQ10QYk@-gRj^uIZLEY< zFsol|2)`DN`Qg;%Vb#sywK4uEmwOf#8Q^yP8hK(wR-JPpU^85(9pE63XPiHsL1OA~ z-SVsVOpUu`p5k__?Z!p5^@h;p9zJ$pP$VnWyO9q z9*grw;Uybv`tV1?xgzLzctte#CCto206YnhFy66cR>5H6f1gR3m2sLArCH<|HR<8s z%Oxr3j?N>WQ7XaEg_))3(R`KngW)bS&gP%UBRW2H6lfS^3r95SjhDl2E+FYKEu`Bh zfBXULXY~AzO(dQFX#ptDp88L-DrSK2hq_3j)*j1lgHcmQM*>yC9z<_H(P)fzQvg9} zkK>1zkp^Ppk1Qi;s0gISTO%T4W{_58{u;n&M=FFUM>YwX59^q6S#k| zZaoSo7xC)8AaB%z2_MEjc^ka>wV0<7V6-{hq+i4o@wXR|4Uz4@S9nsD{Fx$xH_h0V zlZQZc(-73mB(FZ%D6QEluGuQBaf)l4^J{hqtH*>y-24pTup7@h#p+I}dXreaX})?; zsM;bVZat$P`XE6OSMv+g%D-KPZn&ICtDDOd&#p~s&Q$#{p-~HtA7@54mqh-!kiD0g z+apvjsOvLY3sk2I4B$9jSs&F-R6ix~;T-;*Vp64iMT4gh@=i3Dkar0=jAsz2dtVg4 zy_6in^FFjXD~W$L-DfIkKIGy=TShPxBN zeE|HV7-I0~yq~{j1Nl$y=kfiE2p&N21q65-DWHW`6YfFqwWe^4Q(|Mv!xn-eORCob4`SO*FMSTO4w zNe$so%r%mCqRD=KtcR46+j+i+=Q;A+;L1XH1~5q24cZsbQ}MAFG)yc6F-~jmy97lAF~m}kRP9@ z+(^EwEy8X$XRtf=xh(Y_i){AxEAVUPSYLb{UYR)}cb<>iO5DmHDES+=5_{rrFd)2^ zkWL=|P7g`p)kDD3yvElIL5F13!WOuXs}b%^B=uST4y;+^sbsn8n^=qA0^rx-ZC-(U zpg5*6U@IxB=7SK|ui6543BWxQvi=QgmisM&tFgzw!+go8xxV;o zon(;pC+-%@^FrFm5iYlNJ1#|^i8ovRLoTCO@G>dk4Y02$l?`HxQ3CIperK zxdj7?A-0&{62h^0HI22=6^svd1A%{ik}OlEMx4;P$wwMJ_Z^JYhhQ(CFhfe0KZftP zQgN>U@FxW>AR7tXK`{ciFOR_jjS@3|Xodv7DDqAIOEV->SsHQT2Q#D|Kk)#28z++Y zkq-%3&6^I8y!dt4z&eONB7Z@J;~MzZ1EjII6ElWd0Dt_5d_5Z`G6MPpj{~kr4%H8; z^Z1hoNI;eM9sFoUzOprf*Ir8=35e%Ex|X~`e#rB4q_Fr2EW{6y_*W4u^tV>1_C?~k zO2z+ij+7_|Bly(oNK2Sv>BM!{kus$+O2vQUdg2-SI*_y)!URsXkJ^BF*EBY57--=P zpeEsB5a4b|sCzyioTbQF`0D^t6jlb>&k{{WM{u&kkUpJ5fG{{e-r(&hbNH28U?K4Y zC^O*d&Z5^KSL_Qi4WQGXFvBX8OGg<7&@4>dC zc0fs=wInbhGQu8ZkA|}uWvL`J*{-wegNc_wvUUSPx0*j#N{SfQ9ZV~p-d31Miwp5A znU3y!GZUmtB|V&J*2C5n6Ep4b@hEnVw4^XTbMc{yB{?fxlrwe&kIBCOPg#Q_dZDbD 
z29%8Im3adp*tj%rtgu(9S>;ePyG62iZzScQi~m$siCPL@Io(qTJDF*a~~sz}~MYtmeACrGgJ{$Eg+D&Jp(IuekH z!BhnI2HBNHvaX0-oC*4O*xIa3+_Fza5Ta!^$X8y;<%T;u zEwk>u?y3E5%gnXYF3Vi1%@R^J0@G2$c3UAcP;%Q`&lQp3Tn8L>nHn93_1P)6z|_t? za0LxtUqKQ}fWjgI8x|3ekK4d~6XWd4SAKm3N%Ag<(rr!gM}j&6uY{FvdIVd3 z1dj*7=QrWZj$e(r=lY?Y!j_YCY`ZF)C8JOW!fgGyN3nr%@N*9W@TY>1E7x70YYaA; zyg+*N1ttbFl-sjsKAX~f<$(AUr;-_g&Z z3YlYS5OxLwSq;BFT(cSEnQ@Z#!d?Ps-&g_Bz6V~hXtEy8AU6c=K0-HKDwb9v;rwv* zDQH?kSA5A;HY&INT@kQa)W9r#P;mh?siwQ?Zmqj(-EHg6CVSJR~&TuN>dlbcSt z=94>bX?s03`CNAXk?}{fr0i8<_Nu#MLKd4<$F>Vqji(x3j1+pe2ty8GboYGgxX`jk zNZso*PB06~WukFez=F~$8+e*C?;n36Rf?|?X# z;`%;my;EH8^qF13ELi#k`>>|4D5XGBMrC#K1&+$r%~C9KO5oB$`9~o z6t8T>ALg2sU9*VNtK5e_S^JfLRwxp@2b6NrYNUiJF`){YM=n~OSX3t!HHk${r<|vo z=Zox8(U4d)Bn-pdr@Q8h#>BiaVb7$PH~Fze`&1?KT3Sv=Nv%U->yXegEDY}!#`g)k z_d^DOBgA|*f%z|#p-H(J%YZ-3H7>ix5v5VNhm~kAE761!$^gdI3vK*KP3%qLI}UdU zIqQF9J~jSamau-KuyLN!&kIr_#O#GNR2g<53|Yz~EgZ`n(^w@@wx*D;R_s>591$h!3&!(i@`^pL~-g2u%>uD z9?U{A%@B);bJ_}=aKdl!TKb`rXW2BPP;|gNa~VETpl4QB=F0Cho=lssYLcqD#j0+h zvgfqNXNR*=aP^g2n4X=t?-Tm=3vma|Xs=}z40};wpMiWfDpC2iYVnt?z31?*4!9qO0IPUg>6$vy1 za^rJVti2WzV&p;%hu42EB;`5)A0WhQBfH_g!I1F9{Q+7*zG?6BVr1Iegt1Y6lU_r z)XqoHi7bX}6vN}&=MgZoJ>*IlJHs7CKcXnak-moDQ3PoSb|7#d81W)x z>V_|3lmY~1q?(DK5J5BoG#cYF5a9Y-01uOkx_QgrmT37=aHz0pABVv`vhRSP6Fm zd_91!STAo2+>G%NVEcP8g%595ypfUnz@VT`eIqmPNdM8wV-fS2D+P7hpVe(dv;5Ll zWsRol(hMomWLz2~i2%!Xs5C1swMJHZHBIEwG*N<|HUfarsyRR|Z6dMgU*IB|)t904Nno`Zm zlT#NI@FQ%Wyo|pWcJk{#NABDmhqRZ^YZ5OgZJIU5bnh$R2gxTfkBd98u(uT4G<;DR zrI};7i@U&HFJ=ZpE+`{3?c`Yd`wINUxN(1BUa}Pww%w=#Kd2IS3ECCT)?n+hXGlrE zsZHm5Z-<-yKAQphi0{EgC-rHmZnAJk3f#OFzz4Mn!B3_rhhq43N|M1Yk>NkOoy5{# z-vpnvmVMy^aPhz%Qqwjwu1nEyeH=}R%|gRjuA~!t*o! 
zoA;2U$k!CfuPHLl^Do~+_UKle^O{ zE7;4W4Ha=DW3HFR`A95lI4+SlhXxt!dZUsUXq!V aV`+(0ybu}DvO@KOA*-ca^+LG@-v1A3J)*Dx diff --git a/gui/__pycache__/prediction_handler.cpython-313.pyc b/gui/__pycache__/prediction_handler.cpython-313.pyc index 131e7242f3cd4bdd45af666266508d5f4e3a9699..abdcc1753d9d89fe5711065c176651940f49b8c8 100644 GIT binary patch literal 19082 zcmc(HZERcFmDuB__$f*vMN!{RpA;pFvMk$UOa2&I-;yQSq@KS-n?sY7M4KWtkCeya zOm>{z?M6;o#M4bMnx<$JAm}%fYoJ}gD0We2k=@Nub}UgyponGUPVhaucl=gUk0UOS~I2PwX~0N zS~sQV^-~7kFlFS8v`#f$K4s!fQx$y0R3%?IRmE53$2aq4ue_D#iUU##~SJ>^GnW=&qb*QSC7o(gY+r_yWi z+Moun#)ERK$3OAzpxRSa$6QnL12(3W@tErvkHu>`q!35v4=%}C7_YsI@pmu`-{o~c z8=beSoEeZy^Z+~=-{9@$8$J3p7?^M7S-yqu;rBs(o5#R+He-vf;jYR%wBS%*wep7AG^q0|RMi~hApA{Y)N911~sIvh_3$`N=H6t3vHLn)Mx#!i33{s;3rA=rjAM44K0SASbj0WKct+=a^B2#IdW0&ea%}SSs8~0j92s@H z=iF@989s!F1JntBG#Cl-jzMYQp-EAoLm+qt%!RnVn zR1_iZf~I^P+LsthRWg=I!UZwk&q#|Ws#++0qwFibQ7-^J;?JY5V>p%DEIk)XiOXh2 zx|3i=%NBrI@yDqPwVD!+1OK1@c}jaRQm=AQVq6(qwVVd7x`8t34a;zhnHiT&q!=cu zU8t4D!kW(8P$9AQ3L!LIklOYn1}x5T)Dfk*h0GF^wL;`c?9X30oC^s zS`3^XurJ6Nj1%A*FBnUm{b6W|OIE`G%2_d)Vh z?l!4kv6N;7DEyxcvj%OWrf3CM0nZ=6Gpwy>Wwa`4<|@CK@~DNgNDzYv5wPV<(Q1z= zYUM0$P`uCoJQWgk(HgGCQ#tq@*aBf=<@6pCSEFRy_UC)RR!~*wW8-WRJfx7b&FNtt zrK*yyQR|%H^WJb)sY4J}<>$5fQnP60s$ecHoEfgwTs2&+oE5G$0|n_|8BWewQ335W zmz70~MF!#rw0}bU7xc?=g?X^2Ja7uvINMSY&lxW+zi=14VEB|qr z3q|4S@GgjQcdtZiv6R-VBn6%qY2OQ*W{L+}I(u`~FX2~<1OsHZwt(ZAT^LWkRBl>J zI9%X7aHcIk8zr3K6rg!P$D$@${T@hB?dPu-Pdit;d%eswi!wClDB)=9F8uc5OvmTo z*ICM|mvG8nGDbls1C3p~#^{q~kt?)mXB-aye*$uKF0cdFlc6~4XVLIsV~Z?WL2F3u)YOpCH1el8!Ay}hG z+^T4s0z674jv5kC8olXXS&f85THd5sBBpj~70*Jme)}N1P^iauZ`vu%t3VgLv-jam zUn165?BW}aMPj^jHM(Tq$J)b*(26gyz8b>T^bOWhM0|({Ru}u)zqaqILYqb7H?k)< z@zoc@D}cLPgxtnDolZ6e!oiCyi_=zLqu2Z-u>L3;T3Jo3(`jJCi)m<7 zoEdH~-&L{9OqHm!mY7!*i?Scd@Eoa#graaJk~{Yc8W2iY#nE^iR0PAaLSTH*yZ-<# zw?rtK@b>TT?LS(48er!E!8INf!%E);v5)1ev@e|HM6WErSxD779U~Vy_ z#{K@`vv<4xtzvWH`^HTpJpAuCvgY?~b+TlGC*cRp0|N29gn@7+jd|f(qau^6+cmWk 
zA3#`xY_cLh4^pjjEU6wtePK`5+M!<}unan4^uoN`#pc_9wwnC~P8g4zOe%f7tTSmY zHkM?_p48acF4R+OveLnV`e6$TpAPS2ftzq@-=&0L=?gMD9LdF#E5KY9w1s|x4%SmB zLA6A3+z#t32x}=JA_^3(BdL9&Xyws$vg4@MkyoJi;-Gq9{j&7DJ~=)+=N@$m`jV%l zeMHjsK^FN3=&$nO71XE{HdU2?WzsnVo{2E1N_I7dB822g3ZjDto1-$`gu)cQrxVoT~)a3)`yjnT=aA|*n?jjXs~y@(Ly z`fjjs_Kk328H9$1?MmDP^GMr&Ew?+9{Q%{~fr?OqY!@gr;C!A~hcjt#mC%-KDqSsFWoS569Lb;;a_Q@b3j52$eVpBG$bJ3@2`^>i{kiVXwj-xD<`SzUNFf zKc^H?Wkq1xo$MG+7MrXpnN!O8xYJ?darp^K>P%696oPt*EeU0B_<7uzg|cOT9CQrN zE4$isNzz&rLkRhgSt9O0l5u2f@n)t1T`lse`PJM@Il}#c`{k#j20aC2R{+ zJOmq+G4TNum#@G)!JMB9tje#hxKLG4QxzlFVEMt|d$0#N zfws@yKb}PZ<-1irKM$4Yw2uD}B71!@L%Xf7x^tM+zmnA-N$HO~P-kED!@okivl)&1vDl

VVqwBcuJrmkhnN*tpP_;l?QfZeWd-8xKqTn`SU>&J-Fwr#0N z)ws#wiR|II)Zw|z;j`%)ciQ5C3s%DxFY|P|L4OvrL%G9~^fC3TlG6<2ZCMKEU!6%+9J^Ca*!}mqbGFW`tv6-sy}O*Yy>dtObF=Nv zpgxRu`XA}*blOtn4h(4VBZ?iw0eXs6OXWyShe-PcQ`=q|(-sGRn{^{&i^0B4n-N|<*x71rV zx2}=>!(?Qh40D9LOs*`G%V82u0t?@g%^)Y=l+9u>D__E58R9okEXm_o#N`Prn%9uo z)i>lhOXpTHWjT~ZwGH`(D8I}F&2vcFZee+M$KTvNQOy8coJ-Mmgyt#Re zbj65uHDh=^SKIpiruUjQrgDa+&1A~Zoi+5Q4E?#rZv5BO1&{~If5j+`rY9j82*OT6 z#<1-u`DHqyZ%UUnZ}q3k+PB;6q-!YMcI^JyRNG0SVNt@hx3$DFM2^jqAz;lpQg!|V z%>|HeVr0|WmMvFW+wpj>ez=0HBV_af8M#OUq}@5>^;@!YSe%nz zz@qM=90n`DB!3J|FW$aLYNj&A=}$F`hW)VpSe}YIr+;WB9mmrxCkT6zoXlfH-ufD; z`n`#ekEl%k~KiZNy@|aT+?CTp-Wc&DKhbw3a0B-*{23Z*D5Xa znwg&ki2GW4_Bxq~07fga^N5)B0*Zw0qWqbGQE9~G+xH(8E_FfokS6~K>1pIr7`^E_wgl3==fAeyVxeP2Q9)!T@ zC*zn1n&tGj#5m-$V_-|G;%nfk5Hvp6=E4b|Ptf~d@6FSUfwg!jvM8v5Qm|tMtngoYRKE57|3>{t$$ zTg)SqIcsW8nVN~Fnc^kXgrlo#316H>0)1e9gGs*#(ui|9jX;X7@xR!AWxN1rxKriX7rM-3eex<%h#;q%2Dcu#kpkS|-jBJ$N z7fVlx=YPqtfb&Ox(g;CV8zmzYqG?h)P!h%x|10CJD!!5K+||-uvGkOCOp>BvnI%Kf z7y2B;y>Ko>Wx(V6R=-+SsxqP=B%wx};@ zp(&B&y<&F_7@;jqxp+zYMvUc_wx6?>^w>)-nu;?7)E9NqUf6V^wkRDPdKA3#7dj=L zVsu#IQ;>hDWICv1cKnu-+5P`VGCi%Oa=Byx>CrbaoZ7u#l8Is&H7pEBPn?W1czTY? 
zqsEu;dLQLVFK5^t(*`)@5ObB5h%_veQWcHdFPFxp3v&V5W(M{tj8vbijLZAs2mz5J z4aTdvN{h?k479<^j@J0qQe6&EBi8Ff+!|YVQWOiPmSRFwOOEV5&cC0iH;9I~`jL=h_Ms09+ z6uBF% zjNZ4ul)uMuM9&GX4z4Ew!&ebQxObJ>0iIJ)8)(S-h2s*+QcKRp)pGS7!+ypyd{iB6 z;H>VGQk!DQHAs|!*ZyCLlEqoM>If^fflQ9Q`c)VEL7W@*H|0`0^&o7yw5zgoa z8GHT*^o=fzOSN3N1O@8HN@a}G?}=53ZBYV$FZ*q>1WLiC0Vq?YdTs&Yz~Zl%4+AUG zm0!J$(WVk8duilpt`WvO%{6@vturN9G`UYn(28Y|{(-_l4rXdt6pDm?u$u~#Dm4OHEE4Z3yjFDXm-v~un(XGK2-5R>CHIdtj@9<6;P9xXAo z1fuAxYZ2-pi$x>rZ0tse=fgq&Rq%mhK!TBe5{H+ zgAOE!2N{6Q2;2Q$Ce#u>g3)^4(G5`A)#@S-%b!5Da1 zG3Fn-3E5T1o*Kw5@nE71Qdl}}wb+|>!yYuK?VL~Q#SAfQZci%hkk~_08F<{=cytaW z>racxSpG-^vOj{Ds1QobL&g$LTjz5x$H0+G-?3ej5O$@vkm{ctI>X2C+k||cPCmDv z;x6WT6}SuGPMAj#6y`<6ok>UWO95=0A45u6%X1L9YELR4Un*&(6GD^7@@$|l$C4u) zCZLFnhcySR$MLWYB$X9EjG(K&6a{IPL>MifI|?wIHsKw}IOoXdDQps*Sk`B_pvQbNTv$P6Wy1V=EL3i1*9EuS`9{C_jQ3Tu=Y@* z#bxWpTo;5=7?B6WDnzQK{(~&v37itjJZt=-KM*L z9-qW?Dbc~sI21hk#{_veA($a*9^xUX1w-S=)EFP%9S^``moRXqpd9m`q&S~bnJ*?f zO~F1EN7v?OG}FmC_x{lAJvV9G|6$F3xE1r9{-Dw#OIGJcFI|El@y0^_3|#(zxBV35dMrnqr z1}IE0l^mE`gE+U?6y_CAV}cTA2^fh+G>@<@i`O03z7C%1%RTxT$x<0lm%ZWmFsKs=QszNG+FTWFqgt~m* z1*MIKHHz=7v9$ySM?kniEQEVGMTOqPp!7P$;rLe6BjxZ1Bm7pkx@wWB1Gwrf4-_1~_xs1|yp{d#mI zEb;|M@fG56kl2E|CryA#no1C(7?iWD*q^0SpVSVrbWb^MPwHS(ra}=vo*cMbVh&L2 z1JkzRkUHmN3vde{sfw^CJ1>jxV6yi_2~*=Bi8Wb6cdIx^xRj=p9{_BZW4*oX=uOB; z2IcwEh3M$ubtsR#<50s>3^ug;H&Ez9{&xShZ z=pmZ=Tvs>I)a@v(TJv^w>()@Zx^H9bA5691Om5qqKS`3IGsHcgwsWMieY>;wC)dc} zEIH#&cX~)=+jgbp-P(6*N#hZ6^fWm#L+WQUm2-t!NU%CbhR>7w3m;Tod<<>t->D~! 
zmp-Umc-DlIPg9LfVT)52NY%xR=F*O`Ol#S0=_C6`(k(N@*z{=yQ_)gjTR~6)q&Ao_ zhF)OX#KZQCaqdg9jqTW&&lnp;_BBF^LhX+HvP^3MX0|!9t%p;shl!yXGU@6&a`gvt z?XNshDeB8Nra^Kc?Fh26hz;_jJY;B|%m+#6CJC++YcgYa^HE*H#&oWGDBJByb-S|N z!>NkSJG#xOPik6zaO8=BnUc-R(w*~vRd@f!){VQ@Gxous){%-f(m9{A?()23R_<4S?#q{r<&Kbay&U`iL#?cO`yoUI)*z-&Xg{^O}S=O=Z& zx&Dy{XH)&-@bsvz35aj9=b9b4=7A?FWmDB1-7_5n6A9^oq~YlO?)wc7yu`go&MuL< z4%{0`IZhHwXRd$n{;^d51hF_FCvZmQlVwkbQ>Vk^+G_Un z>#5VP1EO!pCUHrhg*>~b_{7Wa9whruk-p)SV+3B@lus#OsQ*#p06B&%sgJQe3;rsRP)e}3+$}Ab0d-ygqAy;_9zYPsta;jHQp*+DSny>Y$m``>=K8HDK zoo5ulrn<9=oO6iO_B`tCCzkHVY*((e=k9c>^~j^1kj(6ax@A*OZT&Jm{Fc}JJ2U~?7q*^1UwMeEjJw!-;jRL=B_$abdW zOlxn(&?G4zTld|eY>z9|<09Rs9@u}@_V9Xo>Jph;APZq)y_PXt&lws;owT)(G4wxc zC}__U|2a;E=hG)Sa^f607b2C58O_pm4NF=Md{{GZdwj#Sk&se4XYcmkZM%E%UgFQ* z{?oU~;qeE_jGH6pE@jX8Q|J6?_f>K>Kj$H6& zG?(f4sN%Akj+CY&qp{NjC{Tf4j5TgQA810KJ%eW_q>|7AHRS7{^f^l5B(24q~k2{oX;38&_=jlWDSm#!I3faqBGXsNRDc8)${g{jB)t| zm?!SPem_7uPdymP7$+a$Cb)ZN%foz(FikN2uj6sBJN|2{tLtJd^HY5_UI+EwW6aNF zu9_`4u+TkYRF2D#~fSI2&@3zFmiuDX76 zc*~rw?zlDkE2Ui30?Sfe{zNWQu}>9nw}W?2bc|ZJ(YBGe<4LPv`mKkM9d@2g z48vC{JQ%QyI3svyqfCa$fR8<3j;U4cBfVBKeycho~m3i=v2pY-HA!xzdc^Y^L2gx7fi1hrahmU9S{v$}O zgb#JY0r&_C+>?TGa&~Nv9`i&oNnjriFDzUH%`s8TPw;;OkKkBh;$FD!C}c9(-!tWZ z%h*0<^dB=?`p@(+Q~uwXwx2VtDW>&TYDPAgW{$$g1~UCe#>zWg-@1}Dwx*1&w{$sE zZPvu5OzbVgM+)_=x6%sB=J59?-kV4(+H>&r!^GDozA^XpxjU|R;+x0PHC-8{V@Fvf zYx=qJBn+!MnNptkSlRp!$`d(T15sA**qE~Nth(lXb8RpwQmNu2Hp$rFnE8jQ7j)Ee5iOhPu%Co z`Afv>BjLwXN_)$fKc=l=8@}sXN-uBU=~m!3P=Dx2AHo#t)R$zu7TE h?_bj#-~VD|N1B-S#2e*IqKq19?a@cedf+1wuo5c-IGD33mudDyE)=tqb{oa#6l zQDN1H8mmV%Si_Ps^>HoMLaI5g8_{EZuFQZ9w2r|Hq}t=g5fe6zn6a7G>yBGStk_EP z`r{=dHf$Ry#ib)+Mt86;cjugxd$SArFfh2sI4HYe(vE-AFyIr?X&=H;lNjYlOut zd>e72U+X;~_b7e?rtJ!*xWnJ!<%(ACQ1G#G&b8Hl9iBC$9CwGPFiGvV0;4#vZ=sAS*pc^KmuhDIe6v(q>h z;spUlEie8KM`vlfvE;tPUhgR{+i@E6WPE^kEEt`R@Yp@T9tpyXrrCIm4PTs(;W#@L z3|-)()9l6AG)!KA7Rj#GjtGB=k3@qPdAE>E@DCg4Hu!+{1{T4;0v6U(z4>(k_ojC1=G|KD1AToh`_1lgb_=^v7fkB#)V7k_jwMM5DkT 
zc0SJ$felC5AS>|mL0~!0#;HsZz63AD!qee)O{+2g0Lb~J`~CKpG9u<=n)WjosgL4vBU%d;b=G>2uS8YSP10s zgnF&OM`k2FV8=(}k}(hnMS_A52nb|omc1s0jt=;r*~x;O#-W9O$1~kK`U_t@>==I~ zp1{1oo{nASF&mp<$1cbGRA2-1iUbGZu!#J#iLk#wnZTSZte*L+xEcnNY91r6N5T48 zs27~@`t%xFW31N=DaN^2nP%8*ExVxo6XSdcbtsfm&m@mXo|3NyGCx8-o)+}3 zYV=(dCT-j*42ld1q$7(P;4PU7q>}0$$F5Mjmvk(F*EMwC=E&GwDVs}VT$m+LO2X0k zL_9DLS|uRFv7`@C6(H#_S%{Jz^FY98NDFP1hk*=5z6Cf6oX=0J!11Wor}gIft#m{v zs%t}kaRS8I7 zPre_Ed^%1I|FoPAa(a(`yQ&5G3=VV*#)~rJE1T2LdJC{^0SJ^~9UYJAEJOcavYXM}x5TLev+g@x9rmAMFNvfD(B zgT|IzI-{DfKVi%YrLK^2)~AeF^7JwDJDe?2B%Va}g{T5n9V~`hM{}$fWOV$_;&xT>wrb|^S&%x!V z1E3LMRI*J<6@}7RB2LydE!GS1U_2p61{pO$vIx}Bkh|g@;Cj+@l#lXc6PRX`H3hyD zqfh9R?4~mYR?+RBkc85MolfA5dkdXkQa!*Xt#TJSQ}}H6Mfof9V28j^PwZ91$CQOlu}tS{bU*&^V^A3o|k|k>Pe=;hos^Ka-d-IkF3!v6ln>_f-mz& zYCoNqp`7*O*|Ont08)4vwDC3c$bu?s7CoyK?gdk}uI=WjH&2O7^}2S4(e_A>s@rcR zf7QFJ`M3U6=i!Cnbq2L`XPQS+%_A#)tIf}g7WTfaIc4iwD=UAa`t@qD-Y-sG5d9G` za#5^}rpsdMdepu<+r1;}*_m~`(E3*z{U_`<9>J116#)0XpDN4scg|MYLR z=7-2+EM0?6XRn_XtB)-IlX&EecxGHYdrlmm5Uc!Y>m=aP+?#cGXFEOF+NNwn*V5op zk65=o+qe@@sX0d(m4w?^E zqyKE}f%mVgO^0lnUpKc8m1^EIsUZJesg=Cz+YgyE?`uiP`z9T{C38Wp1j!6SqO9f( zd#(irpnL8?5c)4N7>JybCs+x4Vt)7a=J#82wv7ie8 z;3QkL4Wo^la5nv3d!%KJ^!1xz#-LRy`f<%VKHZ zn6HZ^?EWZ{pIKqY_vZKEd?lPSmH9=rMO5EvS2j%mZ{^3E&nYFS2oh(uA42hkQOMuPnDM@STS%?spaarA*alM zt5Zt8nu( z{u{7-b9Q*t>N9TTckx*Z^f?LpI<;QO`Gh{cJf~s&e~0m{4ajHp_AB*;G|!Kci9JfG zuVjLpBY-g8R3$KU~AUx{}}sV}5lqXLg>8W;y% zCy%TkTxbaWm6j|zdG)FKta;+F(z(!cqhAMRxWzbJ3q4CTajjnjfotR1Jw=-6=?hx!<%-hMXNV}vt$1(V zid%b&baUP>44m0ltec8BUQxhY$#Gw0r`f-O^T_K}pcT@scK5=m8P)*sRerqR6jld{ zvHE|2|6CEZ^Ik)s_Fzxm8?a$sTXF2GQ2St!z9|~Fr%2Pl{!^gmH#P2%j{)gFq(z_| zi^eJ%>6_x*kz&rluDFF4e@l7FQ)2_&zbUMaZo=xzT9wDXu2soZZm=l}oc*R|He5V2 z66u(zQ&uB*s^&mb>WjF%)v3TiZOU?=ev_x_%byS`iXm`c^R%#$V$>_9AJor+29H7t zUNPcHr4P(6uxWmcN?jr4s*15I=yQ>7%ZZP0d9cM_~xpfThNajr(Rt#G~st7Znq zae>9nqNCc;f0AQin=qdQ%u9X7Ult8*;_--oOpm`JcLqzRGTJn69}ZeXPjGQT6B_6?s=A#66PC%C1!6_$+v) zSn!sGLwS$rZ1@r%rH)r{rb2j5_D-+~;+|rITl!uVH=;mYs&irRDdNyvI24RP{0V%y 
z5MxM$NKA$TR6J#y9kdW`8k-9XinkbYVG`k)mV<^$nkbQ0%jF)+vDZ9L&S5WX0bR1-;!r*|f)otzjONDfZ2-QD}y z!!Unv1G1+d^92H$iaQX3tR9w&2Ql?k6a1;qwv-LW`HR9f(iyx<``RT{d$PheKMi4C zIUaZ^%wLw<8s)Ygj|Y?eCFw52Y9q1WG%JUPauH(M1!e_6+4iKS9lX1hlt>^)!RZ`q zT6rYdQ{>I1?$~^rCxC4H!cO^7)$N|7YY13O{K+^5d@DrJ*%%FYdp-trgFYtn`IsS- z>hw_wemr(SBMa=s;FZAT7{0(`VPCRTnPe^`;30waq86AO;4$$rBzF{;!^VTcg@P+T zQE++7E`2tBb)E-uHBb1W7oxGtQC4=6ce6z^_EPWvWYSqwwz0cW={||O2?j;cb{T`& zyjPz@E)*B{01J~~;1lAmGqEJAK*=MaTq$5FHz$*&8w&vOfx%oPgpGwl2~0$jL}loz z2j1j}wS|g+zAjlt9uFCSs3hj0LDfVNCCL>B1rJN%1DY`t+*JW2dlC~T3yot))*L-$ zA&^v)Njt*dOOKvXQMiebew2mj)Gs%ttl}MNl5ZxR#gAr&5jGf|v2t36P zSpVFl66SwezmAzOh9F>{dn1I8iPIq22NNUVJhJaWbPYDOCtv`2irg}Q(Ytf>$Z5Eo zuq&u;B#OtA>@1M4ur#)Aopb^LsWwg!^~{65mGAR-lG`WQz1`jH;VWUPwV_>ZTqP*9 zX(MoV(VGK!7m<6z&l;5*NpCVGd#9MZ_BN+6^yBb1@u+ zSMZ9YjtP=6G@pQrONnS48%V8QNKC=#_#piV&d3LodlyHUeb~v zucU{Y9?(qE5~4^N2nqr=AXb>1QY0PS05S2NV#0VyN7gA^Z=j1;kjja~Zx(CGSv32c z7|OhLSIZ&7Lj>Mo@*rhWzbx4(fznfrm2Uw|;V$s}8d@(wmeP#57XFjq-0|zj zMd$9cr9W%2e`sUx+E`Kwp+m9qWZE*Cg|-$~%HqmcT2hu4v2}Q*H*Gmhi%CJo;!auI zX-gM$|8HAO9^&4#W#40c2ksn7TL!o6d%xDTpiMK*wes3bx%+Opd$qi4!MMhlzsFo> zZtVEs*c)fRfA$9xw;W=5>#eaR_foCcn@C@}x{lB*sw3)$2whekRm0aM)iIJBQ;(D6 zta_Rx<7%EHyn2Qt_F45Kgp70QEMv(qwJD}H!>}oa6`Obe%5i66wSQRLJ+k5!8^<74 znP$fCmsKq2*VPuIW38g;)}Gah?F&a9HK3}-A4C?+1V!U@qgdXPX8K6D)-5v4_wD5i zhPBG-pENH9f7bNQwtwCBQ%|O;@AlxG##G~;)tbF`f~%F!$(><-qGR8k3wI{Pk#l0% zM4IuhYj;90wygQK`EJ?XW#*%{9XCer*SFpbyct*;%XA-2bst@65Q8(T^|LpQWXo&c z7=3;8)~Smz~Sws{^B=_w4Gx+57do zGxY~j^#_*q_v(-StXFNRqk65OE!)^hc(L^DWh*oy zaqq6}*`2)*-+y4%yUK64VR<_0GxnyGy(wdNr|j+}_dRqIbK(($jFu-XrK@6DGR?d~ z7er%h#D`}?Z$!jntv2fwwiSUp8mqR=`w>lgakDUivUyv86eV8u95p@pr z8-qWbek1(-@DDEBY8ETnZcQ)sFLjB1bLsF!=oEvC-muQktBwS4q?woQZ&E;wd+v;-nS&G{ z(MTDFfK4;)^nxQ%k55#HA+c=wPYnO*qmx=x+Vl|VKx5^UxpQ#2Yq@?UL{`c3V`8N* zZ8@`9KetmYw9>p1T;asl=SA*?v}Jq?ZS87V06ku}X0d<1W`jGIT6q|H&Eu4KdSc~U zqCX(M6chtfVofM*nSN}bV=J{QR?+u@*g7trdogVZJU$A6zHS82pFesLv_k7cWUQ!N zbJpH8zG)O&PONY%hsmO!3@#c|&Z(@kC*vGQIR~=cr?S1fvwi*9w(hLE|4#Fr3bFk_ 
zcH6U{aQ4=G0=H&rAClMS>pFn<*(d#=cJ7t!{ru6WDtBdt-h!FDJ}K72ZJC$E7lLB# zRN5NKTFWw4Hf3cq*4C7@RqQyr!Yn7nHm^7~p0=LNT01h?gf9`+wa3)}C8}SktxCd)v75 ziZ~Dx=U*0MSgaB<*7#j({PT4U>GYZKOW50gQ*9paMZc{yPIjZ;x(4-=wVIW-UH($- z@9NZ$|6Ri#$p5~Y93G#@tZe5qPUV zAnoCWz?9a*7JGtN$g@wZr9+~;3ZYSg=o0o7Y+r2@i-1o zCE`5Xk(R8v>(U_}&NKnIKwJ-v=>_8FsJVto)PTN-@hbcRUZ4ImdZ1S8mO&Um?ljYqM@$&o}peV9WrI7!prG6?dyqU7{d4i5IaJkQ66;B<;f5J2WQ zIV4dY0e`OwM24u<9l#`_BAJ8`4$s3~(J=gK19(k-mJuM%D}0?)YpEfwha(Qi035Z7 zf0a=MBRi)5OU7=y&@l~=@m{8RWOem3EM;RRYi zrTP#Jq|m@esN@6W{0lPw1sOj?2R=Z}pXiZlU={U0WRS}IXKUGwj(>SEV{J-Vo35F% zw(5+HP1)FMmOpFs*Ir%KlrIk5Joe_XRZUA)YxwT5*N%Pn)N7}148ATb?p>|wNNe5e zTC=L*L+ybNwTH6KI#F8z7Kx!Gqp!NFuUceQ^^Mmw_YJ1+e*3j=XAI7i!I{-rGCJp7 zopbT>%~#)iRqPGk)lEIpU(u*$REvhcBbxbi-GvwjC}mZ{pS4EOGQ85b;t)+I)7sJd z<<4s-vlZ3XMzhsTqP8+y+XAa2>#YBiw&L@38}#^${v6=fcKgsy^xjVWp(@RLyDJXa KHSgPXkpDlV=fRf% diff --git a/gui/delegates.py b/gui/delegates.py new file mode 100644 index 0000000..32182cd --- /dev/null +++ b/gui/delegates.py @@ -0,0 +1,89 @@ +# gui/delegates.py +from PySide6.QtWidgets import QStyledItemDelegate, QLineEdit, QComboBox +from PySide6.QtCore import Qt, QModelIndex +from config import ALLOWED_ASSET_TYPES, ALLOWED_FILE_TYPES # Import config lists + +class LineEditDelegate(QStyledItemDelegate): + """Delegate for editing string values using a QLineEdit.""" + def createEditor(self, parent, option, index): + # Creates the QLineEdit editor widget used for editing. + editor = QLineEdit(parent) + return editor + + def setEditorData(self, editor: QLineEdit, index: QModelIndex): + # Sets the editor's initial data based on the model's data. + # Use EditRole to get the raw data suitable for editing. + value = index.model().data(index, Qt.EditRole) + editor.setText(str(value) if value is not None else "") + + def setModelData(self, editor: QLineEdit, model, index: QModelIndex): + # Commits the editor's data back to the model. + value = editor.text() + # Pass the potentially modified text back to the model's setData. 
+ model.setData(index, value, Qt.EditRole) + + def updateEditorGeometry(self, editor, option, index): + # Ensures the editor widget is placed correctly within the cell. + editor.setGeometry(option.rect) + + +class ComboBoxDelegate(QStyledItemDelegate): + """ + Delegate for editing string values from a predefined list using a QComboBox. + Determines the list source based on column index. + """ + def createEditor(self, parent, option, index: QModelIndex): + # Creates the QComboBox editor widget. + editor = QComboBox(parent) + column = index.column() + model = index.model() # Get the model instance + + # Add a "clear" option first, associating None with it. + editor.addItem("---", None) # UserData = None + + # Populate based on column using lists from config + items_list = None + if column == 2: # Asset-Type Override (AssetRule) + items_list = ALLOWED_ASSET_TYPES + elif column == 4: # Item-Type Override (FileRule) + items_list = ALLOWED_FILE_TYPES + + if items_list: + for item_str in items_list: + # Add item with the string itself as text and UserData + editor.addItem(item_str, item_str) + else: + # If the delegate is incorrectly applied to another column, + # it will just have the "---" option. + pass + + return editor + + def setEditorData(self, editor: QComboBox, index: QModelIndex): + # Sets the combo box's current item based on the model's string data. + # Get the current string value (or None) from the model via EditRole. + value = index.model().data(index, Qt.EditRole) # This should be a string or None + + idx = -1 + if value is not None: + # Find the index corresponding to the string value. + idx = editor.findText(value) + else: + # If the model value is None, find the "---" item. + idx = editor.findData(None) # Find the item with UserData == None + + # Set the current index, defaulting to 0 ("---") if not found. 
+ editor.setCurrentIndex(idx if idx != -1 else 0) + + + def setModelData(self, editor: QComboBox, model, index: QModelIndex): + # Commits the selected combo box data (string or None) back to the model. + # Get the UserData associated with the currently selected item. + # This will be the string value or None (for the "---" option). + value = editor.currentData() # This is either the string or None + # Pass this string value or None back to the model's setData. + model.setData(index, value, Qt.EditRole) + + def updateEditorGeometry(self, editor, option, index): + # Ensures the editor widget is placed correctly within the cell. + editor.setGeometry(option.rect) \ No newline at end of file diff --git a/gui/main_window.py b/gui/main_window.py index 9c7c013..b053ce5 100644 --- a/gui/main_window.py +++ b/gui/main_window.py @@ -3,9 +3,9 @@ import os import json import logging import time +import zipfile # Added for archive extraction from pathlib import Path from functools import partial # For connecting signals with arguments - log = logging.getLogger(__name__) log.info(f"sys.path: {sys.path}") @@ -22,11 +22,14 @@ from PySide6.QtGui import QColor, QAction, QPalette # Add QColor import, QAction # --- Backend Imports for Data Structures --- from rule_structure import SourceRule, AssetRule, FileRule # Import Rule Structures -from gui.rule_editor_widget import RuleEditorWidget # Import the new rule editor widget +# Removed incorrect import of AssetType, ItemType from config +# Removed: from gui.rule_editor_widget import RuleEditorWidget # --- GUI Model Imports --- -from gui.preview_table_model import PreviewTableModel, PreviewSortFilterProxyModel -from gui.rule_hierarchy_model import RuleHierarchyModel # Import the new hierarchy model +# Removed: from gui.preview_table_model import PreviewTableModel, PreviewSortFilterProxyModel +# Removed: from gui.rule_hierarchy_model import RuleHierarchyModel +from gui.unified_view_model import UnifiedViewModel # Import the new unified 
model +from gui.delegates import LineEditDelegate, ComboBoxDelegate # Import delegates # --- Backend Imports --- script_dir = Path(__file__).parent @@ -37,7 +40,7 @@ if str(project_root) not in sys.path: try: from configuration import Configuration, ConfigurationError from asset_processor import AssetProcessor, AssetProcessingError - from gui.processing_handler import ProcessingHandler + # from gui.processing_handler import ProcessingHandler # REMOVED Obsolete Handler from gui.prediction_handler import PredictionHandler import config as core_config # Import the config module # PresetEditorDialog is no longer needed @@ -46,7 +49,7 @@ except ImportError as e: print(f"Ensure GUI is run from project root or backend modules are in PYTHONPATH.") Configuration = None AssetProcessor = None - ProcessingHandler = None + # ProcessingHandler = None # REMOVED Obsolete Handler PredictionHandler = None ConfigurationError = Exception AssetProcessingError = Exception @@ -156,7 +159,9 @@ class MainWindow(QMainWindow): # Signal emitted when presets change in the editor panel presets_changed_signal = Signal() # Signal to trigger prediction handler in its thread - start_prediction_signal = Signal(list, str, object) # input_paths, preset_name, rules + start_prediction_signal = Signal(str, list, str) # input_source_identifier, file_list, preset_name + # Signal to request processing with the final list of rules + processing_requested = Signal(list) # Emits List[SourceRule] def __init__(self): super().__init__() @@ -166,8 +171,8 @@ class MainWindow(QMainWindow): # --- Internal State --- self.current_asset_paths = set() # Store unique paths of assets added - self.rule_hierarchy_model = RuleHierarchyModel() # Instantiate the hierarchy model - self._current_source_rule = None # Store the current SourceRule object + # Removed: self.rule_hierarchy_model = RuleHierarchyModel() + # Removed: self._current_source_rule = None # The new model will hold the data # --- Editor State --- 
self.current_editing_preset_path = None @@ -175,32 +180,15 @@ class MainWindow(QMainWindow): self._is_loading_editor = False # Flag to prevent signals during load # --- Threading Setup --- - self.processing_thread = None - self.processing_handler = None + # self.processing_thread = None # REMOVED Obsolete Handler Thread + # self.processing_handler = None # REMOVED Obsolete Handler self.prediction_thread = None self.prediction_handler = None self.setup_threads() - # --- Preview Area (Table) Setup --- - # Initialize models - self.preview_model = PreviewTableModel() - self.preview_proxy_model = PreviewSortFilterProxyModel() - self.preview_proxy_model.setSourceModel(self.preview_model) - - # Initialize table view and placeholder - self.preview_table_view = QTableView() - self.preview_table_view.setModel(self.preview_proxy_model) - - self.preview_placeholder_label = QLabel("Please select a preset to view file predictions") - self.preview_placeholder_label.setAlignment(Qt.AlignmentFlag.AlignCenter) - self.preview_placeholder_label.setStyleSheet("QLabel { font-size: 16px; color: grey; }") - - # Initially hide the table view and show the placeholder - self.preview_table_view.setVisible(False) - self.preview_placeholder_label.setVisible(True) - - # Apply style sheet to remove borders and rounded corners - self.preview_table_view.setStyleSheet("QTableView { border: none; }") + # --- Preview Area (Table) Setup --- REMOVED --- + # Models, TableView, and Placeholder are no longer needed here. + # They are replaced by the Unified View. 
# --- Main Layout with Splitter --- self.splitter = QSplitter(Qt.Orientation.Horizontal) @@ -404,94 +392,36 @@ class MainWindow(QMainWindow): main_layout.addWidget(self.drag_drop_area) self.drag_drop_area.setVisible(False) # Hide the specific visual drag/drop area - # --- Hierarchy and Rule Editor Splitter --- - self.hierarchy_rule_splitter = QSplitter(Qt.Orientation.Vertical) - main_layout.addWidget(self.hierarchy_rule_splitter, 1) # Give it stretch factor + # --- Unified View Setup --- + from PySide6.QtWidgets import QTreeView # Import QTreeView here if not already imported globally - # --- Hierarchy Tree View --- - from PySide6.QtWidgets import QTreeView # Import QTreeView - self.hierarchy_tree_view = QTreeView() - self.hierarchy_tree_view.setHeaderHidden(True) # Hide header for simple hierarchy display - self.hierarchy_tree_view.setEditTriggers(QAbstractItemView.EditTrigger.NoEditTriggers) # Make items non-editable - self.hierarchy_tree_view.setModel(self.rule_hierarchy_model) # Set the hierarchy model - self.hierarchy_tree_view.clicked.connect(self._on_hierarchy_item_clicked) # Connect click signal - self.hierarchy_rule_splitter.addWidget(self.hierarchy_tree_view) + self.unified_view = QTreeView() + self.unified_model = UnifiedViewModel() # Instantiate the new model + self.unified_view.setModel(self.unified_model) # Set the model - # --- Rule Editor Widget --- - self.rule_editor_widget = RuleEditorWidget() - self.rule_editor_widget.rule_updated.connect(self._on_rule_updated) # Connect rule updated signal - self.hierarchy_rule_splitter.addWidget(self.rule_editor_widget) + # Instantiate Delegates + lineEditDelegate = LineEditDelegate(self.unified_view) + comboBoxDelegate = ComboBoxDelegate(self.unified_view) - # Set initial sizes for the splitter - self.hierarchy_rule_splitter.setSizes([200, 400]) # Adjust sizes as needed + # Set Delegates for Columns (adjust column indices as per UnifiedViewModel) + # Assuming columns are: Name (0), Supplier (1), AssetType (2), 
TargetAsset (3), ItemType (4) + self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_SUPPLIER, lineEditDelegate) + self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_ASSET_TYPE, comboBoxDelegate) + self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_TARGET_ASSET, lineEditDelegate) + self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_ITEM_TYPE, comboBoxDelegate) - # --- Preview Area (Table) - Moved into the splitter --- - # The preview table view will now be used to display files for the selected asset/source + # Configure View Appearance (optional, customize as needed) + self.unified_view.setAlternatingRowColors(True) + self.unified_view.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows) + self.unified_view.setEditTriggers(QAbstractItemView.EditTrigger.DoubleClicked | QAbstractItemView.EditTrigger.SelectedClicked | QAbstractItemView.EditTrigger.EditKeyPressed) + self.unified_view.header().setStretchLastSection(False) # Adjust as needed + # self.unified_view.header().setSectionResizeMode(0, QHeaderView.ResizeMode.Stretch) # Example: Stretch first column + # self.unified_view.header().setSectionResizeMode(1, QHeaderView.ResizeMode.ResizeToContents) # Example: Resize others to contents - # Set headers and resize modes using the model's headerData - header = self.preview_table_view.horizontalHeader() - # Set resize modes for detailed columns - header.setSectionResizeMode(self.preview_model.COL_STATUS, QHeaderView.ResizeMode.ResizeToContents) - header.setSectionResizeMode(self.preview_model.COL_PREDICTED_ASSET, QHeaderView.ResizeMode.ResizeToContents) # Fit - header.setSectionResizeMode(self.preview_model.COL_DETAILS, QHeaderView.ResizeMode.ResizeToContents) # Fit - header.setSectionResizeMode(self.preview_model.COL_ORIGINAL_PATH, QHeaderView.ResizeMode.ResizeToContents) # Fixed width (using ResizeToContents as closest) - header.setSectionResizeMode(self.preview_model.COL_ADDITIONAL_FILES, 
QHeaderView.ResizeMode.Stretch) # Stretch (Fit-If-Possible) + # Add the Unified View to the main layout + main_layout.addWidget(self.unified_view, 1) # Give it stretch factor 1 - # Hide the Predicted Output column - self.preview_table_view.setColumnHidden(self.preview_model.COL_PREDICTED_OUTPUT, True) - - # Set selection behavior and alternating colors - self.preview_table_view.setEditTriggers(QAbstractItemView.EditTrigger.NoEditTriggers) - self.preview_table_view.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows) - self.preview_table_view.setAlternatingRowColors(False) - - # Enable sorting via header clicks - self.preview_table_view.setSortingEnabled(True) - # Set default sort column (Status) - the proxy model's lessThan handles the custom order - self.preview_table_view.sortByColumn(self.preview_model.COL_STATUS, Qt.SortOrder.AscendingOrder) - - # Move columns to the desired order: Status, Predicted Asset, Details, Original Path, Additional Files - # Initial logical order: [0, 1, 2, 3(hidden), 4, 5] - # Initial visual order: [0, 1, 2, 3, 4, 5] (assuming no initial moves) - # Desired visual order: [0, 1, 4, 2, 5, 3(hidden)] - - # Move Predicted Asset (logical 1) to visual index 1 (already there) - - # Move Details (logical 4) to visual index 2 - header.moveSection(header.visualIndex(self.preview_model.COL_DETAILS), 2) - # Current visual: [0, 1, 4, 2, 3, 5] - - # Move Original Path (logical 2) to visual index 3 - header.moveSection(header.visualIndex(self.preview_model.COL_ORIGINAL_PATH), 3) - # Current visual: [0, 1, 4, 2, 3, 5] - Original Path is already at visual index 3 after moving Details - - # Move Additional Files (logical 5) to visual index 4 - header.moveSection(header.visualIndex(self.preview_model.COL_ADDITIONAL_FILES), 4) - # Current visual: [0, 1, 4, 2, 5, 3] - This looks correct. 
- - # Add placeholder label for the preview area (already done, just referencing) - # self.preview_placeholder_label = QLabel("Please select a preset to view file predictions") # Already initialized in __init__ - # self.preview_placeholder_label.setAlignment(Qt.AlignmentFlag.AlignCenter) # Already done - # self.preview_placeholder_label.setStyleSheet("QLabel { font-size: 16px; color: grey; }") # Optional styling # Already done - - # Add both the table view and the placeholder label to the layout (already done, just referencing) - # We will manage their visibility later (already done, just referencing) - # main_layout.addWidget(self.preview_placeholder_label, 1) # Give it stretch factor # REMOVED - Now managed by splitter - # main_layout.addWidget(self.preview_table_view, 1) # Give it stretch factor # REMOVED - Now managed by splitter - - # Initially hide the table view and show the placeholder (already done, just referencing) - # self.preview_table_view.setVisible(False) # Already done - # self.preview_placeholder_label.setVisible(True) # Already done - - # Apply style sheet to remove borders and rounded corners (already done, just referencing) - # self.preview_table_view.setStyleSheet("QTableView { border: none; }") # Already done - - # --- Add Preview Table View to Splitter --- - # The preview table view will now be placed below the hierarchy tree view in the splitter - # It will display the files associated with the selected item in the hierarchy - self.hierarchy_rule_splitter.addWidget(self.preview_table_view) - # Set initial sizes for the splitter (adjusting to include the table view) - self.hierarchy_rule_splitter.setSizes([200, 200, 400]) # Hierarchy, Rule Editor, File Preview + # --- REMOVED Old Hierarchy/Rule/Preview Splitter and Contents --- # --- Progress Bar --- self.progress_bar = QProgressBar() @@ -657,49 +587,97 @@ class MainWindow(QMainWindow): self.add_input_paths(paths) else: event.ignore() + def _extract_file_list(self, input_path_str: str) -> 
list | None: + """Extracts a list of relative file paths from a directory or zip archive.""" + input_path = Path(input_path_str) + file_list = [] + try: + if input_path.is_dir(): + log.debug(f"Extracting files from directory: {input_path_str}") + for root, _, files in os.walk(input_path): + for file in files: + full_path = Path(root) / file + relative_path = full_path.relative_to(input_path).as_posix() # Use POSIX paths for consistency + file_list.append(relative_path) + log.debug(f"Found {len(file_list)} files in directory.") + elif input_path.is_file() and input_path.suffix.lower() == '.zip': + log.debug(f"Extracting files from zip archive: {input_path_str}") + if not zipfile.is_zipfile(input_path): + log.warning(f"File is not a valid zip archive: {input_path_str}") + return None + with zipfile.ZipFile(input_path, 'r') as zip_ref: + # Filter out directory entries if any exist in the zip explicitly + file_list = [name for name in zip_ref.namelist() if not name.endswith('/')] + log.debug(f"Found {len(file_list)} files in zip archive.") + else: + log.warning(f"Input path is neither a directory nor a supported .zip file: {input_path_str}") + return None # Not a directory or supported archive + return file_list + except FileNotFoundError: + log.error(f"File or directory not found during extraction: {input_path_str}") + self.statusBar().showMessage(f"Error: Input not found: {input_path.name}", 5000) + return None + except zipfile.BadZipFile: + log.error(f"Bad zip file encountered: {input_path_str}") + self.statusBar().showMessage(f"Error: Invalid zip file: {input_path.name}", 5000) + return None + except PermissionError: + log.error(f"Permission denied accessing: {input_path_str}") + self.statusBar().showMessage(f"Error: Permission denied for: {input_path.name}", 5000) + return None + except Exception as e: + log.exception(f"Unexpected error extracting files from {input_path_str}: {e}") + self.statusBar().showMessage(f"Error extracting files from: {input_path.name}", 
5000) + return None + def add_input_paths(self, paths): + log.debug(f"--> Entered add_input_paths with paths: {paths}") if not hasattr(self, 'current_asset_paths'): self.current_asset_paths = set() added_count = 0 newly_added_paths = [] for p_str in paths: p = Path(p_str) if p.exists(): - supported_suffixes = ['.zip', '.rar', '.7z'] - if p.is_dir() or (p.is_file() and p.suffix.lower() in supported_suffixes): + # Only support directories and .zip files for now + if p.is_dir() or (p.is_file() and p.suffix.lower() == '.zip'): if p_str not in self.current_asset_paths: self.current_asset_paths.add(p_str) newly_added_paths.append(p_str) added_count += 1 else: log.debug(f"Skipping duplicate asset path: {p_str}") # Changed print to log.debug - else: self.statusBar().showMessage(f"Invalid input (not dir or supported archive): {p.name}", 5000); log.warning(f"Invalid input: {p_str}") # Changed print to log.warning and updated message + else: self.statusBar().showMessage(f"Invalid input (not dir or .zip): {p.name}", 5000); log.warning(f"Invalid input (not dir or .zip): {p_str}") else: self.statusBar().showMessage(f"Input path not found: {p.name}", 5000); print(f"Input path not found: {p_str}") if added_count > 0: log.info(f"Added {added_count} new asset paths: {newly_added_paths}") self.statusBar().showMessage(f"Added {added_count} asset(s). Updating preview...", 3000) - # --- Auto-disable detailed preview if > 10 assets --- - preview_toggled = False - if hasattr(self, 'toggle_preview_action') and len(self.current_asset_paths) > 10: - if not self.toggle_preview_action.isChecked(): # Only check it if it's not already checked - log.info(f"Asset count ({len(self.current_asset_paths)}) > 10. 
Forcing simple preview.") - self.toggle_preview_action.setChecked(True) # This will trigger update_preview via its signal - preview_toggled = True - - # Only call update_preview directly if the toggle wasn't triggered - # If in simple mode, we need to explicitly update the model with the simple list of paths - if hasattr(self, 'toggle_preview_action') and self.toggle_preview_action.isChecked(): - log.debug("Currently in simple preview mode. Updating model with simple paths.") - self.preview_model.set_data(list(self.current_asset_paths)) # Update model with simple list - self.statusBar().showMessage(f"Added {added_count} asset(s). Preview updated.", 3000) - # Only call update_preview if a preset is currently selected in the editor list + # --- Trigger prediction for newly added paths --- current_editor_item = self.editor_preset_list.currentItem() - if not preview_toggled and current_editor_item: - log.debug("Preset selected and not in simple mode. Triggering detailed preview update.") - self.update_preview() - elif not current_editor_item: - log.debug("No preset selected. Not triggering detailed preview update.") - self.statusBar().showMessage(f"Added {added_count} asset(s). Select a preset to update preview.", 3000) - + is_placeholder = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__" + selected_preset = current_editor_item.text() if current_editor_item and not is_placeholder else None + + if selected_preset: + log.info(f"Preset '{selected_preset}' selected. 
Triggering prediction for {len(newly_added_paths)} new paths.") +# Ensure the prediction thread is running before emitting signals + if self.prediction_thread and not self.prediction_thread.isRunning(): + log.debug("Starting prediction thread from add_input_paths.") + self.prediction_thread.start() + for input_path_str in newly_added_paths: + file_list = self._extract_file_list(input_path_str) + if file_list is not None: # Check if extraction was successful (not None) + log.debug(f"Extracted {len(file_list)} files for {input_path_str}. Emitting signal.") + log.info(f"VERIFY: Extracted file list for '{input_path_str}'. Count: {len(file_list)}. Emitting prediction signal.") # DEBUG Verify + self.start_prediction_signal.emit(input_path_str, file_list, selected_preset) + else: + log.warning(f"Skipping prediction for {input_path_str} due to extraction error.") + else: + log.warning(f"Added {added_count} asset(s), but no valid preset selected. Prediction not triggered.") + self.statusBar().showMessage(f"Added {added_count} asset(s). Select a preset to generate preview.", 3000) + + # --- REMOVED call to self.update_preview() --- + # The preview update is now triggered per-item via the signal emission above, + # and also when the preset selection changes (handled in update_preview). 
+ def _browse_for_output_directory(self): """Opens a dialog to select the output directory.""" current_path = self.output_path_edit.text() @@ -720,13 +698,15 @@ class MainWindow(QMainWindow): # --- Processing Action Methods --- def start_processing(self): - if self.processing_handler and self.processing_handler.is_running: - log.warning("Start clicked, but processing is already running.") - self.statusBar().showMessage("Processing is already in progress.", 3000) - return - if ProcessingHandler is None: - self.statusBar().showMessage("Error: Processing components not loaded.", 5000) - return + # REMOVED Check for old processing handler state + # if self.processing_handler and self.processing_handler.is_running: + # log.warning("Start clicked, but processing is already running.") + # self.statusBar().showMessage("Processing is already in progress.", 3000) + # return + # REMOVED Check for old processing handler import + # if ProcessingHandler is None: + # self.statusBar().showMessage("Error: Processing components not loaded.", 5000) + # return if not hasattr(self, 'current_asset_paths') or not self.current_asset_paths: self.statusBar().showMessage("No assets added to process.", 3000) return @@ -780,69 +760,90 @@ class MainWindow(QMainWindow): # --- End Output Directory Validation --- log.info(f"Preparing to start processing {len(input_paths)} items to '{output_dir_str}'.") - self.set_controls_enabled(False) - self.cancel_button.setEnabled(True) - self.start_button.setText("Processing...") + + # --- Get the final list of SourceRule objects from the model --- + # Assuming UnifiedViewModel has a method like get_all_source_rules() + try: + final_source_rules = self.unified_model.get_all_source_rules() + if not final_source_rules: + log.warning("No source rules found in the model. Nothing to process.") + self.statusBar().showMessage("No rules generated or assets added. 
Nothing to process.", 3000) + return + except AttributeError: + log.error("UnifiedViewModel does not have 'get_all_source_rules()' method.") + self.statusBar().showMessage("Error: Cannot retrieve rules from model.", 5000) + return + except Exception as e: + log.exception(f"Error getting rules from model: {e}") + self.statusBar().showMessage(f"Error retrieving rules: {e}", 5000) + return + + log.info(f"Retrieved {len(final_source_rules)} SourceRule objects from the model.") + + # --- Emit signal with the list of rules --- + log.info(f"DEBUG: Emitting processing_requested with rules: {final_source_rules}") # DEBUG LOG +# DEBUG Verify: Log the rules being emitted + rule_paths = [rule.input_path for rule in final_source_rules] + log.info(f"VERIFY: Emitting processing_requested with {len(final_source_rules)} SourceRule(s). Input paths: {rule_paths}") + # test_message = f"Processing requested for {len(final_source_rules)} rules." # Reverted + self.processing_requested.emit(final_source_rules) # Emit original list + log.info("Emitted processing_requested signal with the list of SourceRules.") # Reverted log + + # --- Update UI (Progress bar, status, buttons) --- + # Note: The actual processing start/progress/finish will now be handled + # by the main application logic connected to the processing_requested signal. + # We might want to show an intermediate status here. self.progress_bar.setValue(0) - self.progress_bar.setFormat("%p%") - self.setup_threads() - if self.processing_thread and self.processing_handler: - try: self.processing_thread.started.disconnect() - except RuntimeError: pass - # Use the current SourceRule from the hierarchy model - if self._current_source_rule is None: - log.error("Cannot start processing: No rule hierarchy available.") - self.statusBar().showMessage("Error: No rule hierarchy available. 
Run preview first.", 5000) - self.set_controls_enabled(True) - self.cancel_button.setEnabled(False) - self.start_button.setText("Start Processing") - return + self.progress_bar.setFormat("Waiting for processing start...") + self.statusBar().showMessage(f"Requested processing for {len(final_source_rules)} rule sets...", 0) + # Disable start button, enable cancel (assuming main will handle re-enabling) + self.set_controls_enabled(False) # Disable most controls + self.start_button.setEnabled(False) # Keep start disabled + self.start_button.setText("Processing...") + self.cancel_button.setEnabled(True) # Enable cancel - log.debug(f"Using SourceRule '{self._current_source_rule.input_path}' for processing.") - - self.processing_thread.started.connect( - lambda: self.processing_handler.run_processing( - input_paths, selected_preset, output_dir_str, overwrite, num_workers, - # Pass Blender integration settings - rules=self._current_source_rule, # Pass the current SourceRule - run_blender=self.blender_integration_checkbox.isChecked(), - nodegroup_blend_path=self.nodegroup_blend_path_input.text(), - materials_blend_path=self.materials_blend_path_input.text(), - # Pass verbose setting - verbose=self.toggle_verbose_action.isChecked() - ) - ) - self.processing_thread.start() - log.info("Processing thread started.") - self.statusBar().showMessage(f"Processing {len(input_paths)} items...", 0) - else: - log.error("Failed to start processing: Thread or handler not initialized.") - self.statusBar().showMessage("Error: Failed to initialize processing thread.", 5000) - self.set_controls_enabled(True) - self.cancel_button.setEnabled(False) - self.start_button.setText("Start Processing") + # --- Old direct processing call REMOVED --- + # self.set_controls_enabled(False) + # self.cancel_button.setEnabled(True) + # self.start_button.setText("Processing...") + # self.progress_bar.setValue(0) + # self.progress_bar.setFormat("%p%") + # self.setup_threads() # Ensure threads are ready (might 
be redundant if setup elsewhere) + # if self.processing_thread and self.processing_handler: + # # ... (old thread starting logic removed) ... + # else: + # log.error("Failed to start processing: Thread or handler not initialized.") + # self.statusBar().showMessage("Error: Failed to initialize processing thread.", 5000) + # self.set_controls_enabled(True) + # self.cancel_button.setEnabled(False) + # self.start_button.setText("Start Processing") def cancel_processing(self): - if self.processing_handler and self.processing_handler.is_running: - log.info("Cancel button clicked. Requesting cancellation.") - self.statusBar().showMessage("Requesting cancellation...", 3000) - self.processing_handler.request_cancel() - self.cancel_button.setEnabled(False) - self.start_button.setText("Cancelling...") - else: - log.warning("Cancel clicked, but no processing is running.") - self.statusBar().showMessage("Nothing to cancel.", 3000) + # TODO: Implement cancellation by signaling the App/main thread to stop the QThreadPool tasks + log.warning("Cancel button clicked, but cancellation logic needs reimplementation.") + self.statusBar().showMessage("Cancellation not yet implemented.", 3000) + # if self.processing_handler and self.processing_handler.is_running: + # log.info("Cancel button clicked. 
Requesting cancellation.") + # self.statusBar().showMessage("Requesting cancellation...", 3000) + # self.processing_handler.request_cancel() # OLD HANDLER + # self.cancel_button.setEnabled(False) + # self.start_button.setText("Cancelling...") + # else: + # log.warning("Cancel clicked, but no processing is running.") + # self.statusBar().showMessage("Nothing to cancel.", 3000) def clear_queue(self): """Clears the current asset queue and the preview table.""" - if self.processing_handler and self.processing_handler.is_running: - self.statusBar().showMessage("Cannot clear queue while processing.", 3000) - return + # TODO: Check processing state via App/main thread if needed before clearing + # if self.processing_handler and self.processing_handler.is_running: # OLD HANDLER CHECK + # self.statusBar().showMessage("Cannot clear queue while processing.", 3000) + # return if hasattr(self, 'current_asset_paths') and self.current_asset_paths: log.info(f"Clearing asset queue ({len(self.current_asset_paths)} items).") self.current_asset_paths.clear() - self.preview_model.clear_data() # Clear the model data + # self.preview_model.clear_data() # Old model removed + self.unified_model.clear_data() # Clear the new model data self.statusBar().showMessage("Asset queue cleared.", 3000) else: self.statusBar().showMessage("Asset queue is already empty.", 3000) @@ -851,21 +852,18 @@ class MainWindow(QMainWindow): # --- Preview Update Method --- def update_preview(self): log.info(f"--> Entered update_preview. View Action exists: {hasattr(self, 'toggle_preview_action')}") - if hasattr(self, 'toggle_preview_action'): - log.info(f" Disable Preview Action checked: {self.toggle_preview_action.isChecked()}") - - # --- Preview Update Method --- - def update_preview(self): + # The duplicate update_preview definition below this was removed. + # This is now the primary update_preview method. 
+ log.debug(f"[{time.time():.4f}] ### LOG: Entering update_preview") + log.debug("--> Entered update_preview") thread_id = QThread.currentThread() # Get current thread object log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered update_preview. View Action exists: {hasattr(self, 'toggle_preview_action')}") if hasattr(self, 'toggle_preview_action'): log.info(f"[{time.time():.4f}][T:{thread_id}] Disable Preview Action checked: {self.toggle_preview_action.isChecked()}") - # Determine mode based on menu action - simple_mode_enabled = hasattr(self, 'toggle_preview_action') and self.toggle_preview_action.isChecked() - log.info(f"[{time.time():.4f}][T:{thread_id}] Determined simple_mode_enabled: {simple_mode_enabled}") + # Determine mode based on menu action (REMOVED - No longer relevant) - # --- Cancel Prediction if Running --- + # --- Cancel Prediction if Running (Existing logic, might need refinement) --- if self.prediction_handler and self.prediction_handler.is_running: log.warning(f"[{time.time():.4f}][T:{thread_id}] Prediction is running. Attempting to call prediction_handler.request_cancel()...") try: @@ -879,223 +877,138 @@ class MainWindow(QMainWindow): # Note: Cancellation is not immediate even if it existed. The thread would stop when it next checks the flag. # We proceed with updating the UI immediately. - # Set the model's mode - log.info(f"[{time.time():.4f}][T:{thread_id}] Calling preview_model.set_simple_mode({simple_mode_enabled})...") - self.preview_model.set_simple_mode(simple_mode_enabled) - log.info(f"[{time.time():.4f}][T:{thread_id}] Returned from preview_model.set_simple_mode({simple_mode_enabled}).") + # --- REMOVED Old Preview Model Mode Setting and Table Configuration --- + # The Unified View does not have a simple/detailed mode toggle. + # The Prediction Handler is triggered regardless of view settings. 
- # Configure the QTableView based on the mode - header = self.preview_table_view.horizontalHeader() - if simple_mode_enabled: - log.info(" Configuring QTableView for SIMPLE mode.") - # Hide detailed columns, show simple column - self.preview_table_view.setColumnHidden(self.preview_model.COL_STATUS, True) - self.preview_table_view.setColumnHidden(self.preview_model.COL_PREDICTED_ASSET, True) - self.preview_table_view.setColumnHidden(self.preview_model.COL_ORIGINAL_PATH, True) - self.preview_table_view.setColumnHidden(self.preview_model.COL_PREDICTED_OUTPUT, True) # Already hidden, but good practice - self.preview_table_view.setColumnHidden(self.preview_model.COL_DETAILS, True) - # Ensure the simple path column exists and is visible - if self.preview_model.columnCount() > self.preview_model.COL_SIMPLE_PATH: - self.preview_table_view.setColumnHidden(self.preview_model.COL_SIMPLE_PATH, False) # Show the simple path column - # Set resize mode for the single visible column - header.setSectionResizeMode(self.preview_model.COL_SIMPLE_PATH, QHeaderView.ResizeMode.Stretch) - else: - log.error(" Simple path column index out of bounds for model.") + log.debug(f"[{time.time():.4f}] ### LOG: Checking if prediction handler is running") + # --- Trigger Prediction Handler --- + if self.prediction_handler and self.prediction_handler.is_running: + log.warning(f"[{time.time():.4f}] Preview update requested, but already running.") + log.debug(f"[{time.time():.4f}] ### LOG: Inside 'is_running' check") + # Removed the 'return' statement here to allow the signal to be emitted + # return + # The rest of the logic should execute regardless of is_running state, + # though the handler itself should handle being called multiple times. + # A better fix might involve properly resetting is_running in the handler. 
- - # Disable sorting in simple mode (optional, but makes sense) - self.preview_table_view.setSortingEnabled(False) - - # Update status bar - if hasattr(self, 'current_asset_paths') and self.current_asset_paths: - self.statusBar().showMessage(f"Preview disabled. Showing {len(self.current_asset_paths)} input assets.", 3000) - else: - self.statusBar().showMessage("Preview disabled. No assets added.", 3000) - - # In simple mode, the model's data is derived from current_asset_paths. - # We need to ensure the model's simple data is up-to-date. - # The simplest way is to re-set the data, which will re-extract simple data. - # This might be slightly inefficient if only the mode changed, but safe. - # A more optimized approach would be to have a separate method in the model - # to just update the simple data from a list of paths. - # For now, let's re-set the data. - # --- REMOVED REDUNDANT set_data CALL --- - # The set_simple_mode(True) call above should be sufficient as the model - # already holds the simple data internally. This extra reset seems to cause instability. - # log.debug(" Simple mode enabled. 
Re-setting model data to trigger simple data update.") - # self.preview_model.set_data(list(self.current_asset_paths)) # Pass the list of paths - # --- END REMOVAL --- - - # Stop here, do not run PredictionHandler in simple mode - log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting update_preview (Simple Mode).") - return - else: - # --- Proceed with Detailed Preview --- - log.info(f"[{time.time():.4f}][T:{thread_id}] Configuring QTableView for DETAILED mode.") - # Show detailed columns, hide simple column - self.preview_table_view.setColumnHidden(self.preview_model.COL_STATUS, False) - self.preview_table_view.setColumnHidden(self.preview_model.COL_PREDICTED_ASSET, False) - self.preview_table_view.setColumnHidden(self.preview_model.COL_ORIGINAL_PATH, False) - self.preview_table_view.setColumnHidden(self.preview_model.COL_PREDICTED_OUTPUT, True) # Keep this hidden - self.preview_table_view.setColumnHidden(self.preview_model.COL_DETAILS, False) - # Ensure the simple path column exists and is hidden - if self.preview_model.columnCount() > self.preview_model.COL_SIMPLE_PATH: - self.preview_table_view.setColumnHidden(self.preview_model.COL_SIMPLE_PATH, True) # Hide the simple path column - else: - log.warning(" Simple path column index out of bounds for model when hiding.") - - - # Set resize modes for detailed columns - header.setSectionResizeMode(self.preview_model.COL_STATUS, QHeaderView.ResizeMode.ResizeToContents) - header.setSectionResizeMode(self.preview_model.COL_PREDICTED_ASSET, QHeaderView.ResizeMode.ResizeToContents) # Fit - header.setSectionResizeMode(self.preview_model.COL_DETAILS, QHeaderView.ResizeMode.ResizeToContents) # Fit - header.setSectionResizeMode(self.preview_model.COL_ORIGINAL_PATH, QHeaderView.ResizeMode.ResizeToContents) # Fixed width (using ResizeToContents as closest) - header.setSectionResizeMode(self.preview_model.COL_ADDITIONAL_FILES, QHeaderView.ResizeMode.Stretch) # Stretch (Fit-If-Possible) - - # Move columns to the desired 
order: Status, Predicted Asset, Details, Original Path, Additional Files - # Initial logical order: [0, 1, 2, 3(hidden), 4, 5] - # Initial visual order: [0, 1, 2, 3, 4, 5] (assuming no initial moves) - # Desired visual order: [0, 1, 4, 2, 5, 3(hidden)] - - # Move Predicted Asset (logical 1) to visual index 1 (already there) - - # Move Details (logical 4) to visual index 2 - header.moveSection(header.visualIndex(self.preview_model.COL_DETAILS), 2) - # Current visual: [0, 1, 4, 2, 3, 5] - - # Move Original Path (logical 2) to visual index 3 - header.moveSection(header.visualIndex(self.preview_model.COL_ORIGINAL_PATH), 3) - # Current visual: [0, 1, 4, 2, 3, 5] - Original Path is already at visual index 3 after moving Details - - # Move Additional Files (logical 5) to visual index 4 - header.moveSection(header.visualIndex(self.preview_model.COL_ADDITIONAL_FILES), 4) - # Current visual: [0, 1, 4, 2, 5, 3] - This looks correct. - - # Re-enable sorting for detailed mode - self.preview_table_view.setSortingEnabled(True) - # Reset sort order if needed (optional, proxy model handles default) - # self.preview_table_view.sortByColumn(self.preview_model.COL_STATUS, Qt.SortOrder.AscendingOrder) - - # --- Trigger Prediction Handler --- - if self.prediction_handler and self.prediction_handler.is_running: - log.warning(f"[{time.time():.4f}] Preview update requested, but already running.") - return - if PredictionHandler is None: + if PredictionHandler is None: + log.error("PredictionHandler not loaded. 
Cannot update preview.") self.statusBar().showMessage("Error: Prediction components not loaded.", 5000) return - # Get preset from editor list - current_editor_item = self.editor_preset_list.currentItem() + # Get preset from editor list + current_editor_item = self.editor_preset_list.currentItem() - # Check if the selected item is the placeholder - is_placeholder = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__" + # Check if the selected item is the placeholder + is_placeholder = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__" - if is_placeholder: - log.debug("Update preview called with placeholder preset selected. Clearing preview.") - self.preview_model.clear_data() # Clear model if placeholder selected - self.statusBar().showMessage("Select a preset from the list on the left to update preview.", 3000) - # Ensure placeholder is visible and table is hidden - if hasattr(self, 'preview_placeholder_label') and hasattr(self, 'preview_table_view'): - self.preview_placeholder_label.setVisible(True) - self.preview_table_view.setVisible(False) - return # Stop prediction as no valid preset is selected + if is_placeholder: + log.debug("Update preview called with placeholder preset selected. 
Clearing unified view.") + self.unified_model.clear_data() # Clear the new model if placeholder selected + self.statusBar().showMessage("Select a preset from the list on the left to update preview.", 3000) + # No placeholder label to manage for unified view + return # Stop prediction as no valid preset is selected - # Existing logic to get selected_preset text and proceed - selected_preset = current_editor_item.text() if current_editor_item else None - if not selected_preset: - log.debug("Update preview called with no preset selected in the editor list.") - self.preview_model.clear_data() # Clear model if no preset selected - self.statusBar().showMessage("Select a preset from the list on the left to update preview.", 3000) - return - if not hasattr(self, 'current_asset_paths') or not self.current_asset_paths: - log.debug("Update preview called with no assets tracked.") - self.preview_model.clear_data() # Clear model if no assets - return - input_paths = list(self.current_asset_paths) - if not input_paths: - log.debug("Update preview called but no input paths derived.") - self.preview_model.clear_data() # Clear model if no paths - return + # Existing logic to get selected_preset text and proceed + selected_preset = current_editor_item.text() if current_editor_item else None + if not selected_preset: + log.debug("Update preview called with no preset selected in the editor list.") + self.unified_model.clear_data() # Clear the new model if no preset selected + self.statusBar().showMessage("Select a preset from the list on the left to update preview.", 3000) + return + if not hasattr(self, 'current_asset_paths') or not self.current_asset_paths: + log.debug("Update preview called with no assets tracked.") + self.unified_model.clear_data() # Clear the new model if no assets + return + input_paths = list(self.current_asset_paths) - log.info(f"[{time.time():.4f}] Requesting background preview update for {len(input_paths)} items, Preset='{selected_preset}'") - 
self.statusBar().showMessage(f"Updating preview for '{selected_preset}'...", 0) - # Clearing is handled by model's set_data now, no need to clear table view directly - if self.prediction_thread and self.prediction_handler: - # Create a placeholder SourceRule instance (replace with actual rule loading later) - placeholder_rules = SourceRule() # Temporary rule for passing data - log.debug(f"Created placeholder SourceRule for prediction.") + log.info(f"[{time.time():.4f}] Requesting background preview update for {len(input_paths)} items using Preset='{selected_preset}'") + self.statusBar().showMessage(f"Updating preview for '{selected_preset}'...", 0) + # Clearing is handled by model's set_data now, no need to clear table view directly + if self.prediction_thread and self.prediction_handler: + # REMOVED Placeholder SourceRule creation - # Create a placeholder SourceRule instance (replace with actual rule loading later) - placeholder_rules = SourceRule() # Temporary rule for passing data - log.debug(f"Created placeholder SourceRule for prediction.") + # Start the prediction thread + # The thread should already be running or started once. Don't restart it here. 
+ # log.debug(f"[{time.time():.4f}] Starting prediction thread...") + self.prediction_thread.start() # Ensure thread is running + # log.debug(f"[{time.time():.4f}] Prediction thread start requested.") - # Start the prediction thread - log.debug(f"[{time.time():.4f}] Starting prediction thread...") - self.prediction_thread.start() - log.debug(f"[{time.time():.4f}] Prediction thread start requested.") + # Iterate through all current paths, extract files, and emit signal for each + log.debug(f"[{time.time():.4f}] Iterating through {len(input_paths)} paths to extract files and emit signals.") + for input_path_str in input_paths: + file_list = self._extract_file_list(input_path_str) + if file_list is not None: # Check if extraction was successful + log.debug(f"[{time.time():.4f}] Emitting start_prediction_signal for: {input_path_str} with {len(file_list)} files.") + self.start_prediction_signal.emit(input_path_str, file_list, selected_preset) + else: + log.warning(f"[{time.time():.4f}] Skipping prediction signal for {input_path_str} due to extraction error during preview update.") - # Emit the signal to trigger run_prediction in the prediction thread - log.debug(f"[{time.time():.4f}] Emitting start_prediction_signal...") - self.start_prediction_signal.emit(input_paths, selected_preset, placeholder_rules) - log.debug(f"[{time.time():.4f}] start_prediction_signal emitted.") - - else: - log.error(f"[{time.time():.4f}][T:{thread_id}] Failed to start prediction: Thread or handler not initialized.") - self.statusBar().showMessage("Error: Failed to initialize prediction thread.", 5000) - log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting update_preview (Detailed Mode).") + else: + log.error(f"[{time.time():.4f}][T:{thread_id}] Failed to trigger prediction: Thread or handler not initialized.") + self.statusBar().showMessage("Error: Failed to initialize prediction thread.", 5000) + log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting update_preview.") # --- Threading 
and Processing Control --- def setup_threads(self): - # Setup Processing Thread - if ProcessingHandler and self.processing_thread is None: - self.processing_thread = QThread(self) - self.processing_handler = ProcessingHandler() - self.processing_handler.moveToThread(self.processing_thread) - self.processing_handler.progress_updated.connect(self.update_progress_bar) - self.processing_handler.file_status_updated.connect(self.update_file_status) - self.processing_handler.processing_finished.connect(self.on_processing_finished) - self.processing_handler.status_message.connect(self.show_status_message) - self.processing_handler.processing_finished.connect(self.processing_thread.quit) - self.processing_handler.processing_finished.connect(self.processing_handler.deleteLater) - self.processing_thread.finished.connect(self.processing_thread.deleteLater) - self.processing_thread.finished.connect(self._reset_processing_thread_references) - log.debug("Processing thread and handler set up.") - elif not ProcessingHandler: - log.error("ProcessingHandler not available. 
Cannot set up processing thread.") - if hasattr(self, 'start_button'): - self.start_button.setEnabled(False) - self.start_button.setToolTip("Error: Backend processing components failed to load.") + # --- REMOVED Old Processing Thread Setup --- + # if ProcessingHandler and self.processing_thread is None: + # self.processing_thread = QThread(self) + # self.processing_handler = ProcessingHandler() + # self.processing_handler.moveToThread(self.processing_thread) + # self.processing_handler.progress_updated.connect(self.update_progress_bar) + # self.processing_handler.file_status_updated.connect(self.update_file_status) + # self.processing_handler.processing_finished.connect(self.on_processing_finished) + # self.processing_handler.status_message.connect(self.show_status_message) + # self.processing_handler.processing_finished.connect(self.processing_thread.quit) + # self.processing_handler.processing_finished.connect(self.processing_handler.deleteLater) + # self.processing_thread.finished.connect(self.processing_thread.deleteLater) + # self.processing_thread.finished.connect(self._reset_processing_thread_references) + # log.debug("Processing thread and handler set up.") + # elif not ProcessingHandler: + # log.error("ProcessingHandler not available. 
Cannot set up processing thread.") + # if hasattr(self, 'start_button'): + # self.start_button.setEnabled(False) + # self.start_button.setToolTip("Error: Backend processing components failed to load.") + # --- END REMOVED --- - # Setup Prediction Thread + # Setup Prediction Thread (Keep this) if PredictionHandler and self.prediction_thread is None: self.prediction_thread = QThread(self) self.prediction_handler = PredictionHandler() self.prediction_handler.moveToThread(self.prediction_thread) # Connect the new signal to the handler's run_prediction slot using QueuedConnection self.start_prediction_signal.connect(self.prediction_handler.run_prediction, Qt.ConnectionType.QueuedConnection) - self.prediction_handler.prediction_results_ready.connect(self.on_prediction_results_ready) # Connect the file list signal - self.prediction_handler.rule_hierarchy_ready.connect(self._on_rule_hierarchy_ready) # Connect the new hierarchy signal + # Removed: self.prediction_handler.prediction_results_ready.connect(self.on_prediction_results_ready) # Old signal + self.prediction_handler.rule_hierarchy_ready.connect(self._on_rule_hierarchy_ready) # Connect the LIST signal self.prediction_handler.prediction_finished.connect(self.on_prediction_finished) self.prediction_handler.status_message.connect(self.show_status_message) - self.prediction_handler.prediction_finished.connect(self.prediction_thread.quit) - self.prediction_handler.prediction_finished.connect(self.prediction_handler.deleteLater) - self.prediction_thread.finished.connect(self.prediction_thread.deleteLater) - self.prediction_thread.finished.connect(self._reset_prediction_thread_references) - log.debug("Prediction thread and handler set up.") + # --- REMOVED connections causing thread/handler cleanup --- + # self.prediction_handler.prediction_finished.connect(self.prediction_thread.quit) + # self.prediction_handler.prediction_finished.connect(self.prediction_handler.deleteLater) + # 
self.prediction_thread.finished.connect(self.prediction_thread.deleteLater) + # self.prediction_thread.finished.connect(self._reset_prediction_thread_references) + # --- END REMOVED --- + log.debug("Prediction thread and handler set up to be persistent.") elif not PredictionHandler: log.error("PredictionHandler not available. Cannot set up prediction thread.") - @Slot() - def _reset_processing_thread_references(self): - log.debug("Resetting processing thread and handler references.") - self.processing_thread = None - self.processing_handler = None + # --- REMOVED Old Processing Thread Reset --- + # @Slot() + # def _reset_processing_thread_references(self): + # # This might still be needed if processing is meant to be single-shot + # log.debug("Resetting processing thread and handler references.") + # self.processing_thread = None + # self.processing_handler = None + # --- END REMOVED --- @Slot() def _reset_prediction_thread_references(self): - log.debug("Resetting prediction thread and handler references.") - self.prediction_thread = None - self.prediction_handler = None + # This slot is no longer connected, but keep it for now in case needed later + log.debug("Resetting prediction thread and handler references (Slot disconnected).") + # self.prediction_thread = None # Keep references alive + # self.prediction_handler = None # Keep references alive @Slot(int, int) def update_progress_bar(self, current_count, total_count): @@ -1107,17 +1020,12 @@ class MainWindow(QMainWindow): self.progress_bar.setValue(0) self.progress_bar.setFormat("0/0") - # Slot for prediction results (Updated for new format and coloring) - @Slot(list) - def on_prediction_results_ready(self, results: list): - """Populates the preview table model with detailed prediction results.""" - thread_id = QThread.currentThread() # Get current thread object - log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered on_prediction_results_ready. 
Received {len(results)} file details.") - # Update the model with the new data - log.info(f"[{time.time():.4f}][T:{thread_id}] Calling preview_model.set_data()...") - self.preview_model.set_data(results) - log.info(f"[{time.time():.4f}][T:{thread_id}] Returned from preview_model.set_data().") - log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting on_prediction_results_ready.") + # Slot for prediction results (Updated for new format and coloring) - REMOVED + # @Slot(list) + # def on_prediction_results_ready(self, results: list): + # """Populates the preview table model with detailed prediction results.""" + # # This is no longer needed as _on_rule_hierarchy_ready handles data loading for the new model. + # pass @Slot() def on_prediction_finished(self): @@ -1134,9 +1042,12 @@ class MainWindow(QMainWindow): self.statusBar().showMessage(status_text, 5000) log.debug(f"Received file status update: {input_path_str} - {status}") + # TODO: This slot needs to be connected to a signal from the App/main thread + # indicating that all tasks in the QThreadPool are complete. 
@Slot(int, int, int) def on_processing_finished(self, processed_count, skipped_count, failed_count): - log.info(f"GUI received processing_finished signal: P={processed_count}, S={skipped_count}, F={failed_count}") + # This log message might be inaccurate until signal source is updated + log.info(f"GUI received processing_finished signal (Source TBC): P={processed_count}, S={skipped_count}, F={failed_count}") self.set_controls_enabled(True) self.cancel_button.setEnabled(False) self.start_button.setText("Start Processing") @@ -1152,8 +1063,8 @@ class MainWindow(QMainWindow): self.start_button.setEnabled(enabled) self.setAcceptDrops(enabled) self.drag_drop_area.setEnabled(enabled) - # self.preview_table.setEnabled(enabled) # This was the old QTableWidget - self.preview_table_view.setEnabled(enabled) # Enable/disable the QTableView instead + # Removed: self.preview_table_view.setEnabled(enabled) + self.unified_view.setEnabled(enabled) # Enable/disable the new Unified View # Editor panel controls (should generally be enabled unless processing) self.editor_panel.setEnabled(enabled) # Enable/disable the whole panel # Blender controls @@ -1287,11 +1198,8 @@ class MainWindow(QMainWindow): self.setWindowTitle("Asset Processor Tool") # Reset window title self._set_editor_enabled(False) - # Ensure placeholder is visible and table is hidden when editor is cleared - if hasattr(self, 'preview_placeholder_label') and hasattr(self, 'preview_table_view'): - log.debug("Clearing editor. Showing placeholder, hiding table view.") - self.preview_placeholder_label.setVisible(True) - self.preview_table_view.setVisible(False) + # Ensure unified view is cleared (handled by model clear) + # No placeholder label to manage for unified view self._is_loading_editor = False @@ -1368,8 +1276,7 @@ class MainWindow(QMainWindow): self.setWindowTitle(f"Asset Processor Tool - {file_path.name}") log.info(f"Preset '{file_path.name}' loaded into editor.") log.debug("Preset loaded. 
Checking visibility states.") - log.debug(f"preview_placeholder_label visible: {self.preview_placeholder_label.isVisible()}") - log.debug(f"preview_table_view visible: {self.preview_table_view.isVisible()}") + # No placeholder/table view visibility to manage here except json.JSONDecodeError as json_err: log.error(f"Invalid JSON in {file_path.name}: {json_err}") QMessageBox.warning(self, "Load Error", f"Failed to load preset '{file_path.name}'.\nInvalid JSON structure:\n{json_err}") @@ -1396,12 +1303,10 @@ class MainWindow(QMainWindow): return if is_placeholder: - log.debug("Placeholder item selected. Clearing editor and preview.") - self._clear_editor() # This also hides the table and shows the placeholder label - self.preview_model.clear_data() # Ensure the model is empty - # Visibility is handled by _clear_editor, but explicitly set here for clarity - self.preview_placeholder_label.setVisible(True) - self.preview_table_view.setVisible(False) + log.debug("Placeholder item selected. Clearing editor and unified view.") + self._clear_editor() + self.unified_model.clear_data() # Ensure the new model is empty + # No placeholder/table view visibility to manage self.start_button.setEnabled(False) # Disable start button return # Stop processing as no real preset is selected @@ -1415,18 +1320,12 @@ class MainWindow(QMainWindow): self.update_preview() # --- End Trigger --- - # Hide placeholder and show table view - log.debug("Real preset selected. Hiding placeholder, showing table view.") - self.preview_placeholder_label.setVisible(False) - self.preview_table_view.setVisible(True) + # No placeholder/table view visibility to manage else: # This case should ideally not be reached if the placeholder is always present log.debug("No preset selected (unexpected state if placeholder is present). Clearing editor.") self._clear_editor() # Clear editor if selection is cleared - # Ensure placeholder is visible if no preset is selected - log.debug("No preset selected. 
Showing placeholder, hiding table view.") - self.preview_placeholder_label.setVisible(True) - self.preview_table_view.setVisible(False) + # No placeholder/table view visibility to manage def _gather_editor_data(self) -> dict: """Gathers data from all editor UI widgets and returns a dictionary.""" @@ -1674,87 +1573,37 @@ class MainWindow(QMainWindow): else: event.accept() # Accept close event - # --- Slots for Hierarchy and Rule Editor --- + # --- REMOVED Slots for Old Hierarchy and Rule Editor --- + # @Slot(QModelIndex) + # def _on_hierarchy_item_clicked(self, index: QModelIndex): ... - @Slot(QModelIndex) - def _on_hierarchy_item_clicked(self, index: QModelIndex): - """Loads the selected rule item into the rule editor and filters the preview table.""" - if index.isValid(): - rule_item = self.rule_hierarchy_model.get_item_from_index(index) - if rule_item: - rule_type_name = type(rule_item).__name__ - log.debug(f"Hierarchy item clicked: {rule_type_name} - {getattr(rule_item, 'name', 'N/A')}") - self.rule_editor_widget.load_rule(rule_item, rule_type_name) + # @Slot(object) + # def _on_rule_updated(self, rule_object): ... 
- # Filter the preview table based on the selected item - if isinstance(rule_item, SourceRule): - # Show all files for the source - self.preview_proxy_model.setFilterRegularExpression("") # Clear filter - self.preview_proxy_model.setFilterKeyColumn(-1) # Apply to all columns (effectively no column filter) - log.debug("Filtering preview table: Showing all files for Source.") - elif isinstance(rule_item, AssetRule): - # Show files belonging to this asset - # Filter by the 'source_asset' column (which stores the asset name/path) - # Need to escape potential regex special characters in the asset name/path - filter_string = "^" + rule_item.asset_name.replace('\\', '\\\\').replace('.', '\\.') + "$" - self.preview_proxy_model.setFilterRegularExpression(filter_string) - self.preview_proxy_model.setFilterKeyColumn(self.preview_model.ROLE_SOURCE_ASSET) # Filter by source_asset column - log.debug(f"Filtering preview table: Showing files for Asset '{rule_item.asset_name}'. Filter: '{filter_string}' on column {self.preview_model.ROLE_SOURCE_ASSET}") - elif isinstance(rule_item, FileRule): - # Show only this specific file - # Filter by the 'original_path' column - filter_string = "^" + rule_item.file_path.replace('\\', '\\\\').replace('.', '\\.') + "$" - self.preview_proxy_model.setFilterRegularExpression(filter_string) - self.preview_proxy_model.setFilterKeyColumn(self.preview_model.COL_ORIGINAL_PATH) # Filter by original_path column - log.debug(f"Filtering preview table: Showing file '{rule_item.file_path}'. Filter: '{filter_string}' on column {self.preview_model.COL_ORIGINAL_PATH}") - else: - # Clear filter for unknown types - self.preview_proxy_model.setFilterRegularExpression("") - self.preview_proxy_model.setFilterKeyColumn(-1) - log.warning(f"Clicked item has unknown type {type(rule_item)}. 
Clearing preview filter.") + @Slot(list) # Changed signature to accept list + def _on_rule_hierarchy_ready(self, source_rules_list: list): + log.debug(f"--> Entered _on_rule_hierarchy_ready with {len(source_rules_list)} SourceRule(s)") + """Receives the generated list of SourceRule hierarchies and updates the unified view model.""" + # Removed: log.info(f"Received rule hierarchy ready signal for input: {source_rule.input_path}") + # Removed: self._current_source_rule = source_rule # This concept might need rethinking if processing needs a specific rule + # Removed: self.rule_hierarchy_model.set_root_rule(source_rule) + # Removed: self.hierarchy_tree_view.expandToDepth(0) - else: - log.warning("Clicked item has no associated rule object. Clearing editor and preview filter.") - self.rule_editor_widget.clear_editor() - self.preview_proxy_model.setFilterRegularExpression("") # Clear filter - self.preview_proxy_model.setFilterKeyColumn(-1) + # Load the LIST of data into the new UnifiedViewModel + self.unified_model.load_data(source_rules_list) # Pass the list + log.debug("Unified view model updated with new list of SourceRules.") - else: - log.debug("Clicked item index is invalid. Clearing editor and preview filter.") - self.rule_editor_widget.clear_editor() - self.preview_proxy_model.setFilterRegularExpression("") # Clear filter - self.preview_proxy_model.setFilterKeyColumn(-1) - - - @Slot(object) - def _on_rule_updated(self, rule_object): - """Handles the signal when a rule is updated in the editor.""" - # This slot is called when an attribute is changed in the RuleEditorWidget. - # The rule_object passed is the actual object instance from the hierarchy model. - # Since the RuleEditorWidget modifies the object in place, we don't need to - # explicitly update the model's data structure here. - # However, if the change affects the display in the hierarchy tree or preview table, - # we might need to emit dataChanged signals or trigger updates. 
- # For now, just log the update. - log.debug(f"Rule object updated in editor: {type(rule_object).__name__} - {getattr(rule_object, 'name', 'N/A')}") - # TODO: Consider if any UI updates are needed based on the rule change. - # E.g., if a rule name changes, the hierarchy tree might need a dataChanged signal. - # If a rule affects file output names, the preview table might need updating. - # This is complex and depends on which rule attributes are editable and their impact. - - @Slot(object) - def _on_rule_hierarchy_ready(self, source_rule: SourceRule): - """Receives the generated SourceRule hierarchy and updates the tree view model.""" - log.info(f"Received rule hierarchy ready signal for input: {source_rule.input_path}") - self._current_source_rule = source_rule # Store the generated rule hierarchy - self.rule_hierarchy_model.set_root_rule(source_rule) # Update the tree view model - self.hierarchy_tree_view.expandToDepth(0) # Expand the first level (Source and Assets) - log.debug("Rule hierarchy model updated and tree view expanded.") + # Resize columns to fit content after loading data + for col in range(self.unified_model.columnCount()): + self.unified_view.resizeColumnToContents(col) + log.debug("Unified view columns resized to contents.") + self.unified_view.expandToDepth(1) # Expand Source -> Asset level # --- Main Execution --- def run_gui(): """Initializes and runs the Qt application.""" + print("--- Reached run_gui() ---") app = QApplication(sys.argv) app.setStyle('Fusion') diff --git a/gui/prediction_handler.py b/gui/prediction_handler.py index 3183724..9e699da 100644 --- a/gui/prediction_handler.py +++ b/gui/prediction_handler.py @@ -1,17 +1,19 @@ -from rule_structure import SourceRule, AssetRule, FileRule # gui/prediction_handler.py import logging from pathlib import Path -import time # For potential delays if needed -import os # For cpu_count -from concurrent.futures import ThreadPoolExecutor, as_completed # For parallel prediction +import time +import 
os +import re # Import regex +import tempfile # Added for temporary extraction directory +import zipfile # Added for zip file handling +# import patoolib # Potential import for rar/7z - Add later if zip works from collections import defaultdict +from typing import List, Dict, Any # For type hinting # --- PySide6 Imports --- -from PySide6.QtCore import QObject, Signal, QThread, Slot # Import QThread and Slot +from PySide6.QtCore import QObject, Signal, QThread, Slot # --- Backend Imports --- -# Adjust path to ensure modules can be found relative to this file's location import sys script_dir = Path(__file__).parent project_root = script_dir.parent @@ -20,15 +22,22 @@ if str(project_root) not in sys.path: try: from configuration import Configuration, ConfigurationError - from asset_processor import AssetProcessor, AssetProcessingError + # AssetProcessor might not be needed directly anymore if logic is moved here + # from asset_processor import AssetProcessor, AssetProcessingError + from rule_structure import SourceRule, AssetRule, FileRule # Removed AssetType, ItemType + import config as app_config # Import project's config module + # Import the lists directly for easier access + from config import ALLOWED_ASSET_TYPES, ALLOWED_FILE_TYPES BACKEND_AVAILABLE = True except ImportError as e: - print(f"ERROR (PredictionHandler): Failed to import backend modules: {e}") + print(f"ERROR (PredictionHandler): Failed to import backend/config modules: {e}") # Define placeholders if imports fail Configuration = None - AssetProcessor = None + # AssetProcessor = None ConfigurationError = Exception - AssetProcessingError = Exception + # AssetProcessingError = Exception + SourceRule, AssetRule, FileRule, AssetType, ItemType = (None,)*5 # Placeholder for rule structures + app_config = None # Placeholder for config BACKEND_AVAILABLE = False log = logging.getLogger(__name__) @@ -37,17 +46,155 @@ if not log.hasHandlers(): logging.basicConfig(level=logging.INFO, format='%(levelname)s 
(PredictHandler): %(message)s') +# Helper function for classification (can be moved outside class if preferred) +def classify_files(file_list: List[str], config: Configuration) -> Dict[str, List[Dict[str, Any]]]: + """ + Analyzes a list of files based on configuration rules to group them by asset + and determine initial file properties. + + Args: + file_list: List of absolute file paths. + config: The loaded Configuration object containing naming rules. + + Returns: + A dictionary grouping file information by predicted asset name. + Example: + { + 'AssetName1': [ + {'file_path': '/path/to/AssetName1_Color.png', 'item_type': 'Color', 'asset_name': 'AssetName1'}, + {'file_path': '/path/to/AssetName1_Normal.png', 'item_type': 'Normal', 'asset_name': 'AssetName1'} + ], + # ... other assets + } + Returns an empty dict if classification fails or no files are provided. + """ + temp_grouped_files = defaultdict(list) + extra_files_to_associate = [] # Store tuples: (file_path_str, filename) + primary_asset_names = set() # Store asset names derived from map files + + # --- Validation --- + if not file_list or not config: + log.warning("Classification skipped: Missing file list or config.") + return {} + if not hasattr(config, 'compiled_map_keyword_regex') or not config.compiled_map_keyword_regex: + log.warning("Classification skipped: Missing compiled map keyword regex.") + # Don't return yet, might still find extras + if not hasattr(config, 'compiled_extra_regex'): + log.warning("Configuration object missing 'compiled_extra_regex'. 
Cannot classify extra files.") + # Continue, but extras won't be found + + compiled_map_regex = getattr(config, 'compiled_map_keyword_regex', {}) + compiled_extra_regex = getattr(config, 'compiled_extra_regex', []) + num_map_rules = sum(len(patterns) for patterns in compiled_map_regex.values()) + num_extra_rules = len(compiled_extra_regex) + + log.debug(f"Starting classification for {len(file_list)} files using {num_map_rules} map keyword patterns and {num_extra_rules} extra patterns.") + + # --- Initial Pass: Classify Maps and Identify Extras --- + for file_path_str in file_list: + file_path = Path(file_path_str) + filename = file_path.name + is_extra = False + is_map = False + + # 1. Check for Extra Files FIRST + for extra_pattern in compiled_extra_regex: + if extra_pattern.search(filename): + log.debug(f"File '{filename}' matched EXTRA pattern: {extra_pattern.pattern}") + extra_files_to_associate.append((file_path_str, filename)) + is_extra = True + break # Stop checking extra patterns for this file + + if is_extra: + continue # Move to the next file if it's an extra + + # 2. Check for Map Files + # TODO: Consider rule priority if multiple patterns match the same file + for target_type, patterns_list in compiled_map_regex.items(): + for compiled_regex, original_keyword, rule_index in patterns_list: + match = compiled_regex.search(filename) + if match: + matched_item_type = target_type # The standard type (e.g., MAP_COL) + asset_name = None + # --- Asset Name Extraction Logic (Simplified Heuristic) --- + match_start_index = match.start(1) + if match_start_index > 0: + potential_name = filename[:match_start_index].rstrip('_- .') + asset_name = potential_name if potential_name else file_path.stem + else: + asset_name = file_path.stem + if not asset_name: asset_name = file_path.stem + + log.debug(f"File '{filename}' matched keyword '{original_keyword}' (rule {rule_index}) for item_type '{matched_item_type}'. 
Assigned asset name: '{asset_name}'") + temp_grouped_files[asset_name].append({ + 'file_path': file_path_str, + 'item_type': matched_item_type, + 'asset_name': asset_name + }) + primary_asset_names.add(asset_name) # Mark this as a primary asset name + is_map = True + break # Stop checking patterns for this file + if is_map: + break # Stop checking target types for this file + + # 3. Handle Unmatched Files (Not Extra, Not Map) + if not is_extra and not is_map: + log.debug(f"File '{filename}' did not match any map/extra pattern. Grouping by stem as FILE_IGNORE.") + asset_name = file_path.stem + temp_grouped_files[asset_name].append({ + 'file_path': file_path_str, + 'item_type': "FILE_IGNORE", + 'asset_name': asset_name + }) + + # --- Determine Primary Asset Name --- + # Simple heuristic: if only one name derived from maps, use it. Otherwise, log warning. + final_primary_asset_name = None + if len(primary_asset_names) == 1: + final_primary_asset_name = list(primary_asset_names)[0] + log.debug(f"Determined single primary asset name: '{final_primary_asset_name}'") + elif len(primary_asset_names) > 1: + # TODO: Implement a better heuristic for multiple assets (e.g., longest common prefix) + final_primary_asset_name = list(primary_asset_names)[0] # Fallback: use the first one found + log.warning(f"Multiple potential primary asset names found: {primary_asset_names}. Using '{final_primary_asset_name}' for associating extra files. Consider refining asset name extraction.") + else: + # No maps found, but maybe extras exist? Associate with the first asset group found. + if temp_grouped_files and extra_files_to_associate: + final_primary_asset_name = list(temp_grouped_files.keys())[0] + log.warning(f"No map files found to determine primary asset name. 
Associating extras with first group found: '{final_primary_asset_name}'.") + else: + log.debug("No primary asset name determined (no maps found).") + + + # --- Associate Extra Files --- + if final_primary_asset_name and extra_files_to_associate: + log.debug(f"Associating {len(extra_files_to_associate)} extra file(s) with primary asset '{final_primary_asset_name}'") + for file_path_str, filename in extra_files_to_associate: + temp_grouped_files[final_primary_asset_name].append({ + 'file_path': file_path_str, + 'item_type': "EXTRA", # Assign specific type + 'asset_name': final_primary_asset_name # Associate with primary asset + }) + elif extra_files_to_associate: + log.warning(f"Could not determine a primary asset name to associate {len(extra_files_to_associate)} extra file(s) with. They will be ignored.") + # Optionally, create a separate 'Extras' asset group? + # for file_path_str, filename in extra_files_to_associate: + # temp_grouped_files["_Extras_"].append(...) + + + log.debug(f"Classification complete. Found {len(temp_grouped_files)} potential assets.") + return dict(temp_grouped_files) + + class PredictionHandler(QObject): """ Handles running predictions in a separate thread to avoid GUI freezes. + Generates the initial SourceRule hierarchy based on file lists and presets. 
""" # --- Signals --- - # Emits a list of dictionaries, each representing a file row for the table - # Dict format: {'original_path': str, 'predicted_asset_name': str | None, 'predicted_output_name': str | None, 'status': str, 'details': str | None, 'source_asset': str} - prediction_results_ready = Signal(list) - # Emitted when the hierarchical rule structure is ready - rule_hierarchy_ready = Signal(object) # Emits a SourceRule object - # Emitted when all predictions for a batch are done + # Emitted when the hierarchical rule structure is ready for a single source + rule_hierarchy_ready = Signal(list) # Emits a LIST containing ONE SourceRule object + # Emitted when prediction/hierarchy generation for a source is done prediction_finished = Signal() # Emitted for status updates status_message = Signal(str, int) @@ -55,102 +202,72 @@ class PredictionHandler(QObject): def __init__(self, parent=None): super().__init__(parent) self._is_running = False - # No explicit cancel needed for prediction for now, it should be fast per-item @property def is_running(self): return self._is_running - def _predict_single_asset(self, input_path_str: str, config: Configuration, rules: SourceRule) -> list[dict] | dict: + # Removed _predict_single_asset method + + @Slot(str, list, str) # Explicitly define types for the slot + def run_prediction(self, input_source_identifier: str, original_input_paths: list[str], preset_name: str): """ - Helper method to run detailed file prediction for a single input path. - Runs within the ThreadPoolExecutor. - Returns a list of file prediction dictionaries for the input, or a dictionary representing an error. 
- """ - input_path = Path(input_path_str) - source_asset_name = input_path.name # For reference in error reporting - - try: - # Create AssetProcessor instance (needs dummy output path for prediction) - # The detailed prediction method handles its own workspace setup/cleanup - processor = AssetProcessor(input_path, config, Path(".")) # Dummy output path - - # Get the detailed file predictions - # This method returns a list of dictionaries - detailed_predictions = processor.get_detailed_file_predictions(rules) - - if detailed_predictions is None: - log.error(f"AssetProcessor.get_detailed_file_predictions returned None for {input_path_str}.") - # Return a list containing a single error entry for consistency - return [{ - 'original_path': source_asset_name, - 'predicted_asset_name': None, - 'predicted_output_name': None, - 'status': 'Error', - 'details': 'Prediction returned no results', - 'source_asset': source_asset_name - }] - - # Add the source_asset name to each prediction result for grouping later - for prediction in detailed_predictions: - prediction['source_asset'] = source_asset_name - - log.debug(f"Generated {len(detailed_predictions)} detailed predictions for {input_path_str}.") - return detailed_predictions # Return the list of dictionaries - - except AssetProcessingError as e: - log.error(f"Asset processing error during prediction for {input_path_str}: {e}") - # Return a list containing a single error entry for consistency - return [{ - 'original_path': source_asset_name, - 'predicted_asset_name': None, - 'predicted_output_name': None, - 'status': 'Error', - 'details': f'Asset Error: {e}', - 'source_asset': source_asset_name - }] - except Exception as e: - log.exception(f"Unexpected error during prediction for {input_path_str}: {e}") - # Return a list containing a single error entry for consistency - return [{ - 'original_path': source_asset_name, - 'predicted_asset_name': None, - 'predicted_output_name': None, - 'status': 'Error', - 'details': 
f'Unexpected Error: {e}', - 'source_asset': source_asset_name - }] - - - @Slot() - def run_prediction(self, input_paths: list[str], preset_name: str, rules: SourceRule): - """ - Runs the prediction logic for the given paths and preset using a ThreadPoolExecutor. - Generates the hierarchical rule structure and detailed file predictions. + Generates the initial SourceRule hierarchy for a given source identifier + (which could be a folder or archive path), extracting the actual file list first. + file list, and preset name. Populates only overridable fields based on + classification and preset defaults. This method is intended to be run in a separate QThread. """ + thread_id = QThread.currentThread() + log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered PredictionHandler.run_prediction.") + # Note: file_list argument is renamed to original_input_paths for clarity, + # but the signal passes the list of source paths, not the content files yet. + # We use input_source_identifier as the primary path to analyze. + log.info(f"VERIFY: PredictionHandler received request. Source: '{input_source_identifier}', Original Paths: {original_input_paths}, Preset: '{preset_name}'") # DEBUG Verify + log.info(f"Source Identifier: '{input_source_identifier}', Preset: '{preset_name}'") + if self._is_running: - log.warning("Prediction is already running.") + log.warning("Prediction is already running for another source. Aborting this run.") + # Don't emit finished, let the running one complete. return if not BACKEND_AVAILABLE: - log.error("Backend modules not available. Cannot run prediction.") + log.error("Backend/config modules not available. 
Cannot run prediction.") self.status_message.emit("Error: Backend components missing.", 5000) - self.prediction_finished.emit() + # self.prediction_finished.emit() # Don't emit finished if never started properly return if not preset_name: log.warning("No preset selected for prediction.") self.status_message.emit("No preset selected.", 3000) - self.prediction_finished.emit() + # self.prediction_finished.emit() return + # Check the identifier path itself + source_path = Path(input_source_identifier) + if not source_path.exists(): + log.warning(f"Input source path does not exist: '{input_source_identifier}'. Skipping prediction.") + self.status_message.emit("Input path not found.", 3000) + self.rule_hierarchy_ready.emit([]) + self.prediction_finished.emit() + return + self._is_running = True - thread_id = QThread.currentThread() # Get current thread object - log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered PredictionHandler.run_prediction. Starting run for {len(input_paths)} items, Preset='{preset_name}'") - self.status_message.emit(f"Updating preview for {len(input_paths)} items...", 0) + self.status_message.emit(f"Analyzing '{source_path.name}'...", 0) + + config: Configuration | None = None + allowed_asset_types: List[str] = [] + allowed_file_types: List[str] = [] # These are ItemType names - config = None # Load config once if possible try: config = Configuration(preset_name) + # Load allowed types from the project's config module + if app_config: + allowed_asset_types = getattr(app_config, 'ALLOWED_ASSET_TYPES', []) + allowed_file_types = getattr(app_config, 'ALLOWED_FILE_TYPES', []) + log.debug(f"Loaded allowed AssetTypes: {allowed_asset_types}") + log.debug(f"Loaded allowed FileTypes (ItemTypes): {allowed_file_types}") + else: + log.warning("Project config module not loaded. 
Cannot get allowed types.") + except ConfigurationError as e: log.error(f"Failed to load configuration for preset '{preset_name}': {e}") self.status_message.emit(f"Error loading preset '{preset_name}': {e}", 5000) @@ -158,131 +275,142 @@ class PredictionHandler(QObject): self._is_running = False return except Exception as e: - log.exception(f"Unexpected error loading configuration for preset '{preset_name}': {e}") + log.exception(f"Unexpected error loading configuration or allowed types for preset '{preset_name}': {e}") self.status_message.emit(f"Unexpected error loading preset '{preset_name}'.", 5000) self.prediction_finished.emit() + self._is_running = False return - # Create the root SourceRule object - # For now, use a generic name. Later, this might be derived from input paths. - source_rule = SourceRule() - log.debug(f"Created root SourceRule object.") - - # Collect all detailed file prediction results from completed futures - all_file_prediction_results = [] - - futures = [] - max_workers = min(max(1, (os.cpu_count() or 1) // 2), 8) - log.info(f"Using ThreadPoolExecutor with max_workers={max_workers} for prediction.") - + log.debug(f"DEBUG: Calling classify_files with file_list: {original_input_paths}") # DEBUG LOG + # --- Perform Classification --- try: - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Submit tasks for each input path - for input_path_str in input_paths: - # _predict_single_asset now returns a list of file prediction dicts or an error dict list - future = executor.submit(self._predict_single_asset, input_path_str, config, rules) - futures.append(future) + classified_assets = classify_files(original_input_paths, config) + except Exception as e: + log.exception(f"Error during file classification for source '{input_source_identifier}': {e}") + self.status_message.emit(f"Error classifying files: {e}", 5000) + self.prediction_finished.emit() + self._is_running = False + return - # Process results as they complete - for future 
in as_completed(futures): - try: - result = future.result() - if isinstance(result, list): - # Extend the main list with results from this asset - all_file_prediction_results.extend(result) - elif isinstance(result, dict) and result.get('status') == 'Error': - # Handle error dictionaries returned by _predict_single_asset (should be in a list now, but handle single dict for safety) - all_file_prediction_results.append(result) - else: - log.error(f'Prediction task returned unexpected result type: {type(result)}') - all_file_prediction_results.append({ - 'original_path': '[Unknown Asset - Unexpected Result]', - 'predicted_asset_name': None, - 'predicted_output_name': None, - 'status': 'Error', - 'details': f'Unexpected result type: {type(result)}', - 'source_asset': '[Unknown]' - }) + if not classified_assets: + log.warning(f"Classification yielded no assets for source '{input_source_identifier}'.") + self.status_message.emit("No assets identified from files.", 3000) + self.rule_hierarchy_ready.emit([]) # Emit empty list + self.prediction_finished.emit() + self._is_running = False + return - except Exception as exc: - log.error(f'Prediction task generated an exception: {exc}', exc_info=True) - all_file_prediction_results.append({ - 'original_path': '[Unknown Asset - Executor Error]', - 'predicted_asset_name': None, - 'predicted_output_name': None, - 'status': 'Error', - 'details': f'Executor Error: {exc}', - 'source_asset': '[Unknown]' - }) + # --- Build the Hierarchy --- + source_rules_list = [] + try: + # Determine SourceRule level overrides/defaults + # Get supplier name from the config property + supplier_identifier = config.supplier_name # Use the property - except Exception as pool_exc: - log.exception(f"An error occurred with the prediction ThreadPoolExecutor: {pool_exc}") - self.status_message.emit(f"Error during prediction setup: {pool_exc}", 5000) - all_file_prediction_results.append({ - 'original_path': '[Prediction Pool Error]', - 'predicted_asset_name': 
None, - 'predicted_output_name': None, - 'status': 'Error', - 'details': f'Pool Error: {pool_exc}', - 'source_asset': '[System]' - }) + # Create the single SourceRule for this input source + source_rule = SourceRule( + input_path=input_source_identifier, # Use the identifier provided + supplier_identifier=supplier_identifier # Set overridable field + ) + log.debug(f"Created SourceRule for identifier: {input_source_identifier} with supplier: {supplier_identifier}") + + asset_rules = [] + for asset_name, files_info in classified_assets.items(): + if not files_info: continue # Skip empty asset groups + + # Determine AssetRule level overrides/defaults + # TODO: Implement logic to determine asset_type based on file types present? + # For now, default to MATERIAL if common material maps are present, else GENERIC. + # This requires checking item_types in files_info. + item_types_in_asset = {f_info['item_type'] for f_info in files_info} + predicted_asset_type = "Surface" # Default to "Surface" string + # Simple heuristic: if common material types exist, assume Surface + # Use strings directly from config.py's ALLOWED_FILE_TYPES + material_indicators = {"MAP_COL", "MAP_NRM", "MAP_ROUGH", "MAP_METAL", "MAP_AO", "MAP_DISP"} + if any(it in material_indicators for it in item_types_in_asset): + predicted_asset_type = "Surface" # Predict as "Surface" string + + # Ensure the predicted type is allowed, fallback if necessary + # Now predicted_asset_type is already a string + if allowed_asset_types and predicted_asset_type not in allowed_asset_types: + log.warning(f"Predicted AssetType '{predicted_asset_type}' for asset '{asset_name}' is not in ALLOWED_ASSET_TYPES. 
Falling back.") + # Fallback logic: use the default from config if allowed, else first allowed type + default_type = getattr(app_config, 'DEFAULT_ASSET_CATEGORY', 'Surface') + if default_type in allowed_asset_types: + predicted_asset_type = default_type + elif allowed_asset_types: + predicted_asset_type = allowed_asset_types[0] + else: + pass # Keep the original prediction if allowed list is empty - # --- Build the hierarchical rule structure (SourceRule -> AssetRule -> FileRule) --- - # Group file prediction results by predicted_asset_name - grouped_by_asset = defaultdict(list) - for file_pred in all_file_prediction_results: - # Group by predicted_asset_name, handle None or errors - asset_name = file_pred.get('predicted_asset_name') - if asset_name is None: - # Group files without a predicted asset name under a special key or ignore for hierarchy? - # Let's group them under their source_asset name for now, but mark them clearly. - asset_name = f"[{file_pred.get('source_asset', 'UnknownSource')}]" # Use source asset name as a fallback identifier - log.debug(f"File '{file_pred.get('original_path', 'UnknownPath')}' has no predicted asset name, grouping under '{asset_name}' for hierarchy.") - grouped_by_asset[asset_name].append(file_pred) + asset_rule = AssetRule( + asset_name=asset_name, # This is determined by classification + asset_type=predicted_asset_type, # Set overridable field (use the string) + # asset_type_override=None # This is for user edits, leave as None initially + ) + log.debug(f"Created AssetRule for asset: {asset_name} with type: {predicted_asset_type}") - # Create AssetRule objects from the grouped results - asset_rules = [] - for asset_name, file_preds in grouped_by_asset.items(): - # Determine the source_path for the AssetRule (use the source_asset from the first file in the group) - source_asset_path = file_preds[0].get('source_asset', asset_name) # Fallback to asset_name if source_asset is missing - asset_rule = AssetRule(asset_name=asset_name) 
+ file_rules = [] + for file_info in files_info: + # Determine FileRule level overrides/defaults + item_type_override = file_info['item_type'] # From classification + target_asset_name_override = file_info['asset_name'] # From classification - # Create FileRule objects from the file prediction dictionaries - for file_pred in file_preds: - file_rule = FileRule( - file_path=file_pred.get('original_path', 'UnknownPath'), - map_type_override=None, # Assuming these are not predicted here - resolution_override=None, # Assuming these are not predicted here - channel_merge_instructions={}, # Assuming these are not predicted here - output_format_override=None # Assuming these are not predicted here - ) - asset_rule.files.append(file_rule) + # Ensure the predicted item type is allowed (check against prefixed version), skipping EXTRA and FILE_IGNORE + # Only prefix if it's a map type that doesn't already have the prefix + prefixed_item_type = f"MAP_{item_type_override}" if not item_type_override.startswith("MAP_") and item_type_override not in ["FILE_IGNORE", "EXTRA", "MODEL"] else item_type_override + # Check if the (potentially prefixed) type is allowed, but only if it's not supposed to be ignored or extra + if allowed_file_types and prefixed_item_type not in allowed_file_types and item_type_override not in ["FILE_IGNORE", "EXTRA"]: + log.warning(f"Predicted ItemType '{item_type_override}' (checked as '{prefixed_item_type}') for file '{file_info['file_path']}' is not in ALLOWED_FILE_TYPES. Setting to FILE_IGNORE.") + item_type_override = "FILE_IGNORE" # Fallback to FILE_IGNORE string + # Output format is determined by the engine, not predicted here. Leave as None. 
+ output_format_override = None - asset_rules.append(asset_rule) + file_rule = FileRule( + file_path=file_info['file_path'], # This is static info based on input + # --- Populate ONLY Overridable Fields --- + item_type_override=item_type_override, + target_asset_name_override=target_asset_name_override, + output_format_override=output_format_override, + # --- Leave Static Fields as Default/None --- + resolution_override=None, + channel_merge_instructions={}, + # etc. + ) + file_rules.append(file_rule) - # Populate the SourceRule with the collected AssetRules - source_rule.assets = asset_rules - log.debug(f"Built SourceRule with {len(asset_rules)} AssetRule(s).") + asset_rule.files = file_rules + asset_rules.append(asset_rule) + + # Populate the SourceRule with its assets + source_rule.assets = asset_rules + log.debug(f"Built SourceRule '{source_rule.input_path}' with {len(asset_rules)} AssetRule(s).") + source_rules_list.append(source_rule) # Add the single completed SourceRule + + except Exception as e: + log.exception(f"Error building rule hierarchy for source '{input_source_identifier}': {e}") + self.status_message.emit(f"Error building rules: {e}", 5000) + # Don't emit hierarchy, just finish + self.prediction_finished.emit() + self._is_running = False + # Removed erroneous temp_dir_obj cleanup + return - # Emit the hierarchical rule structure - log.info(f"[{time.time():.4f}][T:{thread_id}] Parallel prediction run finished. Preparing to emit rule hierarchy.") - self.rule_hierarchy_ready.emit(source_rule) + # --- Emit Results --- +# DEBUG Verify: Log the hierarchy being emitted + log.info(f"VERIFY: Emitting rule_hierarchy_ready with {len(source_rules_list)} SourceRule(s).") + for i, rule in enumerate(source_rules_list): + log.debug(f" VERIFY Rule {i}: Input='{rule.input_path}', Assets={len(rule.assets)}") + log.info(f"[{time.time():.4f}][T:{thread_id}] Prediction run finished. 
Emitting hierarchy for '{input_source_identifier}'.") + self.rule_hierarchy_ready.emit(source_rules_list) # Emit list containing the one SourceRule log.info(f"[{time.time():.4f}][T:{thread_id}] Emitted rule_hierarchy_ready signal.") - # Emit the combined list of detailed file results for the table view - log.info(f"[{time.time():.4f}][T:{thread_id}] Preparing to emit {len(all_file_prediction_results)} file results for table view.") - log.debug(f"[{time.time():.4f}][T:{thread_id}] Type of all_file_prediction_results before emit: {type(all_file_prediction_results)}") - try: - log.debug(f"[{time.time():.4f}][T:{thread_id}] Content of all_file_prediction_results (first 5) before emit: {all_file_prediction_results[:5]}") - except Exception as e: - log.error(f"[{time.time():.4f}][T:{thread_id}] Error logging all_file_prediction_results content: {e}") - log.info(f"[{time.time():.4f}][T:{thread_id}] Emitting prediction_results_ready signal...") - self.prediction_results_ready.emit(all_file_prediction_results) - log.info(f"[{time.time():.4f}][T:{thread_id}] Emitted prediction_results_ready signal.") + # Removed prediction_results_ready signal emission - self.status_message.emit("Preview update complete.", 3000) + self.status_message.emit(f"Analysis complete for '{input_source_identifier}'.", 3000) self.prediction_finished.emit() self._is_running = False - log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting PredictionHandler.run_prediction.") \ No newline at end of file + # Removed temp_dir_obj cleanup - not relevant here + log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting PredictionHandler.run_prediction.") diff --git a/gui/unified_view_model.py b/gui/unified_view_model.py new file mode 100644 index 0000000..c7112c8 --- /dev/null +++ b/gui/unified_view_model.py @@ -0,0 +1,319 @@ +# gui/unified_view_model.py +from PySide6.QtCore import QAbstractItemModel, QModelIndex, Qt +from pathlib import Path # Added for file_name extraction +from rule_structure import 
SourceRule, AssetRule, FileRule # Removed AssetType, ItemType import + +class UnifiedViewModel(QAbstractItemModel): + """ + A QAbstractItemModel for displaying and editing the hierarchical structure + of SourceRule -> AssetRule -> FileRule. + """ + Columns = [ + "Name", "Supplier Override", "Asset-Type Override", + "Target Asset Name Override", "Item-Type Override", + "Status", "Output Path" + ] + + COL_NAME = 0 + COL_SUPPLIER = 1 + COL_ASSET_TYPE = 2 + COL_TARGET_ASSET = 3 + COL_ITEM_TYPE = 4 + COL_STATUS = 5 + COL_OUTPUT_PATH = 6 + + def __init__(self, parent=None): + super().__init__(parent) + self._source_rules = [] # Now stores a list of SourceRule objects + + def load_data(self, source_rules_list: list): # Accepts a list + """Loads or reloads the model with a list of SourceRule objects.""" + self.beginResetModel() + self._source_rules = source_rules_list if source_rules_list else [] # Assign the new list + # Ensure back-references for parent lookup are set on the NEW items + for source_rule in self._source_rules: + for asset_rule in source_rule.assets: + asset_rule.parent_source = source_rule # Set parent SourceRule + for file_rule in asset_rule.files: + file_rule.parent_asset = asset_rule # Set parent AssetRule + self.endResetModel() + + def clear_data(self): + """Clears the model data.""" + self.beginResetModel() + self._source_rules = [] # Clear the list + self.endResetModel() + + def get_all_source_rules(self) -> list: + """Returns the internal list of SourceRule objects.""" + return self._source_rules + def rowCount(self, parent: QModelIndex = QModelIndex()) -> int: + """Returns the number of rows under the given parent.""" + if not parent.isValid(): + # Parent is the invisible root. Children are the SourceRules. + return len(self._source_rules) + + parent_item = parent.internalPointer() + + if isinstance(parent_item, SourceRule): + # Parent is a SourceRule. Children are AssetRules. 
+ return len(parent_item.assets) + elif isinstance(parent_item, AssetRule): + # Parent is an AssetRule. Children are FileRules. + return len(parent_item.files) + elif isinstance(parent_item, FileRule): + return 0 # FileRules have no children + + return 0 # Should not happen for valid items + + + def columnCount(self, parent: QModelIndex = QModelIndex()) -> int: + """Returns the number of columns.""" + return len(self.Columns) + + def parent(self, index: QModelIndex) -> QModelIndex: + """Returns the parent of the model item with the given index.""" + if not index.isValid(): + return QModelIndex() + + child_item = index.internalPointer() + if child_item is None: + return QModelIndex() + + # Determine the parent based on the item type + if isinstance(child_item, SourceRule): + # Parent is the invisible root + return QModelIndex() + elif isinstance(child_item, AssetRule): + # Parent is a SourceRule. Find its row in the _source_rules list. + parent_item = getattr(child_item, 'parent_source', None) + if parent_item and parent_item in self._source_rules: + try: + parent_row = self._source_rules.index(parent_item) + return self.createIndex(parent_row, 0, parent_item) + except ValueError: + return QModelIndex() # Should not happen if parent_source is correct + else: + return QModelIndex() # Parent SourceRule not found or reference missing + + elif isinstance(child_item, FileRule): + # Parent is an AssetRule. Find its row within its parent SourceRule. 
+ parent_item = getattr(child_item, 'parent_asset', None) # Get parent AssetRule + if parent_item: + grandparent_item = getattr(parent_item, 'parent_source', None) # Get the SourceRule + if grandparent_item: + try: + parent_row = grandparent_item.assets.index(parent_item) + # We need the index of the grandparent (SourceRule) to create the parent index + grandparent_row = self._source_rules.index(grandparent_item) + return self.createIndex(parent_row, 0, parent_item) # Create index for the AssetRule parent + except ValueError: + return QModelIndex() # Parent AssetRule or Grandparent SourceRule not found in respective lists + else: + return QModelIndex() # Grandparent (SourceRule) reference missing + else: + return QModelIndex() # Parent AssetRule reference missing + + return QModelIndex() # Should not be reached + + + def index(self, row: int, column: int, parent: QModelIndex = QModelIndex()) -> QModelIndex: + """Returns the index of the item in the model specified by the given row, column and parent index.""" + if not self.hasIndex(row, column, parent): + return QModelIndex() + + parent_item = None + if not parent.isValid(): + # Parent is invisible root. Children are SourceRules. + if row < len(self._source_rules): + child_item = self._source_rules[row] + return self.createIndex(row, column, child_item) + else: + return QModelIndex() # Row out of bounds for top-level items + else: + # Parent is a valid index, get its item + parent_item = parent.internalPointer() + + child_item = None + if isinstance(parent_item, SourceRule): + # Parent is SourceRule. Children are AssetRules. + if row < len(parent_item.assets): + child_item = parent_item.assets[row] + # Ensure parent reference is set + if not hasattr(child_item, 'parent_source'): + child_item.parent_source = parent_item + elif isinstance(parent_item, AssetRule): + # Parent is AssetRule. Children are FileRules. 
+ if row < len(parent_item.files): + child_item = parent_item.files[row] + # Ensure parent reference is set + if not hasattr(child_item, 'parent_asset'): + child_item.parent_asset = parent_item + + if child_item: + # Create index for the child item under the parent + return self.createIndex(row, column, child_item) + else: + # Invalid row or parent type has no children (FileRule) + return QModelIndex() + + def data(self, index: QModelIndex, role: int = Qt.DisplayRole): + """Returns the data stored under the given role for the item referred to by the index.""" + if not index.isValid(): # Check only index validity, data list might be empty but valid + return None + + item = index.internalPointer() + column = index.column() + + # --- Handle different item types --- + if isinstance(item, SourceRule): # This might only be relevant if SourceRule is displayed + if role == Qt.DisplayRole: + if column == 0: return item.input_path + # Use supplier_override if set, otherwise empty string + if column == self.COL_SUPPLIER: return item.supplier_override if item.supplier_override is not None else "" + # Other columns return None or "" for SourceRule + elif role == Qt.EditRole: + # Return supplier_override for editing + if column == self.COL_SUPPLIER: return item.supplier_override if item.supplier_override is not None else "" + return None # Default for SourceRule for other roles/columns + + elif isinstance(item, AssetRule): + if role == Qt.DisplayRole: + if column == self.COL_NAME: return item.asset_name + # Use asset_type_override if set, otherwise fall back to predicted asset_type + if column == self.COL_ASSET_TYPE: + display_value = item.asset_type_override if item.asset_type_override is not None else item.asset_type + return display_value if display_value else "" + # Placeholder columns + if column == self.COL_STATUS: return "" # Status (Not handled yet) + if column == self.COL_OUTPUT_PATH: return "" # Output Path (Not handled yet) + elif role == Qt.EditRole: + # Return 
asset_type_override for editing (delegate expects string or None) + if column == self.COL_ASSET_TYPE: + return item.asset_type_override # Return string or None + return None # Default for AssetRule + + + elif isinstance(item, FileRule): + if role == Qt.DisplayRole: + if column == self.COL_NAME: return Path(item.file_path).name # Display only filename + # Use target_asset_name_override if set, otherwise empty string + if column == self.COL_TARGET_ASSET: + return item.target_asset_name_override if item.target_asset_name_override is not None else "" + # Use item_type_override if set, otherwise empty string (assuming predicted isn't stored directly) + if column == self.COL_ITEM_TYPE: + # Assuming item_type_override stores the string name of the ItemType enum + return item.item_type_override if item.item_type_override else "" + if column == self.COL_STATUS: return "" # Status (Not handled yet) + if column == self.COL_OUTPUT_PATH: return "" # Output Path (Not handled yet) + elif role == Qt.EditRole: + # Return target_asset_name_override for editing + if column == self.COL_TARGET_ASSET: return item.target_asset_name_override if item.target_asset_name_override is not None else "" + # Return item_type_override for editing (delegate expects string or None) + if column == self.COL_ITEM_TYPE: return item.item_type_override # Return string or None + return None # Default for FileRule + + return None # Should not be reached if item is one of the known types + + def setData(self, index: QModelIndex, value, role: int = Qt.EditRole) -> bool: + """Sets the role data for the item at index to value.""" + if not index.isValid() or role != Qt.EditRole: # Check only index and role + return False + + item = index.internalPointer() + if item is None: # Extra check for safety + return False + column = index.column() + changed = False + + # --- Handle different item types --- + if isinstance(item, SourceRule): # If SourceRule is editable + if column == self.COL_SUPPLIER: + # Ensure value is 
string or None + new_value = str(value).strip() if value is not None else None + if new_value == "": new_value = None # Treat empty string as None + # Update supplier_override + if item.supplier_override != new_value: + item.supplier_override = new_value + changed = True + + elif isinstance(item, AssetRule): + if column == self.COL_ASSET_TYPE: + # Delegate provides string value (e.g., "Surface", "Model") or None + new_value = str(value) if value is not None else None + if new_value == "": new_value = None # Treat empty string as None + # Update asset_type_override + if item.asset_type_override != new_value: + item.asset_type_override = new_value + changed = True + + elif isinstance(item, FileRule): + if column == self.COL_TARGET_ASSET: # Target Asset Name Override + # Ensure value is string or None + new_value = str(value).strip() if value is not None else None + if new_value == "": new_value = None # Treat empty string as None + # Update target_asset_name_override + if item.target_asset_name_override != new_value: + item.target_asset_name_override = new_value + changed = True + elif column == self.COL_ITEM_TYPE: # Item-Type Override + # Delegate provides string value (e.g., "MAP_COL") or None + new_value = str(value) if value is not None else None + if new_value == "": new_value = None # Treat empty string as None + # Update item_type_override + if item.item_type_override != new_value: + item.item_type_override = new_value + changed = True + + + if changed: + # Emit dataChanged for the specific index and affected roles + self.dataChanged.emit(index, index, [Qt.DisplayRole, Qt.EditRole]) + return True + + return False + + def flags(self, index: QModelIndex) -> Qt.ItemFlags: + """Returns the item flags for the given index.""" + if not index.isValid(): + return Qt.NoItemFlags # No flags for invalid index + + # Start with default flags for a valid item + default_flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable + + item = index.internalPointer() + column = index.column() 
+ + can_edit = False + # Determine editability based on item type and column + if isinstance(item, SourceRule): # If SourceRule is displayed/editable + if column == 1: can_edit = True + elif isinstance(item, AssetRule): + if column == 2: can_edit = True + elif isinstance(item, FileRule): + if column == 3: can_edit = True + if column == 4: can_edit = True + + if can_edit: + return default_flags | Qt.ItemIsEditable + else: + return default_flags + + def headerData(self, section: int, orientation: Qt.Orientation, role: int = Qt.DisplayRole): + """Returns the data for the given role and section in the header.""" + if orientation == Qt.Horizontal and role == Qt.DisplayRole: + if 0 <= section < len(self.Columns): + return self.Columns[section] + # Optionally handle Vertical header (row numbers) + # if orientation == Qt.Vertical and role == Qt.DisplayRole: + # return str(section + 1) + return None + + # Helper to get item from index + def getItem(self, index: QModelIndex): + """Safely returns the item associated with the index.""" + if index.isValid(): + item = index.internalPointer() + if item: # Ensure internal pointer is not None + return item + return None # Return None for invalid index or None pointer \ No newline at end of file diff --git a/main.py b/main.py index 650e91a..666c8f4 100644 --- a/main.py +++ b/main.py @@ -8,15 +8,25 @@ import logging from pathlib import Path from concurrent.futures import ProcessPoolExecutor, as_completed import platform # To potentially adjust worker count defaults -import subprocess # <<< ADDED IMPORT -import shutil # <<< ADDED IMPORT -from typing import List, Dict, Tuple, Optional # Added for type hinting +import subprocess +import shutil +import tempfile # Added for temporary workspace +import zipfile # Added for zip extraction +from typing import List, Dict, Tuple, Optional -# --- Assuming classes are in sibling files --- +# --- Qt Imports for Application Structure --- +from PySide6.QtCore import QObject, Slot, QThreadPool, 
QRunnable, Signal # Added for App structure and threading +from PySide6.QtCore import Qt # Added for ConnectionType +from PySide6.QtWidgets import QApplication # Added for App structure + +# --- Backend Imports --- try: from configuration import Configuration, ConfigurationError - from asset_processor import AssetProcessor, AssetProcessingError - import config as core_config_module # <<< IMPORT config.py HERE + # from asset_processor import AssetProcessor, AssetProcessingError # REMOVED OLD PROCESSOR + from processing_engine import ProcessingEngine # <<< ADDED NEW ENGINE IMPORT + from rule_structure import SourceRule # Import SourceRule for type hinting + import config as core_config_module + from gui.main_window import MainWindow # Import MainWindow except ImportError as e: # Provide a more helpful error message if imports fail script_dir = Path(__file__).parent.resolve() @@ -84,14 +94,16 @@ def setup_arg_parser(): "input_paths", metavar="INPUT_PATH", type=str, - nargs='+', # Requires one or more input paths - help="Path(s) to the input ZIP file(s) or folder(s) containing assets." + nargs='*', # Allow zero or more paths initially + default=[], # Default to empty list + help="Path(s) to the input ZIP file(s) or folder(s) containing assets (Required for CLI mode)." ) parser.add_argument( "-p", "--preset", type=str, - required=True, - help="Name of the configuration preset (e.g., 'poliigon') located in the 'presets' directory (without .json extension)." + required=False, # Make not required initially + default=None, # Default to None + help="Name of the configuration preset (Required for CLI mode)." ) parser.add_argument( "-o", "--output-dir", @@ -128,271 +140,844 @@ def setup_arg_parser(): default=None, help="Path to the .blend file for creating/updating materials. Overrides config.py default." ) + parser.add_argument( + "--gui", + action="store_true", + help="Force launch in GUI mode, ignoring other arguments." 
+ ) # Potential future flags: # parser.add_argument("--log-file", type=str, default=None, help="Path to save log output to a file.") return parser -# --- Worker Function --- -def process_single_asset_wrapper(input_path_str: str, preset_name: str, output_dir_str: str, overwrite: bool, verbose: bool, rules) -> Tuple[str, str, Optional[str]]: - """ - Wrapper function for processing a single input path (which might contain multiple assets) - in a separate process. Handles instantiation of Configuration and AssetProcessor, - passes the overwrite flag, catches errors, and interprets the multi-asset status dictionary. +# --- Worker Runnable for Thread Pool --- +class TaskSignals(QObject): # Create a QObject subclass for signals + finished = Signal(str, str, object) # rule_input_path, status, result/error + # error = Signal(str, str) # Can combine into finished signal - Ensures logging is configured for the worker process. +class ProcessingTask(QRunnable): + """Wraps a call to processing_engine.process for execution in a thread pool.""" - Returns: - Tuple[str, str, Optional[str]]: - - input_path_str: The original input path processed. - - overall_status_string: A single status string summarizing the outcome - ("processed", "skipped", "failed", "partial_success"). - - error_message_or_None: An error message if failures occurred, potentially - listing failed assets. 
- """ - # Explicitly configure logging for this worker process - worker_log = logging.getLogger(f"Worker_{os.getpid()}") # Log with worker PID - if not logging.root.handlers: - logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s') - worker_log.setLevel(logging.DEBUG if verbose else logging.INFO) - if verbose: - logging.root.setLevel(logging.DEBUG) + def __init__(self, engine: ProcessingEngine, rule: SourceRule, workspace_path: Path, output_base_path: Path): # Added paths + super().__init__() + self.engine = engine + self.rule = rule + self.workspace_path = workspace_path # Store path + self.output_base_path = output_base_path # Store path + self.signals = TaskSignals() # Instantiate signals object - input_path_obj = Path(input_path_str) - input_name = input_path_obj.name + @Slot() # Decorator required for QRunnable's run method + def run(self): + """Prepares input files and executes the engine's process method.""" + log.info(f"Worker Thread: Starting processing for rule: {self.rule.input_path}") + log.debug(f"DEBUG: Rule passed to ProcessingTask.run: {self.rule}") # DEBUG LOG + status = "failed" # Default status + result_or_error = None + temp_workspace_dir = None # Initialize outside try - try: - worker_log.info(f"Starting processing attempt for input: {input_name}") - config = Configuration(preset_name) - output_base_path = Path(output_dir_str) + try: + # --- 1. 
Prepare Input Workspace --- + original_input_path = Path(self.rule.input_path) + prepared_workspace_path = None - processor = AssetProcessor(input_path_obj, config, output_base_path, overwrite=overwrite) - # processor.process() now returns a Dict[str, List[str]] - status_dict = processor.process(rules=rules) + if not original_input_path.exists(): + raise FileNotFoundError(f"Original input path does not exist: {original_input_path}") - # --- Interpret the status dictionary --- - processed_assets = status_dict.get("processed", []) - skipped_assets = status_dict.get("skipped", []) - failed_assets = status_dict.get("failed", []) + # Create a temporary directory for processing + temp_workspace_dir = tempfile.mkdtemp(prefix="asset_proc_") + prepared_workspace_path = Path(temp_workspace_dir) + log.info(f"Created temporary workspace: {prepared_workspace_path}") - overall_status_string = "failed" # Default - error_message = None - - if failed_assets: - overall_status_string = "failed" - error_message = f"Failed assets within {input_name}: {', '.join(failed_assets)}" - worker_log.error(error_message) # Log the failure details - elif processed_assets: - overall_status_string = "processed" - # Check for partial success (mix of processed/skipped and failed should be caught above) - if skipped_assets: - worker_log.info(f"Input '{input_name}' processed with some assets skipped. 
Processed: {processed_assets}, Skipped: {skipped_assets}") + # Check if input is directory or zip file + if original_input_path.is_dir(): + log.info(f"Input is a directory, copying contents to workspace: {original_input_path}") + # Copy directory contents into the temp workspace + shutil.copytree(original_input_path, prepared_workspace_path, dirs_exist_ok=True) + elif original_input_path.is_file() and original_input_path.suffix.lower() == '.zip': + log.info(f"Input is a zip file, extracting to workspace: {original_input_path}") + with zipfile.ZipFile(original_input_path, 'r') as zip_ref: + zip_ref.extractall(prepared_workspace_path) else: - worker_log.info(f"Input '{input_name}' processed successfully. Assets: {processed_assets}") - elif skipped_assets: - overall_status_string = "skipped" - worker_log.info(f"Input '{input_name}' skipped (all contained assets already exist). Assets: {skipped_assets}") - else: - # Should not happen if input contained files, but handle as failure. - worker_log.warning(f"Input '{input_name}' resulted in no processed, skipped, or failed assets. Reporting as failed.") - overall_status_string = "failed" - error_message = f"No assets processed, skipped, or failed within {input_name}." + # Handle unsupported input types if necessary + raise ValueError(f"Unsupported input type: {original_input_path}. Must be a directory or .zip file.") +# --- DEBUG: List files in prepared workspace --- + try: + log.debug(f"Listing contents of prepared workspace: {prepared_workspace_path}") + for item in prepared_workspace_path.rglob('*'): # Recursively list all items + log.debug(f" Found item: {item.relative_to(prepared_workspace_path)}") + except Exception as list_err: + log.error(f"Error listing prepared workspace contents: {list_err}") + # --- END DEBUG --- + # --- 2. 
Execute Processing Engine --- + log.info(f"Calling ProcessingEngine.process with rule for input: {self.rule.input_path}, prepared workspace: {prepared_workspace_path}, output: {self.output_base_path}") + log.debug(f" Rule Details: {self.rule}") # Optional detailed log - return (input_path_str, overall_status_string, error_message) + # Pass rule positionally, prepared workspace, and output base path + result_or_error = self.engine.process( + self.rule, # Pass rule as first positional argument + workspace_path=prepared_workspace_path, # Use the prepared temp workspace + output_base_path=self.output_base_path + ) + status = "processed" # Assume success if no exception + log.info(f"Worker Thread: Finished processing for rule: {self.rule.input_path}, Status: {status}") + # Signal emission moved to finally block - except (ConfigurationError, AssetProcessingError) as e: - # Catch errors during processor setup or the process() call itself if it raises before returning dict - worker_log.error(f"Processing failed for input '{input_name}': {type(e).__name__}: {e}") - return (input_path_str, "failed", f"{type(e).__name__}: {e}") - except Exception as e: - # Catch any other unexpected errors - worker_log.exception(f"Unexpected worker failure processing input '{input_name}': {e}") - return (input_path_str, "failed", f"Unexpected Worker Error: {e}") + except (FileNotFoundError, ValueError, zipfile.BadZipFile, OSError) as prep_error: + log.exception(f"Worker Thread: Error preparing workspace for rule {self.rule.input_path}: {prep_error}") + status = "failed_preparation" + result_or_error = str(prep_error) + # Signal emission moved to finally block + except Exception as proc_error: + log.exception(f"Worker Thread: Error during engine processing for rule {self.rule.input_path}: {proc_error}") + status = "failed_processing" + result_or_error = str(proc_error) + # Signal emission moved to finally block + finally: + # --- Emit finished signal regardless of success or failure --- + try: 
+ self.signals.finished.emit(str(self.rule.input_path), status, result_or_error) + log.debug(f"Worker Thread: Emitted finished signal for {self.rule.input_path} with status {status}") + except Exception as sig_err: + log.error(f"Worker Thread: Error emitting finished signal for {self.rule.input_path}: {sig_err}") - -# --- Core Processing Function --- -def run_processing( - valid_inputs: List[str], - preset_name: str, - output_dir_for_processor: str, - overwrite: bool, - num_workers: int, - verbose: bool # Add verbose parameter here -) -> Dict: - """ - Executes the core asset processing logic using a process pool. - - Args: - valid_inputs: List of validated input file/directory paths (strings). - preset_name: Name of the preset to use. - output_dir_for_processor: Absolute path string for the output base directory. - overwrite: Boolean flag to force reprocessing. - num_workers: Maximum number of worker processes. - verbose: Boolean flag for verbose logging. - - Returns: - A dictionary containing processing results: - { - "processed": int, - "skipped": int, - "failed": int, - "results_list": List[Tuple[str, str, Optional[str]]] # (input_path, status, error_msg) - } - """ - log.info(f"Processing {len(valid_inputs)} asset(s) using preset '{preset_name}' with up to {num_workers} worker(s)...") - results_list = [] - successful_processed_count = 0 - skipped_count = 0 - failed_count = 0 - - # Ensure at least one worker - num_workers = max(1, num_workers) - - # Using ProcessPoolExecutor is generally good if AssetProcessor tasks are CPU-bound. - # If tasks are mostly I/O bound, ThreadPoolExecutor might be sufficient. - # Important: Ensure Configuration and AssetProcessor are "pickleable". 
- try: - with ProcessPoolExecutor(max_workers=num_workers) as executor: - # Create futures - futures = {} - log.debug(f"Submitting {len(valid_inputs)} tasks...") - # Removed the 1-second delay for potentially faster submission in non-CLI use - for i, input_path in enumerate(valid_inputs): - log.debug(f"Submitting task {i+1}/{len(valid_inputs)} for: {Path(input_path).name}") - future = executor.submit( - process_single_asset_wrapper, - input_path, - preset_name, - output_dir_for_processor, - overwrite, - verbose # Pass the verbose flag - ) - futures[future] = input_path # Store future -> input_path mapping - - # Process completed futures - for i, future in enumerate(as_completed(futures), 1): - input_path = futures[future] - asset_name = Path(input_path).name - log.info(f"--- [{i}/{len(valid_inputs)}] Worker finished attempt for: {asset_name} ---") + # --- 3. Cleanup Workspace --- + if temp_workspace_dir and Path(temp_workspace_dir).exists(): try: - # Get result tuple: (input_path_str, status_string, error_message_or_None) - result_tuple = future.result() - results_list.append(result_tuple) - input_path_res, status, err_msg = result_tuple - - # Increment counters based on status - if status == "processed": - successful_processed_count += 1 - elif status == "skipped": - skipped_count += 1 - elif status == "failed": - failed_count += 1 - else: # Should not happen, but log as warning/failure - log.warning(f"Unknown status '{status}' received for {asset_name}. 
Counting as failed.") - failed_count += 1 - - except Exception as e: - # Catch errors if the future itself fails (e.g., worker process crashed hard) - log.exception(f"Critical worker failure for {asset_name}: {e}") - results_list.append((input_path, "failed", f"Worker process crashed: {e}")) - failed_count += 1 # Count crashes as failures - - except Exception as pool_exc: - log.exception(f"An error occurred with the process pool: {pool_exc}") - # Re-raise or handle as appropriate for the calling context (monitor.py) - # For now, log and return current counts - return { - "processed": successful_processed_count, - "skipped": skipped_count, - "failed": failed_count + (len(valid_inputs) - len(results_list)), # Count unprocessed as failed - "results_list": results_list, - "pool_error": str(pool_exc) # Add pool error info - } - - return { - "processed": successful_processed_count, - "skipped": skipped_count, - "failed": failed_count, - "results_list": results_list - } + log.info(f"Cleaning up temporary workspace: {temp_workspace_dir}") + shutil.rmtree(temp_workspace_dir) + except OSError as cleanup_error: + log.error(f"Worker Thread: Failed to cleanup temporary workspace {temp_workspace_dir}: {cleanup_error}") -# --- Blender Script Execution Helper --- -def run_blender_script(blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str): - """ - Executes a Python script within Blender in the background. +# --- CLI Worker Function (COMMENTED OUT - Replaced by GUI Flow) --- +# def process_single_asset_wrapper(input_path_str: str, preset_name: str, output_dir_str: str, overwrite: bool, verbose: bool, rules) -> Tuple[str, str, Optional[str]]: +# """ +# Wrapper function for processing a single input path (which might contain multiple assets) +# in a separate process. Handles instantiation of Configuration and AssetProcessor, +# passes the overwrite flag, catches errors, and interprets the multi-asset status dictionary. 
+# +# Ensures logging is configured for the worker process. +# +# Returns: +# Tuple[str, str, Optional[str]]: +# - input_path_str: The original input path processed. +# - overall_status_string: A single status string summarizing the outcome +# ("processed", "skipped", "failed", "partial_success"). +# - error_message_or_None: An error message if failures occurred, potentially +# listing failed assets. +# """ +# # Explicitly configure logging for this worker process +# worker_log = logging.getLogger(f"Worker_{os.getpid()}") # Log with worker PID +# if not logging.root.handlers: +# logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-8s] %(name)s: %(message)s') +# worker_log.setLevel(logging.DEBUG if verbose else logging.INFO) +# if verbose: +# logging.root.setLevel(logging.DEBUG) +# +# input_path_obj = Path(input_path_str) +# input_name = input_path_obj.name +# +# try: +# worker_log.info(f"Starting processing attempt for input: {input_name}") +# config = Configuration(preset_name) +# output_base_path = Path(output_dir_str) +# +# processor = AssetProcessor(input_path_obj, config, output_base_path, overwrite=overwrite) +# # processor.process() now returns a Dict[str, List[str]] +# status_dict = processor.process(rules=rules) +# +# # --- Interpret the status dictionary --- +# processed_assets = status_dict.get("processed", []) +# skipped_assets = status_dict.get("skipped", []) +# failed_assets = status_dict.get("failed", []) +# +# overall_status_string = "failed" # Default +# error_message = None +# +# if failed_assets: +# overall_status_string = "failed" +# error_message = f"Failed assets within {input_name}: {', '.join(failed_assets)}" +# worker_log.error(error_message) # Log the failure details +# elif processed_assets: +# overall_status_string = "processed" +# # Check for partial success (mix of processed/skipped and failed should be caught above) +# if skipped_assets: +# worker_log.info(f"Input '{input_name}' processed with some assets skipped. 
Processed: {processed_assets}, Skipped: {skipped_assets}") +# else: +# worker_log.info(f"Input '{input_name}' processed successfully. Assets: {processed_assets}") +# elif skipped_assets: +# overall_status_string = "skipped" +# worker_log.info(f"Input '{input_name}' skipped (all contained assets already exist). Assets: {skipped_assets}") +# else: +# # Should not happen if input contained files, but handle as failure. +# worker_log.warning(f"Input '{input_name}' resulted in no processed, skipped, or failed assets. Reporting as failed.") +# overall_status_string = "failed" +# error_message = f"No assets processed, skipped, or failed within {input_name}." +# +# +# return (input_path_str, overall_status_string, error_message) +# +# except (ConfigurationError, AssetProcessingError) as e: +# # Catch errors during processor setup or the process() call itself if it raises before returning dict +# worker_log.error(f"Processing failed for input '{input_name}': {type(e).__name__}: {e}") +# return (input_path_str, "failed", f"{type(e).__name__}: {e}") +# except Exception as e: +# # Catch any other unexpected errors +# worker_log.exception(f"Unexpected worker failure processing input '{input_name}': {e}") +# return (input_path_str, "failed", f"Unexpected Worker Error: {e}") - Args: - blender_exe_path: Path to the Blender executable. - blend_file_path: Path to the .blend file to open. - python_script_path: Path to the Python script to execute within Blender. - asset_root_dir: Path to the processed asset library root directory (passed to the script). - Returns: - True if the script executed successfully (return code 0), False otherwise. 
- """ - log.info(f"Attempting to run Blender script: {Path(python_script_path).name} on {Path(blend_file_path).name}") +# --- Core CLI Processing Function (COMMENTED OUT - Replaced by GUI Flow) --- +# def run_processing( +# valid_inputs: List[str], +# preset_name: str, +# output_dir_for_processor: str, +# overwrite: bool, +# num_workers: int, +# verbose: bool # Add verbose parameter here +# ) -> Dict: +# """ +# Executes the core asset processing logic using a process pool. +# +# Args: +# valid_inputs: List of validated input file/directory paths (strings). +# preset_name: Name of the preset to use. +# output_dir_for_processor: Absolute path string for the output base directory. +# overwrite: Boolean flag to force reprocessing. +# num_workers: Maximum number of worker processes. +# verbose: Boolean flag for verbose logging. +# +# Returns: +# A dictionary containing processing results: +# { +# "processed": int, +# "skipped": int, +# "failed": int, +# "results_list": List[Tuple[str, str, Optional[str]]] # (input_path, status, error_msg) +# } +# """ +# log.info(f"Processing {len(valid_inputs)} asset(s) using preset '{preset_name}' with up to {num_workers} worker(s)...") +# results_list = [] +# successful_processed_count = 0 +# skipped_count = 0 +# failed_count = 0 +# +# # Ensure at least one worker +# num_workers = max(1, num_workers) +# +# # Using ProcessPoolExecutor is generally good if AssetProcessor tasks are CPU-bound. +# # If tasks are mostly I/O bound, ThreadPoolExecutor might be sufficient. +# # Important: Ensure Configuration and AssetProcessor are "pickleable". 
+# try: +# with ProcessPoolExecutor(max_workers=num_workers) as executor: +# # Create futures +# futures = {} +# log.debug(f"Submitting {len(valid_inputs)} tasks...") +# # Removed the 1-second delay for potentially faster submission in non-CLI use +# for i, input_path in enumerate(valid_inputs): +# log.debug(f"Submitting task {i+1}/{len(valid_inputs)} for: {Path(input_path).name}") +# future = executor.submit( +# process_single_asset_wrapper, +# input_path, +# preset_name, +# output_dir_for_processor, +# overwrite, +# verbose # Pass the verbose flag +# ) +# futures[future] = input_path # Store future -> input_path mapping +# +# # Process completed futures +# for i, future in enumerate(as_completed(futures), 1): +# input_path = futures[future] +# asset_name = Path(input_path).name +# log.info(f"--- [{i}/{len(valid_inputs)}] Worker finished attempt for: {asset_name} ---") +# try: +# # Get result tuple: (input_path_str, status_string, error_message_or_None) +# result_tuple = future.result() +# results_list.append(result_tuple) +# input_path_res, status, err_msg = result_tuple +# +# # Increment counters based on status +# if status == "processed": +# successful_processed_count += 1 +# elif status == "skipped": +# skipped_count += 1 +# elif status == "failed": +# failed_count += 1 +# else: # Should not happen, but log as warning/failure +# log.warning(f"Unknown status '{status}' received for {asset_name}. 
Counting as failed.") +# failed_count += 1 +# +# except Exception as e: +# # Catch errors if the future itself fails (e.g., worker process crashed hard) +# log.exception(f"Critical worker failure for {asset_name}: {e}") +# results_list.append((input_path, "failed", f"Worker process crashed: {e}")) +# failed_count += 1 # Count crashes as failures +# +# except Exception as pool_exc: +# log.exception(f"An error occurred with the process pool: {pool_exc}") +# # Re-raise or handle as appropriate for the calling context (monitor.py) +# # For now, log and return current counts +# return { +# "processed": successful_processed_count, +# "skipped": skipped_count, +# "failed": failed_count + (len(valid_inputs) - len(results_list)), # Count unprocessed as failed +# "results_list": results_list, +# "pool_error": str(pool_exc) # Add pool error info +# } +# +# return { +# "processed": successful_processed_count, +# "skipped": skipped_count, +# "failed": failed_count, +# "results_list": results_list +# } - # Ensure paths are absolute strings for subprocess - blender_exe_path = str(Path(blender_exe_path).resolve()) - blend_file_path = str(Path(blend_file_path).resolve()) - python_script_path = str(Path(python_script_path).resolve()) - asset_root_dir = str(Path(asset_root_dir).resolve()) - # Construct the command arguments - # -b: Run in background (no UI) - # -S: Save the file after running the script - # --python: Execute the specified Python script - # --: Separator, arguments after this are passed to the Python script's sys.argv - command = [ - blender_exe_path, - "-b", # Run in background - blend_file_path, - "--python", python_script_path, - "--", # Pass subsequent arguments to the script - asset_root_dir, - "-S" # Save the blend file after script execution - ] +# --- Blender Script Execution Helper (COMMENTED OUT - Part of CLI Flow) --- +# def run_blender_script(blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str): +# """ +# Executes a 
Python script within Blender in the background. +# +# Args: +# blender_exe_path: Path to the Blender executable. +# blend_file_path: Path to the .blend file to open. +# python_script_path: Path to the Python script to execute within Blender. +# asset_root_dir: Path to the processed asset library root directory (passed to the script). +# +# Returns: +# True if the script executed successfully (return code 0), False otherwise. +# """ +# log.info(f"Attempting to run Blender script: {Path(python_script_path).name} on {Path(blend_file_path).name}") +# +# # Ensure paths are absolute strings for subprocess +# blender_exe_path = str(Path(blender_exe_path).resolve()) +# blend_file_path = str(Path(blend_file_path).resolve()) +# python_script_path = str(Path(python_script_path).resolve()) +# asset_root_dir = str(Path(asset_root_dir).resolve()) +# +# # Construct the command arguments +# # -b: Run in background (no UI) +# # -S: Save the file after running the script +# # --python: Execute the specified Python script +# # --: Separator, arguments after this are passed to the Python script's sys.argv +# command = [ +# blender_exe_path, +# "-b", # Run in background +# blend_file_path, +# "--python", python_script_path, +# "--", # Pass subsequent arguments to the script +# asset_root_dir, +# "-S" # Save the blend file after script execution +# ] +# +# log.debug(f"Executing Blender command: {' '.join(command)}") # Log the command for debugging +# +# try: +# # Execute the command +# # capture_output=True captures stdout and stderr +# # text=True decodes stdout/stderr as text +# # check=False prevents raising CalledProcessError on non-zero exit codes +# result = subprocess.run(command, capture_output=True, text=True, check=False) +# +# # Log results +# log.info(f"Blender script '{Path(python_script_path).name}' finished with exit code: {result.returncode}") +# if result.stdout: +# log.debug(f"Blender stdout:\n{result.stdout.strip()}") +# if result.stderr: +# # Log stderr as warning or 
error depending on return code +# if result.returncode != 0: +# log.error(f"Blender stderr:\n{result.stderr.strip()}") +# else: +# log.warning(f"Blender stderr (Return Code 0):\n{result.stderr.strip()}") # Log stderr even on success as scripts might print warnings +# +# return result.returncode == 0 +# +# except FileNotFoundError: +# log.error(f"Blender executable not found at: {blender_exe_path}") +# return False +# except Exception as e: +# log.exception(f"An unexpected error occurred while running Blender script '{Path(python_script_path).name}': {e}") +# return False - log.debug(f"Executing Blender command: {' '.join(command)}") # Log the command for debugging - try: - # Execute the command - # capture_output=True captures stdout and stderr - # text=True decodes stdout/stderr as text - # check=False prevents raising CalledProcessError on non-zero exit codes - result = subprocess.run(command, capture_output=True, text=True, check=False) +# --- Main CLI Execution (COMMENTED OUT - Replaced by GUI App Flow) --- +# def main(): +# """Parses arguments, sets up logging, runs processing, and reports summary.""" +# parser = setup_arg_parser() +# args = parser.parse_args() +# +# # Setup logging based on verbosity argument *before* logging status messages +# setup_logging(args.verbose) +# +# start_time = time.time() +# log.info("Asset Processor Script Started (CLI Mode)") +# +# # --- Validate Input Paths --- +# valid_inputs = [] +# for p_str in args.input_paths: +# p = Path(p_str) +# if p.exists(): +# suffix = p.suffix.lower() +# if p.is_dir() or (p.is_file() and suffix in ['.zip', '.rar', '.7z']): +# valid_inputs.append(p_str) # Store the original string path +# else: +# log.warning(f"Input is not a directory or a supported archive type (.zip, .rar, .7z), skipping: {p_str}") +# else: +# log.warning(f"Input path not found, skipping: {p_str}") +# +# if not valid_inputs: +# log.error("No valid input paths found. 
Exiting.") +# sys.exit(1) # Exit with error code +# +# # --- Determine Output Directory --- +# output_dir_str = args.output_dir # Get value from args (might be None) +# if not output_dir_str: +# log.debug("Output directory not specified via -o, reading default from config.py.") +# try: +# output_dir_str = getattr(core_config_module, 'OUTPUT_BASE_DIR', None) +# if not output_dir_str: +# log.error("Output directory not specified with -o and OUTPUT_BASE_DIR not found or empty in config.py. Exiting.") +# sys.exit(1) +# log.info(f"Using default output directory from config.py: {output_dir_str}") +# except Exception as e: +# log.error(f"Could not read OUTPUT_BASE_DIR from config.py: {e}") +# sys.exit(1) +# +# # --- Resolve Output Path (Handles Relative Paths Explicitly) --- +# output_path_obj: Path +# if os.path.isabs(output_dir_str): +# output_path_obj = Path(output_dir_str) +# log.info(f"Using absolute output directory: {output_path_obj}") +# else: +# # Path() interprets relative paths against CWD by default +# output_path_obj = Path(output_dir_str) +# log.info(f"Using relative output directory '{output_dir_str}'. 
Resolved against CWD to: {output_path_obj.resolve()}") +# +# # --- Validate and Setup Output Directory --- +# try: +# # Resolve to ensure we have an absolute path for consistency and creation +# resolved_output_dir = output_path_obj.resolve() +# log.info(f"Ensuring output directory exists: {resolved_output_dir}") +# resolved_output_dir.mkdir(parents=True, exist_ok=True) +# # Use the resolved absolute path string for the processor +# output_dir_for_processor = str(resolved_output_dir) +# except Exception as e: +# log.error(f"Cannot create or access output directory '{resolved_output_dir}': {e}", exc_info=True) +# sys.exit(1) +# +# # --- Check Preset Existence (Basic Check) --- +# preset_dir = Path(__file__).parent / "presets" +# preset_file = preset_dir / f"{args.preset}.json" +# if not preset_file.is_file(): +# log.error(f"Preset file not found: {preset_file}") +# log.error(f"Ensure a file named '{args.preset}.json' exists in the directory: {preset_dir.resolve()}") +# sys.exit(1) +# +# # --- Execute Processing via the new function --- +# processing_results = run_processing( +# valid_inputs=valid_inputs, +# preset_name=args.preset, +# output_dir_for_processor=output_dir_for_processor, +# overwrite=args.overwrite, +# num_workers=args.workers, +# verbose=args.verbose # Pass the verbose flag +# ) +# +# # --- Report Summary --- +# duration = time.time() - start_time +# successful_processed_count = processing_results["processed"] +# skipped_count = processing_results["skipped"] +# failed_count = processing_results["failed"] +# results_list = processing_results["results_list"] +# +# log.info("=" * 40) +# log.info("Processing Summary") +# log.info(f" Duration: {duration:.2f} seconds") +# log.info(f" Assets Attempted: {len(valid_inputs)}") +# log.info(f" Successfully Processed: {successful_processed_count}") +# log.info(f" Skipped (Already Existed): {skipped_count}") +# log.info(f" Failed: {failed_count}") +# +# if processing_results.get("pool_error"): +# log.error(f" 
Process Pool Error: {processing_results['pool_error']}") +# # Ensure failed count reflects pool error if it happened +# if failed_count == 0 and successful_processed_count == 0 and skipped_count == 0: +# failed_count = len(valid_inputs) # Assume all failed if pool died early +# +# exit_code = 0 +# if failed_count > 0: +# log.warning("Failures occurred:") +# # Iterate through results to show specific errors for failed items +# for input_path, status, err_msg in results_list: +# if status == "failed": +# log.warning(f" - {Path(input_path).name}: {err_msg}") +# exit_code = 1 # Exit with error code if failures occurred +# else: +# # Consider skipped assets as a form of success for the overall run exit code +# if successful_processed_count > 0 or skipped_count > 0: +# log.info("All assets processed or skipped successfully.") +# exit_code = 0 # Exit code 0 indicates success (including skips) +# else: +# # This case might happen if all inputs were invalid initially +# log.warning("No assets were processed, skipped, or failed (check input validation logs).") +# exit_code = 0 # Still exit 0 as the script itself didn't crash +# +# # --- Blender Script Execution (Optional) --- +# run_nodegroups = False +# run_materials = False +# nodegroup_blend_path = None +# materials_blend_path = None +# blender_exe = None +# +# # 1. Find Blender Executable +# try: +# blender_exe_config = getattr(core_config_module, 'BLENDER_EXECUTABLE_PATH', None) +# if blender_exe_config: +# # Check if the path in config exists +# if Path(blender_exe_config).is_file(): +# blender_exe = str(Path(blender_exe_config).resolve()) +# log.info(f"Using Blender executable from config: {blender_exe}") +# else: +# # Try finding it in PATH if config path is invalid +# log.warning(f"Blender path in config not found: '{blender_exe_config}'. 
Trying to find 'blender' in PATH.") +# blender_exe = shutil.which("blender") +# if blender_exe: +# log.info(f"Found Blender executable in PATH: {blender_exe}") +# else: +# log.warning("Could not find 'blender' in system PATH.") +# else: +# # Try finding it in PATH if not set in config +# log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying to find 'blender' in PATH.") +# blender_exe = shutil.which("blender") +# if blender_exe: +# log.info(f"Found Blender executable in PATH: {blender_exe}") +# else: +# log.warning("Could not find 'blender' in system PATH.") +# +# if not blender_exe: +# log.warning("Blender executable not found or configured. Skipping Blender script execution.") +# +# except Exception as e: +# log.error(f"Error checking Blender executable path: {e}") +# blender_exe = None # Ensure it's None on error +# +# # 2. Determine Blend File Paths if Blender Exe is available +# if blender_exe: +# # Nodegroup Blend Path +# nodegroup_blend_arg = args.nodegroup_blend +# if nodegroup_blend_arg: +# p = Path(nodegroup_blend_arg) +# if p.is_file() and p.suffix.lower() == '.blend': +# nodegroup_blend_path = str(p.resolve()) +# log.info(f"Using nodegroup blend file from argument: {nodegroup_blend_path}") +# else: +# log.warning(f"Invalid nodegroup blend file path from argument: '{nodegroup_blend_arg}'. Ignoring.") +# else: +# default_ng_path_str = getattr(core_config_module, 'DEFAULT_NODEGROUP_BLEND_PATH', None) +# if default_ng_path_str: +# p = Path(default_ng_path_str) +# if p.is_file() and p.suffix.lower() == '.blend': +# nodegroup_blend_path = str(p.resolve()) +# log.info(f"Using default nodegroup blend file from config: {nodegroup_blend_path}") +# else: +# log.warning(f"Invalid default nodegroup blend file path in config: '{default_ng_path_str}'. 
Ignoring.") +# +# # Materials Blend Path +# materials_blend_arg = args.materials_blend +# if materials_blend_arg: +# p = Path(materials_blend_arg) +# if p.is_file() and p.suffix.lower() == '.blend': +# materials_blend_path = str(p.resolve()) +# log.info(f"Using materials blend file from argument: {materials_blend_path}") +# else: +# log.warning(f"Invalid materials blend file path from argument: '{materials_blend_arg}'. Ignoring.") +# else: +# default_mat_path_str = getattr(core_config_module, 'DEFAULT_MATERIALS_BLEND_PATH', None) +# if default_mat_path_str: +# p = Path(default_mat_path_str) +# if p.is_file() and p.suffix.lower() == '.blend': +# materials_blend_path = str(p.resolve()) +# log.info(f"Using default materials blend file from config: {materials_blend_path}") +# else: +# log.warning(f"Invalid default materials blend file path in config: '{default_mat_path_str}'. Ignoring.") +# +# # 3. Execute Scripts if Paths are Valid +# if blender_exe: +# script_dir = Path(__file__).parent / "blenderscripts" +# nodegroup_script_path = script_dir / "create_nodegroups.py" +# materials_script_path = script_dir / "create_materials.py" +# asset_output_root = output_dir_for_processor # Use the resolved output dir +# +# if nodegroup_blend_path: +# if nodegroup_script_path.is_file(): +# log.info("-" * 40) +# log.info("Starting Blender Node Group Script Execution...") +# success_ng = run_blender_script( +# blender_exe_path=blender_exe, +# blend_file_path=nodegroup_blend_path, +# python_script_path=str(nodegroup_script_path), +# asset_root_dir=asset_output_root +# ) +# if not success_ng: +# log.error("Blender node group script execution failed.") +# # Optionally change exit code if Blender script fails? 
+# # exit_code = 1 +# log.info("Finished Blender Node Group Script Execution.") +# log.info("-" * 40) +# else: +# log.error(f"Node group script not found: {nodegroup_script_path}") +# +# if materials_blend_path: +# if materials_script_path.is_file(): +# log.info("-" * 40) +# log.info("Starting Blender Material Script Execution...") +# success_mat = run_blender_script( +# blender_exe_path=blender_exe, +# blend_file_path=materials_blend_path, +# python_script_path=str(materials_script_path), +# asset_root_dir=asset_output_root +# ) +# if not success_mat: +# log.error("Blender material script execution failed.") +# # Optionally change exit code if Blender script fails? +# # exit_code = 1 +# log.info("Finished Blender Material Script Execution.") +# log.info("-" * 40) +# else: +# log.error(f"Material script not found: {materials_script_path}") +# +# # --- Final Exit --- +# log.info("Asset Processor Script Finished.") +# sys.exit(exit_code) - # Log results - log.info(f"Blender script '{Path(python_script_path).name}' finished with exit code: {result.returncode}") - if result.stdout: - log.debug(f"Blender stdout:\n{result.stdout.strip()}") - if result.stderr: - # Log stderr as warning or error depending on return code - if result.returncode != 0: - log.error(f"Blender stderr:\n{result.stderr.strip()}") + +# --- Main Application Class (Integrates GUI and Engine) --- +class App(QObject): + # Signal emitted when all queued processing tasks are complete + all_tasks_finished = Signal(int, int, int) # processed_count, skipped_count, failed_count (Placeholder counts for now) + + def __init__(self): + super().__init__() + self.config_obj = None + self.processing_engine = None + self.main_window = None + self.thread_pool = QThreadPool() + self._active_tasks_count = 0 # Track running tasks + self._task_results = {"processed": 0, "skipped": 0, "failed": 0} # Store results + log.info(f"Maximum threads for pool: {self.thread_pool.maxThreadCount()}") + + self._load_config() + 
self._init_engine() + self._init_gui() + + def _load_config(self): + """Loads the base configuration using a default preset.""" + # The actual preset name comes from the GUI request later, but the engine + # needs an initial valid configuration object. + try: + # Find the first available preset to use as a default + preset_dir = Path(__file__).parent / "Presets" + default_preset_name = None + if preset_dir.is_dir(): + presets = sorted([f.stem for f in preset_dir.glob("*.json") if f.is_file() and not f.name.startswith('_')]) + if presets: + default_preset_name = presets[0] + log.info(f"Using first available preset as default for initial config: '{default_preset_name}'") + + if not default_preset_name: + # Fallback or raise error if no presets found + log.error("No presets found in the 'Presets' directory. Cannot initialize default configuration.") + # Option 1: Raise an error + raise ConfigurationError("No presets found to load default configuration.") + # Option 2: Try initializing with None (if Configuration handles it, unlikely based on error) + # self.config_obj = Configuration(preset_name=None) + + self.config_obj = Configuration(preset_name=default_preset_name) # Pass the default preset name + log.info(f"Base configuration loaded using default preset '{default_preset_name}'.") + except ConfigurationError as e: + log.error(f"Fatal: Failed to load base configuration using default preset: {e}") + # In a real app, show this error to the user before exiting + sys.exit(1) + except Exception as e: + log.exception(f"Fatal: Unexpected error loading configuration: {e}") + sys.exit(1) + + def _init_engine(self): + """Initializes the ProcessingEngine.""" + if self.config_obj: + try: + self.processing_engine = ProcessingEngine(self.config_obj) + log.info("ProcessingEngine initialized.") + except Exception as e: + log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}") + # Show error and exit + sys.exit(1) + else: + log.error("Fatal: Cannot initialize 
ProcessingEngine without configuration.") + sys.exit(1) + + def _init_gui(self): + """Initializes the MainWindow and connects signals.""" + if self.processing_engine: + self.main_window = MainWindow() # MainWindow now part of the App + # Connect the signal from the GUI to the App's slot using QueuedConnection + connection_success = self.main_window.processing_requested.connect(self.on_processing_requested, Qt.ConnectionType.QueuedConnection) + log.info(f"DEBUG: Connection result for processing_requested (Queued): {connection_success}") # <-- Modified LOG + if not connection_success: + log.error("*********************************************************") + log.error("FATAL: Failed to connect MainWindow.processing_requested signal to App.on_processing_requested slot!") + log.error("*********************************************************") + # Connect the App's completion signal to the MainWindow's slot + self.all_tasks_finished.connect(self.main_window.on_processing_finished) + log.info("MainWindow initialized and signals connected.") + else: + log.error("Fatal: Cannot initialize MainWindow without ProcessingEngine.") + sys.exit(1) + + @Slot(list) # Slot to receive List[SourceRule] + def on_processing_requested(self, source_rules: list): + # log.info("*********************************************************") # REMOVED + log.debug("DEBUG: App.on_processing_requested slot entered.") # DEBUG Verify Entry (Keep this one) + # log.info("*********************************************************") # REMOVED + """Handles the processing request from the GUI.""" + # --- Restore original logic --- + log.info(f"Received processing request for {len(source_rules)} rule sets.") + log.info(f"DEBUG: Rules received by on_processing_requested: {source_rules}") # DEBUG LOG + log.info(f"VERIFY: App.on_processing_requested received {len(source_rules)} rules.") # DEBUG Verify + for i, rule in enumerate(source_rules): + log.debug(f" VERIFY Rule {i}: Input='{rule.input_path}', 
Assets={len(rule.assets)}") # DEBUG Verify + if not self.processing_engine: + log.error("Processing engine not available. Cannot process request.") + # Update GUI status bar maybe? + self.main_window.statusBar().showMessage("Error: Processing Engine not ready.", 5000) + return + if not source_rules: + log.warning("Processing requested with an empty rule list.") + self.main_window.statusBar().showMessage("No rules to process.", 3000) + return + + # Reset task counter and results for this batch + self._active_tasks_count = len(source_rules) + self._task_results = {"processed": 0, "skipped": 0, "failed": 0} + log.debug(f"Initialized active task count to: {self._active_tasks_count}") + + # Update GUI progress bar/status + self.main_window.progress_bar.setMaximum(len(source_rules)) + self.main_window.progress_bar.setValue(0) + self.main_window.progress_bar.setFormat(f"0/{len(source_rules)} tasks") + + # --- Get paths needed for ProcessingTask --- + try: + output_base_path_str = self.main_window.output_path_edit.text().strip() + if not output_base_path_str: + log.error("Cannot queue tasks: Output directory path is empty in the GUI.") + self.main_window.statusBar().showMessage("Error: Output directory cannot be empty.", 5000) + return + output_base_path = Path(output_base_path_str) + # Basic validation - check if it's likely a valid path structure (doesn't guarantee existence/writability here) + if not output_base_path.is_absolute(): + # Or attempt to resolve relative to workspace? For now, require absolute from GUI. + log.warning(f"Output path '{output_base_path}' is not absolute. 
Processing might fail if relative path is not handled correctly by engine.") + # Consider resolving: output_base_path = Path.cwd() / output_base_path # If relative paths are allowed + + # Define workspace path (assuming main.py is in the project root) + workspace_path = Path(__file__).parent.resolve() + log.debug(f"Using Workspace Path: {workspace_path}") + log.debug(f"Using Output Base Path: {output_base_path}") + + except Exception as e: + log.exception(f"Error getting/validating paths for processing task: {e}") + self.main_window.statusBar().showMessage(f"Error preparing paths: {e}", 5000) + return + # --- End Get paths --- + + + # Queue tasks in the thread pool + log.debug("DEBUG: Entering task queuing loop.") # <-- Keep this log + for i, rule in enumerate(source_rules): # Added enumerate for index logging + if isinstance(rule, SourceRule): + log.debug(f"DEBUG: Preparing to queue task {i+1}/{len(source_rules)} for rule: {rule.input_path}") # <-- Keep this log + # Pass the required paths to the ProcessingTask constructor + task = ProcessingTask( + engine=self.processing_engine, + rule=rule, + workspace_path=workspace_path, + output_base_path=output_base_path + ) + # Connect the task's finished signal to the App's slot + task.signals.finished.connect(self._on_task_finished) + log.debug(f"DEBUG: Calling thread_pool.start() for task {i+1}") # <-- Keep this log + self.thread_pool.start(task) + log.debug(f"DEBUG: Returned from thread_pool.start() for task {i+1}") # <-- Keep this log else: - log.warning(f"Blender stderr (Return Code 0):\n{result.stderr.strip()}") # Log stderr even on success as scripts might print warnings + log.warning(f"Skipping invalid item (index {i}) in rule list: {type(rule)}") # Added index - return result.returncode == 0 + log.info(f"Queued {len(source_rules)} processing tasks (finished loop).") # Added context + # --- End Restore original logic --- + # GUI status already updated in MainWindow when signal was emitted - except 
FileNotFoundError: - log.error(f"Blender executable not found at: {blender_exe_path}") - return False - except Exception as e: - log.exception(f"An unexpected error occurred while running Blender script '{Path(python_script_path).name}': {e}") - return False + # --- Slot to handle completion of individual tasks --- + @Slot(str, str, object) + def _on_task_finished(self, rule_input_path, status, result_or_error): + """Handles the 'finished' signal from a ProcessingTask.""" + log.info(f"Task finished signal received for {rule_input_path}. Status: {status}") + self._active_tasks_count -= 1 + log.debug(f"Active tasks remaining: {self._active_tasks_count}") + + # Update overall results (basic counts for now) + if status == "processed": + self._task_results["processed"] += 1 + elif status == "skipped": # Assuming engine might return 'skipped' status eventually + self._task_results["skipped"] += 1 + else: # Count all other statuses (failed_preparation, failed_processing) as failed + self._task_results["failed"] += 1 + + # Update progress bar + total_tasks = self.main_window.progress_bar.maximum() + completed_tasks = total_tasks - self._active_tasks_count + self.main_window.update_progress_bar(completed_tasks, total_tasks) # Use MainWindow's method + + # Update status for the specific file in the GUI (if needed) + # self.main_window.update_file_status(rule_input_path, status, str(result_or_error) if result_or_error else "") + + if self._active_tasks_count == 0: + log.info("All processing tasks finished.") + # Emit the signal with the final counts + self.all_tasks_finished.emit( + self._task_results["processed"], + self._task_results["skipped"], + self._task_results["failed"] + ) + elif self._active_tasks_count < 0: + log.error("Error: Active task count went below zero!") # Should not happen + + def run(self): + """Shows the main window.""" + if self.main_window: + self.main_window.show() + log.info("Application started. 
Showing main window.") + else: + log.error("Cannot run application, MainWindow not initialized.") -# --- Main Execution (for CLI usage) --- -def main(): - """Parses arguments, sets up logging, runs processing, and reports summary.""" - parser = setup_arg_parser() - args = parser.parse_args() +# --- Main CLI Execution Function (Adapted from old main()) --- +def run_cli(args): # Accept parsed args + """Uses parsed arguments, sets up logging, runs processing, and reports summary for CLI mode.""" + # parser = setup_arg_parser() # No longer needed + # args = parser.parse_args() # Args are passed in - # Setup logging based on verbosity argument *before* logging status messages - setup_logging(args.verbose) + # --- Validate required CLI arguments --- + if not args.input_paths: + log.error("CLI Error: Input path(s) are required for CLI mode.") + sys.exit(1) + if not args.preset: + log.error("CLI Error: Preset (-p/--preset) is required for CLI mode.") + sys.exit(1) + # --- End Validation --- + + # Logging setup is already done outside this function in the __main__ block start_time = time.time() log.info("Asset Processor Script Started (CLI Mode)") @@ -403,10 +988,11 @@ def main(): p = Path(p_str) if p.exists(): suffix = p.suffix.lower() - if p.is_dir() or (p.is_file() and suffix in ['.zip', '.rar', '.7z']): + # TODO: Add support for other archive types if needed (.rar, .7z) + if p.is_dir() or (p.is_file() and suffix == '.zip'): valid_inputs.append(p_str) # Store the original string path else: - log.warning(f"Input is not a directory or a supported archive type (.zip, .rar, .7z), skipping: {p_str}") + log.warning(f"Input is not a directory or a supported archive type (.zip), skipping: {p_str}") else: log.warning(f"Input path not found, skipping: {p_str}") @@ -428,215 +1014,160 @@ def main(): log.error(f"Could not read OUTPUT_BASE_DIR from config.py: {e}") sys.exit(1) - # --- Resolve Output Path (Handles Relative Paths Explicitly) --- - output_path_obj: Path - if 
os.path.isabs(output_dir_str): - output_path_obj = Path(output_dir_str) - log.info(f"Using absolute output directory: {output_path_obj}") - else: - # Path() interprets relative paths against CWD by default - output_path_obj = Path(output_dir_str) - log.info(f"Using relative output directory '{output_dir_str}'. Resolved against CWD to: {output_path_obj.resolve()}") + # --- Resolve Output Path --- + output_path_obj = Path(output_dir_str).resolve() # Resolve to absolute path # --- Validate and Setup Output Directory --- try: - # Resolve to ensure we have an absolute path for consistency and creation - resolved_output_dir = output_path_obj.resolve() - log.info(f"Ensuring output directory exists: {resolved_output_dir}") - resolved_output_dir.mkdir(parents=True, exist_ok=True) - # Use the resolved absolute path string for the processor - output_dir_for_processor = str(resolved_output_dir) + log.info(f"Ensuring output directory exists: {output_path_obj}") + output_path_obj.mkdir(parents=True, exist_ok=True) + output_dir_for_processor = str(output_path_obj) except Exception as e: - log.error(f"Cannot create or access output directory '{resolved_output_dir}': {e}", exc_info=True) + log.error(f"Cannot create or access output directory '{output_path_obj}': {e}", exc_info=True) sys.exit(1) - # --- Check Preset Existence (Basic Check) --- - preset_dir = Path(__file__).parent / "presets" - preset_file = preset_dir / f"{args.preset}.json" - if not preset_file.is_file(): - log.error(f"Preset file not found: {preset_file}") - log.error(f"Ensure a file named '{args.preset}.json' exists in the directory: {preset_dir.resolve()}") + # --- Load Configuration --- + try: + config = Configuration(args.preset) # Pass preset name from args + log.info(f"Configuration loaded for preset: {args.preset}") + except ConfigurationError as e: + log.error(f"Error loading configuration for preset '{args.preset}': {e}") + sys.exit(1) + except Exception as e: + log.exception(f"Unexpected error loading 
configuration: {e}") sys.exit(1) - # --- Execute Processing via the new function --- - processing_results = run_processing( - valid_inputs=valid_inputs, - preset_name=args.preset, - output_dir_for_processor=output_dir_for_processor, - overwrite=args.overwrite, - num_workers=args.workers, - verbose=args.verbose # Pass the verbose flag - ) + # --- Initialize Processing Engine --- + try: + engine = ProcessingEngine(config) + log.info("ProcessingEngine initialized for CLI mode.") + except Exception as e: + log.exception(f"Fatal: Failed to initialize ProcessingEngine: {e}") + sys.exit(1) + + # --- Execute Processing (Simplified Sequential for now) --- + # TODO: Re-implement parallel processing using concurrent.futures if needed. + # TODO: CLI mode needs a way to generate SourceRule objects. + # For now, we'll pass a simplified structure or assume engine handles it. + # This part likely needs significant adaptation based on ProcessingEngine.process requirements. + log.warning("CLI processing currently uses simplified sequential execution.") + log.warning("SourceRule generation for CLI mode is basic and may need refinement.") + + processed_count = 0 + skipped_count = 0 # Placeholder + failed_count = 0 + results_list = [] # Placeholder + + for input_path_str in valid_inputs: + log.info(f"--- Processing Input: {Path(input_path_str).name} ---") + try: + # --- Basic SourceRule Creation (Needs Review/Adaptation) --- + # This is a placeholder. The engine likely needs more detailed file info. + # We might need to extract file list here like the GUI does. + input_path_obj = Path(input_path_str) + # Example: Create a rule assuming the input is a single asset + # This won't handle multi-asset archives correctly without more logic. 
+ asset_name = input_path_obj.stem # Basic assumption + # File list extraction would be needed here for proper FileRule creation + # file_list = _extract_file_list(input_path_str) # Need to define/import this helper + # file_rules = [FileRule(file_path=f) for f in file_list] if file_list else [] + # asset_rule = AssetRule(asset_name=asset_name, files=file_rules) + # rule = SourceRule(input_path=input_path_str, assets=[asset_rule], supplier_identifier=config.supplier_name) + # --- End Placeholder --- + + # --- TEMPORARY: Call engine process with just config and path --- + # This assumes engine.process can handle this or needs adaptation. + # If engine.process strictly requires a SourceRule, this will fail. + # result = engine.process(config=config, input_path=input_path_obj, overwrite=args.overwrite) + # --- END TEMPORARY --- + + # --- Attempt with Placeholder SourceRule (More likely signature) --- + # This still requires file list extraction and rule creation logic + log.error("CLI Processing Logic Incomplete: SourceRule creation and engine call need implementation.") + # Example (requires file list extraction and rule building): + # rule = build_basic_source_rule(input_path_str, config) # Hypothetical function + # if rule: + # engine.process(rule) # Assuming process takes one rule + # processed_count += 1 # Basic success tracking + # else: + # log.warning(f"Could not create basic rule for {input_path_str}, skipping.") + # failed_count += 1 + # --- End Placeholder --- + raise NotImplementedError("CLI processing logic for SourceRule creation and engine call is not fully implemented.") + + + except NotImplementedError as e: + log.error(f"Stopping CLI run due to incomplete implementation: {e}") + failed_count += 1 + break # Stop processing further items + except Exception as e: + log.exception(f"Error processing input '{Path(input_path_str).name}': {e}") + failed_count += 1 + results_list.append((input_path_str, "failed", str(e))) # Placeholder result # --- Report 
Summary --- duration = time.time() - start_time - successful_processed_count = processing_results["processed"] - skipped_count = processing_results["skipped"] - failed_count = processing_results["failed"] - results_list = processing_results["results_list"] - log.info("=" * 40) - log.info("Processing Summary") + log.info("CLI Processing Summary") log.info(f" Duration: {duration:.2f} seconds") - log.info(f" Assets Attempted: {len(valid_inputs)}") - log.info(f" Successfully Processed: {successful_processed_count}") - log.info(f" Skipped (Already Existed): {skipped_count}") + log.info(f" Inputs Attempted: {len(valid_inputs)}") + log.info(f" Successfully Processed: {processed_count}") + log.info(f" Skipped: {skipped_count}") log.info(f" Failed: {failed_count}") - if processing_results.get("pool_error"): - log.error(f" Process Pool Error: {processing_results['pool_error']}") - # Ensure failed count reflects pool error if it happened - if failed_count == 0 and successful_processed_count == 0 and skipped_count == 0: - failed_count = len(valid_inputs) # Assume all failed if pool died early - exit_code = 0 if failed_count > 0: - log.warning("Failures occurred:") - # Iterate through results to show specific errors for failed items + log.warning("Failures occurred.") + # Log specific errors if results_list was populated for input_path, status, err_msg in results_list: - if status == "failed": - log.warning(f" - {Path(input_path).name}: {err_msg}") + if status == "failed": + log.warning(f" - {Path(input_path).name}: {err_msg}") exit_code = 1 # Exit with error code if failures occurred - else: - # Consider skipped assets as a form of success for the overall run exit code - if successful_processed_count > 0 or skipped_count > 0: - log.info("All assets processed or skipped successfully.") - exit_code = 0 # Exit code 0 indicates success (including skips) - else: - # This case might happen if all inputs were invalid initially - log.warning("No assets were processed, skipped, or 
failed (check input validation logs).") - exit_code = 0 # Still exit 0 as the script itself didn't crash - # --- Blender Script Execution (Optional) --- - run_nodegroups = False - run_materials = False - nodegroup_blend_path = None - materials_blend_path = None - blender_exe = None - - # 1. Find Blender Executable - try: - blender_exe_config = getattr(core_config_module, 'BLENDER_EXECUTABLE_PATH', None) - if blender_exe_config: - # Check if the path in config exists - if Path(blender_exe_config).is_file(): - blender_exe = str(Path(blender_exe_config).resolve()) - log.info(f"Using Blender executable from config: {blender_exe}") - else: - # Try finding it in PATH if config path is invalid - log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying to find 'blender' in PATH.") - blender_exe = shutil.which("blender") - if blender_exe: - log.info(f"Found Blender executable in PATH: {blender_exe}") - else: - log.warning("Could not find 'blender' in system PATH.") - else: - # Try finding it in PATH if not set in config - log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying to find 'blender' in PATH.") - blender_exe = shutil.which("blender") - if blender_exe: - log.info(f"Found Blender executable in PATH: {blender_exe}") - else: - log.warning("Could not find 'blender' in system PATH.") - - if not blender_exe: - log.warning("Blender executable not found or configured. Skipping Blender script execution.") - - except Exception as e: - log.error(f"Error checking Blender executable path: {e}") - blender_exe = None # Ensure it's None on error - - # 2. 
Determine Blend File Paths if Blender Exe is available - if blender_exe: - # Nodegroup Blend Path - nodegroup_blend_arg = args.nodegroup_blend - if nodegroup_blend_arg: - p = Path(nodegroup_blend_arg) - if p.is_file() and p.suffix.lower() == '.blend': - nodegroup_blend_path = str(p.resolve()) - log.info(f"Using nodegroup blend file from argument: {nodegroup_blend_path}") - else: - log.warning(f"Invalid nodegroup blend file path from argument: '{nodegroup_blend_arg}'. Ignoring.") - else: - default_ng_path_str = getattr(core_config_module, 'DEFAULT_NODEGROUP_BLEND_PATH', None) - if default_ng_path_str: - p = Path(default_ng_path_str) - if p.is_file() and p.suffix.lower() == '.blend': - nodegroup_blend_path = str(p.resolve()) - log.info(f"Using default nodegroup blend file from config: {nodegroup_blend_path}") - else: - log.warning(f"Invalid default nodegroup blend file path in config: '{default_ng_path_str}'. Ignoring.") - - # Materials Blend Path - materials_blend_arg = args.materials_blend - if materials_blend_arg: - p = Path(materials_blend_arg) - if p.is_file() and p.suffix.lower() == '.blend': - materials_blend_path = str(p.resolve()) - log.info(f"Using materials blend file from argument: {materials_blend_path}") - else: - log.warning(f"Invalid materials blend file path from argument: '{materials_blend_arg}'. Ignoring.") - else: - default_mat_path_str = getattr(core_config_module, 'DEFAULT_MATERIALS_BLEND_PATH', None) - if default_mat_path_str: - p = Path(default_mat_path_str) - if p.is_file() and p.suffix.lower() == '.blend': - materials_blend_path = str(p.resolve()) - log.info(f"Using default materials blend file from config: {materials_blend_path}") - else: - log.warning(f"Invalid default materials blend file path in config: '{default_mat_path_str}'. Ignoring.") - - # 3. 
Execute Scripts if Paths are Valid - if blender_exe: - script_dir = Path(__file__).parent / "blenderscripts" - nodegroup_script_path = script_dir / "create_nodegroups.py" - materials_script_path = script_dir / "create_materials.py" - asset_output_root = output_dir_for_processor # Use the resolved output dir - - if nodegroup_blend_path: - if nodegroup_script_path.is_file(): - log.info("-" * 40) - log.info("Starting Blender Node Group Script Execution...") - success_ng = run_blender_script( - blender_exe_path=blender_exe, - blend_file_path=nodegroup_blend_path, - python_script_path=str(nodegroup_script_path), - asset_root_dir=asset_output_root - ) - if not success_ng: - log.error("Blender node group script execution failed.") - # Optionally change exit code if Blender script fails? - # exit_code = 1 - log.info("Finished Blender Node Group Script Execution.") - log.info("-" * 40) - else: - log.error(f"Node group script not found: {nodegroup_script_path}") - - if materials_blend_path: - if materials_script_path.is_file(): - log.info("-" * 40) - log.info("Starting Blender Material Script Execution...") - success_mat = run_blender_script( - blender_exe_path=blender_exe, - blend_file_path=materials_blend_path, - python_script_path=str(materials_script_path), - asset_root_dir=asset_output_root - ) - if not success_mat: - log.error("Blender material script execution failed.") - # Optionally change exit code if Blender script fails? - # exit_code = 1 - log.info("Finished Blender Material Script Execution.") - log.info("-" * 40) - else: - log.error(f"Material script not found: {materials_script_path}") + # --- Blender Script Execution (Optional - Copied from old main()) --- + # This section might need review based on current config/engine + run_blender = False # Placeholder, add logic if needed + if run_blender: + # ... (Blender execution logic from old main() would go here) ... 
if __name__ == "__main__":
    # Entry point: dispatch to CLI or GUI mode depending on arguments.
    # Setup argument parser
    parser = setup_arg_parser()
    # Parse all arguments now
    args = parser.parse_args()

    # Setup logging based on --verbose flag
    setup_logging(args.verbose)

    # Determine mode based on presence of required CLI args
    if args.input_paths or args.preset:
        # If either input_paths or preset is provided, assume CLI mode
        # run_cli will handle validation that *both* are actually present
        log.info("CLI arguments detected (input_paths or preset), attempting CLI mode.")
        run_cli(args)  # Pass parsed args to run_cli
    else:
        # If neither input_paths nor preset is provided, run GUI mode
        log.info("No required CLI arguments detected, starting GUI mode.")
        # --- Run the GUI Application ---
        try:
            qt_app = QApplication(sys.argv)  # Pass original sys.argv
            # Optional: Apply style/palette if desired
            qt_app.setStyle('Fusion')
            # palette = qt_app.palette() ... set colors ... qt_app.setPalette(palette)

            app_instance = App()
            app_instance.run()

            # exec() blocks until the Qt event loop exits; its code becomes ours.
            sys.exit(qt_app.exec())
        except Exception as gui_exc:
            log.exception(f"An error occurred during GUI startup or execution: {gui_exc}")
            sys.exit(1)

    # --- Old logic removed ---
EXR saving relies on OpenCV's built-in support.") + + +# Import project-specific modules +try: + from configuration import Configuration, ConfigurationError + from rule_structure import SourceRule, AssetRule, FileRule # Import necessary structures +except ImportError: + print("ERROR: Cannot import Configuration or rule_structure classes.") + print("Ensure configuration.py and rule_structure.py are in the same directory or Python path.") + # Allow import to fail but log error; execution will likely fail later + Configuration = None + SourceRule = None + AssetRule = None + FileRule = None + + +# Use logger defined in main.py (or configure one here if run standalone) +log = logging.getLogger(__name__) +# Basic config if logger hasn't been set up elsewhere (e.g., during testing) +if not log.hasHandlers(): + logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s') + + +# --- Custom Exception --- +class ProcessingEngineError(Exception): + """Custom exception for errors during processing engine operations.""" + pass + +# --- Helper Functions (Moved from AssetProcessor or kept static) --- + +def _is_power_of_two(n: int) -> bool: + """Checks if a number is a power of two.""" + return (n > 0) and (n & (n - 1) == 0) + +def get_nearest_pot(value: int) -> int: + """Finds the nearest power of two to the given value.""" + if value <= 0: + return 1 # Or raise error, POT must be positive + if _is_power_of_two(value): + return value + + # Calculate the powers of two below and above the value + lower_pot = 1 << (value.bit_length() - 1) + upper_pot = 1 << value.bit_length() + + # Determine which power of two is closer + if (value - lower_pot) < (upper_pot - value): + return lower_pot + else: + return upper_pot + +def calculate_target_dimensions(orig_w, orig_h, target_max_dim) -> tuple[int, int]: + """ + Calculates target dimensions by first scaling to fit target_max_dim + while maintaining aspect ratio, then finding the nearest power-of-two + value for 
each resulting dimension (Stretch/Squash to POT). + """ + if orig_w <= 0 or orig_h <= 0: + # Fallback to target_max_dim if original dimensions are invalid + pot_dim = get_nearest_pot(target_max_dim) + log.warning(f"Invalid original dimensions ({orig_w}x{orig_h}). Falling back to nearest POT of target_max_dim: {pot_dim}x{pot_dim}") + return (pot_dim, pot_dim) + + # Step 1: Calculate intermediate dimensions maintaining aspect ratio + ratio = orig_w / orig_h + if ratio > 1: # Width is dominant + scaled_w = target_max_dim + scaled_h = max(1, round(scaled_w / ratio)) + else: # Height is dominant or square + scaled_h = target_max_dim + scaled_w = max(1, round(scaled_h * ratio)) + + # Step 2: Find the nearest power of two for each scaled dimension + pot_w = get_nearest_pot(scaled_w) + pot_h = get_nearest_pot(scaled_h) + + log.debug(f"POT Calc: Orig=({orig_w}x{orig_h}), MaxDim={target_max_dim} -> Scaled=({scaled_w}x{scaled_h}) -> POT=({pot_w}x{pot_h})") + + return int(pot_w), int(pot_h) + +def _calculate_image_stats(image_data: np.ndarray) -> dict | None: + """ + Calculates min, max, mean for a given numpy image array. + Handles grayscale and multi-channel images. Converts to float64 for calculation. 
+ """ + if image_data is None: + log.warning("Attempted to calculate stats on None image data.") + return None + if np is None: + log.error("Numpy not available for stats calculation.") + return None + try: + # Use float64 for calculations to avoid potential overflow/precision issues + data_float = image_data.astype(np.float64) + + # Normalize data_float based on original dtype before calculating stats + if image_data.dtype == np.uint16: + log.debug("Stats calculation: Normalizing uint16 data to 0-1 range.") + data_float /= 65535.0 + elif image_data.dtype == np.uint8: + log.debug("Stats calculation: Normalizing uint8 data to 0-1 range.") + data_float /= 255.0 + # Assuming float inputs are already in 0-1 range or similar + + log.debug(f"Stats calculation: data_float dtype: {data_float.dtype}, shape: {data_float.shape}") + # Log a few sample values to check range after normalization + if data_float.size > 0: + sample_values = data_float.flatten()[:10] # Get first 10 values + log.debug(f"Stats calculation: Sample values (first 10) after normalization: {sample_values.tolist()}") + + + if len(data_float.shape) == 2: # Grayscale (H, W) + min_val = float(np.min(data_float)) + max_val = float(np.max(data_float)) + mean_val = float(np.mean(data_float)) + stats = {"min": min_val, "max": max_val, "mean": mean_val} + log.debug(f"Calculated Grayscale Stats: Min={min_val:.4f}, Max={max_val:.4f}, Mean={mean_val:.4f}") + elif len(data_float.shape) == 3: # Color (H, W, C) + channels = data_float.shape[2] + min_val = [float(v) for v in np.min(data_float, axis=(0, 1))] + max_val = [float(v) for v in np.max(data_float, axis=(0, 1))] + mean_val = [float(v) for v in np.mean(data_float, axis=(0, 1))] + # Assume data is RGB order after potential conversion in _load_and_transform_source + stats = {"min": min_val, "max": max_val, "mean": mean_val} + log.debug(f"Calculated {channels}-Channel Stats (RGB order): Min={min_val}, Max={max_val}, Mean={mean_val}") + else: + log.warning(f"Cannot 
calculate stats for image with unsupported shape {data_float.shape}") + return None + return stats + except Exception as e: + log.error(f"Error calculating image stats: {e}", exc_info=True) # Log exception info + return {"error": str(e)} + +def _get_base_map_type(target_map_string: str) -> str: + """Extracts the base map type (e.g., 'COL') from a potentially numbered string ('COL-1').""" + match = re.match(r"([a-zA-Z]+)", target_map_string) + if match: + return match.group(1).upper() + return target_map_string.upper() # Fallback if no number suffix + +def _sanitize_filename(name: str) -> str: + """Removes or replaces characters invalid for filenames/directory names.""" + if not isinstance(name, str): name = str(name) + name = re.sub(r'[^\w.\-]+', '_', name) # Allow alphanumeric, underscore, hyphen, dot + name = re.sub(r'_+', '_', name) + name = name.strip('_') + if not name: name = "invalid_name" + return name + +def _normalize_aspect_ratio_change(original_width, original_height, resized_width, resized_height, decimals=2): + """ + Calculates the aspect ratio change string (e.g., "EVEN", "X133"). + Returns the string representation. 
+ """ + if original_width <= 0 or original_height <= 0: + log.warning("Cannot calculate aspect ratio change with zero original dimensions.") + return "InvalidInput" + + # Avoid division by zero if resize resulted in zero dimensions (shouldn't happen with checks) + if resized_width <= 0 or resized_height <= 0: + log.warning("Cannot calculate aspect ratio change with zero resized dimensions.") + return "InvalidResize" + + # Original logic from user feedback + width_change_percentage = ((resized_width - original_width) / original_width) * 100 + height_change_percentage = ((resized_height - original_height) / original_height) * 100 + + normalized_width_change = width_change_percentage / 100 + normalized_height_change = height_change_percentage / 100 + + normalized_width_change = min(max(normalized_width_change + 1, 0), 2) + normalized_height_change = min(max(normalized_height_change + 1, 0), 2) + + # Handle potential zero division if one dimension change is exactly -100% (normalized to 0) + # If both are 0, aspect ratio is maintained. If one is 0, the other dominates. 
+ if normalized_width_change == 0 and normalized_height_change == 0: + closest_value_to_one = 1.0 # Avoid division by zero, effectively scale_factor = 1 + elif normalized_width_change == 0: + closest_value_to_one = abs(normalized_height_change) + elif normalized_height_change == 0: + closest_value_to_one = abs(normalized_width_change) + else: + closest_value_to_one = min(abs(normalized_width_change), abs(normalized_height_change)) + + # Add a small epsilon to avoid division by zero if closest_value_to_one is extremely close to 0 + epsilon = 1e-9 + scale_factor = 1 / (closest_value_to_one + epsilon) if abs(closest_value_to_one) < epsilon else 1 / closest_value_to_one + + scaled_normalized_width_change = scale_factor * normalized_width_change + scaled_normalized_height_change = scale_factor * normalized_height_change + + output_width = round(scaled_normalized_width_change, decimals) + output_height = round(scaled_normalized_height_change, decimals) + + # Convert to int if exactly 1.0 after rounding + if abs(output_width - 1.0) < epsilon: output_width = 1 + if abs(output_height - 1.0) < epsilon: output_height = 1 + + # Determine output string + if original_width == original_height or abs(output_width - output_height) < epsilon: + output = "EVEN" + elif output_width != 1 and output_height == 1: + output = f"X{str(output_width).replace('.', '')}" + elif output_height != 1 and output_width == 1: + output = f"Y{str(output_height).replace('.', '')}" + else: + # Both changed relative to each other + output = f"X{str(output_width).replace('.', '')}Y{str(output_height).replace('.', '')}" + + log.debug(f"Aspect ratio change calculated: Orig=({original_width}x{original_height}), Resized=({resized_width}x{resized_height}) -> String='{output}'") + return output + + +# --- Processing Engine Class --- +class ProcessingEngine: + """ + Handles the core processing pipeline for assets based on explicit rules + provided in a SourceRule object and static configuration. 
+ It does not perform classification, prediction, or rule fallback internally. + """ + # Define the list of known grayscale map types (adjust as needed) + # This comes from static knowledge/config, not dynamic rules. + GRAYSCALE_MAP_TYPES = ['HEIGHT', 'ROUGH', 'METAL', 'AO', 'OPC', 'MASK'] + + def __init__(self, config_obj: Configuration): + """ + Initializes the processing engine with static configuration. + + Args: + config_obj: The loaded Configuration object containing static settings. + """ + if cv2 is None or np is None or Configuration is None or SourceRule is None: + raise ProcessingEngineError("Essential libraries (OpenCV, NumPy) or classes (Configuration, SourceRule) are not available.") + + if not isinstance(config_obj, Configuration): + raise ProcessingEngineError("config_obj must be a valid Configuration object.") + + self.config_obj: Configuration = config_obj + self.temp_dir: Path | None = None # Path to the temporary working directory for a process run + self.loaded_data_cache: dict = {} # Cache for loaded/resized data within a single process call + + log.debug("ProcessingEngine initialized.") + + + def process(self, source_rule: SourceRule, workspace_path: Path, output_base_path: Path, overwrite: bool = False) -> Dict[str, List[str]]: + """ + Executes the processing pipeline for all assets defined in the SourceRule. + + Args: + source_rule: The SourceRule object containing explicit instructions for all assets and files. + workspace_path: The path to the directory containing the source files (e.g., extracted archive). + output_base_path: The base directory where processed output will be saved. + overwrite: If True, forces reprocessing even if output exists for an asset. 
+ + Returns: + Dict[str, List[str]]: A dictionary summarizing the status of each asset: + {"processed": [asset_name1, ...], + "skipped": [asset_name2, ...], + "failed": [asset_name3, ...]} + """ + log.info(f"VERIFY: ProcessingEngine.process called with rule for input: {source_rule.input_path}") # DEBUG Verify + log.debug(f" VERIFY Rule Details: {source_rule}") # DEBUG Verify (Optional detailed log) + if not isinstance(source_rule, SourceRule): + raise ProcessingEngineError("process() requires a valid SourceRule object.") + if not isinstance(workspace_path, Path) or not workspace_path.is_dir(): + raise ProcessingEngineError(f"Invalid workspace path provided: {workspace_path}") + if not isinstance(output_base_path, Path): + raise ProcessingEngineError(f"Invalid output base path provided: {output_base_path}") + + log.info(f"ProcessingEngine starting process for {len(source_rule.assets)} asset(s) defined in SourceRule.") + overall_status = {"processed": [], "skipped": [], "failed": []} + self.loaded_data_cache = {} # Reset cache for this run + + # Use a temporary directory for intermediate files (like saved maps) + try: + self.temp_dir = Path(tempfile.mkdtemp(prefix=self.config_obj.temp_dir_prefix)) + log.debug(f"Created temporary workspace for engine: {self.temp_dir}") + # --- Loop through each asset defined in the SourceRule --- + for asset_rule in source_rule.assets: + asset_name = asset_rule.asset_name + log.info(f"--- Processing asset: '{asset_name}' ---") + asset_processed = False + asset_skipped = False + asset_failed = False + temp_metadata_path_asset = None # Track metadata file for this asset + + try: + # --- Skip Check --- + # Use static config for supplier name and metadata filename + supplier_sanitized = _sanitize_filename(self.config_obj.supplier_name) + asset_name_sanitized = _sanitize_filename(asset_name) + final_dir = output_base_path / supplier_sanitized / asset_name_sanitized + metadata_file_path = final_dir / self.config_obj.metadata_filename + + if 
not overwrite and final_dir.exists() and metadata_file_path.is_file(): + log.info(f"Output directory and metadata found for asset '{asset_name_sanitized}' and overwrite is False. Skipping.") + overall_status["skipped"].append(asset_name) + asset_skipped = True + continue # Skip to the next asset + + elif overwrite and final_dir.exists(): + log.warning(f"Output directory exists for '{asset_name_sanitized}' and overwrite is True. Removing existing directory: {final_dir}") + try: + shutil.rmtree(final_dir) + except Exception as rm_err: + raise ProcessingEngineError(f"Failed to remove existing output directory {final_dir} during overwrite: {rm_err}") from rm_err + + # --- Prepare Asset Metadata --- + # Start with common metadata from the rule, add asset name + current_asset_metadata = asset_rule.common_metadata.copy() + current_asset_metadata["asset_name"] = asset_name + # Add other fields that will be populated + current_asset_metadata["maps_present"] = [] + current_asset_metadata["merged_maps"] = [] + current_asset_metadata["shader_features"] = [] + current_asset_metadata["source_files_in_extra"] = [] + current_asset_metadata["image_stats_1k"] = {} + current_asset_metadata["map_details"] = {} + current_asset_metadata["aspect_ratio_change_string"] = "N/A" + current_asset_metadata["merged_map_channel_stats"] = {} # Initialize for stats + + # --- Process Individual Maps --- + processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset = self._process_individual_maps( + asset_rule=asset_rule, + workspace_path=workspace_path, # Use the workspace path received by process() (contains prepared files) + current_asset_metadata=current_asset_metadata # Pass mutable dict + ) + # Update metadata with results (stats and aspect ratio are updated directly in current_asset_metadata by the method) + # map_details are also updated directly in current_asset_metadata + + # --- Merge Maps --- + merged_maps_details_asset = self._merge_maps( + asset_rule=asset_rule, + 
workspace_path=workspace_path, + processed_maps_details_asset=processed_maps_details_asset, # Needed to find resolutions + current_asset_metadata=current_asset_metadata # Pass mutable dict for stats + ) + + # --- Generate Metadata --- + temp_metadata_path_asset = self._generate_metadata_file( + source_rule=source_rule, # Pass the parent SourceRule + asset_rule=asset_rule, + current_asset_metadata=current_asset_metadata, # Pass the populated dict + processed_maps_details_asset=processed_maps_details_asset, + merged_maps_details_asset=merged_maps_details_asset + ) + + # --- Organize Output --- + self._organize_output_files( + asset_rule=asset_rule, + output_base_path=output_base_path, # Pass output path + processed_maps_details_asset=processed_maps_details_asset, + merged_maps_details_asset=merged_maps_details_asset, + temp_metadata_path=temp_metadata_path_asset + ) + + log.info(f"--- Asset '{asset_name}' processed successfully. ---") + overall_status["processed"].append(asset_name) + asset_processed = True + + except Exception as asset_err: + log.error(f"--- Failed processing asset '{asset_name}': {asset_err} ---", exc_info=True) + overall_status["failed"].append(asset_name) + asset_failed = True + # Continue to the next asset + + log.info(f"ProcessingEngine finished. 
Summary: {overall_status}") + return overall_status + + except Exception as e: + log.exception(f"Processing engine failed unexpectedly: {e}") + # Ensure all assets not processed/skipped are marked as failed + processed_or_skipped = set(overall_status["processed"] + overall_status["skipped"]) + for asset_rule in source_rule.assets: + if asset_rule.asset_name not in processed_or_skipped: + overall_status["failed"].append(asset_rule.asset_name) + return overall_status # Return partial status if possible + finally: + self._cleanup_workspace() + + + def _setup_workspace(self): + """Creates a temporary directory for processing.""" + # This is now handled within the process method to ensure it's created per run. + # Kept as a placeholder if needed later, but currently unused. + pass + + def _cleanup_workspace(self): + """Removes the temporary workspace directory if it exists.""" + if self.temp_dir and self.temp_dir.exists(): + try: + log.debug(f"Cleaning up engine temporary workspace: {self.temp_dir}") + # Ignore errors during cleanup (e.g., permission errors on copied .git files) + shutil.rmtree(self.temp_dir, ignore_errors=True) + self.temp_dir = None + log.debug("Engine temporary workspace cleaned up successfully.") + except Exception as e: + log.error(f"Failed to remove engine temporary workspace {self.temp_dir}: {e}", exc_info=True) + self.loaded_data_cache = {} # Clear cache after cleanup + + + def _load_and_transform_source(self, source_path_abs: Path, map_type: str, target_resolution_key: str, is_gloss_source: bool) -> Tuple[Optional[np.ndarray], Optional[np.dtype]]: + """ + Loads a source image file, performs initial prep (BGR->RGB, Gloss->Rough), + resizes it to the target resolution, and caches the result. + Uses static configuration from self.config_obj. + + Args: + source_path_abs: Absolute path to the source file in the workspace. + map_type: The standard map type (e.g., "NRM", "ROUGH", "ROUGH-1"). 
    def _load_and_transform_source(self, source_path_abs: Path, map_type: str, target_resolution_key: str, is_gloss_source: bool) -> Tuple[Optional[np.ndarray], Optional[np.dtype]]:
        """
        Loads a source image file, performs initial prep (BGR->RGB, Gloss->Rough),
        resizes it to the target resolution, and caches the result.
        Uses static configuration from self.config_obj.

        Results — including failures, stored as (None, ...) — are memoized in
        self.loaded_data_cache keyed by (absolute path, resolution key), so the
        same file/resolution pair is never read or resized twice in one run.

        Args:
            source_path_abs: Absolute path to the source file in the workspace.
            map_type: The standard map type (e.g., "NRM", "ROUGH", "ROUGH-1").
            target_resolution_key: The key for the target resolution (e.g., "4K").
            is_gloss_source: Boolean indicating if this source should be treated as gloss for inversion.

        Returns:
            Tuple containing:
            - Resized NumPy array (float32 for gloss-inverted, original type otherwise) or None if loading/processing fails.
            - Original source NumPy dtype or None if loading fails.
        """
        if cv2 is None or np is None:
            log.error("OpenCV or NumPy not available for image loading.")
            return None, None

        cache_key = (source_path_abs, target_resolution_key) # Use absolute path for cache key
        if cache_key in self.loaded_data_cache:
            log.debug(f"CACHE HIT: Returning cached data for {source_path_abs.name} at {target_resolution_key}")
            return self.loaded_data_cache[cache_key] # Return tuple (image_data, source_dtype)

        log.debug(f"CACHE MISS: Loading and transforming {source_path_abs.name} for {target_resolution_key}")
        img_prepared = None
        source_dtype = None

        try:
            # --- 1. Load Source Image ---
            # Determine read flag (Grayscale for specific types, unchanged otherwise)
            # Use static GRAYSCALE_MAP_TYPES list
            base_map_type = _get_base_map_type(map_type) # Get base type (e.g., ROUGH from ROUGH-1)
            read_flag = cv2.IMREAD_GRAYSCALE if base_map_type in self.GRAYSCALE_MAP_TYPES else cv2.IMREAD_UNCHANGED
            # Special case for MASK: always load unchanged first to check alpha
            if base_map_type == 'MASK': read_flag = cv2.IMREAD_UNCHANGED

            log.debug(f"Loading source {source_path_abs.name} with flag: {'GRAYSCALE' if read_flag == cv2.IMREAD_GRAYSCALE else 'UNCHANGED'}")
            img_loaded = cv2.imread(str(source_path_abs), read_flag)
            if img_loaded is None:
                raise ProcessingEngineError(f"Failed to load image file: {source_path_abs.name} with flag {read_flag}")
            source_dtype = img_loaded.dtype
            log.debug(f"Loaded source {source_path_abs.name}, dtype: {source_dtype}, shape: {img_loaded.shape}")

            # --- 2. Initial Preparation (BGR->RGB, Gloss Inversion, MASK handling) ---
            img_prepared = img_loaded # Start with loaded image

            # MASK Handling (Extract alpha or convert) - Do this BEFORE general color conversions
            if base_map_type == 'MASK':
                log.debug(f"Processing as MASK type for {source_path_abs.name}.")
                shape = img_prepared.shape
                if len(shape) == 3 and shape[2] == 4:
                    log.debug("MASK processing: Extracting alpha channel (4-channel source).")
                    img_prepared = img_prepared[:, :, 3] # Extract alpha
                elif len(shape) == 3 and shape[2] == 3:
                    log.debug("MASK processing: Converting BGR to Grayscale (3-channel source).") # OpenCV loads as BGR
                    img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGR2GRAY) # Convert BGR to Gray
                elif len(shape) == 2:
                    log.debug("MASK processing: Source is already grayscale.")
                    # img_prepared remains img_prepared
                else:
                    log.warning(f"MASK processing: Unexpected source shape {shape}. Cannot reliably extract mask.")
                    img_prepared = None # Cannot process
            else:
                # BGR -> RGB conversion (only for 3/4-channel images not loaded as grayscale)
                if len(img_prepared.shape) == 3 and img_prepared.shape[2] >= 3 and read_flag != cv2.IMREAD_GRAYSCALE:
                    log.debug(f"Converting loaded image from BGR to RGB for {source_path_abs.name}.")
                    if img_prepared.shape[2] == 4: # BGRA -> RGB
                        img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGRA2RGB)
                    else: # BGR -> RGB
                        img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_BGR2RGB)
                elif len(img_prepared.shape) == 2:
                    log.debug(f"Image {source_path_abs.name} is grayscale, no BGR->RGB conversion needed.")
                # else: log warning handled below

            if img_prepared is None: raise ProcessingEngineError("Image data is None after MASK/Color prep.")

            # Gloss -> Roughness Inversion (only if map_type starts with ROUGH and is_gloss_source is True)
            if map_type.startswith('ROUGH') and is_gloss_source:
                log.info(f"Performing Gloss->Roughness inversion for {source_path_abs.name}")
                # Ensure grayscale before inversion
                if len(img_prepared.shape) == 3:
                    img_prepared = cv2.cvtColor(img_prepared, cv2.COLOR_RGB2GRAY) # Use RGB2GRAY as it should be RGB now

                # Normalize based on original source dtype before inversion
                # (roughness = 1 - gloss, computed in 0..1 float space)
                if source_dtype == np.uint16:
                    img_float = 1.0 - (img_prepared.astype(np.float32) / 65535.0)
                elif source_dtype == np.uint8:
                    img_float = 1.0 - (img_prepared.astype(np.float32) / 255.0)
                else: # Assuming float input is already 0-1 range
                    img_float = 1.0 - img_prepared.astype(np.float32)
                img_prepared = np.clip(img_float, 0.0, 1.0) # Result is float32
                log.debug(f"Inverted gloss map stored as float32 for ROUGH, original dtype: {source_dtype}")


            # Ensure data is float32/uint8/uint16 for resizing compatibility
            if isinstance(img_prepared, np.ndarray) and img_prepared.dtype not in [np.uint8, np.uint16, np.float32, np.float16]:
                log.warning(f"Converting unexpected dtype {img_prepared.dtype} to float32 before resizing.")
                img_prepared = img_prepared.astype(np.float32)


            # --- 3. Resize ---
            if img_prepared is None: raise ProcessingEngineError("Image data is None after initial prep.")
            orig_h, orig_w = img_prepared.shape[:2]
            # Get resolutions from static config
            target_dim_px = self.config_obj.image_resolutions.get(target_resolution_key)
            if not target_dim_px:
                raise ProcessingEngineError(f"Target resolution key '{target_resolution_key}' not found in config.")

            # Avoid upscaling check (using static config)
            max_original_dimension = max(orig_w, orig_h)
            # TODO: Add config option for allowing upscale? For now, skip if target > original.
            if target_dim_px > max_original_dimension:
                log.warning(f"Target dimension {target_dim_px}px is larger than original {max_original_dimension}px for {source_path_abs.name}. Skipping resize for {target_resolution_key}.")
                # Store None in cache for this specific resolution to avoid retrying
                self.loaded_data_cache[cache_key] = (None, source_dtype)
                return None, source_dtype # Indicate resize was skipped

            if orig_w <= 0 or orig_h <= 0:
                raise ProcessingEngineError(f"Invalid original dimensions ({orig_w}x{orig_h}) for {source_path_abs.name}.")

            target_w, target_h = calculate_target_dimensions(orig_w, orig_h, target_dim_px)
            # Lanczos for downscale, cubic otherwise (area comparison picks the filter)
            interpolation = cv2.INTER_LANCZOS4 if (target_w * target_h) < (orig_w * orig_h) else cv2.INTER_CUBIC
            log.debug(f"Resizing {source_path_abs.name} from ({orig_w}x{orig_h}) to ({target_w}x{target_h}) for {target_resolution_key}")
            img_resized = cv2.resize(img_prepared, (target_w, target_h), interpolation=interpolation)

            # --- 4. Cache and Return ---
            # Keep resized dtype unless it was gloss-inverted (which is float32)
            final_data_to_cache = img_resized
            if map_type.startswith('ROUGH') and is_gloss_source and final_data_to_cache.dtype != np.float32:
                final_data_to_cache = final_data_to_cache.astype(np.float32)

            log.debug(f"CACHING result for {cache_key}. Shape: {final_data_to_cache.shape}, Dtype: {final_data_to_cache.dtype}")
            self.loaded_data_cache[cache_key] = (final_data_to_cache, source_dtype)
            return final_data_to_cache, source_dtype

        except Exception as e:
            log.error(f"Error in _load_and_transform_source for {source_path_abs.name} at {target_resolution_key}: {e}", exc_info=True)
            # Cache None to prevent retrying on error for this specific key
            self.loaded_data_cache[cache_key] = (None, None)
            return None, None
    def _save_image(self, image_data: np.ndarray, map_type: str, resolution_key: str, asset_base_name: str, source_info: dict, output_bit_depth_rule: str) -> Optional[Dict]:
        """
        Handles saving an image NumPy array to a temporary file within the engine's temp_dir.
        Uses static configuration from self.config_obj for formats, quality, etc.

        Pipeline: (1) pick output bit depth from the rule, (2) pick output format
        from lossless forcing / JPG threshold / involved source formats, (3) convert
        dtype, (4) convert RGB->BGR for non-EXR saves, (5) write the file (with a
        PNG fallback for a failed 16-bit EXR write), (6) report details.

        Args:
            image_data: NumPy array containing the image data to save.
            map_type: The standard map type being saved (e.g., "COL", "NRMRGH").
            resolution_key: The resolution key (e.g., "4K").
            asset_base_name: The sanitized base name of the asset.
            source_info: Dictionary containing details about the source(s), e.g.,
                {'original_extension': '.tif', 'source_bit_depth': 16, 'involved_extensions': {'.tif', '.png'}, 'max_input_bit_depth': 16}
            output_bit_depth_rule: Rule for determining output bit depth ('respect', 'force_8bit', 'force_16bit', 'respect_inputs').

        Returns:
            A dictionary containing details of the saved file (path relative to engine's temp_dir,
            width, height, bit_depth, format) or None if saving failed.
        """
        if cv2 is None or np is None:
            log.error("OpenCV or NumPy not available for image saving.")
            return None
        if image_data is None:
            log.error(f"Cannot save image for {map_type} ({resolution_key}): image_data is None.")
            return None
        if not self.temp_dir or not self.temp_dir.exists():
            log.error(f"Cannot save image for {map_type} ({resolution_key}): Engine temp_dir is invalid.")
            return None

        try:
            h, w = image_data.shape[:2]
            current_dtype = image_data.dtype
            log.debug(f"Saving {map_type} ({resolution_key}) for asset '{asset_base_name}'. Input shape: {image_data.shape}, dtype: {current_dtype}")

            # --- Get Static Config Values ---
            config = self.config_obj # Alias for brevity
            primary_fmt_16, fallback_fmt_16 = config.get_16bit_output_formats()
            fmt_8bit_config = config.get_8bit_output_format()
            threshold = config.resolution_threshold_for_jpg
            force_lossless_map_types = config.force_lossless_map_types
            jpg_quality = config.jpg_quality
            png_compression_level = config._core_settings.get('PNG_COMPRESSION_LEVEL', 6)
            target_filename_pattern = config.target_filename_pattern
            image_resolutions = config.image_resolutions

            # --- 1. Determine Output Bit Depth ---
            source_bpc = source_info.get('source_bit_depth', 8) # Default to 8 if missing
            max_input_bpc = source_info.get('max_input_bit_depth', source_bpc) # For 'respect_inputs' merge rule
            output_dtype_target, output_bit_depth = np.uint8, 8 # Default

            if output_bit_depth_rule == 'force_8bit':
                output_dtype_target, output_bit_depth = np.uint8, 8
            elif output_bit_depth_rule == 'force_16bit':
                output_dtype_target, output_bit_depth = np.uint16, 16
            elif output_bit_depth_rule == 'respect': # For individual maps
                if source_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16
                # Handle float source? Assume 16-bit output if source was float? Needs clarification.
                # For now, stick to uint8/16 based on source_bpc.
            elif output_bit_depth_rule == 'respect_inputs': # For merged maps
                if max_input_bpc == 16: output_dtype_target, output_bit_depth = np.uint16, 16
            else: # Default to 8-bit if rule is unknown
                log.warning(f"Unknown output_bit_depth_rule '{output_bit_depth_rule}'. Defaulting to 8-bit.")
                output_dtype_target, output_bit_depth = np.uint8, 8

            log.debug(f"Target output bit depth: {output_bit_depth}-bit (dtype: {output_dtype_target.__name__}) based on rule '{output_bit_depth_rule}'")

            # --- 2. Determine Output Format ---
            output_format, output_ext, save_params, needs_float16 = "", "", [], False
            base_map_type = _get_base_map_type(map_type) # Use base type for lossless check
            force_lossless = base_map_type in force_lossless_map_types
            original_extension = source_info.get('original_extension', '.png') # Primary source ext
            involved_extensions = source_info.get('involved_extensions', {original_extension}) # For merges
            target_dim_px = image_resolutions.get(resolution_key, 0) # Get target dimension size

            # Apply format determination logic (using static config)
            if force_lossless:
                log.debug(f"Format forced to lossless for map type '{base_map_type}'.")
                if output_bit_depth == 16:
                    output_format = primary_fmt_16
                    if output_format.startswith("exr"):
                        output_ext, needs_float16 = ".exr", True
                        save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
                    else: # Assume PNG if primary 16-bit isn't EXR
                        if output_format != "png": log.warning(f"Primary 16-bit format '{output_format}' not PNG/EXR for forced lossless. Using fallback '{fallback_fmt_16}'.")
                        output_format = fallback_fmt_16 if fallback_fmt_16 == "png" else "png" # Ensure PNG
                        output_ext = ".png"
                        save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level])
                else: # 8-bit lossless -> PNG
                    output_format = "png"; output_ext = ".png"
                    save_params = [cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]

            elif output_bit_depth == 8 and target_dim_px >= threshold:
                # Large 8-bit maps go lossy to save disk space
                output_format = 'jpg'; output_ext = '.jpg'
                save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])
                log.debug(f"Using JPG format (Quality: {jpg_quality}) for {map_type} at {resolution_key} due to resolution threshold ({target_dim_px} >= {threshold}).")
            else:
                # Determine highest format involved (for merges) or use original (for individuals)
                # Precedence: exr > tif > png > jpg
                highest_format_str = 'jpg' # Default lowest
                relevant_extensions = involved_extensions # Use involved_extensions directly
                if '.exr' in relevant_extensions: highest_format_str = 'exr'
                elif '.tif' in relevant_extensions: highest_format_str = 'tif'
                elif '.png' in relevant_extensions: highest_format_str = 'png'

                if highest_format_str == 'exr':
                    if output_bit_depth == 16: output_format, output_ext, needs_float16 = "exr", ".exr", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
                    else: output_format, output_ext = "png", ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level])
                elif highest_format_str == 'tif':
                    if output_bit_depth == 16:
                        output_format = primary_fmt_16
                        if output_format.startswith("exr"): output_ext, needs_float16 = ".exr", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
                        else: output_format = "png"; output_ext = ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level])
                    else: output_format, output_ext = "png", ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level])
                elif highest_format_str == 'png':
                    if output_bit_depth == 16:
                        output_format = primary_fmt_16
                        if output_format.startswith("exr"): output_ext, needs_float16 = ".exr", True; save_params.extend([cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
                        else: output_format = "png"; output_ext = ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level])
                    else: output_format, output_ext = "png", ".png"; save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level])
                else: # Default to configured 8-bit format if highest was JPG or unknown
                    output_format = fmt_8bit_config; output_ext = f".{output_format}"
                    if output_format == "png": save_params.extend([cv2.IMWRITE_PNG_COMPRESSION, png_compression_level])
                    elif output_format == "jpg": save_params.extend([cv2.IMWRITE_JPEG_QUALITY, jpg_quality])

            # Final check: JPG must be 8-bit
            if output_format == "jpg" and output_bit_depth == 16:
                log.warning(f"Output format is JPG, but target bit depth is 16. Forcing 8-bit for {map_type} ({resolution_key}).")
                output_dtype_target, output_bit_depth = np.uint8, 8

            log.debug(f"Determined save format: {output_format}, ext: {output_ext}, bit_depth: {output_bit_depth}, needs_float16: {needs_float16}")

            # --- 3. Final Data Type Conversion ---
            img_to_save = image_data.copy() # Work on a copy
            if output_dtype_target == np.uint8 and img_to_save.dtype != np.uint8:
                log.debug(f"Converting image data from {img_to_save.dtype} to uint8 for saving.")
                if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0 * 255.0).astype(np.uint8)
                elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 255.0).astype(np.uint8)
                else: img_to_save = img_to_save.astype(np.uint8) # Direct cast for other types (e.g., bool)
            elif output_dtype_target == np.uint16 and img_to_save.dtype != np.uint16:
                log.debug(f"Converting image data from {img_to_save.dtype} to uint16 for saving.")
                if img_to_save.dtype == np.uint8: img_to_save = img_to_save.astype(np.uint16) * 257 # Proper 8->16 bit scaling
                elif img_to_save.dtype in [np.float16, np.float32]: img_to_save = (np.clip(img_to_save, 0.0, 1.0) * 65535.0).astype(np.uint16)
                else: img_to_save = img_to_save.astype(np.uint16)
            if needs_float16 and img_to_save.dtype != np.float16:
                # EXR half-float output: normalize integer data back into 0..1 floats
                log.debug(f"Converting image data from {img_to_save.dtype} to float16 for EXR saving.")
                if img_to_save.dtype == np.uint16: img_to_save = (img_to_save.astype(np.float32) / 65535.0).astype(np.float16)
                elif img_to_save.dtype == np.uint8: img_to_save = (img_to_save.astype(np.float32) / 255.0).astype(np.float16)
                elif img_to_save.dtype == np.float32: img_to_save = img_to_save.astype(np.float16)
                else: log.warning(f"Cannot convert {img_to_save.dtype} to float16 for EXR save."); return None

            # --- 4. Final Color Space Conversion (RGB -> BGR for non-EXR) ---
            # cv2.imwrite expects BGR channel order for most formats
            img_save_final = img_to_save
            is_3_channel = len(img_to_save.shape) == 3 and img_to_save.shape[2] == 3
            if is_3_channel and not output_format.startswith("exr"):
                log.debug(f"Converting RGB to BGR for saving {map_type} ({resolution_key}) as {output_format}")
                try:
                    img_save_final = cv2.cvtColor(img_to_save, cv2.COLOR_RGB2BGR)
                except Exception as cvt_err:
                    log.error(f"Failed RGB->BGR conversion before save for {map_type} ({resolution_key}): {cvt_err}. Saving original RGB.")
                    img_save_final = img_to_save # Fallback

            # --- 5. Construct Filename & Save ---
            filename = target_filename_pattern.format(
                base_name=asset_base_name,
                map_type=map_type,
                resolution=resolution_key,
                ext=output_ext.lstrip('.')
            )
            output_path_temp = self.temp_dir / filename # Save to engine's temp dir
            log.debug(f"Attempting to save: {output_path_temp.name} (Format: {output_format}, Dtype: {img_save_final.dtype})")

            saved_successfully = False
            actual_format_saved = output_format
            try:
                cv2.imwrite(str(output_path_temp), img_save_final, save_params)
                saved_successfully = True
                log.info(f"  > Saved {map_type} ({resolution_key}, {output_bit_depth}-bit) as {output_format}")
            except Exception as save_err:
                log.error(f"Save failed ({output_format}) for {map_type} {resolution_key}: {save_err}")
                # --- Try Fallback ---
                # Only a failed 16-bit EXR write with a configured PNG fallback is retried
                if output_bit_depth == 16 and output_format.startswith("exr") and fallback_fmt_16 != output_format and fallback_fmt_16 == "png":
                    log.warning(f"Attempting fallback PNG save for {map_type} {resolution_key}")
                    actual_format_saved = "png"; output_ext = ".png";
                    filename = target_filename_pattern.format(base_name=asset_base_name, map_type=map_type, resolution=resolution_key, ext="png")
                    output_path_temp = self.temp_dir / filename
                    save_params_fallback = [cv2.IMWRITE_PNG_COMPRESSION, png_compression_level]
                    img_fallback = None; target_fallback_dtype = np.uint16

                    # Convert original data (before float16 conversion) to uint16 for PNG fallback
                    if img_to_save.dtype == np.float16: # This means original was likely float or uint16/8 converted to float16
                        # Safest is to convert the float16 back to uint16
                        img_scaled = np.clip(img_to_save.astype(np.float32) * 65535.0, 0, 65535)
                        img_fallback = img_scaled.astype(target_fallback_dtype)
                    elif img_to_save.dtype == target_fallback_dtype: img_fallback = img_to_save # Already uint16
                    else: log.error(f"Cannot convert {img_to_save.dtype} for PNG fallback."); return None

                    # --- Conditional RGB -> BGR Conversion for fallback ---
                    img_fallback_save_final = img_fallback
                    is_3_channel_fallback = len(img_fallback.shape) == 3 and img_fallback.shape[2] == 3
                    if is_3_channel_fallback: # PNG is non-EXR
                        log.debug(f"Converting RGB to BGR for fallback PNG save {map_type} ({resolution_key})")
                        try: img_fallback_save_final = cv2.cvtColor(img_fallback, cv2.COLOR_RGB2BGR)
                        except Exception as cvt_err_fb: log.error(f"Failed RGB->BGR conversion for fallback PNG: {cvt_err_fb}. Saving original.")

                    try:
                        cv2.imwrite(str(output_path_temp), img_fallback_save_final, save_params_fallback)
                        saved_successfully = True
                        log.info(f"  > Saved {map_type} ({resolution_key}) using fallback PNG")
                    except Exception as fallback_err:
                        log.error(f"Fallback PNG save failed for {map_type} {resolution_key}: {fallback_err}", exc_info=True)
                else:
                    log.error(f"No suitable fallback available or applicable for failed save of {map_type} ({resolution_key}) as {output_format}.")


            # --- 6. Return Result ---
            if saved_successfully:
                return {
                    "path": output_path_temp.relative_to(self.temp_dir), # Store relative path within engine's temp
                    "resolution": resolution_key,
                    "width": w, "height": h,
                    "bit_depth": output_bit_depth,
                    "format": actual_format_saved
                }
            else:
                return None # Indicate save failure

        except Exception as e:
            log.error(f"Unexpected error in _save_image for {map_type} ({resolution_key}): {e}", exc_info=True)
            return None
+ """ + if not self.temp_dir: raise ProcessingEngineError("Engine workspace (temp_dir) not setup.") + asset_name = asset_rule.asset_name + log.info(f"Processing individual map files for asset '{asset_name}'...") + + # Initialize results specific to this asset + processed_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict) + image_stats_asset: Dict[str, Dict] = {} # Local dict for stats + map_details_asset: Dict[str, Dict] = {} # Store details like source bit depth, gloss inversion + aspect_ratio_change_string_asset: str = "N/A" + + # --- Settings retrieval from static config --- + resolutions = self.config_obj.image_resolutions + stats_res_key = self.config_obj.calculate_stats_resolution + stats_target_dim = resolutions.get(stats_res_key) + if not stats_target_dim: log.warning(f"Stats resolution key '{stats_res_key}' not found in config. Stats skipped for '{asset_name}'.") + base_name = asset_name # Use the asset name from the rule + + # --- Aspect Ratio Calculation Setup --- + first_map_rule_for_aspect = next((fr for fr in asset_rule.files if fr.item_type_override is not None), None) + orig_w_aspect, orig_h_aspect = None, None + if first_map_rule_for_aspect: + first_res_key = next(iter(resolutions)) # Use first resolution key + source_path_abs = workspace_path / first_map_rule_for_aspect.file_path + temp_img_for_dims, _ = self._load_and_transform_source( + source_path_abs, + first_map_rule_for_aspect.item_type_override, + first_res_key, + is_gloss_source=False, # Added: Not relevant for dimension check, but required by method + # self.loaded_data_cache is used internally by the method + ) + if temp_img_for_dims is not None: + orig_h_aspect, orig_w_aspect = temp_img_for_dims.shape[:2] + log.debug(f"Got original dimensions ({orig_w_aspect}x{orig_h_aspect}) for aspect ratio calculation from {first_map_rule_for_aspect.file_path}") + else: + log.warning(f"Could not load image {first_map_rule_for_aspect.file_path} to get original dimensions for aspect 
ratio.") + else: + log.warning("No map files found in AssetRule, cannot calculate aspect ratio string.") + + + # --- Process Each Individual Map defined in the AssetRule --- + for file_rule in asset_rule.files: + # --- Check if this file should be processed individually --- + # Skip if no item type is assigned, if it's explicitly "EXTRA", or if marked to skip + should_skip = ( + file_rule.item_type_override is None or + file_rule.item_type_override == "EXTRA" or # Explicitly skip "EXTRA" type + getattr(file_rule, 'skip_processing', False) + ) + if should_skip: + log.debug(f"Skipping individual processing for {file_rule.file_path} (ItemTypeOverride: {file_rule.item_type_override}, SkipProcessing: {getattr(file_rule, 'skip_processing', False)})") + continue # Skip to the next file_rule + + # --- Proceed with processing for this file_rule --- + source_path_rel = Path(file_rule.file_path) # Ensure it's a Path object + # IMPORTANT: Use the ENGINE's workspace_path (self.temp_dir) for loading, + # as individual maps should have been copied there by the caller (ProcessingTask) + # Correction: _process_individual_maps receives the *engine's* temp_dir as workspace_path + source_path_abs = workspace_path / source_path_rel + map_type = file_rule.item_type_override # Use the explicit map type from the rule + # Determine if the source is gloss based on its type identifier and config + gloss_identifiers = getattr(self.config_obj, 'gloss_map_identifiers', []) + is_gloss_source = map_type in gloss_identifiers + original_extension = source_path_rel.suffix.lower() # Get from path + + log.info(f"-- Asset '{asset_name}': Processing Individual Map: {map_type} (Source: {source_path_rel.name}) --") + current_map_details = {"derived_from_gloss": is_gloss_source} + source_bit_depth_found = None # Track if we've found the bit depth for this map type + + try: + # --- Loop through target resolutions from static config --- + for res_key, target_dim_px in resolutions.items(): + 
log.debug(f"Processing {map_type} for resolution: {res_key}...") + + # --- 1. Load and Transform Source (using helper + cache) --- + # This now only runs for files that have an item_type_override + img_resized, source_dtype = self._load_and_transform_source( + source_path_abs=source_path_abs, + map_type=map_type, # Pass the specific map type (e.g., ROUGH-1) + target_resolution_key=res_key, + is_gloss_source=is_gloss_source + # self.loaded_data_cache is used internally + ) + + if img_resized is None: + # This warning now correctly indicates a failure for a map we *intended* to process + log.warning(f"Failed to load/transform source map {source_path_rel} for {res_key}. Skipping resolution.") + continue # Skip this resolution + + # Store source bit depth once found + if source_dtype is not None and source_bit_depth_found is None: + source_bit_depth_found = 16 if source_dtype == np.uint16 else (8 if source_dtype == np.uint8 else 8) # Default non-uint to 8 + current_map_details["source_bit_depth"] = source_bit_depth_found + log.debug(f"Stored source bit depth for {map_type}: {source_bit_depth_found}") + + # --- 2. Calculate Stats (if applicable) --- + if res_key == stats_res_key and stats_target_dim: + log.debug(f"Calculating stats for {map_type} using {res_key} image...") + stats = _calculate_image_stats(img_resized) + if stats: image_stats_asset[map_type] = stats # Store locally first + else: log.warning(f"Stats calculation failed for {map_type} at {res_key}.") + + # --- 3. 
Calculate Aspect Ratio Change String (once per asset) --- + if aspect_ratio_change_string_asset == "N/A" and orig_w_aspect is not None and orig_h_aspect is not None: + target_w_aspect, target_h_aspect = img_resized.shape[1], img_resized.shape[0] # Use current resized dims + try: + aspect_string = _normalize_aspect_ratio_change(orig_w_aspect, orig_h_aspect, target_w_aspect, target_h_aspect) + aspect_ratio_change_string_asset = aspect_string + log.debug(f"Stored aspect ratio change string using {res_key}: '{aspect_string}'") + except Exception as aspect_err: + log.error(f"Failed to calculate aspect ratio change string using {res_key}: {aspect_err}", exc_info=True) + aspect_ratio_change_string_asset = "Error" + elif aspect_ratio_change_string_asset == "N/A": + aspect_ratio_change_string_asset = "Unknown" # Set to unknown if original dims failed + + # --- 4. Save Image (using helper) --- + source_info = { + 'original_extension': original_extension, + 'source_bit_depth': source_bit_depth_found or 8, # Use found depth or default + 'involved_extensions': {original_extension} # Only self for individual maps + } + # Get bit depth rule solely from the static configuration using the correct method signature + bit_depth_rule = self.config_obj.get_bit_depth_rule(map_type) # Pass only map_type + + save_result = self._save_image( + image_data=img_resized, + map_type=map_type, + resolution_key=res_key, + asset_base_name=base_name, + source_info=source_info, + output_bit_depth_rule=bit_depth_rule + # _save_image uses self.config_obj for other settings + ) + + # --- 5. 
Store Result --- + if save_result: + processed_maps_details_asset.setdefault(map_type, {})[res_key] = save_result + # Update overall map detail (e.g., final format) if needed + current_map_details["output_format"] = save_result.get("format") + else: + log.error(f"Failed to save {map_type} at {res_key}.") + processed_maps_details_asset.setdefault(map_type, {})[f'error_{res_key}'] = "Save failed" + + + except Exception as map_proc_err: + log.error(f"Failed processing map {map_type} from {source_path_rel.name}: {map_proc_err}", exc_info=True) + processed_maps_details_asset.setdefault(map_type, {})['error'] = str(map_proc_err) + + # Store collected details for this map type + map_details_asset[map_type] = current_map_details + + # --- Final Metadata Updates --- + # Update the passed-in current_asset_metadata dictionary directly + current_asset_metadata["map_details"] = map_details_asset + current_asset_metadata["image_stats_1k"] = image_stats_asset # Add collected stats + current_asset_metadata["aspect_ratio_change_string"] = aspect_ratio_change_string_asset # Add collected aspect string + + log.info(f"Finished processing individual map files for asset '{asset_name}'.") + # Return details needed for organization, stats and aspect ratio are updated in-place + return processed_maps_details_asset, image_stats_asset, aspect_ratio_change_string_asset + + + def _merge_maps(self, asset_rule: AssetRule, workspace_path: Path, processed_maps_details_asset: Dict[str, Dict[str, Dict]], current_asset_metadata: Dict) -> Dict[str, Dict[str, Dict]]: + """ + Merges channels from different source maps for a specific asset based on static + merge rules in configuration, using explicit file paths from the AssetRule. + + Args: + asset_rule: The AssetRule object containing file rules for this asset. + workspace_path: Path to the directory containing the source files. + processed_maps_details_asset: Details of processed maps (used to find common resolutions). 
+ current_asset_metadata: Mutable metadata dictionary for the current asset (updated for stats). + + + Returns: + Dict[str, Dict[str, Dict]]: Details of the merged maps created for this asset. + """ + if not self.temp_dir: raise ProcessingEngineError("Engine workspace (temp_dir) not setup.") + asset_name = asset_rule.asset_name + # Get merge rules from static config + merge_rules = self.config_obj.map_merge_rules + log.info(f"Asset '{asset_name}': Applying {len(merge_rules)} map merging rule(s) from static config...") + + # Initialize results for this asset + merged_maps_details_asset: Dict[str, Dict[str, Dict]] = defaultdict(dict) + + for rule_index, rule in enumerate(merge_rules): + output_map_type = rule.get("output_map_type") + inputs_mapping = rule.get("inputs") # e.g., {"R": "AO", "G": "ROUGH", "B": "METAL"} + defaults = rule.get("defaults", {}) + rule_bit_depth = rule.get("output_bit_depth", "respect_inputs") + + if not output_map_type or not inputs_mapping: + log.warning(f"Asset '{asset_name}': Skipping static merge rule #{rule_index+1}: Missing 'output_map_type' or 'inputs'. Rule: {rule}") + continue + + log.info(f"-- Asset '{asset_name}': Applying merge rule for '{output_map_type}' --") + + # --- Find required SOURCE FileRules within the AssetRule --- + required_input_file_rules: Dict[str, FileRule] = {} # map_type -> FileRule + possible_to_find_sources = True + input_types_needed = set(inputs_mapping.values()) # e.g., {"AO", "ROUGH", "METAL"} + + for input_type in input_types_needed: + found_rule_for_type = False + # Search in the asset_rule's files + for file_rule in asset_rule.files: + # Check if the file_rule's map_type matches the required input type + # Handle variants (e.g., ROUGH-1 should match ROUGH) + if file_rule.item_type_override and file_rule.item_type_override.startswith(input_type): # Check override exists and matches + # TODO: Add prioritization logic if multiple files match (e.g., prefer non-gloss rough if gloss exists but isn't needed?) 
+ # For now, take the first match. + required_input_file_rules[input_type] = file_rule + found_rule_for_type = True + log.debug(f"Found source FileRule for merge input '{input_type}': {file_rule.file_path} (ItemTypeOverride: {file_rule.item_type_override})") # Gloss status checked during load + break # Found the first matching source for this input type + if not found_rule_for_type: + log.warning(f"Asset '{asset_name}': Required source FileRule for input map type '{input_type}' not found in AssetRule. Cannot perform merge for '{output_map_type}'.") + possible_to_find_sources = False + break + + if not possible_to_find_sources: + continue # Skip this merge rule + + # --- Determine common resolutions based on *processed* maps --- + # This still seems the most reliable way to know which sizes are actually available + possible_resolutions_per_input: List[Set[str]] = [] + resolutions_config = self.config_obj.image_resolutions # Static config + + for input_type in input_types_needed: + # Find the corresponding processed map details (might be ROUGH-1, ROUGH-2 etc.) 
+ processed_details_for_input = None + input_file_rule = required_input_file_rules.get(input_type) + if input_file_rule: + processed_details_for_input = processed_maps_details_asset.get(input_file_rule.item_type_override) # Use the correct attribute + + if processed_details_for_input: + res_keys = {res for res, details in processed_details_for_input.items() if isinstance(details, dict) and 'error' not in details} + if not res_keys: + log.warning(f"Asset '{asset_name}': Input map type '{input_type}' (using {input_file_rule.item_type_override if input_file_rule else 'N/A'}) for merge rule '{output_map_type}' has no successfully processed resolutions.") # Use item_type_override + possible_resolutions_per_input = [] # Invalidate if any input has no resolutions + break + possible_resolutions_per_input.append(res_keys) + else: + # If the input map wasn't processed individually (used_for_merge_only=True) + # Assume all configured resolutions are potentially available. Loading will handle skips. + log.debug(f"Input map type '{input_type}' for merge rule '{output_map_type}' might not have been processed individually. Assuming all configured resolutions possible.") + possible_resolutions_per_input.append(set(resolutions_config.keys())) + + + if not possible_resolutions_per_input: + log.warning(f"Asset '{asset_name}': Cannot determine common resolutions for '{output_map_type}'. Skipping rule.") + continue + + common_resolutions = set.intersection(*possible_resolutions_per_input) + + if not common_resolutions: + log.warning(f"Asset '{asset_name}': No common resolutions found among required inputs {input_types_needed} for merge rule '{output_map_type}'. 
Skipping rule.") + continue + log.debug(f"Asset '{asset_name}': Common resolutions for '{output_map_type}': {common_resolutions}") + + # --- Loop through common resolutions --- + res_order = {k: resolutions_config[k] for k in common_resolutions if k in resolutions_config} + if not res_order: + log.warning(f"Asset '{asset_name}': Common resolutions {common_resolutions} do not match config. Skipping merge for '{output_map_type}'.") + continue + + sorted_res_keys = sorted(res_order.keys(), key=lambda k: res_order[k], reverse=True) + base_name = asset_name # Use current asset's name + + for current_res_key in sorted_res_keys: + log.debug(f"Asset '{asset_name}': Merging '{output_map_type}' for resolution: {current_res_key}") + try: + loaded_inputs_data = {} # map_type -> loaded numpy array + source_info_for_save = {'involved_extensions': set(), 'max_input_bit_depth': 8} + + # --- Load required SOURCE maps using helper --- + possible_to_load = True + target_channels = list(inputs_mapping.keys()) # e.g., ['R', 'G', 'B'] + + for map_type_needed in input_types_needed: # e.g., {"AO", "ROUGH", "METAL"} + file_rule = required_input_file_rules.get(map_type_needed) + if not file_rule: + log.error(f"Internal Error: FileRule missing for '{map_type_needed}' during merge load.") + possible_to_load = False; break + + source_path_rel_str = file_rule.file_path # Keep original string if needed + source_path_rel = Path(source_path_rel_str) # Convert to Path object + source_path_abs = workspace_path / source_path_rel + is_gloss = file_rule.item_type_override in getattr(self.config_obj, 'gloss_map_identifiers', []) + original_ext = source_path_rel.suffix.lower() # Now works on Path object + source_info_for_save['involved_extensions'].add(original_ext) + + log.debug(f"Loading source '{source_path_rel}' for merge input '{map_type_needed}' at {current_res_key} (Gloss: {is_gloss})") + img_resized, source_dtype = self._load_and_transform_source( + source_path_abs=source_path_abs, + 
map_type=file_rule.item_type_override, # Use the specific type override from rule (e.g., ROUGH-1) + target_resolution_key=current_res_key, + is_gloss_source=is_gloss + # self.loaded_data_cache used internally + ) + + if img_resized is None: + log.warning(f"Asset '{asset_name}': Failed to load/transform source '{source_path_rel}' for merge input '{map_type_needed}' at {current_res_key}. Skipping resolution.") + possible_to_load = False; break + + loaded_inputs_data[map_type_needed] = img_resized # Store by base type (AO, ROUGH) + + # Track max source bit depth + if source_dtype == np.uint16: + source_info_for_save['max_input_bit_depth'] = max(source_info_for_save['max_input_bit_depth'], 16) + # Add other dtype checks if needed + + if not possible_to_load: continue + + # --- Calculate Stats for ROUGH source if used and at stats resolution --- + stats_res_key = self.config_obj.calculate_stats_resolution + if current_res_key == stats_res_key: + log.debug(f"Asset '{asset_name}': Checking for ROUGH source stats for '{output_map_type}' at {stats_res_key}") + for target_channel, source_map_type in inputs_mapping.items(): + if source_map_type == 'ROUGH' and source_map_type in loaded_inputs_data: + log.debug(f"Asset '{asset_name}': Calculating stats for ROUGH source (mapped to channel '{target_channel}') for '{output_map_type}' at {stats_res_key}") + rough_image_data = loaded_inputs_data[source_map_type] + rough_stats = _calculate_image_stats(rough_image_data) + if rough_stats: + # Update the mutable metadata dict passed in + stats_dict = current_asset_metadata.setdefault("merged_map_channel_stats", {}).setdefault(output_map_type, {}).setdefault(target_channel, {}) + stats_dict[stats_res_key] = rough_stats + log.debug(f"Asset '{asset_name}': Stored ROUGH stats for '{output_map_type}' channel '{target_channel}' at {stats_res_key}: {rough_stats}") + else: + log.warning(f"Asset '{asset_name}': Failed to calculate ROUGH stats for '{output_map_type}' channel '{target_channel}' at 
{stats_res_key}.") + + + # --- Determine dimensions --- + first_map_type = next(iter(loaded_inputs_data)) + h, w = loaded_inputs_data[first_map_type].shape[:2] + num_target_channels = len(target_channels) + + # --- Prepare and Merge Channels --- + merged_channels_float32 = [] + for target_channel in target_channels: # e.g., 'R', 'G', 'B' + source_map_type = inputs_mapping.get(target_channel) # e.g., "AO", "ROUGH", "METAL" + channel_data_float32 = None + + if source_map_type and source_map_type in loaded_inputs_data: + img_input = loaded_inputs_data[source_map_type] # Get the loaded NumPy array + + # Ensure input is float32 0-1 range for merging + if img_input.dtype == np.uint16: img_float = img_input.astype(np.float32) / 65535.0 + elif img_input.dtype == np.uint8: img_float = img_input.astype(np.float32) / 255.0 + elif img_input.dtype == np.float16: img_float = img_input.astype(np.float32) # Assume float16 is 0-1 + else: img_float = img_input.astype(np.float32) # Assume other floats are 0-1 + + num_source_channels = img_float.shape[2] if len(img_float.shape) == 3 else 1 + + # Extract the correct channel + if num_source_channels >= 3: + if target_channel == 'R': channel_data_float32 = img_float[:, :, 0] + elif target_channel == 'G': channel_data_float32 = img_float[:, :, 1] + elif target_channel == 'B': channel_data_float32 = img_float[:, :, 2] + elif target_channel == 'A' and num_source_channels == 4: channel_data_float32 = img_float[:, :, 3] + else: log.warning(f"Target channel '{target_channel}' invalid for 3/4 channel source '{source_map_type}'.") + elif num_source_channels == 1 or len(img_float.shape) == 2: + # If source is grayscale, use it for R, G, B, or A target channels + channel_data_float32 = img_float.reshape(h, w) + else: + log.warning(f"Unexpected shape {img_float.shape} for source '{source_map_type}'.") + + # Apply default if channel data couldn't be extracted + if channel_data_float32 is None: + default_val = defaults.get(target_channel) + if 
default_val is None: + raise ProcessingEngineError(f"Missing input/default for target channel '{target_channel}' in merge rule '{output_map_type}'.") + log.debug(f"Using default value {default_val} for target channel '{target_channel}' in '{output_map_type}'.") + channel_data_float32 = np.full((h, w), float(default_val), dtype=np.float32) + + merged_channels_float32.append(channel_data_float32) + + if not merged_channels_float32 or len(merged_channels_float32) != num_target_channels: + raise ProcessingEngineError(f"Channel count mismatch during merge for '{output_map_type}'. Expected {num_target_channels}, got {len(merged_channels_float32)}.") + + merged_image_float32 = cv2.merge(merged_channels_float32) + log.debug(f"Merged channels for '{output_map_type}' ({current_res_key}). Result shape: {merged_image_float32.shape}, dtype: {merged_image_float32.dtype}") + + # --- Save Merged Map using Helper --- + save_result = self._save_image( + image_data=merged_image_float32, # Pass the merged float32 data + map_type=output_map_type, + resolution_key=current_res_key, + asset_base_name=base_name, + source_info=source_info_for_save, # Pass collected source info + output_bit_depth_rule=rule_bit_depth # Pass the rule's requirement + # _save_image uses self.config_obj for other settings + ) + + # --- Record details locally --- + if save_result: + merged_maps_details_asset[output_map_type][current_res_key] = save_result + else: + log.error(f"Asset '{asset_name}': Failed to save merged map '{output_map_type}' at resolution '{current_res_key}'.") + merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = "Save failed via helper" + + + except Exception as merge_res_err: + log.error(f"Asset '{asset_name}': Failed merging '{output_map_type}' at resolution '{current_res_key}': {merge_res_err}", exc_info=True) + # Store error locally for this asset + merged_maps_details_asset.setdefault(output_map_type, {})[f'error_{current_res_key}'] = str(merge_res_err) 
+ + log.info(f"Asset '{asset_name}': Finished applying map merging rules.") + # Return the details for this asset + return merged_maps_details_asset + + + def _generate_metadata_file(self, source_rule: SourceRule, asset_rule: AssetRule, current_asset_metadata: Dict, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]]) -> Path: + """ + Gathers metadata for a specific asset based on the AssetRule and processing results, + and writes it to a temporary JSON file in the engine's temp_dir. + + Args: + asset_rule: The AssetRule object for this asset. + current_asset_metadata: Base metadata dictionary (already contains name, category, archetype, stats, aspect ratio, map_details). + processed_maps_details_asset: Details of processed maps for this asset. + merged_maps_details_asset: Details of merged maps for this asset. + + Returns: + Path: The path to the generated temporary metadata file. + """ + if not self.temp_dir: raise ProcessingEngineError("Engine workspace (temp_dir) not setup.") + asset_name = asset_rule.asset_name + if not asset_name: + log.warning("Asset name missing during metadata generation, file may be incomplete or incorrectly named.") + asset_name = "UnknownAsset_Metadata" # Fallback for filename + + log.info(f"Generating metadata file for asset '{asset_name}'...") + + # Start with the base metadata passed in (already contains name, category, archetype, stats, aspect, map_details) + final_metadata = current_asset_metadata.copy() + + # Add supplier name from static config + final_metadata["supplier_name"] = self.config_obj.supplier_name + + # Populate map resolution details from processing results + final_metadata["processed_map_resolutions"] = {} + for map_type, res_dict in processed_maps_details_asset.items(): + keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d] + if keys: final_metadata["processed_map_resolutions"][map_type] = sorted(keys) + + 
final_metadata["merged_map_resolutions"] = {} + for map_type, res_dict in merged_maps_details_asset.items(): + keys = [res for res, d in res_dict.items() if isinstance(d, dict) and 'error' not in d] + if keys: final_metadata["merged_map_resolutions"][map_type] = sorted(keys) + + # Determine maps present based on successful processing for this asset + final_metadata["maps_present"] = sorted(list(processed_maps_details_asset.keys())) + final_metadata["merged_maps"] = sorted(list(merged_maps_details_asset.keys())) + + # Determine shader features based on this asset's maps and rules + features = set() + map_details_asset = final_metadata.get("map_details", {}) # Get from metadata dict + for map_type, details in map_details_asset.items(): + base_map_type = _get_base_map_type(map_type) + # Check standard feature types + if base_map_type in ["SSS", "FUZZ", "MASK", "TRANSMISSION", "EMISSION", "CLEARCOAT"]: # Add more as needed + features.add(base_map_type) + if details.get("derived_from_gloss"): features.add("InvertedGloss") + # Check if any resolution was saved as 16-bit + res_details = processed_maps_details_asset.get(map_type, {}) + if any(res_info.get("bit_depth") == 16 for res_info in res_details.values() if isinstance(res_info, dict)): features.add(f"16bit_{base_map_type}") + # Check merged maps for 16-bit output + for map_type, res_dict in merged_maps_details_asset.items(): + base_map_type = _get_base_map_type(map_type) + if any(res_info.get("bit_depth") == 16 for res_info in res_dict.values() if isinstance(res_info, dict)): features.add(f"16bit_{base_map_type}") + + final_metadata["shader_features"] = sorted(list(features)) + + # Determine source files in this asset's Extra folder based on FileRule category + source_files_in_extra_set = set() + for file_rule in asset_rule.files: + if file_rule.item_type_override is None: # Assume files without an assigned type are extra/ignored/unmatched + source_files_in_extra_set.add(str(file_rule.file_path)) + 
final_metadata["source_files_in_extra"] = sorted(list(source_files_in_extra_set)) + + # Add processing info (using static config for preset name) + final_metadata["_processing_info"] = { + "preset_used": self.config_obj.preset_name, # Get from static config + "timestamp_utc": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "input_source": source_rule.supplier_identifier or "Unknown", # Use identifier from parent SourceRule + } + + # Sort lists just before writing + for key in ["maps_present", "merged_maps", "shader_features", "source_files_in_extra"]: + if key in final_metadata and isinstance(final_metadata[key], list): final_metadata[key].sort() + + # Use asset name in temporary filename to avoid conflicts + # Use static config for the base metadata filename + temp_metadata_filename = f"{asset_name}_{self.config_obj.metadata_filename}" + output_path = self.temp_dir / temp_metadata_filename + log.debug(f"Writing metadata for asset '{asset_name}' to temporary file: {output_path}") + try: + with open(output_path, 'w', encoding='utf-8') as f: + # Use a custom encoder if numpy types might be present (though they shouldn't be at this stage) + json.dump(final_metadata, f, indent=4, ensure_ascii=False, sort_keys=True) + log.info(f"Metadata file '{self.config_obj.metadata_filename}' generated successfully for asset '{asset_name}'.") + return output_path # Return the path to the temporary file + except Exception as e: + raise ProcessingEngineError(f"Failed to write metadata file {output_path} for asset '{asset_name}': {e}") from e + + + def _organize_output_files(self, asset_rule: AssetRule, output_base_path: Path, processed_maps_details_asset: Dict[str, Dict[str, Dict]], merged_maps_details_asset: Dict[str, Dict[str, Dict]], temp_metadata_path: Path): + """ + Moves/copies processed files for a specific asset from the engine's temp dir + to the final output structure, based on the AssetRule and static config. 
+ + Args: + asset_rule: The AssetRule object for this asset. + output_base_path: The final base output directory. + processed_maps_details_asset: Details of processed maps for this asset. + merged_maps_details_asset: Details of merged maps for this asset. + temp_metadata_path: Path to the temporary metadata file for this asset. + """ + if not self.temp_dir or not self.temp_dir.exists(): raise ProcessingEngineError("Engine temp workspace missing.") + asset_name = asset_rule.asset_name + if not asset_name: raise ProcessingEngineError("Asset name missing for organization.") + + # Get structure names from static config + supplier_name = self.config_obj.supplier_name + metadata_filename = self.config_obj.metadata_filename + extra_subdir_name = self.config_obj.extra_files_subdir + + if not supplier_name: raise ProcessingEngineError("Supplier name missing from config.") + + supplier_sanitized = _sanitize_filename(supplier_name) + asset_name_sanitized = _sanitize_filename(asset_name) + final_dir = output_base_path / supplier_sanitized / asset_name_sanitized + log.info(f"Organizing output files for asset '{asset_name_sanitized}' into: {final_dir}") + + try: + # Overwrite logic is handled in the main process() method before calling this + final_dir.mkdir(parents=True, exist_ok=True) + except Exception as e: + raise ProcessingEngineError(f"Failed to create final dir {final_dir} for asset '{asset_name_sanitized}': {e}") from e + + # --- Helper for moving files from engine's temp dir --- + def _safe_move(src_rel_path: Path | None, dest_dir: Path, file_desc: str): + if not src_rel_path: log.warning(f"Asset '{asset_name_sanitized}': Missing src relative path for {file_desc}."); return + source_abs = self.temp_dir / src_rel_path # Path relative to engine's temp + # Use the original filename from the source path for the destination + dest_abs = dest_dir / src_rel_path.name + try: + if source_abs.exists(): + log.debug(f"Asset '{asset_name_sanitized}': Moving {file_desc}: 
{source_abs.name} -> {dest_dir.relative_to(output_base_path)}/") + dest_dir.mkdir(parents=True, exist_ok=True) + shutil.move(str(source_abs), str(dest_abs)) + else: log.warning(f"Asset '{asset_name_sanitized}': Source file missing in engine temp for {file_desc}: {source_abs}") + except Exception as e: log.error(f"Asset '{asset_name_sanitized}': Failed moving {file_desc} '{source_abs.name}': {e}", exc_info=True) + + # --- Move Processed/Merged Maps --- + for details_dict in [processed_maps_details_asset, merged_maps_details_asset]: + for map_type, res_dict in details_dict.items(): + if 'error' in res_dict: continue + for res_key, details in res_dict.items(): + if isinstance(details, dict) and 'path' in details: + # details['path'] is relative to engine's temp dir + _safe_move(details['path'], final_dir, f"{map_type} ({res_key})") + + # --- Move Models (copy from original workspace) --- + # Models are not processed/saved in temp, copy from original workspace + # This requires the original workspace path, which isn't directly available here. + # TODO: Revisit how models are handled. Should they be copied to temp first? + # For now, assume models are handled by the caller or need adjustment. + # log.warning("Model file organization not implemented in ProcessingEngine._organize_output_files yet.") + # Find model FileRules and copy from workspace_path (passed to process) + # This needs workspace_path access. Let's assume it's available via self for now, though it's not ideal. + # Correction: workspace_path is not stored in self. Pass it down or handle differently. + # Let's assume the caller handles model copying for now. 
+ + # --- Move Metadata File --- + if temp_metadata_path and temp_metadata_path.exists(): + # temp_metadata_path is absolute path within engine's temp dir + final_metadata_path = final_dir / metadata_filename # Use standard name from config + try: + log.debug(f"Asset '{asset_name_sanitized}': Moving metadata file: {temp_metadata_path.name} -> {final_metadata_path.relative_to(output_base_path)}") + shutil.move(str(temp_metadata_path), str(final_metadata_path)) + except Exception as e: + log.error(f"Asset '{asset_name_sanitized}': Failed moving metadata file '{temp_metadata_path.name}': {e}", exc_info=True) + else: + log.warning(f"Asset '{asset_name_sanitized}': Temporary metadata file path missing or file does not exist: {temp_metadata_path}") + + + # --- Handle Extra/Ignored/Unmatched Files (copy from original workspace) --- + # These also need copying from the original workspace_path. + # TODO: Revisit how these are handled. Should they be copied to temp first? + # For now, assume the caller handles copying these based on the SourceRule. + # log.warning("Extra/Ignored/Unmatched file organization not implemented in ProcessingEngine._organize_output_files yet.") + # Find relevant FileRules and copy from workspace_path. Needs workspace_path access. + # Let's assume the caller handles this for now. 
+ + log.info(f"Finished organizing output for asset '{asset_name_sanitized}'.") + +# --- End of ProcessingEngine Class --- \ No newline at end of file diff --git a/rule_structure.py b/rule_structure.py index e18ddf9..cac449f 100644 --- a/rule_structure.py +++ b/rule_structure.py @@ -1,11 +1,11 @@ import dataclasses import json from typing import List, Dict, Any, Tuple - @dataclasses.dataclass class FileRule: file_path: str = None - map_type_override: str = None + item_type_override: str = None # Renamed from map_type_override + target_asset_name_override: str = None # Added override field resolution_override: Tuple[int, int] = None channel_merge_instructions: Dict[str, Any] = dataclasses.field(default_factory=dict) output_format_override: str = None # Potentially others identified during integration @@ -21,7 +21,8 @@ class FileRule: @dataclasses.dataclass class AssetRule: asset_name: str = None - asset_type: str = None + asset_type: str = None # Predicted type + asset_type_override: str = None # Added override field common_metadata: Dict[str, Any] = dataclasses.field(default_factory=dict) files: List[FileRule] = dataclasses.field(default_factory=list) @@ -37,7 +38,8 @@ class AssetRule: @dataclasses.dataclass class SourceRule: - supplier_identifier: str = None + supplier_identifier: str = None # Predicted/Original identifier + supplier_override: str = None # Added override field high_level_sorting_parameters: Dict[str, Any] = dataclasses.field(default_factory=dict) assets: List[AssetRule] = dataclasses.field(default_factory=list) input_path: str = None