Pre-Codebase-review commit :3
Codebase deduplication and cleanup refactor. Documentation updated as well. Preferences update. Removed test files from repository.
This commit is contained in:
138
gui/asset_restructure_handler.py
Normal file
138
gui/asset_restructure_handler.py
Normal file
@@ -0,0 +1,138 @@
|
||||
# gui/asset_restructure_handler.py
|
||||
import logging
|
||||
from PySide6.QtCore import QObject, Slot, QModelIndex
|
||||
from PySide6.QtGui import QColor # Might be needed if copying logic directly, though unlikely now
|
||||
from pathlib import Path
|
||||
from .unified_view_model import UnifiedViewModel # Use relative import
|
||||
from rule_structure import SourceRule, AssetRule, FileRule
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
class AssetRestructureHandler(QObject):
    """
    Handles the model restructuring logic triggered by changes
    to FileRule target asset overrides in the UnifiedViewModel.

    Connected to UnifiedViewModel.targetAssetOverrideChanged; moves FileRules
    between AssetRules (creating or removing AssetRules as needed) after the
    override value has already been written by the model's setData.
    """
    def __init__(self, model: UnifiedViewModel, parent=None):
        """
        Args:
            model: The UnifiedViewModel whose rule hierarchy this handler mutates.
            parent: Optional parent QObject.

        Raises:
            TypeError: If `model` is not a UnifiedViewModel instance.
        """
        super().__init__(parent)
        if not isinstance(model, UnifiedViewModel):
            raise TypeError("AssetRestructureHandler requires a UnifiedViewModel instance.")
        self.model = model
        log.debug("AssetRestructureHandler initialized.")

    @Slot(QModelIndex, object)
    def handle_target_asset_override(self, index: QModelIndex, new_target_path: object):
        """
        Slot connected to UnifiedViewModel.targetAssetOverrideChanged.
        Orchestrates model changes based on the new target asset path.

        Args:
            index: The QModelIndex of the FileRule whose override changed.
            new_target_path: The new target asset path (string or None).
        """
        log.debug(f"Handler received targetAssetOverrideChanged: Index=({index.row()},{index.column()}), New Path='{new_target_path}'")

        if not index.isValid():
            log.warning("Handler received invalid index. Aborting.")
            return

        file_item = self.model.getItem(index)
        if not isinstance(file_item, FileRule):
            log.warning(f"Handler received index for non-FileRule item: {type(file_item)}. Aborting.")
            return

        # Ensure new_target_path is a string or None
        new_target_name = str(new_target_path).strip() if new_target_path is not None else None
        if new_target_name == "": new_target_name = None # Treat empty string as None

        # --- Get necessary context ---
        old_parent_asset = getattr(file_item, 'parent_asset', None)
        if not old_parent_asset:
            log.error(f"Handler: File item '{Path(file_item.file_path).name}' has no parent asset. Cannot restructure.")
            # Note: Data change already happened in setData, cannot easily revert here.
            return

        source_rule = getattr(old_parent_asset, 'parent_source', None)
        if not source_rule:
            log.error(f"Handler: Could not find SourceRule for parent asset '{old_parent_asset.asset_name}'. Cannot restructure.")
            return

        # --- Logic based on the new target name ---
        target_parent_asset = None
        target_parent_index = QModelIndex()  # invalid index until a target is resolved
        move_occurred = False

        # 1. Find existing target parent AssetRule within the same SourceRule
        if new_target_name:
            for i, asset in enumerate(source_rule.assets):
                if asset.asset_name == new_target_name:
                    target_parent_asset = asset
                    # Get index for the target parent
                    # NOTE(review): reaches into the model's private _source_rules list
                    # and createIndex — assumes this handler is a trusted friend of
                    # UnifiedViewModel; confirm this coupling is intended.
                    try:
                        source_rule_row = self.model._source_rules.index(source_rule)
                        source_rule_index = self.model.createIndex(source_rule_row, 0, source_rule)
                        target_parent_index = self.model.index(i, 0, source_rule_index)
                        if not target_parent_index.isValid():
                            log.error(f"Handler: Failed to create valid index for existing target parent '{new_target_name}'.")
                            target_parent_asset = None # Reset if index is invalid
                    except ValueError:
                        log.error(f"Handler: Could not find SourceRule index while looking for target parent '{new_target_name}'.")
                        target_parent_asset = None # Reset if index is invalid
                    break # Found the asset

        # 2. Handle Move or Creation
        if target_parent_asset:
            # --- Move to Existing Parent ---
            if target_parent_asset != old_parent_asset:
                log.info(f"Handler: Moving file '{Path(file_item.file_path).name}' to existing asset '{target_parent_asset.asset_name}'.")
                if self.model.moveFileRule(index, target_parent_index):
                    move_occurred = True
                else:
                    log.error(f"Handler: Model failed to move file rule to existing asset '{target_parent_asset.asset_name}'.")
                    # Consider how to handle failure - maybe log and continue to cleanup?
            else:
                # Target is the same as the old parent. No move needed.
                log.debug(f"Handler: Target asset '{new_target_name}' is the same as the current parent. No move required.")
                pass # No move needed, but might still need cleanup if old parent becomes empty later (unlikely in this specific case)

        elif new_target_name: # Only create if a *new* specific target name was given
            # --- Create New Parent AssetRule and Move ---
            log.info(f"Handler: Creating new asset '{new_target_name}' and moving file '{Path(file_item.file_path).name}'.")
            # Create the new asset rule using the model's method
            new_asset_index = self.model.createAssetRule(source_rule, new_target_name, copy_from_asset=old_parent_asset)

            if new_asset_index.isValid():
                # Now move the file to the newly created asset
                if self.model.moveFileRule(index, new_asset_index):
                    move_occurred = True
                    target_parent_asset = new_asset_index.internalPointer() # Update for cleanup check
                else:
                    log.error(f"Handler: Model failed to move file rule to newly created asset '{new_target_name}'.")
                    # If move fails after creation, should we remove the created asset? Maybe.
                    # For now, just log the error.
            else:
                log.error(f"Handler: Model failed to create new asset rule '{new_target_name}'. Cannot move file.")

        else: # new_target_name is None or empty
            # --- Moving back to original/default parent (Clearing Override) ---
            # The file *should* already be under its original parent if the override was just cleared.
            # However, if it was previously moved *away* from its original parent due to an override,
            # clearing the override *should* ideally move it back.
            # This logic is complex: we need to know the *original* parent before any overrides.
            # The current structure doesn't explicitly store this.
            # For now, assume clearing the override means it stays in its *current* parent,
            # and we only handle cleanup if that parent becomes empty.
            # A more robust solution might involve finding the asset matching the file's *directory* name.
            log.debug(f"Handler: Target asset override cleared for '{Path(file_item.file_path).name}'. File remains in parent '{old_parent_asset.asset_name}'.")
            # No move occurs in this simplified interpretation.

        # 3. Cleanup Empty Old Parent (only if a move occurred)
        # Check the old_parent_asset *after* the potential move
        if move_occurred and old_parent_asset and not old_parent_asset.files:
            log.info(f"Handler: Attempting to remove empty old parent asset '{old_parent_asset.asset_name}'.")
            if not self.model.removeAssetRule(old_parent_asset):
                log.warning(f"Handler: Model failed to remove empty old parent asset '{old_parent_asset.asset_name}'.")
        elif move_occurred:
            log.debug(f"Handler: Old parent asset '{old_parent_asset.asset_name}' still contains files. No removal needed.")

        log.debug(f"Handler finished processing targetAssetOverrideChanged for '{Path(file_item.file_path).name}'.")
|
||||
133
gui/base_prediction_handler.py
Normal file
133
gui/base_prediction_handler.py
Normal file
@@ -0,0 +1,133 @@
|
||||
# gui/base_prediction_handler.py
|
||||
import logging
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from pathlib import Path
|
||||
from typing import List, Any
|
||||
|
||||
from PySide6.QtCore import QObject, Signal, Slot, QThread
|
||||
|
||||
# Assuming rule_structure defines SourceRule
try:
    from rule_structure import SourceRule
except ImportError:
    print("ERROR (BasePredictionHandler): Failed to import SourceRule. Predictions might fail.")
    # Define a placeholder if the import fails to allow type hinting
    # (annotations using SourceRule still resolve; real predictions will fail).
    class SourceRule: pass
|
||||
|
||||
from abc import ABCMeta
|
||||
from PySide6.QtCore import QObject # Ensure QObject is imported if not already
|
||||
|
||||
# Combine metaclasses to avoid conflict between QObject and ABC
|
||||
class QtABCMeta(type(QObject), ABCMeta):
    """Combined metaclass for classes inheriting from both QObject and ABC.

    QObject and ABC each bring their own metaclass; deriving from both at
    once raises a metaclass conflict unless a common subclass of the two
    metaclasses is supplied — this class is that subclass.
    """
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
class BasePredictionHandler(QObject, ABC, metaclass=QtABCMeta):
    """
    Abstract base class for prediction handlers that generate SourceRule hierarchies.
    Designed to be run in a separate QThread.

    Lifecycle: the owner moves the handler to a QThread, connects
    QThread.started to run(), and reacts to prediction_ready /
    prediction_error to quit and dispose of the thread.
    """
    # --- Standardized Signals ---
    # Emitted when prediction is successfully completed.
    # Args: input_source_identifier (str), results (List[SourceRule])
    prediction_ready = Signal(str, list)

    # Emitted when an error occurs during prediction.
    # Args: input_source_identifier (str), error_message (str)
    prediction_error = Signal(str, str)

    # Emitted for status updates during the prediction process.
    # Args: status_message (str)
    status_update = Signal(str)

    def __init__(self, input_source_identifier: str, parent: QObject | None = None):
        """
        Initializes the base handler.

        Args:
            input_source_identifier: The unique identifier for the input source (e.g., file path).
            parent: The parent QObject.
        """
        super().__init__(parent)
        self.input_source_identifier = input_source_identifier
        self._is_running = False
        self._is_cancelled = False # Added cancellation flag

    @property
    def is_running(self) -> bool:
        """Returns True if the handler is currently processing."""
        return self._is_running

    @Slot()
    def run(self):
        """
        Main execution slot intended to be connected to QThread.started.
        Handles the overall process: setup, execution, error handling, signaling.

        Exactly one of prediction_ready / prediction_error is emitted per run
        (cancellation is reported through prediction_error). _is_running is
        always cleared in the finally block, even on failure.
        """
        if self._is_running:
            log.warning(f"Handler for '{self.input_source_identifier}' is already running. Aborting.")
            return
        if self._is_cancelled:
            log.info(f"Handler for '{self.input_source_identifier}' was cancelled before starting.")
            # Optionally emit an error or specific signal for cancellation before start
            return

        self._is_running = True
        self._is_cancelled = False # Ensure cancel flag is reset at start
        # NOTE(review): despite the name, this is the QThread object itself
        # (not a numeric id); it is only interpolated into log messages.
        thread_id = QThread.currentThread() # Use currentThread() for PySide6
        log.info(f"[{time.time():.4f}][T:{thread_id}] Starting prediction run for: {self.input_source_identifier}")
        self.status_update.emit(f"Starting analysis for '{Path(self.input_source_identifier).name}'...")

        try:
            # --- Execute Core Logic ---
            results = self._perform_prediction()

            if self._is_cancelled:
                log.info(f"Prediction cancelled during execution for: {self.input_source_identifier}")
                self.prediction_error.emit(self.input_source_identifier, "Prediction cancelled by user.")
            else:
                # --- Emit Success Signal ---
                log.info(f"[{time.time():.4f}][T:{thread_id}] Prediction successful for '{self.input_source_identifier}'. Emitting results.")
                self.prediction_ready.emit(self.input_source_identifier, results)
                self.status_update.emit(f"Analysis complete for '{Path(self.input_source_identifier).name}'.")

        except Exception as e:
            # --- Emit Error Signal ---
            log.exception(f"[{time.time():.4f}][T:{thread_id}] Error during prediction for '{self.input_source_identifier}': {e}")
            error_msg = f"Error analyzing '{Path(self.input_source_identifier).name}': {e}"
            self.prediction_error.emit(self.input_source_identifier, error_msg)
            # Status update might be redundant if error is shown elsewhere, but can be useful
            # self.status_update.emit(f"Error: {e}")

        finally:
            # --- Cleanup ---
            self._is_running = False
            log.info(f"[{time.time():.4f}][T:{thread_id}] Finished prediction run for: {self.input_source_identifier}")
            # Note: The thread itself should be managed (quit/deleteLater) by the caller
            # based on the signals emitted (prediction_ready, prediction_error).

    @Slot()
    def cancel(self):
        """
        Sets the cancellation flag. The running process should check this flag periodically.
        """
        log.info(f"Cancellation requested for handler: {self.input_source_identifier}")
        self._is_cancelled = True
        self.status_update.emit(f"Cancellation requested for '{Path(self.input_source_identifier).name}'...")


    @abstractmethod
    def _perform_prediction(self) -> List[SourceRule]:
        """
        Abstract method to be implemented by concrete subclasses.
        This method contains the specific logic for generating the SourceRule list.
        It should periodically check `self._is_cancelled`.

        Returns:
            A list of SourceRule objects representing the prediction results.

        Raises:
            Exception: If any critical error occurs during the prediction process.
        """
        pass
|
||||
@@ -1,8 +1,10 @@
|
||||
from pathlib import Path
|
||||
# gui/delegates.py
|
||||
from PySide6.QtWidgets import QStyledItemDelegate, QLineEdit, QComboBox
|
||||
from PySide6.QtCore import Qt, QModelIndex
|
||||
# Import the new config dictionaries
|
||||
from configuration import load_base_config # Import load_base_config
|
||||
# Import Configuration and ConfigurationError
|
||||
from configuration import Configuration, ConfigurationError, load_base_config # Keep load_base_config for SupplierSearchDelegate
|
||||
from PySide6.QtWidgets import QListWidgetItem # Import QListWidgetItem
|
||||
|
||||
import json
|
||||
import logging
|
||||
@@ -40,29 +42,49 @@ class LineEditDelegate(QStyledItemDelegate):
|
||||
class ComboBoxDelegate(QStyledItemDelegate):
|
||||
"""
|
||||
Delegate for editing string values from a predefined list using a QComboBox.
|
||||
Determines the list source based on column index.
|
||||
Determines the list source based on column index by accessing the
|
||||
UnifiedViewModel directly.
|
||||
"""
|
||||
# REMOVED main_window parameter
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
# REMOVED self.main_window store
|
||||
|
||||
def createEditor(self, parent, option, index: QModelIndex):
|
||||
# Creates the QComboBox editor widget.
|
||||
editor = QComboBox(parent)
|
||||
column = index.column()
|
||||
model = index.model() # Get the model instance
|
||||
model = index.model() # GET model from index
|
||||
|
||||
# Add a "clear" option first, associating None with it.
|
||||
editor.addItem("---", None) # UserData = None
|
||||
|
||||
# Populate based on column using keys from config dictionaries
|
||||
items_keys = None
|
||||
try:
|
||||
base_config = load_base_config() # Load base config
|
||||
if column == 2: # Asset-Type Override (AssetRule)
|
||||
items_keys = list(base_config.get('ASSET_TYPE_DEFINITIONS', {}).keys()) # Access from base_config
|
||||
elif column == 4: # Item-Type Override (FileRule)
|
||||
items_keys = list(base_config.get('FILE_TYPE_DEFINITIONS', {}).keys()) # Access from base_config
|
||||
except Exception as e:
|
||||
log.error(f"Error loading base config for ComboBoxDelegate: {e}")
|
||||
items_keys = [] # Fallback to empty list on error
|
||||
# Populate based on column by accessing the model's cached keys
|
||||
items_keys = [] # Default to empty list
|
||||
|
||||
# --- Get keys directly from the UnifiedViewModel ---
|
||||
# Check if the model is the correct type and has the attributes
|
||||
if hasattr(model, '_asset_type_keys') and hasattr(model, '_file_type_keys'):
|
||||
try:
|
||||
# Use column constants from the model if available
|
||||
COL_ASSET_TYPE = getattr(model, 'COL_ASSET_TYPE', 3) # Default fallback
|
||||
COL_ITEM_TYPE = getattr(model, 'COL_ITEM_TYPE', 4) # Default fallback
|
||||
|
||||
if column == COL_ASSET_TYPE:
|
||||
items_keys = model._asset_type_keys # Use cached keys
|
||||
elif column == COL_ITEM_TYPE:
|
||||
items_keys = model._file_type_keys # Use cached keys
|
||||
# else: # Handle other columns if necessary (optional)
|
||||
# log.debug(f"ComboBoxDelegate applied to unexpected column: {column}")
|
||||
|
||||
except Exception as e:
|
||||
log.error(f"Error getting keys from UnifiedViewModel in ComboBoxDelegate: {e}")
|
||||
items_keys = [] # Fallback on error
|
||||
else:
|
||||
log.warning("ComboBoxDelegate: Model is not a UnifiedViewModel or is missing key attributes (_asset_type_keys, _file_type_keys). Dropdown may be empty.")
|
||||
# --- End key retrieval from model ---
|
||||
|
||||
# REMOVED the entire block that loaded Configuration based on main_window preset
|
||||
|
||||
if items_keys:
|
||||
for item_key in sorted(items_keys): # Sort keys alphabetically for consistency
|
||||
|
||||
340
gui/llm_interaction_handler.py
Normal file
340
gui/llm_interaction_handler.py
Normal file
@@ -0,0 +1,340 @@
|
||||
import os
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from PySide6.QtCore import QObject, Signal, QThread, Slot, QTimer
|
||||
|
||||
# --- Backend Imports ---
# Assuming these might be needed based on MainWindow's usage
try:
    from configuration import Configuration, ConfigurationError, load_base_config
    from .llm_prediction_handler import LLMPredictionHandler # Backend handler
    from rule_structure import SourceRule # For signal emission type hint
except ImportError as e:
    logging.getLogger(__name__).critical(f"Failed to import backend modules for LLMInteractionHandler: {e}")
    # Fallback bindings so later `is None` checks work instead of NameError.
    # NOTE(review): `Configuration` gets no fallback here — any use of it after
    # a failed import would raise NameError; confirm it is never referenced
    # on this path.
    LLMPredictionHandler = None
    load_base_config = None
    ConfigurationError = Exception
    SourceRule = None # Define as None if import fails
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
class LLMInteractionHandler(QObject):
|
||||
"""
|
||||
Handles the logic for interacting with the LLM prediction service,
|
||||
including managing the queue, thread, and communication.
|
||||
"""
|
||||
# Signals to communicate results/status back to MainWindow or other components
|
||||
llm_prediction_ready = Signal(str, list) # input_path, List[SourceRule]
|
||||
llm_prediction_error = Signal(str, str) # input_path, error_message
|
||||
llm_status_update = Signal(str) # status_message
|
||||
llm_processing_state_changed = Signal(bool) # is_processing (True when busy, False when idle)
|
||||
|
||||
    def __init__(self, main_window_ref, parent=None):
        """
        Initializes the handler.

        Args:
            main_window_ref: A reference to the MainWindow instance for accessing
                             shared components like status bar or models if needed.
            parent: The parent QObject.
        """
        super().__init__(parent)
        self.main_window = main_window_ref # Store reference if needed for status updates etc.
        # Unified queue of (input_path, file_list) tuples for initial adds and re-interpretations
        self.llm_processing_queue = []
        # QThread hosting the backend handler while a request is in flight
        self.llm_prediction_thread = None
        # LLMPredictionHandler instance for the in-flight request
        self.llm_prediction_handler = None
        self._is_processing = False # Internal flag to track processing state
||||
|
||||
def _set_processing_state(self, processing: bool):
|
||||
"""Updates the internal processing state and emits a signal."""
|
||||
if self._is_processing != processing:
|
||||
self._is_processing = processing
|
||||
log.debug(f"LLM Handler processing state changed to: {processing}")
|
||||
self.llm_processing_state_changed.emit(processing)
|
||||
|
||||
@Slot(str, list)
|
||||
def queue_llm_request(self, input_path: str, file_list: list | None):
|
||||
"""Adds a request to the LLM processing queue."""
|
||||
log.debug(f"Queueing LLM request for '{input_path}'. Current queue size: {len(self.llm_processing_queue)}")
|
||||
# Avoid duplicates? Check if already in queue
|
||||
is_in_queue = any(item[0] == input_path for item in self.llm_processing_queue)
|
||||
if not is_in_queue:
|
||||
self.llm_processing_queue.append((input_path, file_list))
|
||||
log.info(f"Added '{input_path}' to LLM queue. New size: {len(self.llm_processing_queue)}")
|
||||
# If not currently processing, start the queue
|
||||
if not self._is_processing:
|
||||
# Use QTimer.singleShot to avoid immediate processing if called rapidly
|
||||
QTimer.singleShot(0, self._process_next_llm_item)
|
||||
else:
|
||||
log.debug(f"Skipping duplicate add to LLM queue for: {input_path}")
|
||||
|
||||
@Slot(list)
|
||||
def queue_llm_requests_batch(self, requests: list[tuple[str, list | None]]):
|
||||
"""Adds multiple requests to the LLM processing queue."""
|
||||
added_count = 0
|
||||
for input_path, file_list in requests:
|
||||
is_in_queue = any(item[0] == input_path for item in self.llm_processing_queue)
|
||||
if not is_in_queue:
|
||||
self.llm_processing_queue.append((input_path, file_list))
|
||||
added_count += 1
|
||||
else:
|
||||
log.debug(f"Skipping duplicate add to LLM queue for: {input_path}")
|
||||
|
||||
if added_count > 0:
|
||||
log.info(f"Added {added_count} requests to LLM queue. New size: {len(self.llm_processing_queue)}")
|
||||
# If not currently processing, start the queue
|
||||
if not self._is_processing:
|
||||
QTimer.singleShot(0, self._process_next_llm_item)
|
||||
|
||||
# --- Methods to be moved from MainWindow ---
|
||||
|
||||
    @Slot()
    def _reset_llm_thread_references(self):
        """Resets LLM thread and handler references after the thread finishes.

        Connected to QThread.finished. Order matters here: references are
        cleared first, the processing flag is set idle *before* scheduling the
        next item, and the next dequeue is deferred via a zero-delay timer so
        the finished thread can be torn down cleanly first.
        """
        log.debug("--> Entered LLMInteractionHandler._reset_llm_thread_references")
        log.debug("Resetting LLM prediction thread and handler references.")
        self.llm_prediction_thread = None
        self.llm_prediction_handler = None
        # --- Process next item now that the previous thread is fully finished ---
        log.debug("Previous LLM thread finished. Triggering processing for next item by calling _process_next_llm_item...")
        self._set_processing_state(False) # Mark processing as finished *before* trying next item
        # Use QTimer.singleShot to yield control briefly before starting next item
        QTimer.singleShot(0, self._process_next_llm_item)
        log.debug("<-- Exiting LLMInteractionHandler._reset_llm_thread_references")
|
||||
|
||||
|
||||
    def _start_llm_prediction(self, input_path_str: str, file_list: list | None = None):
        """
        Sets up and starts the LLMPredictionHandler in a separate thread.
        Emits signals for results, errors, or status updates.
        If file_list is not provided, it will be extracted.

        Args:
            input_path_str: Identifier/path of the input source to analyze.
            file_list: Pre-extracted file listing; when None, extraction is
                delegated to MainWindow._extract_file_list.

        On any failure (extraction, config, missing components) this emits
        llm_prediction_error and returns without starting a thread.
        """
        log.debug(f"Attempting to start LLM prediction for: {input_path_str}")
        # Extract file list if not provided (needed for re-interpretation calls)
        if file_list is None:
            log.debug(f"File list not provided for {input_path_str}, extracting...")
            # Need access to MainWindow's _extract_file_list or reimplement
            # For now, assume MainWindow provides it or pass it during queueing
            # Let's assume file_list is always provided correctly for now.
            # If extraction fails before queueing, it won't reach here.
            # If extraction needs to happen here, MainWindow ref is needed.
            # Re-evaluating: MainWindow._extract_file_list is complex.
            # It's better if the caller (MainWindow) extracts and passes the list.
            # We'll modify queue_llm_request to require a non-None list eventually,
            # or pass the main_window ref to call its extraction method.
            # Let's pass main_window ref for now.
            if hasattr(self.main_window, '_extract_file_list'):
                file_list = self.main_window._extract_file_list(input_path_str)
                if file_list is None:
                    error_msg = f"Failed to extract file list for {input_path_str} in _start_llm_prediction."
                    log.error(error_msg)
                    self.llm_status_update.emit(f"Error extracting files for {os.path.basename(input_path_str)}")
                    self.llm_prediction_error.emit(input_path_str, error_msg) # Signal error
                    # If called as part of a queue, we need to ensure the next item is processed.
                    # _reset_llm_thread_references handles this via the finished signal,
                    # but if the thread never starts, we need to trigger manually.
                    # This case should ideally be caught before calling _start_llm_prediction.
                    # We'll assume the queue logic handles failed extraction before calling this.
                    return # Stop if extraction failed
            else:
                error_msg = f"MainWindow reference does not have _extract_file_list method."
                log.error(error_msg)
                self.llm_status_update.emit(f"Internal Error: Cannot extract files for {os.path.basename(input_path_str)}")
                self.llm_prediction_error.emit(input_path_str, error_msg)
                return # Stop

        input_path_obj = Path(input_path_str) # Still needed for basename

        if not file_list:
            error_msg = f"LLM Error: No files found/extracted for {input_path_str}"
            log.error(error_msg)
            self.llm_status_update.emit(f"LLM Error: No files found for {input_path_obj.name}")
            self.llm_prediction_error.emit(input_path_str, error_msg)
            return

        # --- Load Base Config for LLM Settings ---
        if load_base_config is None:
            log.critical("LLM Error: load_base_config function not available.")
            self.llm_status_update.emit("LLM Error: Cannot load base configuration.")
            self.llm_prediction_error.emit(input_path_str, "load_base_config function not available.")
            return
        try:
            base_config = load_base_config()
            if not base_config:
                raise ConfigurationError("Failed to load base configuration (app_settings.json).")

            # Settings dict consumed by LLMPredictionHandler's constructor.
            llm_settings = {
                "llm_endpoint_url": base_config.get('llm_endpoint_url'),
                "api_key": base_config.get('llm_api_key'),
                "model_name": base_config.get('llm_model_name', 'gemini-pro'),
                "prompt_template_content": base_config.get('llm_predictor_prompt'),
                "asset_types": base_config.get('ASSET_TYPE_DEFINITIONS', {}),
                "file_types": base_config.get('FILE_TYPE_DEFINITIONS', {}),
                "examples": base_config.get('llm_predictor_examples', [])
            }
        except ConfigurationError as e:
            log.error(f"LLM Configuration Error: {e}")
            self.llm_status_update.emit(f"LLM Config Error: {e}")
            self.llm_prediction_error.emit(input_path_str, f"LLM Configuration Error: {e}")
            # Optionally show a QMessageBox via main_window ref if critical
            # self.main_window.show_critical_error("LLM Config Error", str(e))
            return
        except Exception as e:
            log.exception(f"Unexpected error loading LLM configuration: {e}")
            self.llm_status_update.emit(f"LLM Config Error: {e}")
            self.llm_prediction_error.emit(input_path_str, f"Unexpected error loading LLM config: {e}")
            return
        # --- End Config Loading ---

        if LLMPredictionHandler is None:
            log.critical("LLMPredictionHandler class not available.")
            self.llm_status_update.emit("LLM Error: Prediction handler component missing.")
            self.llm_prediction_error.emit(input_path_str, "LLMPredictionHandler class not available.")
            return

        # Clean up previous thread/handler if any exist (should not happen if queue logic is correct)
        if self.llm_prediction_thread and self.llm_prediction_thread.isRunning():
            log.warning("Warning: Previous LLM prediction thread still running when trying to start new one. This indicates a potential logic error.")
            # Attempt graceful shutdown (might need more robust handling)
            if self.llm_prediction_handler:
                # Assuming LLMPredictionHandler has a cancel method or similar
                if hasattr(self.llm_prediction_handler, 'cancel'):
                    self.llm_prediction_handler.cancel()
            self.llm_prediction_thread.quit()
            if not self.llm_prediction_thread.wait(1000): # Wait 1 sec
                log.warning("LLM thread did not quit gracefully. Forcing termination.")
                self.llm_prediction_thread.terminate()
                self.llm_prediction_thread.wait() # Wait after terminate
            # Reset references after ensuring termination
            self.llm_prediction_thread = None
            self.llm_prediction_handler = None


        log.info(f"Starting LLM prediction thread for source: {input_path_str} with {len(file_list)} files.")
        self.llm_status_update.emit(f"Starting LLM interpretation for {input_path_obj.name}...")

        self.llm_prediction_thread = QThread(self.main_window) # Parent thread to main window's thread? Or self? Let's try self.
        self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, llm_settings)
        self.llm_prediction_handler.moveToThread(self.llm_prediction_thread)

        # Connect signals from handler to *internal* slots or directly emit signals
        self.llm_prediction_handler.prediction_ready.connect(self._handle_llm_result)
        self.llm_prediction_handler.prediction_error.connect(self._handle_llm_error)
        self.llm_prediction_handler.status_update.connect(self.llm_status_update) # Pass status through

        # Connect thread signals
        self.llm_prediction_thread.started.connect(self.llm_prediction_handler.run)
        # Clean up thread and handler when finished
        self.llm_prediction_thread.finished.connect(self._reset_llm_thread_references)
        self.llm_prediction_thread.finished.connect(self.llm_prediction_handler.deleteLater)
        self.llm_prediction_thread.finished.connect(self.llm_prediction_thread.deleteLater)
        # Also ensure thread quits when handler signals completion/error
        self.llm_prediction_handler.prediction_ready.connect(self.llm_prediction_thread.quit)
        self.llm_prediction_handler.prediction_error.connect(self.llm_prediction_thread.quit)

        self.llm_prediction_thread.start()
        log.debug(f"LLM prediction thread started for {input_path_str}.")
|
||||
|
||||
|
||||
    def is_processing(self) -> bool:
        """Safely checks if the LLM prediction thread is currently running.

        Returns:
            True while a request is in flight. The internal `_is_processing`
            flag is authoritative; the live thread check is only a sanity
            cross-check that logs (but does not override) disagreements.
        """
        # Use the internal flag, which is more reliable than checking thread directly
        # due to potential race conditions during cleanup.
        # The thread check can be a fallback.
        is_running_flag = self._is_processing
        # Also check thread as a safeguard, though the flag should be primary
        try:
            is_thread_alive = self.llm_prediction_thread is not None and self.llm_prediction_thread.isRunning()
            if is_running_flag != is_thread_alive:
                # This might indicate the flag wasn't updated correctly, log it.
                log.warning(f"LLM Handler processing flag ({is_running_flag}) mismatch with thread state ({is_thread_alive}). Flag is primary.")
            return is_running_flag
        except RuntimeError:
            # isRunning() raises RuntimeError once the underlying C++ QThread
            # has been deleted (e.g. after deleteLater).
            log.debug("is_processing: Caught RuntimeError checking isRunning (thread likely deleted).")
            # If thread died unexpectedly, the flag might be stale. Reset it.
            if self._is_processing:
                self._set_processing_state(False)
            return False
|
||||
|
||||
|
||||
def _process_next_llm_item(self):
|
||||
"""Processes the next directory in the unified LLM processing queue."""
|
||||
log.debug(f"--> Entered _process_next_llm_item. Queue size: {len(self.llm_processing_queue)}")
|
||||
|
||||
if self.is_processing():
|
||||
log.info("LLM processing already running. Waiting for current item to finish.")
|
||||
# Do not pop from queue if already running, wait for _reset_llm_thread_references to call this again
|
||||
return
|
||||
|
||||
if not self.llm_processing_queue:
|
||||
log.info("LLM processing queue is empty. Finishing.")
|
||||
self.llm_status_update.emit("LLM processing complete.")
|
||||
self._set_processing_state(False) # Ensure state is set to idle
|
||||
log.debug("<-- Exiting _process_next_llm_item (queue empty)")
|
||||
return
|
||||
|
||||
# Set state to busy *before* starting
|
||||
self._set_processing_state(True)
|
||||
|
||||
# Get next item *without* removing it yet
|
||||
next_item = self.llm_processing_queue[0] # Peek at the first item
|
||||
next_dir, file_list = next_item # Unpack the tuple
|
||||
|
||||
# --- Update Status/Progress ---
|
||||
total_in_queue_now = len(self.llm_processing_queue)
|
||||
status_msg = f"LLM Processing {os.path.basename(next_dir)} ({total_in_queue_now} remaining)..."
|
||||
self.llm_status_update.emit(status_msg)
|
||||
log.info(status_msg)
|
||||
|
||||
# --- Start Prediction (which might fail) ---
|
||||
try:
|
||||
# Pass the potentially None file_list. _start_llm_prediction handles extraction if needed.
|
||||
self._start_llm_prediction(next_dir, file_list=file_list)
|
||||
# --- Pop item *after* successfully starting prediction ---
|
||||
self.llm_processing_queue.pop(0)
|
||||
log.debug(f"Successfully started LLM prediction for {next_dir} and removed from queue.")
|
||||
except Exception as e:
|
||||
log.exception(f"Error occurred *during* _start_llm_prediction call for {next_dir}: {e}")
|
||||
error_msg = f"Error starting LLM for {os.path.basename(next_dir)}: {e}"
|
||||
self.llm_status_update.emit(error_msg)
|
||||
self.llm_prediction_error.emit(next_dir, error_msg) # Signal the error
|
||||
# --- Remove the failed item from the queue ---
|
||||
try:
|
||||
failed_item = self.llm_processing_queue.pop(0)
|
||||
log.warning(f"Removed failed item {failed_item} from LLM queue due to start error.")
|
||||
except IndexError:
|
||||
log.error("Attempted to pop failed item from already empty LLM queue after start error.")
|
||||
# --- Attempt to process the *next* item ---
|
||||
# Reset processing state since this one failed *before* the thread finished signal could
|
||||
self._set_processing_state(False)
|
||||
# Use QTimer.singleShot to avoid deep recursion
|
||||
QTimer.singleShot(100, self._process_next_llm_item) # Try next item after a short delay
|
||||
|
||||
# --- Internal Slots to Handle Results/Errors from LLMPredictionHandler ---
|
||||
@Slot(str, list)
|
||||
def _handle_llm_result(self, input_path: str, source_rules: list):
|
||||
"""Internal slot to receive results and emit the public signal."""
|
||||
log.debug(f"LLM Handler received result for {input_path}. Emitting llm_prediction_ready.")
|
||||
self.llm_prediction_ready.emit(input_path, source_rules)
|
||||
# Note: The thread's finished signal calls _reset_llm_thread_references,
|
||||
# which then calls _process_next_llm_item.
|
||||
|
||||
@Slot(str, str)
|
||||
def _handle_llm_error(self, input_path: str, error_message: str):
|
||||
"""Internal slot to receive errors and emit the public signal."""
|
||||
log.debug(f"LLM Handler received error for {input_path}: {error_message}. Emitting llm_prediction_error.")
|
||||
self.llm_prediction_error.emit(input_path, error_message)
|
||||
# Note: The thread's finished signal calls _reset_llm_thread_references,
|
||||
# which then calls _process_next_llm_item.
|
||||
|
||||
def clear_queue(self):
|
||||
"""Clears the LLM processing queue."""
|
||||
log.info(f"Clearing LLM processing queue ({len(self.llm_processing_queue)} items).")
|
||||
self.llm_processing_queue.clear()
|
||||
# TODO: Should we also attempt to cancel any *currently* running LLM task?
|
||||
# This might be complex. For now, just clears the queue of pending items.
|
||||
if self.is_processing():
|
||||
log.warning("LLM queue cleared, but a task is currently running. It will complete.")
|
||||
else:
|
||||
self.llm_status_update.emit("LLM queue cleared.")
|
||||
@@ -1,7 +1,11 @@
|
||||
import os
|
||||
import json
|
||||
import requests
|
||||
from PySide6.QtCore import QObject, Signal, Slot, QThread
|
||||
import re # Added import for regex
|
||||
import logging # Add logging
|
||||
from pathlib import Path # Add Path for basename
|
||||
from PySide6.QtCore import QObject, Slot # Keep QObject for parent type hint, Slot for cancel if kept separate
|
||||
# Removed Signal, QThread as they are handled by BasePredictionHandler or caller
|
||||
from typing import List, Dict, Any
|
||||
|
||||
# Assuming rule_structure defines SourceRule, AssetRule, FileRule etc.
|
||||
@@ -12,92 +16,115 @@ from rule_structure import SourceRule, AssetRule, FileRule # Ensure AssetRule an
|
||||
# Adjust the import path if necessary
|
||||
# Removed Configuration import, will use load_base_config if needed or passed settings
|
||||
# from configuration import Configuration
|
||||
from configuration import load_base_config # Keep this for now if needed elsewhere, or remove if settings are always passed
|
||||
# from configuration import load_base_config # No longer needed here
|
||||
from .base_prediction_handler import BasePredictionHandler # Import base class
|
||||
|
||||
class LLMPredictionHandler(QObject):
|
||||
log = logging.getLogger(__name__) # Setup logger
|
||||
|
||||
class LLMPredictionHandler(BasePredictionHandler):
|
||||
"""
|
||||
Handles the interaction with an LLM for predicting asset structures
|
||||
based on a directory's file list. Designed to run in a QThread.
|
||||
based on a directory's file list. Inherits from BasePredictionHandler.
|
||||
"""
|
||||
# Signal emitted when prediction for a directory is complete
|
||||
# Arguments: directory_path (str), results (List[SourceRule])
|
||||
prediction_ready = Signal(str, list)
|
||||
# Signal emitted on error
|
||||
# Arguments: directory_path (str), error_message (str)
|
||||
prediction_error = Signal(str, str)
|
||||
# Signal to update status message in the GUI
|
||||
status_update = Signal(str)
|
||||
# Signals (prediction_ready, prediction_error, status_update) are inherited
|
||||
|
||||
def __init__(self, input_path_str: str, file_list: list, llm_settings: dict, parent: QObject = None): # Accept input_path_str and file_list
|
||||
def __init__(self, input_source_identifier: str, file_list: list, llm_settings: dict, parent: QObject = None):
|
||||
"""
|
||||
Initializes the handler.
|
||||
Initializes the LLM handler.
|
||||
|
||||
Args:
|
||||
input_path_str: The absolute path to the original input source (directory or archive).
|
||||
file_list: A list of relative file paths extracted from the input source.
|
||||
llm_settings: A dictionary containing necessary LLM configuration.
|
||||
input_source_identifier: The unique identifier for the input source (e.g., file path).
|
||||
file_list: A list of *relative* file paths extracted from the input source.
|
||||
(LLM expects relative paths based on the prompt template).
|
||||
llm_settings: A dictionary containing necessary LLM configuration
|
||||
(endpoint_url, api_key, prompt_template_content, etc.).
|
||||
parent: The parent QObject.
|
||||
"""
|
||||
super().__init__(parent)
|
||||
self.input_path_str = input_path_str # Store original input path
|
||||
self.file_list = file_list # Store the provided file list
|
||||
super().__init__(input_source_identifier, parent)
|
||||
# input_source_identifier is stored by the base class as self.input_source_identifier
|
||||
self.file_list = file_list # Store the provided relative file list
|
||||
self.llm_settings = llm_settings # Store the settings dictionary
|
||||
self.endpoint_url = self.llm_settings.get('llm_endpoint_url')
|
||||
self.api_key = self.llm_settings.get('llm_api_key')
|
||||
self._is_cancelled = False
|
||||
@Slot()
|
||||
def run(self):
|
||||
"""
|
||||
The main execution method to be called when the thread starts.
|
||||
Orchestrates the prediction process for the given directory.
|
||||
"""
|
||||
# Directory check is no longer needed here, input path is just for context
|
||||
# File list is provided via __init__
|
||||
# _is_running and _is_cancelled are handled by the base class
|
||||
|
||||
# The run() and cancel() slots are provided by the base class.
|
||||
# We only need to implement the core logic in _perform_prediction.
|
||||
|
||||
def _perform_prediction(self) -> List[SourceRule]:
|
||||
"""
|
||||
Performs the LLM prediction by preparing the prompt, calling the LLM,
|
||||
and parsing the response. Implements the abstract method from BasePredictionHandler.
|
||||
|
||||
Returns:
|
||||
A list containing a single SourceRule object based on the LLM response,
|
||||
or an empty list if prediction fails or yields no results.
|
||||
|
||||
Raises:
|
||||
ValueError: If required settings (like endpoint URL or prompt template) are missing.
|
||||
ConnectionError: If the LLM API call fails due to network issues or timeouts.
|
||||
Exception: For other errors during prompt preparation, API call, or parsing.
|
||||
"""
|
||||
log.info(f"Performing LLM prediction for: {self.input_source_identifier}")
|
||||
base_name = Path(self.input_source_identifier).name
|
||||
|
||||
# Use the file list passed during initialization
|
||||
if not self.file_list:
|
||||
log.warning(f"No files provided for LLM prediction for {self.input_source_identifier}. Returning empty list.")
|
||||
self.status_update.emit(f"No files found for {base_name}.") # Use base signal
|
||||
return [] # Return empty list, not an error
|
||||
|
||||
# Check for cancellation before preparing prompt
|
||||
if self._is_cancelled:
|
||||
log.info("LLM prediction cancelled before preparing prompt.")
|
||||
return []
|
||||
|
||||
# --- Prepare Prompt ---
|
||||
self.status_update.emit(f"Preparing LLM input for {base_name}...")
|
||||
try:
|
||||
self.status_update.emit(f"Preparing LLM input for {os.path.basename(self.input_path_str)}...")
|
||||
if self._is_cancelled: return
|
||||
|
||||
# Use the file list passed during initialization
|
||||
if not self.file_list:
|
||||
self.prediction_ready.emit(self.input_path_str, []) # Emit empty list if no files
|
||||
return
|
||||
if self._is_cancelled: return
|
||||
|
||||
prompt = self._prepare_prompt(self.file_list) # Use self.file_list
|
||||
if self._is_cancelled: return
|
||||
|
||||
self.status_update.emit(f"Calling LLM for {os.path.basename(self.input_path_str)}...")
|
||||
llm_response_json_str = self._call_llm(prompt)
|
||||
if self._is_cancelled: return
|
||||
|
||||
self.status_update.emit(f"Parsing LLM response for {os.path.basename(self.input_path_str)}...")
|
||||
predicted_rules = self._parse_llm_response(llm_response_json_str)
|
||||
if self._is_cancelled: return
|
||||
|
||||
self.prediction_ready.emit(self.input_path_str, predicted_rules) # Use input_path_str
|
||||
self.status_update.emit(f"LLM interpretation complete for {os.path.basename(self.input_path_str)}.")
|
||||
|
||||
# Pass relative file list
|
||||
prompt = self._prepare_prompt(self.file_list)
|
||||
except Exception as e:
|
||||
error_msg = f"Error during LLM prediction for {self.input_path_str}: {e}"
|
||||
print(error_msg) # Log the full error
|
||||
self.prediction_error.emit(self.input_path_str, f"An error occurred: {e}") # Use input_path_str
|
||||
finally:
|
||||
# Ensure thread cleanup or final signals if needed
|
||||
pass
|
||||
log.exception("Error preparing LLM prompt.")
|
||||
raise ValueError(f"Error preparing LLM prompt: {e}") from e # Re-raise for base handler
|
||||
|
||||
@Slot()
|
||||
def cancel(self):
|
||||
"""
|
||||
Sets the cancellation flag.
|
||||
"""
|
||||
self._is_cancelled = True
|
||||
self.status_update.emit(f"Cancellation requested for {os.path.basename(self.input_path_str)}...") # Use input_path_str
|
||||
if self._is_cancelled:
|
||||
log.info("LLM prediction cancelled after preparing prompt.")
|
||||
return []
|
||||
|
||||
# --- Call LLM ---
|
||||
self.status_update.emit(f"Calling LLM for {base_name}...")
|
||||
try:
|
||||
llm_response_json_str = self._call_llm(prompt)
|
||||
except Exception as e:
|
||||
log.exception("Error calling LLM API.")
|
||||
# Re-raise potentially specific errors (ConnectionError, ValueError) or a generic one
|
||||
raise RuntimeError(f"Error calling LLM: {e}") from e
|
||||
|
||||
if self._is_cancelled:
|
||||
log.info("LLM prediction cancelled after calling LLM.")
|
||||
return []
|
||||
|
||||
# --- Parse Response ---
|
||||
self.status_update.emit(f"Parsing LLM response for {base_name}...")
|
||||
try:
|
||||
predicted_rules = self._parse_llm_response(llm_response_json_str)
|
||||
except Exception as e:
|
||||
log.exception("Error parsing LLM response.")
|
||||
raise ValueError(f"Error parsing LLM response: {e}") from e # Re-raise for base handler
|
||||
|
||||
if self._is_cancelled:
|
||||
log.info("LLM prediction cancelled after parsing response.")
|
||||
return []
|
||||
|
||||
log.info(f"LLM prediction finished successfully for '{self.input_source_identifier}'.")
|
||||
# The base class run() method will emit prediction_ready with these results
|
||||
return predicted_rules
|
||||
|
||||
|
||||
# Removed _get_file_list method as file list is now passed in __init__
|
||||
# --- Helper Methods (Keep these internal to this class) ---
|
||||
|
||||
def _prepare_prompt(self, file_list: List[str]) -> str:
|
||||
def _prepare_prompt(self, relative_file_list: List[str]) -> str:
|
||||
"""
|
||||
Prepares the full prompt string to send to the LLM using stored settings.
|
||||
"""
|
||||
@@ -124,8 +151,8 @@ class LLMPredictionHandler(QObject):
|
||||
file_defs = json.dumps(self.llm_settings.get('file_types', {}), indent=4)
|
||||
examples = json.dumps(self.llm_settings.get('examples', []), indent=2)
|
||||
|
||||
# Format file list as a single string with newlines
|
||||
file_list_str = "\n".join(file_list)
|
||||
# Format *relative* file list as a single string with newlines
|
||||
file_list_str = "\n".join(relative_file_list)
|
||||
|
||||
# Replace placeholders
|
||||
prompt = prompt_template.replace('{ASSET_TYPE_DEFINITIONS}', asset_defs)
|
||||
@@ -173,75 +200,47 @@ class LLMPredictionHandler(QObject):
|
||||
# "response_format": { "type": "json_object" } # If supported by endpoint
|
||||
}
|
||||
|
||||
self.status_update.emit(f"Sending request to LLM at {self.endpoint_url}...")
|
||||
# Status update emitted by _perform_prediction before calling this
|
||||
# self.status_update.emit(f"Sending request to LLM at {self.endpoint_url}...")
|
||||
print(f"--- Calling LLM API: {self.endpoint_url} ---")
|
||||
# print(f"--- Payload Preview ---\n{json.dumps(payload, indent=2)[:500]}...\n--- END Payload Preview ---")
|
||||
|
||||
try:
|
||||
# Make the POST request with a timeout (e.g., 120 seconds for potentially long LLM responses)
|
||||
response = requests.post(
|
||||
self.endpoint_url,
|
||||
headers=headers,
|
||||
json=payload,
|
||||
# Make the POST request with configured timeout, default to 120
|
||||
timeout=self.llm_settings.get("llm_request_timeout", 120)
|
||||
)
|
||||
response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
|
||||
# Note: Exceptions raised here (Timeout, RequestException, ValueError)
|
||||
# will be caught by the _perform_prediction method's handler.
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
error_msg = f"LLM request timed out after {self.llm_settings.get('llm_request_timeout', 120)} seconds."
|
||||
print(error_msg)
|
||||
raise ConnectionError(error_msg)
|
||||
except requests.exceptions.RequestException as e:
|
||||
error_msg = f"LLM request failed: {e}"
|
||||
print(error_msg)
|
||||
# Attempt to get more detail from response if available
|
||||
try:
|
||||
if e.response is not None:
|
||||
print(f"LLM Response Status Code: {e.response.status_code}")
|
||||
print(f"LLM Response Text: {e.response.text[:500]}...") # Log partial response text
|
||||
error_msg += f" (Status: {e.response.status_code})"
|
||||
except Exception:
|
||||
pass # Ignore errors during error reporting enhancement
|
||||
raise ConnectionError(error_msg) # Raise a more generic error for the GUI
|
||||
# Make the POST request with a timeout
|
||||
response = requests.post(
|
||||
self.endpoint_url,
|
||||
headers=headers,
|
||||
json=payload,
|
||||
timeout=self.llm_settings.get("llm_request_timeout", 120)
|
||||
)
|
||||
response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
|
||||
|
||||
# Parse the JSON response
|
||||
try:
|
||||
response_data = response.json()
|
||||
# print(f"--- LLM Raw Response ---\n{json.dumps(response_data, indent=2)}\n--- END Raw Response ---") # Debugging
|
||||
response_data = response.json()
|
||||
# print(f"--- LLM Raw Response ---\n{json.dumps(response_data, indent=2)}\n--- END Raw Response ---") # Debugging
|
||||
|
||||
# Extract content - structure depends on the API (OpenAI format assumed)
|
||||
if "choices" in response_data and len(response_data["choices"]) > 0:
|
||||
message = response_data["choices"][0].get("message", {})
|
||||
content = message.get("content")
|
||||
if content:
|
||||
# The content itself should be the JSON string we asked for
|
||||
print("--- LLM Response Content Extracted Successfully ---")
|
||||
return content.strip()
|
||||
else:
|
||||
raise ValueError("LLM response missing 'content' in choices[0].message.")
|
||||
# Extract content - structure depends on the API (OpenAI format assumed)
|
||||
if "choices" in response_data and len(response_data["choices"]) > 0:
|
||||
message = response_data["choices"][0].get("message", {})
|
||||
content = message.get("content")
|
||||
if content:
|
||||
# The content itself should be the JSON string we asked for
|
||||
log.debug("--- LLM Response Content Extracted Successfully ---")
|
||||
return content.strip()
|
||||
else:
|
||||
raise ValueError("LLM response missing 'choices' array or it's empty.")
|
||||
|
||||
except json.JSONDecodeError:
|
||||
error_msg = f"Failed to decode LLM JSON response. Response text: {response.text[:500]}..."
|
||||
print(error_msg)
|
||||
raise ValueError(error_msg)
|
||||
except Exception as e:
|
||||
# Capture the potentially problematic response_data in the error message
|
||||
response_data_str = "Not available"
|
||||
try:
|
||||
response_data_str = json.dumps(response_data) if 'response_data' in locals() else response.text[:500] + "..."
|
||||
except Exception:
|
||||
pass # Avoid errors during error reporting
|
||||
error_msg = f"Error parsing LLM response structure: {e}. Response data: {response_data_str}"
|
||||
print(error_msg)
|
||||
raise ValueError(error_msg)
|
||||
raise ValueError("LLM response missing 'content' in choices[0].message.")
|
||||
else:
|
||||
raise ValueError("LLM response missing 'choices' array or it's empty.")
|
||||
|
||||
def _parse_llm_response(self, llm_response_json_str: str) -> List[SourceRule]:
|
||||
"""
|
||||
Parses the LLM's JSON response string into a list of SourceRule objects.
|
||||
"""
|
||||
# Note: Exceptions (JSONDecodeError, ValueError) raised here
|
||||
# will be caught by the _perform_prediction method's handler.
|
||||
|
||||
# Strip potential markdown code fences before parsing
|
||||
clean_json_str = llm_response_json_str.strip()
|
||||
if clean_json_str.startswith("```json"):
|
||||
@@ -250,102 +249,112 @@ class LLMPredictionHandler(QObject):
|
||||
clean_json_str = clean_json_str[:-3] # Remove ```
|
||||
clean_json_str = clean_json_str.strip() # Remove any extra whitespace
|
||||
|
||||
# --- ADDED: Remove <think> tags ---
|
||||
clean_json_str = re.sub(r'<think>.*?</think>', '', clean_json_str, flags=re.DOTALL | re.IGNORECASE)
|
||||
clean_json_str = clean_json_str.strip() # Strip again after potential removal
|
||||
# ---------------------------------
|
||||
|
||||
try:
|
||||
response_data = json.loads(clean_json_str)
|
||||
except json.JSONDecodeError as e:
|
||||
# Log the full cleaned string that caused the error for better debugging
|
||||
error_detail = f"Failed to decode LLM JSON response: {e}\nFull Cleaned Response:\n{clean_json_str}"
|
||||
print(f"ERROR: {error_detail}") # Print full error detail to console
|
||||
log.error(f"ERROR: {error_detail}") # Log full error detail to console
|
||||
raise ValueError(error_detail) # Raise the error with full detail
|
||||
|
||||
if "predicted_assets" not in response_data or not isinstance(response_data["predicted_assets"], list):
|
||||
raise ValueError("Invalid LLM response format: 'predicted_assets' key missing or not a list.")
|
||||
|
||||
source_rules = []
|
||||
# We assume one SourceRule per input source processed by this handler instance
|
||||
source_rule = SourceRule(input_path=self.input_path_str) # Use input_path_str
|
||||
# Use self.input_source_identifier from the base class
|
||||
source_rule = SourceRule(input_path=self.input_source_identifier)
|
||||
|
||||
# Access valid types from the settings dictionary
|
||||
valid_asset_types = list(self.llm_settings.get('asset_types', {}).keys())
|
||||
valid_file_types = list(self.llm_settings.get('file_types', {}).keys())
|
||||
|
||||
for asset_data in response_data["predicted_assets"]:
|
||||
# Check for cancellation within the loop
|
||||
if self._is_cancelled:
|
||||
log.info("LLM prediction cancelled during response parsing (assets).")
|
||||
return []
|
||||
|
||||
if not isinstance(asset_data, dict):
|
||||
print(f"Warning: Skipping invalid asset data (not a dict): {asset_data}")
|
||||
log.warning(f"Skipping invalid asset data (not a dict): {asset_data}")
|
||||
continue
|
||||
|
||||
asset_name = asset_data.get("suggested_asset_name", "Unnamed_Asset")
|
||||
asset_type = asset_data.get("predicted_asset_type")
|
||||
|
||||
if asset_type not in valid_asset_types:
|
||||
print(f"Warning: Invalid predicted_asset_type '{asset_type}' for asset '{asset_name}'. Defaulting or skipping.")
|
||||
# Decide handling: default to a generic type or skip? For now, skip.
|
||||
continue # Or assign a default like 'Unknown' if defined
|
||||
log.warning(f"Invalid predicted_asset_type '{asset_type}' for asset '{asset_name}'. Skipping asset.")
|
||||
continue # Skip this asset
|
||||
|
||||
# --- MODIFIED LINES for AssetRule ---
|
||||
# Create the AssetRule instance first
|
||||
asset_rule = AssetRule(asset_name=asset_name, asset_type=asset_type)
|
||||
source_rule.assets.append(asset_rule) # Append to the list
|
||||
source_rule.assets.append(asset_rule)
|
||||
|
||||
if "files" not in asset_data or not isinstance(asset_data["files"], list):
|
||||
print(f"Warning: 'files' key missing or not a list in asset '{asset_name}'. Skipping files for this asset.")
|
||||
log.warning(f"'files' key missing or not a list in asset '{asset_name}'. Skipping files for this asset.")
|
||||
continue
|
||||
|
||||
for file_data in asset_data["files"]:
|
||||
# Check for cancellation within the inner loop
|
||||
if self._is_cancelled:
|
||||
log.info("LLM prediction cancelled during response parsing (files).")
|
||||
return []
|
||||
|
||||
if not isinstance(file_data, dict):
|
||||
print(f"Warning: Skipping invalid file data (not a dict) in asset '{asset_name}': {file_data}")
|
||||
log.warning(f"Skipping invalid file data (not a dict) in asset '{asset_name}': {file_data}")
|
||||
continue
|
||||
|
||||
file_path_rel = file_data.get("file_path")
|
||||
file_path_rel = file_data.get("file_path") # LLM provides relative path
|
||||
file_type = file_data.get("predicted_file_type")
|
||||
|
||||
if not file_path_rel:
|
||||
print(f"Warning: Missing 'file_path' in file data for asset '{asset_name}'. Skipping file.")
|
||||
log.warning(f"Missing 'file_path' in file data for asset '{asset_name}'. Skipping file.")
|
||||
continue
|
||||
|
||||
# Convert relative path from LLM (using '/') back to absolute OS-specific path
|
||||
# Note: LLM gets relative paths, so we join with the handler's base input path
|
||||
file_path_abs = os.path.join(self.input_path_str, file_path_rel.replace('/', os.sep)) # Use input_path_str
|
||||
# We need the original input path (directory or archive) to make it absolute
|
||||
# Use self.input_source_identifier which holds the original path
|
||||
# IMPORTANT: Ensure the LLM is actually providing paths relative to the *root* of the input source.
|
||||
try:
|
||||
# Use Pathlib for safer joining, assuming input_source_identifier is the parent dir/archive path
|
||||
# If input_source_identifier is an archive file, this logic might need adjustment
|
||||
# depending on where files were extracted. For now, assume it's the base path.
|
||||
base_path = Path(self.input_source_identifier)
|
||||
# If the input was a file (like a zip), use its parent directory as the base for joining relative paths
|
||||
if base_path.is_file():
|
||||
base_path = base_path.parent
|
||||
# Clean the relative path potentially coming from LLM
|
||||
clean_rel_path = Path(file_path_rel.strip().replace('\\', '/'))
|
||||
file_path_abs = str(base_path / clean_rel_path)
|
||||
except Exception as path_e:
|
||||
log.warning(f"Error constructing absolute path for '{file_path_rel}' relative to '{self.input_source_identifier}': {path_e}. Skipping file.")
|
||||
continue
|
||||
|
||||
|
||||
if file_type not in valid_file_types:
|
||||
print(f"Warning: Invalid predicted_file_type '{file_type}' for file '{file_path_rel}'. Defaulting to EXTRA.")
|
||||
log.warning(f"Invalid predicted_file_type '{file_type}' for file '{file_path_rel}'. Defaulting to EXTRA.")
|
||||
file_type = "EXTRA" # Default to EXTRA if invalid type from LLM
|
||||
|
||||
# --- MODIFIED LINES for FileRule ---
|
||||
# Create the FileRule instance first
|
||||
file_rule = FileRule(file_path=file_path_abs, item_type=file_type) # Use correct field names
|
||||
asset_rule.files.append(file_rule) # Append to the list
|
||||
# Create the FileRule instance
|
||||
# Add default values for fields not provided by LLM
|
||||
file_rule = FileRule(
|
||||
file_path=file_path_abs,
|
||||
item_type=file_type,
|
||||
item_type_override=file_type, # Initial override
|
||||
target_asset_name_override=asset_name, # Default to asset name
|
||||
output_format_override=None,
|
||||
is_gloss_source=False, # LLM doesn't predict this
|
||||
standard_map_type=None, # LLM doesn't predict this directly
|
||||
resolution_override=None,
|
||||
channel_merge_instructions={}
|
||||
)
|
||||
asset_rule.files.append(file_rule)
|
||||
|
||||
source_rules.append(source_rule)
|
||||
return source_rules
|
||||
|
||||
# Example of how this might be used in MainWindow (conceptual)
|
||||
# class MainWindow(QMainWindow):
|
||||
# # ... other methods ...
|
||||
# def _start_llm_prediction(self, directory_path):
|
||||
# self.llm_thread = QThread()
|
||||
# self.llm_handler = LLMPredictionHandler(directory_path, self.config_manager)
|
||||
# self.llm_handler.moveToThread(self.llm_thread)
|
||||
#
|
||||
# # Connect signals
|
||||
# self.llm_handler.prediction_ready.connect(self._on_llm_prediction_ready)
|
||||
# self.llm_handler.prediction_error.connect(self._on_llm_prediction_error)
|
||||
# self.llm_handler.status_update.connect(self.statusBar().showMessage)
|
||||
# self.llm_thread.started.connect(self.llm_handler.run)
|
||||
# self.llm_thread.finished.connect(self.llm_thread.deleteLater)
|
||||
# self.llm_handler.prediction_ready.connect(self.llm_thread.quit) # Quit thread on success
|
||||
# self.llm_handler.prediction_error.connect(self.llm_thread.quit) # Quit thread on error
|
||||
#
|
||||
# self.llm_thread.start()
|
||||
#
|
||||
# @Slot(str, list)
|
||||
# def _on_llm_prediction_ready(self, directory_path, results):
|
||||
# print(f"LLM Prediction ready for {directory_path}: {len(results)} source rules found.")
|
||||
# # Process results, update model, etc.
|
||||
# # Make sure to clean up thread/handler references if needed
|
||||
# self.llm_handler.deleteLater() # Schedule handler for deletion
|
||||
#
|
||||
# @Slot(str, str)
|
||||
# def _on_llm_prediction_error(self, directory_path, error_message):
|
||||
# print(f"LLM Prediction error for {directory_path}: {error_message}")
|
||||
# # Show error to user, clean up thread/handler
|
||||
# self.llm_handler.deleteLater()
|
||||
# Removed conceptual example usage comments
|
||||
43
gui/log_console_widget.py
Normal file
43
gui/log_console_widget.py
Normal file
@@ -0,0 +1,43 @@
|
||||
# gui/log_console_widget.py
|
||||
import logging
|
||||
from PySide6.QtWidgets import (
|
||||
QWidget, QVBoxLayout, QTextEdit, QLabel, QSizePolicy
|
||||
)
|
||||
from PySide6.QtCore import Slot
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
class LogConsoleWidget(QWidget):
|
||||
"""
|
||||
A dedicated widget to display log messages.
|
||||
"""
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
self._init_ui()
|
||||
|
||||
def _init_ui(self):
|
||||
"""Initializes the UI elements for the log console."""
|
||||
layout = QVBoxLayout(self)
|
||||
layout.setContentsMargins(0, 5, 0, 0) # Add some top margin
|
||||
|
||||
log_console_label = QLabel("Log Console:")
|
||||
self.log_console_output = QTextEdit()
|
||||
self.log_console_output.setReadOnly(True)
|
||||
# self.log_console_output.setMaximumHeight(150) # Let the parent layout control height
|
||||
self.log_console_output.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding) # Allow vertical expansion
|
||||
|
||||
layout.addWidget(log_console_label)
|
||||
layout.addWidget(self.log_console_output)
|
||||
|
||||
# Initially hidden, visibility controlled by MainWindow
|
||||
self.setVisible(False)
|
||||
|
||||
@Slot(str)
|
||||
def _append_log_message(self, message):
|
||||
"""Appends a log message to the QTextEdit console."""
|
||||
self.log_console_output.append(message)
|
||||
# Auto-scroll to the bottom
|
||||
self.log_console_output.verticalScrollBar().setValue(self.log_console_output.verticalScrollBar().maximum())
|
||||
|
||||
# Note: Visibility is controlled externally via setVisible(),
|
||||
# so the _toggle_log_console_visibility slot is not needed here.
|
||||
633
gui/main_panel_widget.py
Normal file
633
gui/main_panel_widget.py
Normal file
@@ -0,0 +1,633 @@
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from pathlib import Path
|
||||
from functools import partial
|
||||
|
||||
from PySide6.QtWidgets import QApplication # Added for processEvents
|
||||
from PySide6.QtWidgets import (
|
||||
QWidget, QVBoxLayout, QHBoxLayout, QSplitter, QTableView,
|
||||
QPushButton, QComboBox, QTableWidget, QTableWidgetItem, QHeaderView,
|
||||
QProgressBar, QLabel, QFrame, QCheckBox, QSpinBox, QListWidget, QTextEdit,
|
||||
QLineEdit, QMessageBox, QFileDialog, QInputDialog, QListWidgetItem, QTabWidget,
|
||||
QFormLayout, QGroupBox, QAbstractItemView, QSizePolicy, QTreeView, QMenu
|
||||
)
|
||||
from PySide6.QtCore import Qt, Signal, Slot, QPoint, QModelIndex, QTimer
|
||||
from PySide6.QtGui import QColor, QAction, QPalette, QClipboard, QGuiApplication # Added QGuiApplication for clipboard
|
||||
|
||||
# --- Local GUI Imports ---
|
||||
# Import delegates and models needed by the panel
|
||||
from .delegates import LineEditDelegate, ComboBoxDelegate, SupplierSearchDelegate
|
||||
from .unified_view_model import UnifiedViewModel # Assuming UnifiedViewModel is passed in
|
||||
|
||||
# --- Backend Imports ---
|
||||
# Import Rule Structures if needed for context menus etc.
|
||||
from rule_structure import SourceRule, AssetRule, FileRule
|
||||
# Import config loading if defaults are needed directly here (though better passed from MainWindow)
|
||||
try:
|
||||
from configuration import ConfigurationError, load_base_config
|
||||
except ImportError:
|
||||
ConfigurationError = Exception
|
||||
load_base_config = None
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
class MainPanelWidget(QWidget):
    """
    Widget handling the main interaction panel:
    - Output directory selection
    - Asset preview/editing view (Unified View)
    - Blender post-processing options
    - Processing controls (Start, Cancel, Clear, LLM Re-interpret)

    The panel owns no processing logic itself; it gathers user input and
    communicates with MainWindow exclusively through the Qt signals below.
    """
    # --- Signals Emitted by the Panel ---
    # Request to add new input paths (e.g., from drag/drop handled by MainWindow)
    # add_paths_requested = Signal(list) # Maybe not needed if MainWindow handles drop directly

    # Request to start the main processing job
    process_requested = Signal(dict) # Emits dict with settings: output_dir, overwrite, workers, blender_enabled, ng_path, mat_path

    # Request to cancel the ongoing processing job
    cancel_requested = Signal()

    # Request to clear the current queue/view
    clear_queue_requested = Signal()

    # Request to re-interpret selected items using LLM
    llm_reinterpret_requested = Signal(list) # Emits list of source paths

    # Notify when the output directory changes
    output_dir_changed = Signal(str)

    # Notify when Blender settings change
    blender_settings_changed = Signal(bool, str, str) # enabled, ng_path, mat_path
|
||||
|
||||
def __init__(self, unified_model: UnifiedViewModel, parent=None):
|
||||
"""
|
||||
Initializes the MainPanelWidget.
|
||||
|
||||
Args:
|
||||
unified_model: The shared UnifiedViewModel instance.
|
||||
parent: The parent widget.
|
||||
"""
|
||||
super().__init__(parent)
|
||||
self.unified_model = unified_model
|
||||
self.llm_processing_active = False # Track if LLM is running (set by MainWindow)
|
||||
|
||||
# Get project root for resolving default paths if needed here
|
||||
script_dir = Path(__file__).parent
|
||||
self.project_root = script_dir.parent
|
||||
|
||||
self._setup_ui()
|
||||
self._connect_signals()
|
||||
|
||||
    def _setup_ui(self):
        """Sets up the UI elements for the panel.

        Builds, top to bottom: the output-directory row, the unified tree view
        with its edit delegates, a progress bar, the Blender post-processing
        group box, and the bottom control row (overwrite/workers/buttons).
        """
        main_layout = QVBoxLayout(self)
        main_layout.setContentsMargins(5, 5, 5, 5) # Reduce margins

        # --- Output Directory Selection ---
        output_layout = QHBoxLayout()
        self.output_dir_label = QLabel("Output Directory:")
        self.output_path_edit = QLineEdit()
        self.browse_output_button = QPushButton("Browse...")
        output_layout.addWidget(self.output_dir_label)
        output_layout.addWidget(self.output_path_edit, 1)
        output_layout.addWidget(self.browse_output_button)
        main_layout.addLayout(output_layout)

        # --- Set Initial Output Path (Copied from MainWindow) ---
        # Consider passing this default path from MainWindow instead of reloading config here
        if load_base_config:
            try:
                base_config = load_base_config()
                output_base_dir_config = base_config.get('OUTPUT_BASE_DIR', '../Asset_Processor_Output')
                default_output_dir = (self.project_root / output_base_dir_config).resolve()
                self.output_path_edit.setText(str(default_output_dir))
                log.info(f"MainPanelWidget: Default output directory set to: {default_output_dir}")
            except ConfigurationError as e:
                log.error(f"MainPanelWidget: Error reading base configuration for default output directory: {e}")
                self.output_path_edit.setText("")
            except Exception as e:
                log.exception(f"MainPanelWidget: Error setting default output directory: {e}")
                self.output_path_edit.setText("")
        else:
            # load_base_config import failed at module load; leave the field blank.
            log.warning("MainPanelWidget: load_base_config not available to set default output path.")
            self.output_path_edit.setText("")


        # --- Unified View Setup ---
        self.unified_view = QTreeView()
        self.unified_view.setModel(self.unified_model) # Set the passed-in model

        # Instantiate Delegates
        lineEditDelegate = LineEditDelegate(self.unified_view)
        # ComboBoxDelegate needs access to MainWindow's get_llm_source_preset_name,
        # which might require passing MainWindow or a callback here.
        # For now, let's assume it can work without it or we adapt it later.
        # TODO: Revisit ComboBoxDelegate dependency
        comboBoxDelegate = ComboBoxDelegate(self) # Pass only parent (self)
        supplierSearchDelegate = SupplierSearchDelegate(self) # Pass parent

        # Set Delegates for Columns
        self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_SUPPLIER, supplierSearchDelegate)
        self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_ASSET_TYPE, comboBoxDelegate)
        self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_TARGET_ASSET, lineEditDelegate)
        self.unified_view.setItemDelegateForColumn(UnifiedViewModel.COL_ITEM_TYPE, comboBoxDelegate)

        # Configure View Appearance
        self.unified_view.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
        self.unified_view.setAlternatingRowColors(True)
        self.unified_view.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)
        self.unified_view.setEditTriggers(QAbstractItemView.EditTrigger.DoubleClicked | QAbstractItemView.EditTrigger.SelectedClicked | QAbstractItemView.EditTrigger.EditKeyPressed)
        self.unified_view.setSelectionMode(QAbstractItemView.SelectionMode.ExtendedSelection) # Allow multi-select for re-interpret

        # Configure Header Resize Modes
        header = self.unified_view.header()
        header.setStretchLastSection(False)
        header.setSectionResizeMode(UnifiedViewModel.COL_NAME, QHeaderView.ResizeMode.ResizeToContents)
        header.setSectionResizeMode(UnifiedViewModel.COL_TARGET_ASSET, QHeaderView.ResizeMode.Stretch)
        header.setSectionResizeMode(UnifiedViewModel.COL_SUPPLIER, QHeaderView.ResizeMode.ResizeToContents)
        header.setSectionResizeMode(UnifiedViewModel.COL_ASSET_TYPE, QHeaderView.ResizeMode.ResizeToContents)
        header.setSectionResizeMode(UnifiedViewModel.COL_ITEM_TYPE, QHeaderView.ResizeMode.ResizeToContents)

        # Enable custom context menu
        self.unified_view.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)

        # Add the Unified View to the main layout
        main_layout.addWidget(self.unified_view, 1) # Give it stretch factor 1

        # --- Progress Bar ---
        self.progress_bar = QProgressBar()
        self.progress_bar.setValue(0)
        self.progress_bar.setTextVisible(True)
        self.progress_bar.setFormat("Idle") # Initial format
        main_layout.addWidget(self.progress_bar)

        # --- Blender Integration Controls ---
        blender_group = QGroupBox("Blender Post-Processing")
        blender_layout = QVBoxLayout(blender_group)

        self.blender_integration_checkbox = QCheckBox("Run Blender Scripts After Processing")
        self.blender_integration_checkbox.setToolTip("If checked, attempts to run create_nodegroups.py and create_materials.py in Blender.")
        blender_layout.addWidget(self.blender_integration_checkbox)

        # Nodegroup Blend Path
        nodegroup_layout = QHBoxLayout()
        nodegroup_layout.addWidget(QLabel("Nodegroup .blend:"))
        self.nodegroup_blend_path_input = QLineEdit()
        self.browse_nodegroup_blend_button = QPushButton("...")
        self.browse_nodegroup_blend_button.setFixedWidth(30)
        nodegroup_layout.addWidget(self.nodegroup_blend_path_input)
        nodegroup_layout.addWidget(self.browse_nodegroup_blend_button)
        blender_layout.addLayout(nodegroup_layout)

        # Materials Blend Path
        materials_layout = QHBoxLayout()
        materials_layout.addWidget(QLabel("Materials .blend:"))
        self.materials_blend_path_input = QLineEdit()
        self.browse_materials_blend_button = QPushButton("...")
        self.browse_materials_blend_button.setFixedWidth(30)
        materials_layout.addWidget(self.materials_blend_path_input)
        materials_layout.addWidget(self.browse_materials_blend_button)
        blender_layout.addLayout(materials_layout)

        # Initialize paths from config (Copied from MainWindow)
        # Consider passing these defaults from MainWindow
        if load_base_config:
            try:
                base_config = load_base_config()
                default_ng_path = base_config.get('DEFAULT_NODEGROUP_BLEND_PATH', '')
                default_mat_path = base_config.get('DEFAULT_MATERIALS_BLEND_PATH', '')
                self.nodegroup_blend_path_input.setText(default_ng_path if default_ng_path else "")
                self.materials_blend_path_input.setText(default_mat_path if default_mat_path else "")
            except ConfigurationError as e:
                log.error(f"MainPanelWidget: Error reading base configuration for default Blender paths: {e}")
            except Exception as e:
                log.error(f"MainPanelWidget: Error reading default Blender paths from config: {e}")
        else:
            log.warning("MainPanelWidget: load_base_config not available to set default Blender paths.")


        # Disable Blender controls initially if checkbox is unchecked
        self.nodegroup_blend_path_input.setEnabled(False)
        self.browse_nodegroup_blend_button.setEnabled(False)
        self.materials_blend_path_input.setEnabled(False)
        self.browse_materials_blend_button.setEnabled(False)

        main_layout.addWidget(blender_group) # Add the group box to the main layout

        # --- Bottom Controls ---
        bottom_controls_layout = QHBoxLayout()
        self.overwrite_checkbox = QCheckBox("Overwrite Existing")
        self.overwrite_checkbox.setToolTip("If checked, existing output folders for processed assets will be deleted and replaced.")
        bottom_controls_layout.addWidget(self.overwrite_checkbox)

        self.workers_label = QLabel("Workers:")
        self.workers_spinbox = QSpinBox()
        # Default to half the CPU cores (minimum 1) as a conservative concurrency level.
        default_workers = 1
        try:
            cores = os.cpu_count()
            if cores: default_workers = max(1, cores // 2)
        except NotImplementedError: pass
        self.workers_spinbox.setMinimum(1)
        self.workers_spinbox.setMaximum(os.cpu_count() or 32)
        self.workers_spinbox.setValue(default_workers)
        self.workers_spinbox.setToolTip("Number of assets to process concurrently.")
        bottom_controls_layout.addWidget(self.workers_label)
        bottom_controls_layout.addWidget(self.workers_spinbox)
        bottom_controls_layout.addStretch(1)

        # --- LLM Re-interpret Button ---
        self.llm_reinterpret_button = QPushButton("Re-interpret Selected with LLM")
        self.llm_reinterpret_button.setToolTip("Re-run LLM interpretation on the selected source items.")
        self.llm_reinterpret_button.setEnabled(False) # Initially disabled
        bottom_controls_layout.addWidget(self.llm_reinterpret_button)

        self.clear_queue_button = QPushButton("Clear Queue")
        self.start_button = QPushButton("Start Processing")
        self.cancel_button = QPushButton("Cancel")
        self.cancel_button.setEnabled(False)

        bottom_controls_layout.addWidget(self.clear_queue_button)
        bottom_controls_layout.addWidget(self.start_button)
        bottom_controls_layout.addWidget(self.cancel_button)
        main_layout.addLayout(bottom_controls_layout)
|
||||
|
||||
    def _connect_signals(self):
        """Connect internal UI signals to slots or emit panel signals.

        Internal slots (prefixed _) gather data and re-emit the panel's public
        signals; a few buttons forward straight to a public signal.
        """
        # Output Directory
        self.browse_output_button.clicked.connect(self._browse_for_output_directory)
        self.output_path_edit.editingFinished.connect(self._on_output_path_changed) # Emit signal when user finishes editing

        # Unified View
        self.unified_view.selectionModel().selectionChanged.connect(self._update_llm_reinterpret_button_state)
        self.unified_view.customContextMenuRequested.connect(self._show_unified_view_context_menu)

        # Blender Controls
        self.blender_integration_checkbox.toggled.connect(self._toggle_blender_controls)
        self.browse_nodegroup_blend_button.clicked.connect(self._browse_for_nodegroup_blend)
        self.browse_materials_blend_button.clicked.connect(self._browse_for_materials_blend)
        # Emit signal when paths change
        self.nodegroup_blend_path_input.editingFinished.connect(self._emit_blender_settings_changed)
        self.materials_blend_path_input.editingFinished.connect(self._emit_blender_settings_changed)
        # NOTE: toggled is connected twice on purpose — once to enable/disable the
        # path widgets, once to broadcast the settings change.
        self.blender_integration_checkbox.toggled.connect(self._emit_blender_settings_changed)


        # Bottom Buttons
        self.clear_queue_button.clicked.connect(self.clear_queue_requested) # Emit signal directly
        self.start_button.clicked.connect(self._on_start_processing_clicked) # Use slot to gather data
        self.cancel_button.clicked.connect(self.cancel_requested) # Emit signal directly
        self.llm_reinterpret_button.clicked.connect(self._on_llm_reinterpret_clicked) # Use slot to gather data
|
||||
|
||||
# --- Slots for Internal UI Logic ---
|
||||
|
||||
@Slot()
|
||||
def _browse_for_output_directory(self):
|
||||
"""Opens a dialog to select the output directory."""
|
||||
current_path = self.output_path_edit.text()
|
||||
if not current_path or not Path(current_path).is_dir():
|
||||
current_path = str(self.project_root) # Use project root as fallback
|
||||
|
||||
directory = QFileDialog.getExistingDirectory(
|
||||
self,
|
||||
"Select Output Directory",
|
||||
current_path,
|
||||
QFileDialog.Option.ShowDirsOnly | QFileDialog.Option.DontResolveSymlinks
|
||||
)
|
||||
if directory:
|
||||
self.output_path_edit.setText(directory)
|
||||
self._on_output_path_changed() # Explicitly call the change handler
|
||||
|
||||
@Slot()
|
||||
def _on_output_path_changed(self):
|
||||
"""Emits the output_dir_changed signal."""
|
||||
self.output_dir_changed.emit(self.output_path_edit.text())
|
||||
|
||||
@Slot(bool)
|
||||
def _toggle_blender_controls(self, checked):
|
||||
"""Enable/disable Blender path inputs based on the checkbox state."""
|
||||
self.nodegroup_blend_path_input.setEnabled(checked)
|
||||
self.browse_nodegroup_blend_button.setEnabled(checked)
|
||||
self.materials_blend_path_input.setEnabled(checked)
|
||||
self.browse_materials_blend_button.setEnabled(checked)
|
||||
# No need to emit here, the checkbox toggle signal is connected separately
|
||||
|
||||
def _browse_for_blend_file(self, line_edit_widget: QLineEdit):
|
||||
"""Opens a dialog to select a .blend file and updates the line edit."""
|
||||
current_path = line_edit_widget.text()
|
||||
start_dir = str(Path(current_path).parent) if current_path and Path(current_path).exists() else str(self.project_root)
|
||||
|
||||
file_path, _ = QFileDialog.getOpenFileName(
|
||||
self,
|
||||
"Select Blender File",
|
||||
start_dir,
|
||||
"Blender Files (*.blend);;All Files (*)"
|
||||
)
|
||||
if file_path:
|
||||
line_edit_widget.setText(file_path)
|
||||
line_edit_widget.editingFinished.emit() # Trigger editingFinished to emit change signal
|
||||
|
||||
    @Slot()
    def _browse_for_nodegroup_blend(self):
        """Open the .blend picker for the nodegroup path field."""
        self._browse_for_blend_file(self.nodegroup_blend_path_input)
|
||||
|
||||
    @Slot()
    def _browse_for_materials_blend(self):
        """Open the .blend picker for the materials path field."""
        self._browse_for_blend_file(self.materials_blend_path_input)
|
||||
|
||||
@Slot()
|
||||
def _emit_blender_settings_changed(self):
|
||||
"""Gathers current Blender settings and emits the blender_settings_changed signal."""
|
||||
enabled = self.blender_integration_checkbox.isChecked()
|
||||
ng_path = self.nodegroup_blend_path_input.text()
|
||||
mat_path = self.materials_blend_path_input.text()
|
||||
self.blender_settings_changed.emit(enabled, ng_path, mat_path)
|
||||
|
||||
@Slot()
|
||||
def _on_start_processing_clicked(self):
|
||||
"""Gathers settings and emits the process_requested signal."""
|
||||
output_dir = self.output_path_edit.text().strip()
|
||||
if not output_dir:
|
||||
QMessageBox.warning(self, "Missing Output Directory", "Please select an output directory.")
|
||||
return
|
||||
|
||||
# Basic validation (MainWindow should do more thorough validation)
|
||||
try:
|
||||
Path(output_dir).mkdir(parents=True, exist_ok=True)
|
||||
except Exception as e:
|
||||
QMessageBox.warning(self, "Invalid Output Directory", f"Cannot use output directory:\n{output_dir}\n\nError: {e}")
|
||||
return
|
||||
|
||||
settings = {
|
||||
"output_dir": output_dir,
|
||||
"overwrite": self.overwrite_checkbox.isChecked(),
|
||||
"workers": self.workers_spinbox.value(),
|
||||
"blender_enabled": self.blender_integration_checkbox.isChecked(),
|
||||
"nodegroup_blend_path": self.nodegroup_blend_path_input.text(),
|
||||
"materials_blend_path": self.materials_blend_path_input.text()
|
||||
}
|
||||
self.process_requested.emit(settings)
|
||||
|
||||
@Slot()
|
||||
def _update_llm_reinterpret_button_state(self):
|
||||
"""Enables/disables the LLM re-interpret button based on selection and LLM status."""
|
||||
selection_model = self.unified_view.selectionModel()
|
||||
has_selection = selection_model is not None and selection_model.hasSelection()
|
||||
# Enable only if there's a selection AND LLM is not currently active
|
||||
self.llm_reinterpret_button.setEnabled(has_selection and not self.llm_processing_active)
|
||||
|
||||
    @Slot()
    def _on_llm_reinterpret_clicked(self):
        """Gathers selected source paths and emits the llm_reinterpret_requested signal.

        For every selected index, walks up the node hierarchy to its owning
        SourceRule, collects each rule's input_path (deduplicated), keeps only
        paths that are directories or .zip files, and emits the resulting list.
        Does nothing if no selection exists or an LLM job is already running.
        """
        selected_indexes = self.unified_view.selectionModel().selectedIndexes()
        if not selected_indexes:
            return

        if self.llm_processing_active:
            QMessageBox.warning(self, "Busy", "LLM processing is already in progress. Please wait.")
            return

        unique_source_dirs = set()
        processed_source_paths = set() # Track processed source paths to avoid duplicates
        for index in selected_indexes:
            if not index.isValid(): continue
            item_node = index.internalPointer()
            if not item_node: continue

            # Traverse up to find the SourceRule node (Simplified traversal)
            source_node = None
            current_node = item_node
            while current_node is not None:
                if isinstance(current_node, SourceRule):
                    source_node = current_node
                    break
                # Simplified parent traversal - adjust if model structure is different.
                # Fallback order matters: generic 'parent' (method or attribute)
                # first, then the rule-structure-specific parent attributes.
                parent_attr = getattr(current_node, 'parent', None) # Check for generic 'parent'
                if callable(parent_attr): # Check if parent is a method (like in QStandardItemModel)
                    current_node = parent_attr()
                elif parent_attr: # Check if parent is an attribute
                    current_node = parent_attr
                else: # Try specific parent attributes if generic fails
                    parent_source = getattr(current_node, 'parent_source', None)
                    if parent_source:
                        current_node = parent_source
                    else:
                        parent_asset = getattr(current_node, 'parent_asset', None)
                        if parent_asset:
                            current_node = parent_asset
                        else: # Reached top or unexpected node type
                            current_node = None


            if source_node and hasattr(source_node, 'input_path') and source_node.input_path:
                source_path_str = source_node.input_path
                if source_path_str in processed_source_paths:
                    continue
                source_path_obj = Path(source_path_str)
                # Only directories and zip archives are valid LLM re-interpretation sources.
                if source_path_obj.is_dir() or (source_path_obj.is_file() and source_path_obj.suffix.lower() == '.zip'):
                    unique_source_dirs.add(source_path_str)
                    processed_source_paths.add(source_path_str)
                else:
                    log.warning(f"Skipping non-directory/zip source for re-interpretation: {source_path_str}")
            # else: # Reduce log noise
            #     log.warning(f"Could not determine valid SourceRule or input_path for selected index: {index.row()},{index.column()} (Item type: {type(item_node).__name__})")


        if not unique_source_dirs:
            # self.statusBar().showMessage("No valid source directories found for selected items.", 5000) # Status bar is in MainWindow
            log.warning("No valid source directories found for selected items to re-interpret.")
            return

        self.llm_reinterpret_requested.emit(list(unique_source_dirs))
|
||||
|
||||
|
||||
@Slot(QPoint)
|
||||
def _show_unified_view_context_menu(self, point: QPoint):
|
||||
"""Shows the context menu for the unified view."""
|
||||
index = self.unified_view.indexAt(point)
|
||||
if not index.isValid():
|
||||
return
|
||||
|
||||
item_node = index.internalPointer()
|
||||
is_source_item = isinstance(item_node, SourceRule)
|
||||
|
||||
menu = QMenu(self)
|
||||
|
||||
if is_source_item:
|
||||
copy_llm_example_action = QAction("Copy LLM Example to Clipboard", self)
|
||||
copy_llm_example_action.setToolTip("Copies a JSON structure representing the input files and predicted output, suitable for LLM examples.")
|
||||
copy_llm_example_action.triggered.connect(lambda: self._copy_llm_example_to_clipboard(index))
|
||||
menu.addAction(copy_llm_example_action)
|
||||
menu.addSeparator()
|
||||
|
||||
# Add other actions...
|
||||
|
||||
if not menu.isEmpty():
|
||||
menu.exec(self.unified_view.viewport().mapToGlobal(point))
|
||||
|
||||
    @Slot(QModelIndex)
    def _copy_llm_example_to_clipboard(self, index: QModelIndex):
        """Copies a JSON structure for the selected source item to the clipboard.

        Builds {"input": newline-joined file paths, "output": {"predicted_assets":
        [...]}} from the SourceRule hierarchy. Assets and file lists are sorted
        for a deterministic, diff-friendly example. No-op for non-SourceRule rows.
        """
        if not index.isValid(): return
        item_node = index.internalPointer()
        if not isinstance(item_node, SourceRule): return

        source_rule: SourceRule = item_node
        log.info(f"Attempting to generate LLM example JSON for source: {source_rule.input_path}")

        all_file_paths = []
        predicted_assets_data = []

        for asset_rule in source_rule.assets:
            asset_files_data = []
            for file_rule in asset_rule.files:
                # Skip FileRules without a path; they contribute nothing to the example.
                if file_rule.file_path:
                    all_file_paths.append(file_rule.file_path)
                    asset_files_data.append({
                        "file_path": file_rule.file_path,
                        "predicted_file_type": file_rule.item_type or "UNKNOWN"
                    })
            asset_files_data.sort(key=lambda x: x['file_path'])
            predicted_assets_data.append({
                "suggested_asset_name": asset_rule.asset_name or "UnnamedAsset",
                "predicted_asset_type": asset_rule.asset_type or "UNKNOWN",
                "files": asset_files_data
            })

        # Deterministic ordering so repeated copies produce identical JSON.
        predicted_assets_data.sort(key=lambda x: x['suggested_asset_name'])
        all_file_paths.sort()

        if not all_file_paths:
            log.warning(f"No file paths found for source: {source_rule.input_path}. Cannot generate example.")
            # Cannot show status bar message here
            return

        llm_example = {
            "input": "\n".join(all_file_paths),
            "output": {"predicted_assets": predicted_assets_data}
        }

        try:
            json_string = json.dumps(llm_example, indent=2)
            clipboard = QGuiApplication.clipboard() # Use QGuiApplication
            if clipboard:
                clipboard.setText(json_string)
                log.info(f"Copied LLM example JSON to clipboard for source: {source_rule.input_path}")
                # Cannot show status bar message here
            else:
                log.error("Failed to get system clipboard.")
        except Exception as e:
            log.exception(f"Error copying LLM example JSON to clipboard: {e}")
|
||||
|
||||
|
||||
# --- Public Slots for MainWindow to Call ---
|
||||
|
||||
@Slot(int, int)
|
||||
def update_progress_bar(self, current_count, total_count):
|
||||
"""Updates the progress bar display."""
|
||||
if total_count > 0:
|
||||
percentage = int((current_count / total_count) * 100)
|
||||
log.debug(f"Updating progress bar: current={current_count}, total={total_count}, calculated_percentage={percentage}") # DEBUG LOG
|
||||
self.progress_bar.setValue(percentage)
|
||||
self.progress_bar.setFormat(f"%p% ({current_count}/{total_count})")
|
||||
QApplication.processEvents() # Force GUI update
|
||||
else:
|
||||
self.progress_bar.setValue(0)
|
||||
self.progress_bar.setFormat("0/0")
|
||||
|
||||
@Slot(str)
|
||||
def set_progress_bar_text(self, text: str):
|
||||
"""Sets the text format of the progress bar."""
|
||||
self.progress_bar.setFormat(text)
|
||||
# Reset value if setting text like "Idle" or "Waiting..."
|
||||
if not "%" in text:
|
||||
self.progress_bar.setValue(0)
|
||||
|
||||
|
||||
@Slot(bool)
|
||||
def set_controls_enabled(self, enabled: bool):
|
||||
"""Enables or disables controls within the panel."""
|
||||
# Enable/disable most controls based on the 'enabled' flag
|
||||
self.output_path_edit.setEnabled(enabled)
|
||||
self.browse_output_button.setEnabled(enabled)
|
||||
self.unified_view.setEnabled(enabled)
|
||||
self.overwrite_checkbox.setEnabled(enabled)
|
||||
self.workers_spinbox.setEnabled(enabled)
|
||||
self.clear_queue_button.setEnabled(enabled)
|
||||
self.blender_integration_checkbox.setEnabled(enabled)
|
||||
|
||||
# Start button is enabled only if controls are generally enabled AND preset mode is active (handled by MainWindow)
|
||||
# Cancel button is enabled only when processing is active (handled by MainWindow)
|
||||
# LLM button state depends on selection and LLM status (handled by _update_llm_reinterpret_button_state)
|
||||
|
||||
# Blender path inputs depend on both 'enabled' and the checkbox state
|
||||
blender_paths_enabled = enabled and self.blender_integration_checkbox.isChecked()
|
||||
self.nodegroup_blend_path_input.setEnabled(blender_paths_enabled)
|
||||
self.browse_nodegroup_blend_button.setEnabled(blender_paths_enabled)
|
||||
self.materials_blend_path_input.setEnabled(blender_paths_enabled)
|
||||
self.browse_materials_blend_button.setEnabled(blender_paths_enabled)
|
||||
|
||||
# Update LLM button state explicitly when controls are enabled/disabled
|
||||
if enabled:
|
||||
self._update_llm_reinterpret_button_state()
|
||||
else:
|
||||
self.llm_reinterpret_button.setEnabled(False)
|
||||
|
||||
|
||||
    @Slot(bool)
    def set_start_button_enabled(self, enabled: bool):
        """Sets the enabled state of the Start Processing button.

        Called by MainWindow, which owns the preset/processing-state logic.
        """
        self.start_button.setEnabled(enabled)
|
||||
|
||||
    @Slot(str)
    def set_start_button_text(self, text: str):
        """Sets the text of the Start Processing button (e.g. to reflect state)."""
        self.start_button.setText(text)
|
||||
|
||||
    @Slot(bool)
    def set_cancel_button_enabled(self, enabled: bool):
        """Sets the enabled state of the Cancel button.

        Called by MainWindow while a processing job is active.
        """
        self.cancel_button.setEnabled(enabled)
|
||||
|
||||
    @Slot(bool)
    def set_llm_processing_status(self, active: bool):
        """Informs the panel whether LLM processing is active.

        Args:
            active: True while an LLM job runs; gates the re-interpret button.
        """
        self.llm_processing_active = active
        self._update_llm_reinterpret_button_state() # Update button state based on new status
|
||||
|
||||
    # TODO: Add method to get current output path if needed by MainWindow before processing
    def get_output_directory(self) -> str:
        """Return the output directory text, stripped of surrounding whitespace."""
        return self.output_path_edit.text().strip()
|
||||
|
||||
    # TODO: Add method to get current Blender settings if needed by MainWindow before processing
    def get_blender_settings(self) -> dict:
        """Return the current Blender post-processing settings.

        Returns:
            dict with keys "enabled", "nodegroup_blend_path", "materials_blend_path".
        """
        return {
            "enabled": self.blender_integration_checkbox.isChecked(),
            "nodegroup_blend_path": self.nodegroup_blend_path_input.text(),
            "materials_blend_path": self.materials_blend_path_input.text()
        }
|
||||
|
||||
    # TODO: Add method to get current worker count if needed by MainWindow before processing
    def get_worker_count(self) -> int:
        """Return the currently selected number of concurrent workers."""
        return self.workers_spinbox.value()
|
||||
|
||||
    # TODO: Add method to get current overwrite setting if needed by MainWindow before processing
    def get_overwrite_setting(self) -> bool:
        """Return True if existing output folders should be overwritten."""
        return self.overwrite_checkbox.isChecked()
|
||||
|
||||
    # --- Delegate Dependency ---
    # This method might be needed by ComboBoxDelegate if it relies on MainWindow's logic
    def get_llm_source_preset_name(self) -> str | None:
        """
        Placeholder for providing context to delegates.

        Ideally, the required info (like last preset name) should be passed
        from MainWindow when the delegate needs it, or the delegate's dependency
        should be refactored.

        Returns:
            Always None until a real implementation is wired up.
        """
        log.warning("MainPanelWidget.get_llm_source_preset_name called - needs proper implementation or refactoring.")
        # This needs to get the info from MainWindow, perhaps via a signal/slot or passed reference.
        # Returning None for now.
        return None
|
||||
2436
gui/main_window.py
2436
gui/main_window.py
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,4 @@
|
||||
# gui/prediction_handler.py
|
||||
# gui/rule_based_prediction_handler.py
|
||||
import logging
|
||||
from pathlib import Path
|
||||
import time
|
||||
@@ -11,7 +11,8 @@ from collections import defaultdict, Counter # Added Counter
|
||||
from typing import List, Dict, Any # For type hinting
|
||||
|
||||
# --- PySide6 Imports ---
|
||||
from PySide6.QtCore import QObject, Signal, QThread, Slot
|
||||
from PySide6.QtCore import QObject, Slot # Keep QObject for parent type hint, Slot for classify_files if kept as method
|
||||
# Removed Signal, QThread as they are handled by BasePredictionHandler or caller
|
||||
|
||||
# --- Backend Imports ---
|
||||
import sys
|
||||
@@ -21,16 +22,13 @@ if str(project_root) not in sys.path:
|
||||
sys.path.insert(0, str(project_root))
|
||||
|
||||
try:
|
||||
from configuration import Configuration, ConfigurationError, load_base_config # Import Configuration, ConfigurationError, and load_base_config
|
||||
# AssetProcessor might not be needed directly anymore if logic is moved here
|
||||
# from asset_processor import AssetProcessor, AssetProcessingError
|
||||
from rule_structure import SourceRule, AssetRule, FileRule # Removed AssetType, ItemType
|
||||
# Removed: import config as app_config # Import project's config module
|
||||
# Removed: Import the new dictionaries directly for easier access
|
||||
# Removed: from config import ASSET_TYPE_DEFINITIONS, FILE_TYPE_DEFINITIONS
|
||||
from configuration import Configuration, ConfigurationError # load_base_config might not be needed here
|
||||
from rule_structure import SourceRule, AssetRule, FileRule
|
||||
from .base_prediction_handler import BasePredictionHandler # Import the base class
|
||||
BACKEND_AVAILABLE = True
|
||||
except ImportError as e:
|
||||
print(f"ERROR (PredictionHandler): Failed to import backend/config modules: {e}")
|
||||
# Update error message source
|
||||
print(f"ERROR (RuleBasedPredictionHandler): Failed to import backend/config/base modules: {e}")
|
||||
# Define placeholders if imports fail
|
||||
Configuration = None
|
||||
load_base_config = None # Placeholder
|
||||
@@ -44,7 +42,7 @@ except ImportError as e:
|
||||
log = logging.getLogger(__name__)
|
||||
# Basic config if logger hasn't been set up elsewhere
|
||||
if not log.hasHandlers():
|
||||
logging.basicConfig(level=logging.INFO, format='%(levelname)s (PredictHandler): %(message)s')
|
||||
logging.basicConfig(level=logging.INFO, format='%(levelname)s (RuleBasedPredictHandler): %(message)s')
|
||||
|
||||
|
||||
# Helper function for classification (can be moved outside class if preferred)
|
||||
@@ -303,254 +301,191 @@ def classify_files(file_list: List[str], config: Configuration) -> Dict[str, Lis
|
||||
return dict(temp_grouped_files)
|
||||
|
||||
|
||||
class PredictionHandler(QObject):
|
||||
class RuleBasedPredictionHandler(BasePredictionHandler):
|
||||
"""
|
||||
Handles running predictions in a separate thread to avoid GUI freezes.
|
||||
Handles running rule-based predictions in a separate thread using presets.
|
||||
Generates the initial SourceRule hierarchy based on file lists and presets.
|
||||
Inherits from BasePredictionHandler for common threading and signaling.
|
||||
"""
|
||||
# --- Signals ---
|
||||
# Emitted when the hierarchical rule structure is ready for a single source
|
||||
rule_hierarchy_ready = Signal(list) # Emits a LIST containing ONE SourceRule object
|
||||
# Emitted when prediction/hierarchy generation for a source is done (emits the input_source_identifier)
|
||||
prediction_finished = Signal(str)
|
||||
# Emitted for status updates
|
||||
status_message = Signal(str, int)
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
self._is_running = False
|
||||
def __init__(self, input_source_identifier: str, original_input_paths: list[str], preset_name: str, parent: QObject = None):
|
||||
"""
|
||||
Initializes the rule-based handler.
|
||||
|
||||
@property
|
||||
def is_running(self):
|
||||
return self._is_running
|
||||
Args:
|
||||
input_source_identifier: The unique identifier for the input source (e.g., file path).
|
||||
original_input_paths: List of absolute file paths extracted from the source.
|
||||
preset_name: The name of the preset configuration to use.
|
||||
parent: The parent QObject.
|
||||
"""
|
||||
super().__init__(input_source_identifier, parent)
|
||||
self.original_input_paths = original_input_paths
|
||||
self.preset_name = preset_name
|
||||
# _is_running is handled by the base class
|
||||
# Keep track of the current request being processed by this persistent handler
|
||||
self._current_input_path = None
|
||||
self._current_file_list = None
|
||||
self._current_preset_name = None
|
||||
|
||||
# Removed _predict_single_asset method
|
||||
|
||||
@Slot(str, list, str) # Explicitly define types for the slot
|
||||
# Re-introduce run_prediction as the main slot to receive requests
|
||||
@Slot(str, list, str)
|
||||
def run_prediction(self, input_source_identifier: str, original_input_paths: list[str], preset_name: str):
|
||||
"""
|
||||
Generates the initial SourceRule hierarchy for a given source identifier
|
||||
(which could be a folder or archive path), extracting the actual file list first.
|
||||
Generates the initial SourceRule hierarchy for a given source identifier,
|
||||
file list, and preset name. Populates only overridable fields based on
|
||||
classification and preset defaults.
|
||||
This method is intended to be run in a separate QThread.
|
||||
This method is intended to be run in the handler's QThread.
|
||||
Uses the base class signals for reporting results/errors.
|
||||
"""
|
||||
thread_id = QThread.currentThread()
|
||||
log.info(f"[{time.time():.4f}][T:{thread_id}] --> Entered PredictionHandler.run_prediction.")
|
||||
# Note: file_list argument is renamed to original_input_paths for clarity,
|
||||
# but the signal passes the list of source paths, not the content files yet.
|
||||
# We use input_source_identifier as the primary path to analyze.
|
||||
log.info(f"VERIFY: PredictionHandler received request. Source: '{input_source_identifier}', Original Paths: {original_input_paths}, Preset: '{preset_name}'") # DEBUG Verify
|
||||
log.info(f"Source Identifier: '{input_source_identifier}', Preset: '{preset_name}'")
|
||||
|
||||
if self._is_running:
|
||||
log.warning("Prediction is already running for another source. Aborting this run.")
|
||||
# Don't emit finished, let the running one complete.
|
||||
# Check if already running a prediction for a *different* source
|
||||
# Allow re-triggering for the *same* source if needed (e.g., preset changed)
|
||||
if self._is_running and self._current_input_path != input_source_identifier:
|
||||
log.warning(f"RuleBasedPredictionHandler is busy with '{self._current_input_path}'. Ignoring request for '{input_source_identifier}'.")
|
||||
# Optionally emit an error signal specific to this condition
|
||||
# self.prediction_error.emit(input_source_identifier, "Handler busy with another prediction.")
|
||||
return
|
||||
if not BACKEND_AVAILABLE:
|
||||
log.error("Backend/config modules not available. Cannot run prediction.")
|
||||
self.status_message.emit("Error: Backend components missing.", 5000)
|
||||
# self.prediction_finished.emit() # Don't emit finished if never started properly
|
||||
return
|
||||
if not preset_name:
|
||||
log.warning("No preset selected for prediction.")
|
||||
self.status_message.emit("No preset selected.", 3000)
|
||||
# self.prediction_finished.emit()
|
||||
return
|
||||
# Check the identifier path itself
|
||||
source_path = Path(input_source_identifier)
|
||||
if not source_path.exists():
|
||||
log.warning(f"Input source path does not exist: '{input_source_identifier}'. Skipping prediction.")
|
||||
self.status_message.emit("Input path not found.", 3000)
|
||||
self.rule_hierarchy_ready.emit([])
|
||||
self.prediction_finished.emit(input_source_identifier)
|
||||
return
|
||||
|
||||
|
||||
self._is_running = True
|
||||
self.status_message.emit(f"Analyzing '{source_path.name}'...", 0)
|
||||
self._is_cancelled = False # Reset cancellation flag for new request
|
||||
self._current_input_path = input_source_identifier
|
||||
self._current_file_list = original_input_paths
|
||||
self._current_preset_name = preset_name
|
||||
|
||||
config: Configuration | None = None
|
||||
# Removed: asset_type_definitions: Dict[str, Dict] = {}
|
||||
# Removed: file_type_definitions: Dict[str, Dict] = {} # These are ItemType names
|
||||
log.info(f"Starting rule-based prediction for: {input_source_identifier} using preset: {preset_name}")
|
||||
self.status_update.emit(f"Starting analysis for '{Path(input_source_identifier).name}'...") # Use base signal
|
||||
|
||||
try:
|
||||
config = Configuration(preset_name)
|
||||
# Removed: Load allowed types from the project's config module (now dictionaries)
|
||||
# Removed: if app_config:
|
||||
# Removed: asset_type_definitions = getattr(app_config, 'ASSET_TYPE_DEFINITIONS', {})
|
||||
# Removed: file_type_definitions = getattr(app_config, 'FILE_TYPE_DEFINITIONS', {})
|
||||
# Removed: log.debug(f"Loaded AssetType Definitions: {list(asset_type_definitions.keys())}")
|
||||
# Removed: log.debug(f"Loaded FileType Definitions (ItemTypes): {list(file_type_definitions.keys())}")
|
||||
# Removed: else:
|
||||
# Removed: log.warning("Project config module not loaded. Cannot get type definitions.")
|
||||
|
||||
except ConfigurationError as e:
|
||||
log.error(f"Failed to load configuration for preset '{preset_name}': {e}")
|
||||
self.status_message.emit(f"Error loading preset '{preset_name}': {e}", 5000)
|
||||
self.prediction_finished.emit(input_source_identifier)
|
||||
self._is_running = False
|
||||
return
|
||||
except Exception as e:
|
||||
log.exception(f"Unexpected error loading configuration or allowed types for preset '{preset_name}': {e}")
|
||||
self.status_message.emit(f"Unexpected error loading preset '{preset_name}'.", 5000)
|
||||
self.prediction_finished.emit(input_source_identifier)
|
||||
self._is_running = False
|
||||
return
|
||||
|
||||
log.debug(f"DEBUG: Calling classify_files with file_list: {original_input_paths}") # DEBUG LOG
|
||||
# --- Perform Classification ---
|
||||
try:
|
||||
classified_assets = classify_files(original_input_paths, config)
|
||||
except Exception as e:
|
||||
log.exception(f"Error during file classification for source '{input_source_identifier}': {e}")
|
||||
self.status_message.emit(f"Error classifying files: {e}", 5000)
|
||||
self.prediction_finished.emit(input_source_identifier)
|
||||
self._is_running = False
|
||||
return
|
||||
|
||||
if not classified_assets:
|
||||
log.warning(f"Classification yielded no assets for source '{input_source_identifier}'.")
|
||||
self.status_message.emit("No assets identified from files.", 3000)
|
||||
self.rule_hierarchy_ready.emit([]) # Emit empty list
|
||||
self.prediction_finished.emit(input_source_identifier)
|
||||
self._is_running = False
|
||||
return
|
||||
|
||||
# --- Build the Hierarchy ---
|
||||
source_rules_list = []
|
||||
try:
|
||||
# Determine SourceRule level overrides/defaults
|
||||
# Get supplier name from the config property
|
||||
supplier_identifier = config.supplier_name # Use the property
|
||||
if not BACKEND_AVAILABLE:
|
||||
raise RuntimeError("Backend/config modules not available. Cannot run prediction.")
|
||||
|
||||
# Create the single SourceRule for this input source
|
||||
source_rule = SourceRule(
|
||||
input_path=input_source_identifier, # Use the identifier provided
|
||||
supplier_identifier=supplier_identifier, # Set overridable field
|
||||
preset_name=preset_name # Pass the selected preset name
|
||||
)
|
||||
log.debug(f"Created SourceRule for identifier: {input_source_identifier} with supplier: {supplier_identifier}")
|
||||
if not preset_name:
|
||||
log.warning("No preset selected for prediction.")
|
||||
self.status_update.emit("No preset selected.")
|
||||
# Emit empty list for non-critical issues, signal completion
|
||||
self.prediction_ready.emit(input_source_identifier, [])
|
||||
self._is_running = False # Mark as finished
|
||||
return
|
||||
|
||||
asset_rules = []
|
||||
# Get allowed asset types from config's internal core settings
|
||||
asset_type_definitions = config._core_settings.get('ASSET_TYPE_DEFINITIONS', {})
|
||||
log.debug(f"Loaded AssetType Definitions from config: {list(asset_type_definitions.keys())}")
|
||||
source_path = Path(input_source_identifier)
|
||||
if not source_path.exists():
|
||||
log.warning(f"Input source path does not exist: '{input_source_identifier}'. Skipping prediction.")
|
||||
raise FileNotFoundError(f"Input source path not found: {input_source_identifier}")
|
||||
|
||||
for asset_name, files_info in classified_assets.items():
|
||||
if not files_info: continue # Skip empty asset groups
|
||||
# --- Load Configuration ---
|
||||
config = Configuration(preset_name)
|
||||
log.info(f"Successfully loaded configuration for preset '{preset_name}'.")
|
||||
|
||||
# Determine AssetRule level overrides/defaults
|
||||
item_types_in_asset = {f_info['item_type'] for f_info in files_info}
|
||||
predicted_asset_type = "Surface" # Default to "Surface" string
|
||||
material_indicators = {"MAP_COL", "MAP_NRM", "MAP_ROUGH", "MAP_METAL", "MAP_AO", "MAP_DISP", "COL", "NRM", "ROUGH", "METAL", "AO", "DISP"} # Added base types too
|
||||
if any(it in material_indicators for it in item_types_in_asset if it not in ["EXTRA", "FILE_IGNORE"]): # Exclude non-maps
|
||||
predicted_asset_type = "Surface" # Predict as "Surface" string
|
||||
if self._is_cancelled: raise RuntimeError("Prediction cancelled before classification.")
|
||||
|
||||
# Ensure the predicted type is allowed, fallback if necessary
|
||||
if asset_type_definitions and predicted_asset_type not in asset_type_definitions:
|
||||
log.warning(f"Predicted AssetType '{predicted_asset_type}' for asset '{asset_name}' is not in ASSET_TYPE_DEFINITIONS from config. Falling back.")
|
||||
default_type = config.default_asset_category
|
||||
if default_type in asset_type_definitions:
|
||||
predicted_asset_type = default_type
|
||||
elif asset_type_definitions:
|
||||
predicted_asset_type = list(asset_type_definitions.keys())[0]
|
||||
else:
|
||||
pass # Keep the original prediction if definitions are empty
|
||||
# --- Perform Classification ---
|
||||
self.status_update.emit(f"Classifying files for '{source_path.name}'...")
|
||||
try:
|
||||
classified_assets = classify_files(original_input_paths, config)
|
||||
except Exception as e:
|
||||
log.exception(f"Error during file classification for source '{input_source_identifier}': {e}")
|
||||
raise RuntimeError(f"Error classifying files: {e}") from e
|
||||
|
||||
if self._is_cancelled: raise RuntimeError("Prediction cancelled after classification.")
|
||||
|
||||
asset_rule = AssetRule(
|
||||
asset_name=asset_name,
|
||||
asset_type=predicted_asset_type,
|
||||
if not classified_assets:
|
||||
log.warning(f"Classification yielded no assets for source '{input_source_identifier}'.")
|
||||
self.status_update.emit("No assets identified from files.")
|
||||
# Emit empty list, signal completion
|
||||
self.prediction_ready.emit(input_source_identifier, [])
|
||||
self._is_running = False # Mark as finished
|
||||
return
|
||||
|
||||
# --- Build the Hierarchy ---
|
||||
self.status_update.emit(f"Building rule hierarchy for '{source_path.name}'...")
|
||||
try:
|
||||
# (Hierarchy building logic remains the same as before)
|
||||
supplier_identifier = config.supplier_name
|
||||
source_rule = SourceRule(
|
||||
input_path=input_source_identifier,
|
||||
supplier_identifier=supplier_identifier,
|
||||
preset_name=preset_name
|
||||
)
|
||||
log.debug(f"Created AssetRule for asset: {asset_name} with type: {predicted_asset_type}")
|
||||
|
||||
file_rules = []
|
||||
asset_rules = []
|
||||
asset_type_definitions = config._core_settings.get('ASSET_TYPE_DEFINITIONS', {})
|
||||
file_type_definitions = config._core_settings.get('FILE_TYPE_DEFINITIONS', {})
|
||||
log.debug(f"Loaded FileType Definitions (ItemTypes) from config: {list(file_type_definitions.keys())}")
|
||||
|
||||
for file_info in files_info:
|
||||
base_item_type = file_info['item_type']
|
||||
target_asset_name_override = file_info['asset_name']
|
||||
for asset_name, files_info in classified_assets.items():
|
||||
if self._is_cancelled: raise RuntimeError("Prediction cancelled during hierarchy building (assets).")
|
||||
if not files_info: continue
|
||||
|
||||
# Determine the final item_type string (prefix maps, check if allowed)
|
||||
final_item_type = base_item_type
|
||||
if not base_item_type.startswith("MAP_") and base_item_type not in ["FILE_IGNORE", "EXTRA", "MODEL"]:
|
||||
final_item_type = f"MAP_{base_item_type}"
|
||||
item_types_in_asset = {f_info['item_type'] for f_info in files_info}
|
||||
predicted_asset_type = "Surface"
|
||||
material_indicators = {"MAP_COL", "MAP_NRM", "MAP_ROUGH", "MAP_METAL", "MAP_AO", "MAP_DISP", "COL", "NRM", "ROUGH", "METAL", "AO", "DISP"}
|
||||
if any(it in material_indicators for it in item_types_in_asset if it not in ["EXTRA", "FILE_IGNORE"]):
|
||||
predicted_asset_type = "Surface"
|
||||
|
||||
# Check if the final type is allowed
|
||||
if file_type_definitions and final_item_type not in file_type_definitions and base_item_type not in ["FILE_IGNORE", "EXTRA"]:
|
||||
log.warning(f"Predicted ItemType '{base_item_type}' (checked as '{final_item_type}') for file '{file_info['file_path']}' is not in FILE_TYPE_DEFINITIONS. Setting to FILE_IGNORE.")
|
||||
final_item_type = "FILE_IGNORE"
|
||||
if asset_type_definitions and predicted_asset_type not in asset_type_definitions:
|
||||
log.warning(f"Predicted AssetType '{predicted_asset_type}' for asset '{asset_name}' is not in ASSET_TYPE_DEFINITIONS. Falling back.")
|
||||
default_type = config.default_asset_category
|
||||
if default_type in asset_type_definitions: predicted_asset_type = default_type
|
||||
elif asset_type_definitions: predicted_asset_type = list(asset_type_definitions.keys())[0]
|
||||
|
||||
asset_rule = AssetRule(asset_name=asset_name, asset_type=predicted_asset_type)
|
||||
file_rules = []
|
||||
for file_info in files_info:
|
||||
if self._is_cancelled: raise RuntimeError("Prediction cancelled during hierarchy building (files).")
|
||||
|
||||
# Retrieve the standard_type
|
||||
standard_map_type = None
|
||||
file_type_details = file_type_definitions.get(final_item_type)
|
||||
if file_type_details:
|
||||
standard_map_type = file_type_details.get('standard_type')
|
||||
log.debug(f" Found standard_type '{standard_map_type}' for final_item_type '{final_item_type}'")
|
||||
else:
|
||||
file_type_details_alias = file_type_definitions.get(base_item_type)
|
||||
if file_type_details_alias:
|
||||
standard_map_type = file_type_details_alias.get('standard_type')
|
||||
log.debug(f" Found standard_type '{standard_map_type}' via alias lookup for base_item_type '{base_item_type}'")
|
||||
elif base_item_type in file_type_definitions:
|
||||
standard_map_type = base_item_type
|
||||
log.debug(f" Using base_item_type '{base_item_type}' itself as standard_map_type.")
|
||||
base_item_type = file_info['item_type']
|
||||
target_asset_name_override = file_info['asset_name']
|
||||
final_item_type = base_item_type
|
||||
if not base_item_type.startswith("MAP_") and base_item_type not in ["FILE_IGNORE", "EXTRA", "MODEL"]:
|
||||
final_item_type = f"MAP_{base_item_type}"
|
||||
|
||||
if file_type_definitions and final_item_type not in file_type_definitions and base_item_type not in ["FILE_IGNORE", "EXTRA"]:
|
||||
log.warning(f"Predicted ItemType '{base_item_type}' (checked as '{final_item_type}') for file '{file_info['file_path']}' is not in FILE_TYPE_DEFINITIONS. Setting to FILE_IGNORE.")
|
||||
final_item_type = "FILE_IGNORE"
|
||||
|
||||
standard_map_type = None
|
||||
file_type_details = file_type_definitions.get(final_item_type)
|
||||
if file_type_details: standard_map_type = file_type_details.get('standard_type')
|
||||
else:
|
||||
log.debug(f" Could not determine standard_map_type for base '{base_item_type}' / final '{final_item_type}'. Setting to None.")
|
||||
file_type_details_alias = file_type_definitions.get(base_item_type)
|
||||
if file_type_details_alias: standard_map_type = file_type_details_alias.get('standard_type')
|
||||
elif base_item_type in file_type_definitions: standard_map_type = base_item_type
|
||||
|
||||
is_gloss_source_value = file_info.get('is_gloss_source', False)
|
||||
|
||||
output_format_override = None
|
||||
item_type_override = None
|
||||
file_rule = FileRule(
|
||||
file_path=file_info['file_path'],
|
||||
item_type=final_item_type,
|
||||
item_type_override=final_item_type,
|
||||
target_asset_name_override=target_asset_name_override,
|
||||
output_format_override=None,
|
||||
is_gloss_source=is_gloss_source_value if isinstance(is_gloss_source_value, bool) else False,
|
||||
standard_map_type=standard_map_type,
|
||||
resolution_override=None,
|
||||
channel_merge_instructions={},
|
||||
)
|
||||
file_rules.append(file_rule)
|
||||
asset_rule.files = file_rules
|
||||
asset_rules.append(asset_rule)
|
||||
source_rule.assets = asset_rules
|
||||
source_rules_list.append(source_rule)
|
||||
|
||||
log.debug(f" Creating FileRule for: {file_info['file_path']}")
|
||||
log.debug(f" Base Item Type (from classification): {base_item_type}")
|
||||
log.debug(f" Final Item Type (for model): {final_item_type}")
|
||||
log.debug(f" Target Asset Name Override: {target_asset_name_override}")
|
||||
log.debug(f" Determined Standard Map Type: {standard_map_type}")
|
||||
is_gloss_source_value = file_info.get('is_gloss_source', 'MISSING')
|
||||
log.debug(f" Value for 'is_gloss_source' from file_info: {is_gloss_source_value}")
|
||||
except Exception as e:
|
||||
log.exception(f"Error building rule hierarchy for source '{input_source_identifier}': {e}")
|
||||
raise RuntimeError(f"Error building rule hierarchy: {e}") from e
|
||||
|
||||
|
||||
file_rule = FileRule(
|
||||
file_path=file_info['file_path'],
|
||||
item_type=final_item_type,
|
||||
item_type_override=final_item_type,
|
||||
target_asset_name_override=target_asset_name_override,
|
||||
output_format_override=output_format_override,
|
||||
is_gloss_source=is_gloss_source_value if isinstance(is_gloss_source_value, bool) else False,
|
||||
standard_map_type=standard_map_type,
|
||||
resolution_override=None,
|
||||
channel_merge_instructions={},
|
||||
)
|
||||
file_rules.append(file_rule)
|
||||
|
||||
asset_rule.files = file_rules
|
||||
asset_rules.append(asset_rule)
|
||||
|
||||
source_rule.assets = asset_rules
|
||||
log.debug(f"Built SourceRule '{source_rule.input_path}' with {len(asset_rules)} AssetRule(s).")
|
||||
source_rules_list.append(source_rule)
|
||||
# --- Emit Success Signal ---
|
||||
log.info(f"Rule-based prediction finished successfully for '{input_source_identifier}'.")
|
||||
self.prediction_ready.emit(input_source_identifier, source_rules_list) # Use base signal
|
||||
|
||||
except Exception as e:
|
||||
log.exception(f"Error building rule hierarchy for source '{input_source_identifier}': {e}")
|
||||
self.status_message.emit(f"Error building rules: {e}", 5000)
|
||||
self.prediction_finished.emit(input_source_identifier)
|
||||
self._is_running = False
|
||||
return
|
||||
# --- Emit Error Signal ---
|
||||
log.exception(f"Error during rule-based prediction for '{input_source_identifier}': {e}")
|
||||
error_msg = f"Error analyzing '{Path(input_source_identifier).name}': {e}"
|
||||
self.prediction_error.emit(input_source_identifier, error_msg) # Use base signal
|
||||
|
||||
|
||||
# --- Emit Results ---
|
||||
log.info(f"VERIFY: Emitting rule_hierarchy_ready with {len(source_rules_list)} SourceRule(s).")
|
||||
for i, rule in enumerate(source_rules_list):
|
||||
log.debug(f" VERIFY Rule {i}: Input='{rule.input_path}', Assets={len(rule.assets)}")
|
||||
log.info(f"[{time.time():.4f}][T:{thread_id}] Prediction run finished. Emitting hierarchy for '{input_source_identifier}'.")
|
||||
self.rule_hierarchy_ready.emit(source_rules_list)
|
||||
log.info(f"[{time.time():.4f}][T:{thread_id}] Emitted rule_hierarchy_ready signal.")
|
||||
|
||||
self.status_message.emit(f"Analysis complete for '{input_source_identifier}'.", 3000)
|
||||
self.prediction_finished.emit(input_source_identifier)
|
||||
self._is_running = False
|
||||
log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting PredictionHandler.run_prediction.")
|
||||
finally:
|
||||
# --- Cleanup ---
|
||||
self._is_running = False
|
||||
self._current_input_path = None # Clear current task info
|
||||
self._current_file_list = None
|
||||
self._current_preset_name = None
|
||||
log.info(f"Finished rule-based prediction run for: {input_source_identifier}")
|
||||
|
||||
717
gui/preset_editor_widget.py
Normal file
717
gui/preset_editor_widget.py
Normal file
@@ -0,0 +1,717 @@
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from functools import partial
|
||||
|
||||
from PySide6.QtWidgets import (
|
||||
QWidget, QVBoxLayout, QHBoxLayout, QListWidget, QPushButton, QLabel, QTabWidget,
|
||||
QLineEdit, QTextEdit, QSpinBox, QTableWidget, QGroupBox, QFormLayout,
|
||||
QHeaderView, QAbstractItemView, QListWidgetItem, QTableWidgetItem, QMessageBox,
|
||||
QFileDialog, QInputDialog, QSizePolicy
|
||||
)
|
||||
from PySide6.QtCore import Qt, Signal, QObject, Slot
|
||||
from PySide6.QtGui import QAction # Keep QAction if needed for context menus within editor later
|
||||
|
||||
# --- Constants ---
|
||||
# Assuming project root is parent of the directory containing this file
|
||||
script_dir = Path(__file__).parent
|
||||
project_root = script_dir.parent
|
||||
PRESETS_DIR = project_root / "Presets" # Corrected path
|
||||
TEMPLATE_PATH = PRESETS_DIR / "_template.json"
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# --- Preset Editor Widget ---
|
||||
|
||||
class PresetEditorWidget(QWidget):
|
||||
"""
|
||||
Widget dedicated to managing and editing presets.
|
||||
Contains the preset list, editor tabs, and save/load functionality.
|
||||
"""
|
||||
# Signal emitted when presets list changes (saved, deleted, new)
|
||||
presets_changed_signal = Signal()
|
||||
# Signal emitted when the selected preset (or LLM/Placeholder) changes
|
||||
# Emits: mode ("preset", "llm", "placeholder"), preset_name (str or None)
|
||||
preset_selection_changed_signal = Signal(str, str)
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
|
||||
# --- Internal State ---
|
||||
self._last_valid_preset_name = None # Store the name of the last valid preset loaded
|
||||
self.current_editing_preset_path = None
|
||||
self.editor_unsaved_changes = False
|
||||
self._is_loading_editor = False # Flag to prevent signals during load
|
||||
|
||||
# --- UI Setup ---
|
||||
self._init_ui()
|
||||
|
||||
# --- Initial State ---
|
||||
self._clear_editor() # Clear/disable editor fields initially
|
||||
self._set_editor_enabled(False) # Disable editor initially
|
||||
self.populate_presets() # Populate preset list
|
||||
|
||||
# --- Connect Editor Signals ---
|
||||
self._connect_editor_change_signals()
|
||||
|
||||
def _init_ui(self):
|
||||
"""Initializes the UI elements for the preset editor."""
|
||||
editor_layout = QVBoxLayout(self)
|
||||
editor_layout.setContentsMargins(5, 5, 5, 5) # Reduce margins
|
||||
|
||||
# Preset List and Controls
|
||||
list_layout = QVBoxLayout()
|
||||
list_layout.addWidget(QLabel("Presets:"))
|
||||
self.editor_preset_list = QListWidget()
|
||||
self.editor_preset_list.currentItemChanged.connect(self._load_selected_preset_for_editing)
|
||||
list_layout.addWidget(self.editor_preset_list)
|
||||
|
||||
list_button_layout = QHBoxLayout()
|
||||
self.editor_new_button = QPushButton("New")
|
||||
self.editor_delete_button = QPushButton("Delete")
|
||||
self.editor_new_button.clicked.connect(self._new_preset)
|
||||
self.editor_delete_button.clicked.connect(self._delete_selected_preset)
|
||||
list_button_layout.addWidget(self.editor_new_button)
|
||||
list_button_layout.addWidget(self.editor_delete_button)
|
||||
list_layout.addLayout(list_button_layout)
|
||||
editor_layout.addLayout(list_layout, 1) # Allow list to stretch
|
||||
|
||||
# Editor Tabs
|
||||
self.editor_tab_widget = QTabWidget()
|
||||
self.editor_tab_general_naming = QWidget()
|
||||
self.editor_tab_mapping_rules = QWidget()
|
||||
self.editor_tab_widget.addTab(self.editor_tab_general_naming, "General & Naming")
|
||||
self.editor_tab_widget.addTab(self.editor_tab_mapping_rules, "Mapping & Rules")
|
||||
self._create_editor_general_tab()
|
||||
self._create_editor_mapping_tab()
|
||||
editor_layout.addWidget(self.editor_tab_widget, 3) # Allow tabs to stretch more
|
||||
|
||||
# Save Buttons
|
||||
save_button_layout = QHBoxLayout()
|
||||
self.editor_save_button = QPushButton("Save")
|
||||
self.editor_save_as_button = QPushButton("Save As...")
|
||||
self.editor_save_button.setEnabled(False) # Disabled initially
|
||||
self.editor_save_button.clicked.connect(self._save_current_preset)
|
||||
self.editor_save_as_button.clicked.connect(self._save_preset_as)
|
||||
save_button_layout.addStretch()
|
||||
save_button_layout.addWidget(self.editor_save_button)
|
||||
save_button_layout.addWidget(self.editor_save_as_button)
|
||||
editor_layout.addLayout(save_button_layout)
|
||||
|
||||
def _create_editor_general_tab(self):
|
||||
"""Creates the widgets and layout for the 'General & Naming' editor tab."""
|
||||
layout = QVBoxLayout(self.editor_tab_general_naming)
|
||||
form_layout = QFormLayout()
|
||||
form_layout.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
|
||||
|
||||
# Basic Info
|
||||
self.editor_preset_name = QLineEdit()
|
||||
self.editor_supplier_name = QLineEdit()
|
||||
self.editor_notes = QTextEdit()
|
||||
self.editor_notes.setAcceptRichText(False)
|
||||
self.editor_notes.setFixedHeight(60)
|
||||
form_layout.addRow("Preset Name:", self.editor_preset_name)
|
||||
form_layout.addRow("Supplier Name:", self.editor_supplier_name)
|
||||
form_layout.addRow("Notes:", self.editor_notes)
|
||||
layout.addLayout(form_layout)
|
||||
|
||||
# Source Naming Group
|
||||
naming_group = QGroupBox("Source File Naming Rules")
|
||||
naming_layout_outer = QVBoxLayout(naming_group)
|
||||
naming_layout_form = QFormLayout()
|
||||
self.editor_separator = QLineEdit()
|
||||
self.editor_separator.setMaxLength(1)
|
||||
self.editor_spin_base_name_idx = QSpinBox()
|
||||
self.editor_spin_base_name_idx.setMinimum(-1)
|
||||
self.editor_spin_map_type_idx = QSpinBox()
|
||||
self.editor_spin_map_type_idx.setMinimum(-1)
|
||||
naming_layout_form.addRow("Separator:", self.editor_separator)
|
||||
naming_layout_form.addRow("Base Name Index:", self.editor_spin_base_name_idx)
|
||||
naming_layout_form.addRow("Map Type Index:", self.editor_spin_map_type_idx)
|
||||
naming_layout_outer.addLayout(naming_layout_form)
|
||||
# Gloss Keywords List
|
||||
self._setup_list_widget_with_controls(naming_layout_outer, "Glossiness Keywords", "editor_list_gloss_keywords")
|
||||
# Bit Depth Variants Table
|
||||
self._setup_table_widget_with_controls(naming_layout_outer, "16-bit Variant Patterns", "editor_table_bit_depth_variants", ["Map Type", "Pattern"])
|
||||
self.editor_table_bit_depth_variants.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents)
|
||||
self.editor_table_bit_depth_variants.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)
|
||||
layout.addWidget(naming_group)
|
||||
|
||||
# Extra Files Group
|
||||
self._setup_list_widget_with_controls(layout, "Move to 'Extra' Folder Patterns", "editor_list_extra_patterns")
|
||||
|
||||
layout.addStretch(1)
|
||||
|
||||
def _create_editor_mapping_tab(self):
|
||||
"""Creates the widgets and layout for the 'Mapping & Rules' editor tab."""
|
||||
layout = QVBoxLayout(self.editor_tab_mapping_rules)
|
||||
|
||||
# Map Type Mapping Group
|
||||
self._setup_table_widget_with_controls(layout, "Map Type Mapping (Standard Type <- Input Keywords)", "editor_table_map_type_mapping", ["Standard Type", "Input Keywords (comma-sep)"])
|
||||
self.editor_table_map_type_mapping.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents)
|
||||
self.editor_table_map_type_mapping.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)
|
||||
|
||||
# Category Rules Group
|
||||
category_group = QGroupBox("Asset Category Rules")
|
||||
category_layout = QVBoxLayout(category_group)
|
||||
self._setup_list_widget_with_controls(category_layout, "Model File Patterns", "editor_list_model_patterns")
|
||||
self._setup_list_widget_with_controls(category_layout, "Decal Keywords", "editor_list_decal_keywords")
|
||||
layout.addWidget(category_group)
|
||||
|
||||
# Archetype Rules Group
|
||||
self._setup_table_widget_with_controls(layout, "Archetype Rules", "editor_table_archetype_rules", ["Archetype Name", "Match Any (comma-sep)", "Match All (comma-sep)"])
|
||||
self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents)
|
||||
self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)
|
||||
self.editor_table_archetype_rules.horizontalHeader().setSectionResizeMode(2, QHeaderView.ResizeMode.Stretch)
|
||||
|
||||
layout.addStretch(1)
|
||||
|
||||
# --- Helper Functions for UI Setup (Moved into class) ---
|
||||
def _setup_list_widget_with_controls(self, parent_layout, label_text, attribute_name):
|
||||
"""Adds a QListWidget with Add/Remove buttons to a layout."""
|
||||
list_widget = QListWidget()
|
||||
list_widget.setAlternatingRowColors(True)
|
||||
list_widget.setEditTriggers(QAbstractItemView.EditTrigger.DoubleClicked | QAbstractItemView.EditTrigger.SelectedClicked | QAbstractItemView.EditTrigger.EditKeyPressed)
|
||||
setattr(self, attribute_name, list_widget) # Store list widget on the instance
|
||||
|
||||
add_button = QPushButton("+")
|
||||
remove_button = QPushButton("-")
|
||||
add_button.setFixedWidth(30)
|
||||
remove_button.setFixedWidth(30)
|
||||
|
||||
button_layout = QVBoxLayout()
|
||||
button_layout.addWidget(add_button)
|
||||
button_layout.addWidget(remove_button)
|
||||
button_layout.addStretch()
|
||||
|
||||
list_layout = QHBoxLayout()
|
||||
list_layout.addWidget(list_widget)
|
||||
list_layout.addLayout(button_layout)
|
||||
|
||||
group_box = QGroupBox(label_text)
|
||||
group_box_layout = QVBoxLayout(group_box)
|
||||
group_box_layout.addLayout(list_layout)
|
||||
|
||||
parent_layout.addWidget(group_box)
|
||||
|
||||
# Connections
|
||||
add_button.clicked.connect(partial(self._editor_add_list_item, list_widget))
|
||||
remove_button.clicked.connect(partial(self._editor_remove_list_item, list_widget))
|
||||
list_widget.itemChanged.connect(self._mark_editor_unsaved) # Mark unsaved on item edit
|
||||
|
||||
def _setup_table_widget_with_controls(self, parent_layout, label_text, attribute_name, columns):
    """Create a QTableWidget in a labelled group box with row add/remove buttons.

    The table is stored on the instance under *attribute_name*.
    """
    table = QTableWidget()
    table.setColumnCount(len(columns))
    table.setHorizontalHeaderLabels(columns)
    table.setAlternatingRowColors(True)
    setattr(self, attribute_name, table)  # expose on the instance

    btn_add = QPushButton("+ Row")
    btn_remove = QPushButton("- Row")

    # Buttons right-aligned below the table.
    button_row = QHBoxLayout()
    button_row.addStretch()
    button_row.addWidget(btn_add)
    button_row.addWidget(btn_remove)

    box = QGroupBox(label_text)
    box_layout = QVBoxLayout(box)
    box_layout.addWidget(table)
    box_layout.addLayout(button_row)
    parent_layout.addWidget(box)

    # Wire up row actions and unsaved-change tracking.
    btn_add.clicked.connect(partial(self._editor_add_table_row, table))
    btn_remove.clicked.connect(partial(self._editor_remove_table_row, table))
    table.itemChanged.connect(self._mark_editor_unsaved)
||||
# --- Preset Population and Handling ---
|
||||
def populate_presets(self):
    """Rescan the presets directory and rebuild the editor's preset list.

    The list always starts with a non-selectable "--- Select a Preset ---"
    placeholder and an "- LLM Interpretation -" entry, followed by one item
    per ``*.json`` file in PRESETS_DIR (files starting with ``_`` are
    treated as templates and skipped). Finishes by selecting the
    placeholder so no preset auto-loads.

    Fix: removed the dead locals that captured the previous selection text
    but were never read afterwards.
    """
    log.debug("Populating preset list in PresetEditorWidget...")
    self.editor_preset_list.clear()
    log.debug("Preset list cleared.")

    # Placeholder entry: visible but neither selectable nor editable.
    placeholder_item = QListWidgetItem("--- Select a Preset ---")
    placeholder_item.setFlags(placeholder_item.flags() & ~Qt.ItemFlag.ItemIsSelectable & ~Qt.ItemFlag.ItemIsEditable)
    placeholder_item.setData(Qt.ItemDataRole.UserRole, "__PLACEHOLDER__")
    self.editor_preset_list.addItem(placeholder_item)
    log.debug("Added '--- Select a Preset ---' placeholder item.")

    # Special entry that selects LLM-based interpretation instead of a preset.
    llm_item = QListWidgetItem("- LLM Interpretation -")
    llm_item.setData(Qt.ItemDataRole.UserRole, "__LLM__")  # special identifier
    self.editor_preset_list.addItem(llm_item)
    log.debug("Added '- LLM Interpretation -' item.")

    if not PRESETS_DIR.is_dir():
        log.error(f"Error: Presets directory not found at {PRESETS_DIR}")
        # Consider emitting a status signal to MainWindow?
        return

    # Leading-underscore files (e.g. _template.json) are not user presets.
    presets = sorted([f for f in PRESETS_DIR.glob("*.json") if f.is_file() and not f.name.startswith('_')])

    if not presets:
        log.warning("Warning: No presets found in presets directory.")
    else:
        for preset_path in presets:
            item = QListWidgetItem(preset_path.stem)
            item.setData(Qt.ItemDataRole.UserRole, preset_path)  # store full path for later loading
            self.editor_preset_list.addItem(item)
        log.info(f"Loaded {len(presets)} presets into editor list.")

    # Default selection: the placeholder item.
    log.debug("Preset list populated. Selecting '--- Select a Preset ---' item.")
    self.editor_preset_list.setCurrentItem(placeholder_item)
||||
# --- Preset Editor Methods ---
|
||||
|
||||
def _editor_add_list_item(self, list_widget: QListWidget):
    """Prompt the user for a value and append it to *list_widget*.

    Cancelling the dialog or entering an empty string adds nothing and
    leaves the unsaved-changes flag untouched.

    Fix: the dialog title was an f-string with no placeholders; it is now
    a plain string literal (same runtime text).
    """
    text, ok = QInputDialog.getText(self, "Add Item", "Enter value:")
    if ok and text:
        list_widget.addItem(QListWidgetItem(text))
        self._mark_editor_unsaved()
||||
|
||||
def _editor_remove_list_item(self, list_widget: QListWidget):
    """Remove every currently selected item from *list_widget*."""
    selection = list_widget.selectedItems()
    if not selection:
        return
    for entry in selection:
        list_widget.takeItem(list_widget.row(entry))
    self._mark_editor_unsaved()
||||
|
||||
def _editor_add_table_row(self, table_widget: QTableWidget):
    """Append an empty row to *table_widget* and mark the editor dirty."""
    new_row = table_widget.rowCount()
    table_widget.insertRow(new_row)
    # Pre-fill every cell with an empty item so it is immediately editable.
    for column in range(table_widget.columnCount()):
        table_widget.setItem(new_row, column, QTableWidgetItem(""))
    self._mark_editor_unsaved()
||||
|
||||
def _editor_remove_table_row(self, table_widget: QTableWidget):
    """Remove the selected row(s); with no selection, remove the last row."""
    # Deduplicate selected cells into rows; delete bottom-up so earlier
    # removals do not shift the remaining indices.
    rows = sorted({index.row() for index in table_widget.selectedIndexes()}, reverse=True)
    if not rows:
        # Fallback behaviour: with no selection, delete the bottom row if any.
        if table_widget.rowCount() == 0:
            return
        rows = [table_widget.rowCount() - 1]
    for row in rows:
        table_widget.removeRow(row)
    self._mark_editor_unsaved()
||||
|
||||
def _mark_editor_unsaved(self):
    """Flag the editor content as modified and enable the Save button.

    No-op while the editor is being populated programmatically, so bulk
    loads do not count as user edits. Window-title updates are handled by
    MainWindow, not here.
    """
    if self._is_loading_editor:
        return
    self.editor_unsaved_changes = True
    self.editor_save_button.setEnabled(True)
||||
def _connect_editor_change_signals(self):
    """Route change signals from the scalar editor widgets to _mark_editor_unsaved.

    List and table widgets are wired up inside their setup helpers.
    """
    change_signals = (
        self.editor_preset_name.textChanged,
        self.editor_supplier_name.textChanged,
        self.editor_notes.textChanged,
        self.editor_separator.textChanged,
        self.editor_spin_base_name_idx.valueChanged,
        self.editor_spin_map_type_idx.valueChanged,
    )
    for signal in change_signals:
        signal.connect(self._mark_editor_unsaved)
||||
def check_unsaved_changes(self) -> bool:
    """Prompt about unsaved editor changes; return True to cancel the caller.

    Called by MainWindow's closeEvent or before loading another preset.
    Save -> True only if saving fails; Discard -> False; Cancel -> True.
    """
    if not self.editor_unsaved_changes:
        return False  # nothing to lose, proceed

    choice = QMessageBox.question(
        self, "Unsaved Preset Changes",
        "You have unsaved changes in the preset editor. Discard them?",
        QMessageBox.StandardButton.Save | QMessageBox.StandardButton.Discard | QMessageBox.StandardButton.Cancel,
        QMessageBox.StandardButton.Cancel)
    if choice == QMessageBox.StandardButton.Discard:
        return False  # user chose to throw the edits away
    if choice == QMessageBox.StandardButton.Save:
        # Cancel the pending action only when the save itself fails.
        return not self._save_current_preset()
    return True  # cancelled: abort the original action
|
||||
def _set_editor_enabled(self, enabled: bool):
    """Toggle the editor tab and its save buttons in one place.

    Save stays disabled until there are unsaved changes; Save As is
    available whenever the editor itself is enabled.
    """
    self.editor_tab_widget.setEnabled(enabled)
    self.editor_save_button.setEnabled(self.editor_unsaved_changes and enabled)
    self.editor_save_as_button.setEnabled(enabled)
|
||||
def _clear_editor(self):
    """Blank every editor field and reset the loaded/unsaved state."""
    self._is_loading_editor = True  # suppress change signals while clearing
    try:
        # Text inputs.
        for text_widget in (self.editor_preset_name, self.editor_supplier_name,
                            self.editor_notes, self.editor_separator):
            text_widget.clear()
        # Numeric inputs back to their defaults.
        self.editor_spin_base_name_idx.setValue(0)
        self.editor_spin_map_type_idx.setValue(1)
        # Keyword / pattern lists.
        for list_widget in (self.editor_list_gloss_keywords,
                            self.editor_list_extra_patterns,
                            self.editor_list_model_patterns,
                            self.editor_list_decal_keywords):
            list_widget.clear()
        # Rule tables.
        for table in (self.editor_table_bit_depth_variants,
                      self.editor_table_map_type_mapping,
                      self.editor_table_archetype_rules):
            table.setRowCount(0)
        # State reset: nothing loaded, nothing dirty.
        self.current_editing_preset_path = None
        self.editor_unsaved_changes = False
        self.editor_save_button.setEnabled(False)
        # Window title reset is handled by MainWindow.
        self._set_editor_enabled(False)
    finally:
        self._is_loading_editor = False
|
||||
def _populate_editor_from_data(self, preset_data: dict):
    """Helper method to populate editor UI widgets from a preset data dictionary."""
    # Suppress _mark_editor_unsaved while values are set programmatically.
    self._is_loading_editor = True
    try:
        # --- Top-level metadata ---
        self.editor_preset_name.setText(preset_data.get("preset_name", ""))
        self.editor_supplier_name.setText(preset_data.get("supplier_name", ""))
        self.editor_notes.setText(preset_data.get("notes", ""))
        # --- Source naming scheme ---
        naming_data = preset_data.get("source_naming", {})
        self.editor_separator.setText(naming_data.get("separator", "_"))
        indices = naming_data.get("part_indices", {})
        self.editor_spin_base_name_idx.setValue(indices.get("base_name", 0))
        self.editor_spin_map_type_idx.setValue(indices.get("map_type", 1))
        self.editor_list_gloss_keywords.clear()
        self.editor_list_gloss_keywords.addItems(naming_data.get("glossiness_keywords", []))
        # Bit-depth variants: {map_type: pattern} mapping -> two-column table.
        self.editor_table_bit_depth_variants.setRowCount(0)
        bit_depth_vars = naming_data.get("bit_depth_variants", {})
        for i, (map_type, pattern) in enumerate(bit_depth_vars.items()):
            self.editor_table_bit_depth_variants.insertRow(i)
            self.editor_table_bit_depth_variants.setItem(i, 0, QTableWidgetItem(map_type))
            self.editor_table_bit_depth_variants.setItem(i, 1, QTableWidgetItem(pattern))
        self.editor_list_extra_patterns.clear()
        self.editor_list_extra_patterns.addItems(preset_data.get("move_to_extra_patterns", []))
        # Map type mapping: list of {"target_type": ..., "keywords": [...]} dicts.
        self.editor_table_map_type_mapping.setRowCount(0)
        map_mappings = preset_data.get("map_type_mapping", [])
        for i, mapping_dict in enumerate(map_mappings):
            if isinstance(mapping_dict, dict) and "target_type" in mapping_dict and "keywords" in mapping_dict:
                std_type = mapping_dict["target_type"]
                keywords = mapping_dict["keywords"]
                self.editor_table_map_type_mapping.insertRow(i)
                self.editor_table_map_type_mapping.setItem(i, 0, QTableWidgetItem(std_type))
                # Non-string keywords are silently dropped by this filter.
                keywords_str = [str(k) for k in keywords if isinstance(k, str)]
                self.editor_table_map_type_mapping.setItem(i, 1, QTableWidgetItem(", ".join(keywords_str)))
            else:
                log.warning(f"Skipping invalid map_type_mapping item during editor population: {mapping_dict}")
        # --- Asset category rules ---
        category_rules = preset_data.get("asset_category_rules", {})
        self.editor_list_model_patterns.clear()
        self.editor_list_model_patterns.addItems(category_rules.get("model_patterns", []))
        self.editor_list_decal_keywords.clear()
        self.editor_list_decal_keywords.addItems(category_rules.get("decal_keywords", []))
        # Archetype rules population (assuming table exists)
        self.editor_table_archetype_rules.setRowCount(0)
        arch_rules_data = preset_data.get("archetype_rules", [])
        for i, rule_entry in enumerate(arch_rules_data):
            # Handle both list and dict format for backward compatibility? Assuming list for now.
            # NOTE(review): when an entry is skipped, the enumerate index still advances,
            # so insertRow(i) may be called with i past rowCount — verify Qt behaviour.
            if isinstance(rule_entry, (list, tuple)) and len(rule_entry) == 2:
                name, conditions = rule_entry
                if isinstance(conditions, dict):
                    match_any = conditions.get("match_any", [])
                    match_all = conditions.get("match_all", [])
                    self.editor_table_archetype_rules.insertRow(i)
                    self.editor_table_archetype_rules.setItem(i, 0, QTableWidgetItem(str(name)))
                    self.editor_table_archetype_rules.setItem(i, 1, QTableWidgetItem(", ".join(map(str, match_any))))
                    self.editor_table_archetype_rules.setItem(i, 2, QTableWidgetItem(", ".join(map(str, match_all))))
                else:
                    log.warning(f"Skipping invalid archetype rule condition format: {conditions}")
            else:
                log.warning(f"Skipping invalid archetype rule format: {rule_entry}")

    finally:
        # Re-enable change tracking regardless of errors above.
        self._is_loading_editor = False
|
||||
def _load_preset_for_editing(self, file_path: Path):
    """Read a preset JSON file into the editor widgets.

    A missing/invalid path clears the editor; JSON or I/O errors show a
    dialog and also clear the editor. A freshly loaded preset counts as
    saved (no unsaved changes).
    """
    if not file_path or not file_path.is_file():
        self._clear_editor()
        return
    log.info(f"Loading preset into editor: {file_path.name}")
    try:
        with open(file_path, 'r', encoding='utf-8') as fh:
            preset_data = json.load(fh)
        self._populate_editor_from_data(preset_data)
        self._set_editor_enabled(True)
        self.current_editing_preset_path = file_path
        # Freshly loaded content is clean.
        self.editor_unsaved_changes = False
        self.editor_save_button.setEnabled(False)
        log.info(f"Preset '{file_path.name}' loaded into editor.")
    except json.JSONDecodeError as json_err:
        log.error(f"Invalid JSON in {file_path.name}: {json_err}")
        QMessageBox.warning(self, "Load Error", f"Failed to load preset '{file_path.name}'.\nInvalid JSON structure:\n{json_err}")
        self._clear_editor()
    except Exception as e:
        log.exception(f"Error loading preset file {file_path}: {e}")
        QMessageBox.critical(self, "Error", f"Could not load preset file:\n{file_path}\n\nError: {e}")
        self._clear_editor()
|
||||
@Slot(QListWidgetItem, QListWidgetItem)
def _load_selected_preset_for_editing(self, current_item: QListWidgetItem, previous_item: QListWidgetItem):
    """Loads the preset currently selected in the editor list and emits selection change signal."""
    log.debug(f"PresetEditor: currentItemChanged signal triggered. current: {current_item.text() if current_item else 'None'}")

    # Defaults emitted when nothing usable ends up selected.
    mode = "placeholder"
    preset_name = None

    # Check for unsaved changes before proceeding
    if self.check_unsaved_changes():
        # If user cancels, revert selection
        if previous_item:
            log.debug("Unsaved changes check cancelled. Reverting selection.")
            # Block signals so restoring the old selection does not re-enter this slot.
            self.editor_preset_list.blockSignals(True)
            self.editor_preset_list.setCurrentItem(previous_item)
            self.editor_preset_list.blockSignals(False)
        return # Stop processing

    # Determine mode and preset name based on selection.
    # UserRole data: "__PLACEHOLDER__", "__LLM__", or a Path to a preset file.
    if current_item:
        item_data = current_item.data(Qt.ItemDataRole.UserRole)
        if item_data == "__PLACEHOLDER__":
            log.debug("Placeholder item selected.")
            self._clear_editor()
            self._set_editor_enabled(False)
            mode = "placeholder"
            self._last_valid_preset_name = None # Clear last valid name
        elif item_data == "__LLM__":
            log.debug("LLM Interpretation item selected.")
            self._clear_editor()
            self._set_editor_enabled(False)
            mode = "llm"
            # Keep _last_valid_preset_name as it was
        elif isinstance(item_data, Path):
            log.debug(f"Loading preset for editing: {current_item.text()}")
            preset_path = item_data
            self._load_preset_for_editing(preset_path)
            self._last_valid_preset_name = preset_path.stem # Store the name
            mode = "preset"
            preset_name = self._last_valid_preset_name
        else:
            log.error(f"Invalid data type for preset path: {type(item_data)}. Clearing editor.")
            self._clear_editor()
            self._set_editor_enabled(False)
            mode = "placeholder" # Treat as placeholder on error
            self._last_valid_preset_name = None
    else:
        log.debug("No preset selected. Clearing editor.")
        self._clear_editor()
        self._set_editor_enabled(False)
        mode = "placeholder"
        self._last_valid_preset_name = None

    # Emit the signal regardless of what was selected
    log.debug(f"Emitting preset_selection_changed_signal: mode='{mode}', preset_name='{preset_name}'")
    self.preset_selection_changed_signal.emit(mode, preset_name)
|
||||
def _gather_editor_data(self) -> dict:
    """Gathers data from all editor UI widgets and returns a dictionary."""
    preset_data = {}
    # --- Top-level metadata ---
    preset_data["preset_name"] = self.editor_preset_name.text().strip()
    preset_data["supplier_name"] = self.editor_supplier_name.text().strip()
    preset_data["notes"] = self.editor_notes.toPlainText().strip()
    # --- Source naming section ---
    naming_data = {}
    naming_data["separator"] = self.editor_separator.text()
    naming_data["part_indices"] = { "base_name": self.editor_spin_base_name_idx.value(), "map_type": self.editor_spin_map_type_idx.value() }
    naming_data["glossiness_keywords"] = [self.editor_list_gloss_keywords.item(i).text() for i in range(self.editor_list_gloss_keywords.count())]
    # Bit-depth table rows with a missing cell in either column are skipped silently.
    naming_data["bit_depth_variants"] = {self.editor_table_bit_depth_variants.item(r, 0).text(): self.editor_table_bit_depth_variants.item(r, 1).text()
                                         for r in range(self.editor_table_bit_depth_variants.rowCount()) if self.editor_table_bit_depth_variants.item(r, 0) and self.editor_table_bit_depth_variants.item(r, 1)}
    preset_data["source_naming"] = naming_data
    preset_data["move_to_extra_patterns"] = [self.editor_list_extra_patterns.item(i).text() for i in range(self.editor_list_extra_patterns.count())]
    # --- Map type mapping table -> list of {"target_type", "keywords"} dicts ---
    map_mappings = []
    for r in range(self.editor_table_map_type_mapping.rowCount()):
        type_item = self.editor_table_map_type_mapping.item(r, 0)
        keywords_item = self.editor_table_map_type_mapping.item(r, 1)
        if type_item and type_item.text() and keywords_item and keywords_item.text():
            target_type = type_item.text().strip()
            # Keywords are comma-separated; blanks are dropped.
            keywords = [k.strip() for k in keywords_item.text().split(',') if k.strip()]
            if target_type and keywords:
                map_mappings.append({"target_type": target_type, "keywords": keywords})
            else: log.warning(f"Skipping row {r} in map type mapping table due to empty target type or keywords.")
        else: log.warning(f"Skipping row {r} in map type mapping table due to missing items.")
    preset_data["map_type_mapping"] = map_mappings
    # --- Asset category rules ---
    category_rules = {}
    category_rules["model_patterns"] = [self.editor_list_model_patterns.item(i).text() for i in range(self.editor_list_model_patterns.count())]
    category_rules["decal_keywords"] = [self.editor_list_decal_keywords.item(i).text() for i in range(self.editor_list_decal_keywords.count())]
    preset_data["asset_category_rules"] = category_rules
    # --- Archetype rules table -> [name, {"match_any": [...], "match_all": [...]}] pairs ---
    arch_rules = []
    for r in range(self.editor_table_archetype_rules.rowCount()):
        name_item = self.editor_table_archetype_rules.item(r, 0)
        any_item = self.editor_table_archetype_rules.item(r, 1)
        all_item = self.editor_table_archetype_rules.item(r, 2)
        if name_item and name_item.text() and any_item and all_item: # Check name has text
            match_any = [k.strip() for k in any_item.text().split(',') if k.strip()]
            match_all = [k.strip() for k in all_item.text().split(',') if k.strip()]
            # Only add if name is present and at least one condition list is non-empty? Or allow empty conditions?
            # Let's allow empty conditions for now.
            arch_rules.append([name_item.text().strip(), {"match_any": match_any, "match_all": match_all}])
        else:
            log.warning(f"Skipping row {r} in archetype rules table due to missing items or empty name.")
    preset_data["archetype_rules"] = arch_rules
    return preset_data
||||
|
||||
def _save_current_preset(self) -> bool:
    """Write the editor contents to the currently loaded preset file.

    Falls back to Save As when no file is loaded yet. Returns True on
    success, False when validation fails or the write raises.
    """
    if not self.current_editing_preset_path:
        return self._save_preset_as()
    log.info(f"Saving preset: {self.current_editing_preset_path.name}")
    try:
        preset_data = self._gather_editor_data()
        # Both identifying fields are mandatory.
        if not preset_data.get("preset_name"):
            QMessageBox.warning(self, "Save Error", "Preset Name cannot be empty.")
            return False
        if not preset_data.get("supplier_name"):
            QMessageBox.warning(self, "Save Error", "Supplier Name cannot be empty.")
            return False
        content_to_save = json.dumps(preset_data, indent=4, ensure_ascii=False)
        with open(self.current_editing_preset_path, 'w', encoding='utf-8') as fh:
            fh.write(content_to_save)
        self.editor_unsaved_changes = False
        self.editor_save_button.setEnabled(False)
        self.presets_changed_signal.emit()  # notify listeners that presets changed
        log.info("Preset saved successfully.")
        # Refresh the editor's own list and re-select the saved preset.
        self.populate_presets()
        matches = self.editor_preset_list.findItems(self.current_editing_preset_path.stem, Qt.MatchFlag.MatchExactly)
        if matches:
            self.editor_preset_list.setCurrentItem(matches[0])
        return True
    except Exception as e:
        log.exception(f"Error saving preset file {self.current_editing_preset_path}: {e}")
        QMessageBox.critical(self, "Save Error", f"Could not save preset file:\n{self.current_editing_preset_path}\n\nError: {e}")
        return False
||||
|
||||
def _save_preset_as(self) -> bool:
    """Write the editor contents to a user-chosen JSON file.

    Enforces a .json suffix, confirms before overwriting a *different*
    existing preset, and makes the new file the current one on success.
    Returns True on success, False on validation failure or cancellation.
    """
    log.debug("Save As action triggered.")
    try:
        preset_data = self._gather_editor_data()
        new_preset_name = preset_data.get("preset_name")
        # Both identifying fields are mandatory.
        if not new_preset_name:
            QMessageBox.warning(self, "Save As Error", "Preset Name cannot be empty.")
            return False
        if not preset_data.get("supplier_name"):
            QMessageBox.warning(self, "Save As Error", "Supplier Name cannot be empty.")
            return False
        content_to_save = json.dumps(preset_data, indent=4, ensure_ascii=False)
        default_path = PRESETS_DIR / f"{new_preset_name}.json"
        file_path_str, _ = QFileDialog.getSaveFileName(self, "Save Preset As", str(default_path), "JSON Files (*.json);;All Files (*)")
        if not file_path_str:
            log.debug("Save As cancelled by user.")
            return False
        save_path = Path(file_path_str)
        if save_path.suffix.lower() != ".json":
            save_path = save_path.with_suffix(".json")
        # Overwriting a different file than the one being edited needs confirmation.
        if save_path.exists() and save_path != self.current_editing_preset_path:
            reply = QMessageBox.warning(self, "Confirm Overwrite", f"Preset '{save_path.name}' already exists. Overwrite?", QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.No)
            if reply == QMessageBox.StandardButton.No:
                log.debug("Save As overwrite cancelled.")
                return False
        log.info(f"Saving preset as: {save_path.name}")
        with open(save_path, 'w', encoding='utf-8') as fh:
            fh.write(content_to_save)
        self.current_editing_preset_path = save_path  # new file becomes current
        self.editor_unsaved_changes = False
        self.editor_save_button.setEnabled(False)
        self.presets_changed_signal.emit()  # notify listeners
        log.info("Preset saved successfully (Save As).")
        # Refresh the list and select the newly saved preset.
        self.populate_presets()
        matches = self.editor_preset_list.findItems(save_path.stem, Qt.MatchFlag.MatchExactly)
        if matches:
            self.editor_preset_list.setCurrentItem(matches[0])
        return True
    except Exception as e:
        log.exception(f"Error saving preset file (Save As): {e}")
        QMessageBox.critical(self, "Save Error", f"Could not save preset file.\n\nError: {e}")
        return False
||||
|
||||
def _new_preset(self):
    """Clears the editor and loads data from _template.json."""
    log.debug("New Preset action triggered.")
    if self.check_unsaved_changes(): return # Check unsaved changes first
    self._clear_editor()
    if TEMPLATE_PATH.is_file():
        log.info("Loading new preset from _template.json")
        try:
            with open(TEMPLATE_PATH, 'r', encoding='utf-8') as f: template_data = json.load(f)
            self._populate_editor_from_data(template_data)
            # Override specific fields for a new preset
            self.editor_preset_name.setText("NewPreset")
        except Exception as e:
            log.exception(f"Error loading template preset file {TEMPLATE_PATH}: {e}")
            QMessageBox.critical(self, "Error", f"Could not load template preset file:\n{TEMPLATE_PATH}\n\nError: {e}")
            self._clear_editor()
            # NOTE(review): original indentation was lost in this paste; this default
            # appears to belong to the template-load failure path — confirm placement.
            self.editor_supplier_name.setText("MySupplier") # Set a default supplier name
    else:
        log.warning("Presets/_template.json not found. Creating empty preset.")
        self.editor_preset_name.setText("NewPreset")
        self.editor_supplier_name.setText("MySupplier") # Set a default supplier name
    # New (unsaved) preset: editor usable, Save enabled, changes pending.
    self._set_editor_enabled(True)
    self.editor_unsaved_changes = True
    self.editor_save_button.setEnabled(True)
    # Select the placeholder item to avoid auto-loading the "NewPreset"
    placeholder_item = self.editor_preset_list.findItems("--- Select a Preset ---", Qt.MatchFlag.MatchExactly)
    if placeholder_item:
        self.editor_preset_list.setCurrentItem(placeholder_item[0])
    # Emit selection change for the new state (effectively placeholder)
    # NOTE(review): whether this emit was inside the `if placeholder_item:` branch is
    # ambiguous in the flattened source — confirm against version history.
    self.preset_selection_changed_signal.emit("placeholder", None)
||||
def _delete_selected_preset(self):
    """Delete the preset file selected in the editor list, after confirmation."""
    current_item = self.editor_preset_list.currentItem()
    if not current_item:
        QMessageBox.information(self, "Delete Preset", "Please select a preset from the list to delete.")
        return

    item_data = current_item.data(Qt.ItemDataRole.UserRole)
    # Only real preset entries carry a Path; placeholder/LLM rows carry marker strings.
    if not isinstance(item_data, Path):
        QMessageBox.information(self, "Delete Preset", "Cannot delete placeholder or LLM option.")
        return

    preset_path = item_data
    preset_name = preset_path.stem
    reply = QMessageBox.warning(self, "Confirm Delete", f"Are you sure you want to permanently delete the preset '{preset_name}'?", QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, QMessageBox.StandardButton.No)
    if reply != QMessageBox.StandardButton.Yes:
        return
    log.info(f"Deleting preset: {preset_path.name}")
    try:
        preset_path.unlink()
        log.info("Preset deleted successfully.")
        # If the deleted preset was open in the editor, blank the editor too.
        if self.current_editing_preset_path == preset_path:
            self._clear_editor()
        self.presets_changed_signal.emit()  # notify listeners
        self.populate_presets()  # refresh the list
    except Exception as e:
        log.exception(f"Error deleting preset file {preset_path}: {e}")
        QMessageBox.critical(self, "Delete Error", f"Could not delete preset file:\n{preset_path}\n\nError: {e}")
|
||||
# --- Public Access Methods for MainWindow ---
|
||||
|
||||
def get_selected_preset_mode(self) -> tuple[str, str | None]:
    """Report the list's current selection as a (mode, preset_name) pair.

    mode is "preset" (with the preset's stem as name), "llm", or
    "placeholder"; the name is None except in "preset" mode.
    """
    current_item = self.editor_preset_list.currentItem()
    if current_item is None:
        return "placeholder", None
    item_data = current_item.data(Qt.ItemDataRole.UserRole)
    # A Path means a real preset entry; marker strings mean special rows.
    if isinstance(item_data, Path):
        return "preset", item_data.stem
    if item_data == "__LLM__":
        return "llm", None
    # "__PLACEHOLDER__" or anything unrecognised falls back to placeholder.
    return "placeholder", None
||||
|
||||
def get_last_valid_preset_name(self) -> str | None:
|
||||
"""
|
||||
Returns the name (stem) of the last valid preset that was loaded.
|
||||
Used by delegates to populate dropdowns based on the original context.
|
||||
"""
|
||||
return self._last_valid_preset_name
|
||||
|
||||
# --- Slots for MainWindow Interaction ---
|
||||
@@ -1,372 +0,0 @@
|
||||
# gui/processing_handler.py
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
import time # For potential delays if needed
|
||||
|
||||
import subprocess # <<< ADDED IMPORT
|
||||
import shutil # <<< ADDED IMPORT
|
||||
from typing import Optional # <<< ADDED IMPORT
|
||||
from rule_structure import SourceRule # Import SourceRule
|
||||
|
||||
# --- PySide6 Imports ---
|
||||
# Inherit from QObject to support signals/slots for thread communication
|
||||
from PySide6.QtCore import QObject, Signal
|
||||
|
||||
# --- Backend Imports ---
|
||||
# Need to import the worker function and potentially config/processor if needed directly
|
||||
# Adjust path to ensure modules can be found relative to this file's location
|
||||
import sys
|
||||
# Make the project root importable so the backend modules below resolve
# when this file lives in the gui/ package.
script_dir = Path(__file__).parent
project_root = script_dir.parent
if str(project_root) not in sys.path:
    sys.path.insert(0, str(project_root))

# Guarded backend import: if anything is missing the GUI keeps running with
# placeholders and BACKEND_AVAILABLE=False instead of crashing at import time.
try:
    # Import the worker function from main.py
    from main import process_single_asset_wrapper
    # Import exceptions if needed for type hinting or specific handling
    from configuration import ConfigurationError, load_base_config # Import ConfigurationError and load_base_config
    from asset_processor import AssetProcessingError
    # Removed: import config as core_config # <<< ADDED IMPORT
    BACKEND_AVAILABLE = True
except ImportError as e:
    print(f"ERROR (ProcessingHandler): Failed to import backend modules/worker: {e}")
    # Define placeholders if imports fail, so the GUI doesn't crash immediately
    process_single_asset_wrapper = None
    ConfigurationError = Exception
    load_base_config = None # Placeholder
    AssetProcessingError = Exception
    BACKEND_AVAILABLE = False

log = logging.getLogger(__name__)
# Basic config if logger hasn't been set up elsewhere
if not log.hasHandlers():
    logging.basicConfig(level=logging.INFO, format='%(levelname)s (Handler): %(message)s')
|
||||
|
||||
|
||||
class ProcessingHandler(QObject):
|
||||
"""
|
||||
Handles the execution of the asset processing pipeline in a way that
|
||||
can be run in a separate thread and communicate progress via signals.
|
||||
"""
|
||||
# --- Signals ---
|
||||
# Emitted for overall progress bar update
|
||||
progress_updated = Signal(int, int) # current_count, total_count
|
||||
# Emitted for updating status of individual files in the list
|
||||
file_status_updated = Signal(str, str, str) # input_path_str, status ("processing", "processed", "skipped", "failed"), message
|
||||
# Emitted when the entire batch processing is finished
|
||||
processing_finished = Signal(int, int, int) # processed_count, skipped_count, failed_count
|
||||
# Emitted for general status messages to the status bar
|
||||
status_message = Signal(str, int) # message, timeout_ms
|
||||
|
||||
def __init__(self, parent=None):
    """Initialise the handler in an idle state with no pending work."""
    super().__init__(parent)
    # presumably holds the active ProcessPoolExecutor during a run — not
    # assigned in the code visible here; TODO confirm.
    self._executor = None
    self._futures = {} # Store future->input_path mapping
    self._is_running = False # guards against overlapping processing runs
    self._cancel_requested = False # set externally to request early stop
||||
|
||||
@property
def is_running(self):
    """True while a processing batch is currently in flight."""
    return self._is_running
|
||||
|
||||
# Removed _predict_single_asset method
|
||||
|
||||
@Slot(str, list, str, str, bool, int,
      bool, str, str, bool, SourceRule)
def run_processing(self, input_source_identifier: str, original_input_paths: list[str], preset_name: str, output_dir_str: str, overwrite: bool, num_workers: int,
                   run_blender: bool, nodegroup_blend_path: str, materials_blend_path: str, verbose: bool, rules: SourceRule):
    """
    Runs the full asset-processing batch, then optionally the Blender scripts.

    Intended to be invoked after the handler has been moved to a worker
    thread. Work is fanned out to a ProcessPoolExecutor; per-file status,
    overall progress and a final summary are reported via this handler's
    signals.

    Args:
        input_source_identifier: Identifier of the input source (kept for
            slot-signature compatibility; not used directly here).
        original_input_paths: Input paths to process; defines the total count.
        preset_name: Processing preset forwarded to each worker.
        output_dir_str: Output root directory for processed assets.
        overwrite: Whether workers may overwrite existing outputs.
        num_workers: Size of the process pool.
        run_blender: When True, run the node-group/materials Blender scripts
            after processing (skipped when the batch was cancelled).
        nodegroup_blend_path: .blend file for the node-group script.
        materials_blend_path: .blend file for the materials script.
        verbose: Verbose flag forwarded to each worker.
        rules: The SourceRule tree forwarded to each worker.
    """
    if self._is_running:
        log.warning("Processing is already running.")
        self.status_message.emit("Processing already in progress.", 3000)
        return

    if not BACKEND_AVAILABLE or not process_single_asset_wrapper:
        # Without the backend, every input counts as failed.
        log.error("Backend modules or worker function not available. Cannot start processing.")
        self.status_message.emit("Error: Backend components missing. Cannot process.", 5000)
        self.processing_finished.emit(0, 0, len(original_input_paths))
        return

    self._is_running = True
    self._cancel_requested = False
    self._futures = {}
    total_files = len(original_input_paths)
    processed_count = 0
    skipped_count = 0
    failed_count = 0

    log.info(f"Starting processing run: {total_files} assets, Preset='{preset_name}', Workers={num_workers}, Overwrite={overwrite}")
    self.status_message.emit(f"Starting processing for {total_files} items...", 0)  # Persistent message

    try:
        # 'with' guarantees pool cleanup even if result collection raises.
        with ProcessPoolExecutor(max_workers=num_workers) as executor:
            self._executor = executor  # Stored so request_cancel() can reach it.
            self._submit_tasks(original_input_paths, preset_name, output_dir_str, overwrite, verbose, rules)
            if self._cancel_requested:
                log.info("Processing cancelled during task submission.")
                # Inputs that were never submitted count as failed.
                failed_count = total_files - len(self._futures)
            processed_count, skipped_count, result_failures = self._collect_results(total_files)
            failed_count += result_failures
    except Exception as pool_exc:
        log.exception(f"An error occurred with the process pool: {pool_exc}")
        self.status_message.emit(f"Error during processing: {pool_exc}", 5000)
        # Everything not explicitly processed/skipped counts as failed.
        failed_count = total_files - processed_count - skipped_count
    finally:
        if run_blender and not self._cancel_requested:
            self._run_post_blender_scripts(nodegroup_blend_path, materials_blend_path, output_dir_str)
        elif self._cancel_requested:
            log.info("Processing was cancelled. Skipping Blender script execution.")

        final_message = f"Finished. Processed: {processed_count}, Skipped: {skipped_count}, Failed: {failed_count}"
        log.info(final_message)
        self.status_message.emit(final_message, 5000)  # Show final summary
        self.processing_finished.emit(processed_count, skipped_count, failed_count)
        self._is_running = False
        self._executor = None
        self._futures = {}

def _submit_tasks(self, input_paths, preset_name, output_dir_str, overwrite, verbose, rules):
    """Submit one worker task per input path, stopping early on cancel."""
    for input_path in input_paths:
        if self._cancel_requested:
            break
        log.debug(f"Submitting task for: {input_path}")
        self._log_rules_snapshot(input_path, rules)
        future = self._executor.submit(process_single_asset_wrapper, input_path, preset_name,
                                       output_dir_str, overwrite, verbose=verbose, rules=rules)
        self._futures[future] = input_path  # Map the future back to its input.
        self.file_status_updated.emit(input_path, "processing", "")

def _log_rules_snapshot(self, input_path, rules):
    """Debug-log the FileRule overrides about to be sent to a worker."""
    log.debug(f"ProcessingHandler: Inspecting rules for input '{input_path}' before submitting to worker:")
    if not rules:
        log.debug("  Rules object is None.")
        return
    for asset_rule in rules.assets:
        log.debug(f"  Asset: {asset_rule.asset_name}")
        for file_rule in asset_rule.files:
            log.debug(f"    File: {Path(file_rule.file_path).name}, ItemType: {file_rule.item_type}, Override: {file_rule.item_type_override}, StandardMap: {getattr(file_rule, 'standard_map_type', 'N/A')}")

def _collect_results(self, total_files):
    """Drain completed futures and return (processed, skipped, failed) counts.

    On cancellation, futures whose results were not consumed are counted as
    failed and collection stops early.
    """
    processed = skipped = failed = 0
    completed = 0
    for future in as_completed(self._futures):
        completed += 1
        input_path = self._futures[future]
        asset_name = Path(input_path).name
        status = "failed"               # Pessimistic defaults.
        error_message = "Unknown error"

        if self._cancel_requested:
            # Skip future.result() after cancel; it may raise CancelledError.
            error_message = "Cancelled"
            failed += 1
        else:
            try:
                # Worker returns (input_path_str, status_string, error_message_or_None).
                _, status, error_message = future.result()
                error_message = error_message or ""
                if status == "processed":
                    processed += 1
                elif status == "skipped":
                    skipped += 1
                elif status == "failed":
                    failed += 1
                else:
                    log.warning(f"Unknown status '{status}' received for {asset_name}. Counting as failed.")
                    failed += 1
                    error_message = f"Unknown status: {status}"
            except Exception as e:
                # A hard worker-process crash surfaces here.
                log.exception(f"Critical worker failure for {asset_name}: {e}")
                failed += 1
                status = "failed"
                error_message = f"Worker process crashed: {e}"

        self.progress_updated.emit(completed, total_files)
        self.file_status_updated.emit(input_path, status, error_message)

        if self._cancel_requested:
            log.info("Cancellation detected after processing a result.")
            # BUGFIX: count only *submitted* futures not yet drained. The
            # original used total_files here, double-counting inputs that
            # were never submitted (the caller already added those).
            failed += len(self._futures) - completed
            break
    return processed, skipped, failed

def _run_post_blender_scripts(self, nodegroup_blend_path, materials_blend_path, output_dir_str):
    """Run the node-group and materials Blender scripts after processing."""
    log.info("Asset processing complete. Checking for Blender script execution.")
    self.status_message.emit("Asset processing complete. Starting Blender scripts...", 0)
    blender_exe = self._find_blender_executable()
    if not blender_exe:
        log.warning("Blender executable not found. Skipping Blender script execution.")
        self.status_message.emit("Warning: Blender executable not found. Skipping scripts.", 5000)
        return

    script_dir = Path(__file__).parent.parent / "blenderscripts"  # project root / blenderscripts
    nodegroup_script_path = script_dir / "create_nodegroups.py"
    materials_script_path = script_dir / "create_materials.py"
    asset_output_root = output_dir_str  # Scripts consume the same output dir.

    # --- Nodegroup script ---
    if nodegroup_blend_path and Path(nodegroup_blend_path).is_file():
        if nodegroup_script_path.is_file():
            log.info("-" * 20 + " Running Nodegroup Script " + "-" * 20)
            self.status_message.emit(f"Running Blender nodegroup script on {Path(nodegroup_blend_path).name}...", 0)
            success_ng = self._run_blender_script_subprocess(
                blender_exe_path=blender_exe,
                blend_file_path=nodegroup_blend_path,
                python_script_path=str(nodegroup_script_path),
                asset_root_dir=asset_output_root
            )
            if success_ng:
                log.info("Blender nodegroup script finished successfully.")
                self.status_message.emit("Blender nodegroup script finished.", 3000)
            else:
                log.error("Blender node group script execution failed.")
                self.status_message.emit("Blender nodegroup script failed.", 5000)
        else:
            log.error(f"Node group script not found: {nodegroup_script_path}")
            self.status_message.emit("Error: Nodegroup script not found.", 5000)
    elif nodegroup_blend_path:  # Path was provided but is not a file.
        log.warning(f"Nodegroup blend path provided but invalid: {nodegroup_blend_path}")
        self.status_message.emit("Warning: Invalid Nodegroup .blend path.", 5000)

    # --- Materials script (receives the nodegroup .blend as an extra arg) ---
    if materials_blend_path and Path(materials_blend_path).is_file():
        if materials_script_path.is_file():
            log.info("-" * 20 + " Running Materials Script " + "-" * 20)
            self.status_message.emit(f"Running Blender materials script on {Path(materials_blend_path).name}...", 0)
            success_mat = self._run_blender_script_subprocess(
                blender_exe_path=blender_exe,
                blend_file_path=materials_blend_path,
                python_script_path=str(materials_script_path),
                asset_root_dir=asset_output_root,
                nodegroup_blend_file_path_arg=nodegroup_blend_path
            )
            if success_mat:
                log.info("Blender material script finished successfully.")
                self.status_message.emit("Blender material script finished.", 3000)
            else:
                log.error("Blender material script execution failed.")
                self.status_message.emit("Blender material script failed.", 5000)
        else:
            log.error(f"Material script not found: {materials_script_path}")
            self.status_message.emit("Error: Material script not found.", 5000)
    elif materials_blend_path:  # Path was provided but is not a file.
        log.warning(f"Materials blend path provided but invalid: {materials_blend_path}")
        self.status_message.emit("Warning: Invalid Materials .blend path.", 5000)
|
||||
|
||||
def request_cancel(self):
    """Requests cancellation of the ongoing processing task.

    Sets the cancel flag (checked between submissions and result handling)
    and shuts the executor down without waiting, cancelling every future
    that has not started yet. Tasks already running inside worker processes
    cannot be forcefully stopped; their results are simply discarded.
    """
    if not self._is_running:
        log.warning("Cancel requested but no processing is running.")
        return

    if self._cancel_requested:
        log.warning("Cancellation already requested.")
        return

    log.info("Cancellation requested.")
    self.status_message.emit("Cancellation requested...", 3000)
    self._cancel_requested = True

    if self._executor:
        log.debug("Requesting executor shutdown...")
        # cancel_futures=True (Python 3.9+, which this file already requires
        # via PEP 585 annotations) cancels all pending futures in one call,
        # replacing the previous manual loop over self._futures.
        self._executor.shutdown(wait=False, cancel_futures=True)
        log.debug("Executor shutdown requested.")
|
||||
|
||||
def _find_blender_executable(self) -> Optional[str]:
    """Locate the Blender executable.

    Order of preference: BLENDER_EXECUTABLE_PATH from the base config,
    then a 'blender' binary on the system PATH. Returns the resolved path
    string, or None when nothing usable is found.
    """
    try:
        blender_exe_config = None
        if load_base_config:
            blender_exe_config = load_base_config().get('BLENDER_EXECUTABLE_PATH', None)
        else:
            log.warning("load_base_config not available. Cannot read BLENDER_EXECUTABLE_PATH from config.")

        if blender_exe_config:
            candidate = Path(blender_exe_config)
            if candidate.is_file():
                log.info(f"Using Blender executable from config: {candidate}")
                return str(candidate.resolve())
            log.warning(f"Blender path in config not found: '{blender_exe_config}'. Trying PATH.")
        else:
            log.info("BLENDER_EXECUTABLE_PATH not set in config. Trying PATH.")

        # Fall back to whatever 'blender' resolves to on the PATH.
        found = shutil.which("blender")
        if found:
            log.info(f"Found Blender executable in PATH: {found}")
            return found
        log.warning("Could not find 'blender' in system PATH.")
        return None
    except ConfigurationError as e:
        log.error(f"Error reading base configuration for Blender executable path: {e}")
        return None
    except Exception as e:
        log.error(f"Error checking Blender executable path: {e}")
        return None
|
||||
|
||||
def _run_blender_script_subprocess(self, blender_exe_path: str, blend_file_path: str, python_script_path: str, asset_root_dir: str, nodegroup_blend_file_path_arg: Optional[str] = None) -> bool:
    """Run one Blender python script headlessly; True iff exit code is 0.

    Args after '--' are consumed by the python script itself: the asset
    root directory, optionally followed by the nodegroup .blend path
    (used by the materials script).
    """
    command = [
        blender_exe_path,
        "--factory-startup",
        "-b",
        blend_file_path,
        "--log", "*",
        "--python", python_script_path,
        "--",
        asset_root_dir,
    ]
    if nodegroup_blend_file_path_arg:
        command.append(nodegroup_blend_file_path_arg)

    log.debug(f"Executing Blender command: {' '.join(map(str, command))}")
    script_name = Path(python_script_path).name
    try:
        # subprocess wants plain strings; coerce defensively.
        result = subprocess.run([str(part) for part in command],
                                capture_output=True, text=True, check=False, encoding='utf-8')
        log.info(f"Blender script '{script_name}' finished with exit code: {result.returncode}")
        if result.stdout:
            log.debug(f"Blender stdout:\n{result.stdout.strip()}")
        if result.stderr:
            # stderr with a zero exit code is only noteworthy, not fatal.
            if result.returncode != 0:
                log.error(f"Blender stderr:\n{result.stderr.strip()}")
            else:
                log.warning(f"Blender stderr (RC=0):\n{result.stderr.strip()}")
        return result.returncode == 0
    except FileNotFoundError:
        log.error(f"Blender executable not found at: {blender_exe_path}")
        return False
    except Exception as e:
        log.exception(f"Error running Blender script '{script_name}': {e}")
        return False
|
||||
@@ -9,158 +9,158 @@ from PySide6.QtCore import Signal, Slot, QObject
|
||||
# from rule_structure import SourceRule, AssetRule, FileRule # Assuming direct import is possible
|
||||
|
||||
class RuleEditorWidget(QWidget):
    """
    A widget to display and edit hierarchical processing rules (Source, Asset, File).
    """

    # Emitted with the edited rule object whenever one of its attributes changes.
    rule_updated = Signal(object)

    def __init__(self, asset_types: list[str] | None = None, parent=None):
        """Build the (initially empty) rule editor form.

        Args:
            asset_types (list[str] | None): Available asset type names shown
                in the asset-type dropdown. Defaults to None (no dropdown).
            parent: The parent widget.
        """
        super().__init__(parent)
        self.asset_types = asset_types or []   # Known asset type names.
        self.current_rule_type = None
        self.current_rule_object = None

        # NOTE(review): 'self.layout' shadows QWidget.layout(); kept as-is
        # for compatibility with existing callers.
        self.layout = QVBoxLayout(self)
        self.rule_type_label = QLabel("Select an item in the hierarchy to view/edit rules.")
        self.layout.addWidget(self.rule_type_label)

        self.form_layout = QFormLayout()
        self.layout.addLayout(self.form_layout)
        self.layout.addStretch()  # Keep the form pinned to the top.

        self.setLayout(self.layout)
        self.clear_editor()
|
||||
|
||||
@Slot(object, str)
def load_rule(self, rule_object, rule_type_name):
    """
    Loads a rule object into the editor.

    Builds one form row (label + editor widget) per public attribute of the
    rule object and wires each editor back to the rule.

    Args:
        rule_object: The SourceRule, AssetRule, or FileRule object.
        rule_type_name: 'SourceRule', 'AssetRule', or 'FileRule'.
    """
    self.clear_editor()
    self.current_rule_object = rule_object
    self.current_rule_type = rule_type_name
    self.rule_type_label.setText(f"Editing: {rule_type_name}")

    if not rule_object:
        return

    for name, value in vars(rule_object).items():
        if name.startswith('_'):  # Private attributes are not editable.
            continue
        row_label = QLabel(name.replace('_', ' ').title() + ":")
        editor = self._create_editor_widget(name, value)
        if editor:
            self.form_layout.addRow(row_label, editor)
            self._connect_editor_signal(editor, name)
|
||||
|
||||
def _create_editor_widget(self, attr_name, attr_value):
    """
    Creates an appropriate editor widget for one rule attribute.

    Returns a QComboBox for AssetRule.asset_type (when asset types are
    known), a type-matched editor for bool/int/float/str values, and a
    read-only QLabel for any unsupported type.
    """
    # Asset type gets a dropdown restricted to the known type names.
    if attr_name == 'asset_type' and self.current_rule_type == 'AssetRule' and self.asset_types:
        combo = QComboBox()
        combo.addItems(self.asset_types)
        if attr_value in self.asset_types:
            combo.setCurrentText(attr_value)
        else:
            combo.setCurrentIndex(0)  # Unknown value: fall back to first entry.
        return combo

    # bool must be tested before int (bool is an int subclass).
    if isinstance(attr_value, bool):
        check = QCheckBox()
        check.setChecked(attr_value)
        return check

    if isinstance(attr_value, int):
        spin = QSpinBox()
        spin.setRange(-2147483648, 2147483647)  # Full signed-32-bit range.
        spin.setValue(attr_value)
        return spin

    if isinstance(attr_value, float):
        dspin = QDoubleSpinBox()
        dspin.setRange(-sys.float_info.max, sys.float_info.max)
        dspin.setValue(attr_value)
        return dspin

    # Strings and unset (None) optionals share a line edit.
    if attr_value is None or isinstance(attr_value, str):
        line = QLineEdit()
        line.setText("" if attr_value is None else str(attr_value))
        return line

    # Unsupported type: show a read-only representation only.
    return QLabel(str(attr_value))
|
||||
|
||||
def _connect_editor_signal(self, editor_widget, attr_name):
    """
    Connects the change-signal of the editor widget to
    _update_rule_attribute for the given attribute name.
    """
    update = self._update_rule_attribute
    if isinstance(editor_widget, QLineEdit):
        editor_widget.textChanged.connect(lambda text: update(attr_name, text))
    elif isinstance(editor_widget, QCheckBox):
        editor_widget.toggled.connect(lambda checked: update(attr_name, checked))
    elif isinstance(editor_widget, (QSpinBox, QDoubleSpinBox)):
        editor_widget.valueChanged.connect(lambda value: update(attr_name, value))
    elif isinstance(editor_widget, QComboBox):
        # currentTextChanged delivers the selected text directly.
        editor_widget.currentTextChanged.connect(lambda text: update(attr_name, text))
    # Read-only QLabel fallbacks carry no editing signal.
|
||||
|
||||
def _update_rule_attribute(self, attr_name, value):
    """
    Coerces *value* to the attribute's original type, stores it on the
    current rule object, and emits rule_updated. On conversion failure the
    rule is left unchanged and the error is reported.
    """
    if not self.current_rule_object:
        return

    # Coerce based on the type the attribute currently holds.
    original_value = getattr(self.current_rule_object, attr_name)
    try:
        if isinstance(original_value, bool):
            new_value = bool(value)
        elif isinstance(original_value, int):
            new_value = int(value)
        elif isinstance(original_value, float):
            new_value = float(value)
        elif original_value is None or isinstance(original_value, str):
            # Empty string means "unset" for optional string attributes.
            new_value = str(value) if value != "" else None
        else:
            new_value = value  # Unknown type: store as-is.
        setattr(self.current_rule_object, attr_name, new_value)
        self.rule_updated.emit(self.current_rule_object)
    except ValueError:
        # e.g. non-numeric text typed into an int/float field.
        print(f"Error converting value '{value}' for attribute '{attr_name}'")
|
||||
|
||||
def clear_editor(self):
    """
    Resets editor state and removes every row from the form layout.
    """
    self.current_rule_object = None
    self.current_rule_type = None
    self.rule_type_label.setText("Select an item in the hierarchy to view/edit rules.")
    # removeRow deletes the row's widgets; keep removing row 0 until empty.
    while self.form_layout.rowCount():
        self.form_layout.removeRow(0)
|
||||
"""
|
||||
A widget to display and edit hierarchical processing rules (Source, Asset, File).
|
||||
"""
|
||||
rule_updated = Signal(object) # Signal emitted when a rule is updated
|
||||
|
||||
def __init__(self, asset_types: list[str] | None = None, parent=None):
|
||||
"""
|
||||
Initializes the RuleEditorWidget.
|
||||
|
||||
Args:
|
||||
asset_types (list[str] | None): A list of available asset type names. Defaults to None.
|
||||
parent: The parent widget.
|
||||
"""
|
||||
super().__init__(parent)
|
||||
self.asset_types = asset_types if asset_types else [] # Store asset types
|
||||
self.current_rule_type = None
|
||||
self.current_rule_object = None
|
||||
|
||||
self.layout = QVBoxLayout(self)
|
||||
self.rule_type_label = QLabel("Select an item in the hierarchy to view/edit rules.")
|
||||
self.layout.addWidget(self.rule_type_label)
|
||||
|
||||
self.form_layout = QFormLayout()
|
||||
self.layout.addLayout(self.form_layout)
|
||||
|
||||
self.layout.addStretch() # Add stretch to push content to the top
|
||||
|
||||
self.setLayout(self.layout)
|
||||
self.clear_editor()
|
||||
|
||||
@Slot(object, str)
|
||||
def load_rule(self, rule_object, rule_type_name):
|
||||
"""
|
||||
Loads a rule object into the editor.
|
||||
|
||||
Args:
|
||||
rule_object: The SourceRule, AssetRule, or FileRule object.
|
||||
rule_type_name: The name of the rule type ('SourceRule', 'AssetRule', 'FileRule').
|
||||
"""
|
||||
self.clear_editor()
|
||||
self.current_rule_object = rule_object
|
||||
self.current_rule_type = rule_type_name
|
||||
self.rule_type_label.setText(f"Editing: {rule_type_name}")
|
||||
|
||||
if rule_object:
|
||||
# Dynamically create form fields based on rule object attributes
|
||||
for attr_name, attr_value in vars(rule_object).items():
|
||||
if attr_name.startswith('_'): # Skip private attributes
|
||||
continue
|
||||
|
||||
label = QLabel(attr_name.replace('_', ' ').title() + ":")
|
||||
editor_widget = self._create_editor_widget(attr_name, attr_value)
|
||||
if editor_widget:
|
||||
self.form_layout.addRow(label, editor_widget)
|
||||
# Connect signal to update rule object
|
||||
self._connect_editor_signal(editor_widget, attr_name)
|
||||
|
||||
def _create_editor_widget(self, attr_name, attr_value):
|
||||
"""
|
||||
Creates an appropriate editor widget based on the attribute type.
|
||||
"""
|
||||
# --- Special Handling for Asset Type Dropdown ---
|
||||
if self.current_rule_type == 'AssetRule' and attr_name == 'asset_type' and self.asset_types:
|
||||
widget = QComboBox()
|
||||
widget.addItems(self.asset_types)
|
||||
if attr_value in self.asset_types:
|
||||
widget.setCurrentText(attr_value)
|
||||
elif self.asset_types: # Select first item if current value is invalid
|
||||
widget.setCurrentIndex(0)
|
||||
return widget
|
||||
# --- Standard Type Handling ---
|
||||
elif isinstance(attr_value, bool):
|
||||
widget = QCheckBox()
|
||||
widget.setChecked(attr_value)
|
||||
return widget
|
||||
elif isinstance(attr_value, int):
|
||||
widget = QSpinBox()
|
||||
widget.setRange(-2147483648, 2147483647) # Default integer range
|
||||
widget.setValue(attr_value)
|
||||
return widget
|
||||
elif isinstance(attr_value, float):
|
||||
widget = QDoubleSpinBox()
|
||||
widget.setRange(-sys.float_info.max, sys.float_info.max) # Default float range
|
||||
widget.setValue(attr_value)
|
||||
return widget
|
||||
elif isinstance(attr_value, (str, type(None))): # Handle None for strings
|
||||
widget = QLineEdit()
|
||||
widget.setText(str(attr_value) if attr_value is not None else "")
|
||||
return widget
|
||||
# Add more types as needed
|
||||
# elif isinstance(attr_value, list):
|
||||
# # Example for a simple list of strings
|
||||
# widget = QLineEdit()
|
||||
# widget.setText(", ".join(map(str, attr_value)))
|
||||
# return widget
|
||||
else:
|
||||
# For unsupported types, just display the value
|
||||
label = QLabel(str(attr_value))
|
||||
return label
|
||||
|
||||
def _connect_editor_signal(self, editor_widget, attr_name):
|
||||
"""
|
||||
Connects the appropriate signal of the editor widget to the update logic.
|
||||
"""
|
||||
if isinstance(editor_widget, QLineEdit):
|
||||
editor_widget.textChanged.connect(lambda text: self._update_rule_attribute(attr_name, text))
|
||||
elif isinstance(editor_widget, QCheckBox):
|
||||
editor_widget.toggled.connect(lambda checked: self._update_rule_attribute(attr_name, checked))
|
||||
elif isinstance(editor_widget, QSpinBox):
|
||||
editor_widget.valueChanged.connect(lambda value: self._update_rule_attribute(attr_name, value))
|
||||
elif isinstance(editor_widget, QDoubleSpinBox):
|
||||
editor_widget.valueChanged.connect(lambda value: self._update_rule_attribute(attr_name, value))
|
||||
elif isinstance(editor_widget, QComboBox):
|
||||
# Use currentTextChanged to get the string value directly
|
||||
editor_widget.currentTextChanged.connect(lambda text: self._update_rule_attribute(attr_name, text))
|
||||
# Add connections for other widget types
|
||||
|
||||
def _update_rule_attribute(self, attr_name, value):
|
||||
"""
|
||||
Updates the attribute of the current rule object and emits the signal.
|
||||
"""
|
||||
if self.current_rule_object:
|
||||
# Basic type conversion based on the original attribute type
|
||||
original_value = getattr(self.current_rule_object, attr_name)
|
||||
try:
|
||||
if isinstance(original_value, bool):
|
||||
converted_value = bool(value)
|
||||
elif isinstance(original_value, int):
|
||||
converted_value = int(value)
|
||||
elif isinstance(original_value, float):
|
||||
converted_value = float(value)
|
||||
elif isinstance(original_value, (str, type(None))):
|
||||
converted_value = str(value) if value != "" else None # Convert empty string to None for original None types
|
||||
else:
|
||||
converted_value = value # Fallback for other types
|
||||
setattr(self.current_rule_object, attr_name, converted_value)
|
||||
self.rule_updated.emit(self.current_rule_object)
|
||||
# print(f"Updated {attr_name} to {converted_value} in {self.current_rule_type}") # Debugging
|
||||
except ValueError:
|
||||
# Handle potential conversion errors (e.g., non-numeric input for int/float)
|
||||
print(f"Error converting value '{value}' for attribute '{attr_name}'")
|
||||
# Optionally, revert the editor widget to the original value or show an error indicator
|
||||
|
||||
def clear_editor(self):
|
||||
"""
|
||||
Clears the form layout.
|
||||
"""
|
||||
self.current_rule_object = None
|
||||
self.current_rule_type = None
|
||||
self.rule_type_label.setText("Select an item in the hierarchy to view/edit rules.")
|
||||
while self.form_layout.rowCount() > 0:
|
||||
self.form_layout.removeRow(0)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app = QApplication(sys.argv)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# gui/unified_view_model.py
|
||||
import logging # Added for debugging
|
||||
log = logging.getLogger(__name__) # Added for debugging
|
||||
from PySide6.QtCore import QAbstractItemModel, QModelIndex, Qt, Signal # Added Signal
|
||||
from PySide6.QtCore import QAbstractItemModel, QModelIndex, Qt, Signal, Slot # Added Signal and Slot
|
||||
from PySide6.QtGui import QColor # Added for background role
|
||||
from pathlib import Path # Added for file_name extraction
|
||||
from rule_structure import SourceRule, AssetRule, FileRule # Removed AssetType, ItemType import
|
||||
@@ -18,6 +18,10 @@ class UnifiedViewModel(QAbstractItemModel):
|
||||
A QAbstractItemModel for displaying and editing the hierarchical structure
|
||||
of SourceRule -> AssetRule -> FileRule.
|
||||
"""
|
||||
# Signal emitted when a FileRule's target asset override changes.
|
||||
# Carries the index of the FileRule and the new target asset path (or None).
|
||||
targetAssetOverrideChanged = Signal(QModelIndex, object)
|
||||
|
||||
Columns = [
|
||||
"Name", "Target Asset", "Supplier",
|
||||
"Asset Type", "Item Type"
|
||||
@@ -34,9 +38,52 @@ class UnifiedViewModel(QAbstractItemModel):
|
||||
def __init__(self, parent=None):
    """Create an empty model and cache type definitions from config."""
    super().__init__(parent)
    self._source_rules = []        # Top-level list of SourceRule objects.
    self._asset_type_colors = {}   # asset type name -> QColor
    self._file_type_colors = {}    # file type name -> QColor
    self._asset_type_keys = []     # sorted asset type names
    self._file_type_keys = []      # sorted file type names
    self._load_definitions()       # Populate the caches above.
|
||||
|
||||
def _load_definitions(self):
    """Loads configuration and caches type keys and QColor objects.

    Populates _asset_type_keys/_file_type_keys (sorted) and
    _asset_type_colors/_file_type_colors from ASSET_TYPE_DEFINITIONS and
    FILE_TYPE_DEFINITIONS in the base config. On any loading error all
    caches are reset to empty so the model degrades gracefully.
    """
    def cache_colors(type_defs: dict, kind: str) -> dict:
        # Build name -> QColor for every definition with a usable color.
        colors = {}
        for type_name, type_info in type_defs.items():
            hex_color = type_info.get("color")
            if not hex_color:
                continue
            color = QColor(hex_color)
            # BUGFIX: QColor() never raises ValueError on bad input -- it
            # returns an *invalid* color -- so the original except-ValueError
            # was dead code and invalid colors were cached silently. Check
            # isValid() instead.
            if color.isValid():
                colors[type_name] = color
            else:
                log.warning(f"Invalid hex color '{hex_color}' for {kind} type '{type_name}' in config.")
        return colors

    try:
        base_config = load_base_config()
        asset_type_defs = base_config.get('ASSET_TYPE_DEFINITIONS', {})
        file_type_defs = base_config.get('FILE_TYPE_DEFINITIONS', {})

        self._asset_type_keys = sorted(asset_type_defs)
        self._asset_type_colors = cache_colors(asset_type_defs, "asset")
        self._file_type_keys = sorted(file_type_defs)
        self._file_type_colors = cache_colors(file_type_defs, "file")
    except Exception as e:
        log.exception(f"Error loading or caching colors from configuration: {e}")
        # Leave the model with empty-but-consistent caches on failure.
        self._asset_type_colors = {}
        self._file_type_colors = {}
        self._asset_type_keys = []
        self._file_type_keys = []
|
||||
|
||||
def load_data(self, source_rules_list: list): # Accepts a list
|
||||
"""Loads or reloads the model with a list of SourceRule objects."""
|
||||
# Consider if color cache needs refreshing if config can change dynamically
|
||||
# self._load_and_cache_colors() # Uncomment if config can change and needs refresh
|
||||
self.beginResetModel()
|
||||
self._source_rules = source_rules_list if source_rules_list else [] # Assign the new list
|
||||
# Ensure back-references for parent lookup are set on the NEW items
|
||||
@@ -56,26 +103,26 @@ class UnifiedViewModel(QAbstractItemModel):
|
||||
def get_all_source_rules(self) -> list:
|
||||
"""Returns the internal list of SourceRule objects."""
|
||||
return self._source_rules
|
||||
|
||||
# set_display_mode removed
|
||||
|
||||
def rowCount(self, parent: QModelIndex = QModelIndex()) -> int:
|
||||
"""Returns the number of rows under the given parent."""
|
||||
if not parent.isValid():
|
||||
# Parent is the invisible root. Children are the SourceRules.
|
||||
return len(self._source_rules)
|
||||
|
||||
# Always use detailed logic
|
||||
parent_item = parent.internalPointer()
|
||||
|
||||
if isinstance(parent_item, SourceRule):
|
||||
# Parent is a SourceRule. Children are AssetRules.
|
||||
return len(parent_item.assets)
|
||||
return len(parent_item.assets)
|
||||
elif isinstance(parent_item, AssetRule):
|
||||
# Parent is an AssetRule. Children are FileRules.
|
||||
return len(parent_item.files)
|
||||
elif isinstance(parent_item, FileRule):
|
||||
return 0 # FileRules have no children
|
||||
|
||||
return 0 # Should not happen for valid items
|
||||
|
||||
|
||||
def columnCount(self, parent: QModelIndex = QModelIndex()) -> int:
|
||||
"""Returns the number of columns."""
|
||||
return len(self.Columns)
|
||||
@@ -143,27 +190,22 @@ class UnifiedViewModel(QAbstractItemModel):
|
||||
# Parent is a valid index, get its item
|
||||
parent_item = parent.internalPointer()
|
||||
|
||||
# Always use detailed logic
|
||||
child_item = None
|
||||
if isinstance(parent_item, SourceRule):
|
||||
# Parent is SourceRule. Children are AssetRules.
|
||||
if row < len(parent_item.assets):
|
||||
child_item = parent_item.assets[row]
|
||||
# Ensure parent reference is set
|
||||
if not hasattr(child_item, 'parent_source'):
|
||||
child_item.parent_source = parent_item
|
||||
elif isinstance(parent_item, AssetRule):
|
||||
# Parent is AssetRule. Children are FileRules.
|
||||
if row < len(parent_item.files):
|
||||
child_item = parent_item.files[row]
|
||||
# Ensure parent reference is set
|
||||
if not hasattr(child_item, 'parent_asset'):
|
||||
child_item.parent_asset = parent_item
|
||||
|
||||
if child_item:
|
||||
# Create index for the child item under the parent
|
||||
return self.createIndex(row, column, child_item)
|
||||
else:
|
||||
# Invalid row or parent type has no children (FileRule)
|
||||
return QModelIndex()
|
||||
|
||||
def data(self, index: QModelIndex, role: int = Qt.DisplayRole):
|
||||
@@ -183,107 +225,79 @@ class UnifiedViewModel(QAbstractItemModel):
|
||||
# Determine effective asset type
|
||||
asset_type = item.asset_type_override if item.asset_type_override else item.asset_type
|
||||
if asset_type:
|
||||
try:
|
||||
base_config = load_base_config() # Load base config
|
||||
asset_type_definitions = base_config.get('ASSET_TYPE_DEFINITIONS', {}) # Get definitions
|
||||
type_info = asset_type_definitions.get(asset_type)
|
||||
if type_info:
|
||||
hex_color = type_info.get("color")
|
||||
if hex_color:
|
||||
try:
|
||||
return QColor(hex_color)
|
||||
except ValueError:
|
||||
# Optional: Add logging for invalid hex color
|
||||
# print(f"Warning: Invalid hex color '{hex_color}' for asset type '{asset_type}' in config.")
|
||||
return None # Fallback for invalid hex
|
||||
else:
|
||||
# Optional: Add logging for missing color key
|
||||
# print(f"Warning: No color defined for asset type '{asset_type}' in config.")
|
||||
return None # Fallback if color key missing
|
||||
else:
|
||||
# Optional: Add logging for missing asset type definition
|
||||
# print(f"Warning: Asset type '{asset_type}' not found in ASSET_TYPE_DEFINITIONS.")
|
||||
return None # Fallback if type not in config
|
||||
except Exception: # Catch errors during config loading
|
||||
return None # Fallback on error
|
||||
# Use cached color
|
||||
return self._asset_type_colors.get(asset_type) # Returns None if not found
|
||||
else:
|
||||
return None # Fallback if no asset_type determined
|
||||
elif isinstance(item, FileRule):
|
||||
# Determine effective item type: Prioritize override, then use base type
|
||||
effective_item_type = item.item_type_override if item.item_type_override is not None else item.item_type
|
||||
if effective_item_type:
|
||||
try:
|
||||
base_config = load_base_config() # Load base config
|
||||
file_type_definitions = base_config.get('FILE_TYPE_DEFINITIONS', {}) # Get definitions
|
||||
type_info = file_type_definitions.get(effective_item_type)
|
||||
if type_info:
|
||||
hex_color = type_info.get("color")
|
||||
if hex_color:
|
||||
try:
|
||||
return QColor(hex_color)
|
||||
except ValueError:
|
||||
# Optional: Add logging for invalid hex color
|
||||
# print(f"Warning: Invalid hex color '{hex_color}' for file type '{item_type}' in config.")
|
||||
return None # Fallback for invalid hex
|
||||
else:
|
||||
# Optional: Add logging for missing color key
|
||||
# print(f"Warning: No color defined for file type '{item_type}' in config.")
|
||||
return None # Fallback if color key missing
|
||||
else:
|
||||
# File types often don't have specific colors, so no warning needed unless debugging
|
||||
return None # Fallback if type not in config
|
||||
except Exception: # Catch errors during config loading
|
||||
return None # Fallback on error
|
||||
# --- New Logic: Darkened Parent Background ---
|
||||
parent_asset = getattr(item, 'parent_asset', None)
|
||||
if parent_asset:
|
||||
parent_asset_type = parent_asset.asset_type_override if parent_asset.asset_type_override else parent_asset.asset_type
|
||||
parent_bg_color = self._asset_type_colors.get(parent_asset_type) if parent_asset_type else None
|
||||
|
||||
if parent_bg_color:
|
||||
# Darken the parent color by ~30% (factor 130)
|
||||
return parent_bg_color.darker(130)
|
||||
else:
|
||||
# Parent has no specific color, use default background
|
||||
return None
|
||||
else:
|
||||
return None # Fallback if no item_type determined
|
||||
# Should not happen if structure is correct, but fallback to default
|
||||
return None
|
||||
# --- End New Logic ---
|
||||
else: # Other item types or if item is None
|
||||
return None
|
||||
# --- Handle Foreground Role (Text Color) ---
|
||||
elif role == Qt.ForegroundRole:
|
||||
if isinstance(item, FileRule):
|
||||
# Determine effective item type
|
||||
effective_item_type = item.item_type_override if item.item_type_override is not None else item.item_type
|
||||
if effective_item_type:
|
||||
# Use cached color for text
|
||||
return self._file_type_colors.get(effective_item_type) # Returns None if not found
|
||||
# For SourceRule and AssetRule, return None to use default text color (usually contrasts well)
|
||||
return None
|
||||
|
||||
# --- Handle other roles (Display, Edit, etc.) ---
|
||||
if isinstance(item, SourceRule):
|
||||
if role == Qt.DisplayRole or role == Qt.EditRole: # Combine Display and Edit logic
|
||||
if role == Qt.DisplayRole or role == Qt.EditRole:
|
||||
if column == self.COL_NAME:
|
||||
# Always display name
|
||||
return Path(item.input_path).name
|
||||
elif column == self.COL_SUPPLIER:
|
||||
# Return override if set, otherwise the original identifier, else empty string
|
||||
elif column == self.COL_SUPPLIER: # Always handle supplier
|
||||
display_value = item.supplier_override if item.supplier_override is not None else item.supplier_identifier
|
||||
return display_value if display_value is not None else ""
|
||||
# Other columns return None or "" for SourceRule in Display/Edit roles
|
||||
return None # Default for SourceRule for other roles/columns
|
||||
return None # Other columns/roles are blank for SourceRule
|
||||
|
||||
# --- Logic for AssetRule and FileRule (previously detailed mode only) ---
|
||||
elif isinstance(item, AssetRule):
|
||||
if role == Qt.DisplayRole:
|
||||
if column == self.COL_NAME: return item.asset_name
|
||||
elif column == self.COL_ASSET_TYPE:
|
||||
display_value = item.asset_type_override if item.asset_type_override is not None else item.asset_type
|
||||
return display_value if display_value else ""
|
||||
# Removed Status and Output Path columns
|
||||
elif role == Qt.EditRole:
|
||||
if column == self.COL_ASSET_TYPE:
|
||||
return item.asset_type_override
|
||||
return None # Default for AssetRule
|
||||
return None
|
||||
|
||||
elif isinstance(item, FileRule):
|
||||
if role == Qt.DisplayRole:
|
||||
if column == self.COL_NAME: return Path(item.file_path).name # Display only filename
|
||||
if column == self.COL_NAME: return Path(item.file_path).name
|
||||
elif column == self.COL_TARGET_ASSET:
|
||||
return item.target_asset_name_override if item.target_asset_name_override is not None else ""
|
||||
elif column == self.COL_ITEM_TYPE:
|
||||
# Reverted Logic: Display override if set, otherwise base type. Shows prefixed keys.
|
||||
override = item.item_type_override
|
||||
initial_type = item.item_type
|
||||
|
||||
if override is not None:
|
||||
return override
|
||||
else:
|
||||
return initial_type if initial_type else ""
|
||||
# Removed Status and Output Path columns
|
||||
if override is not None: return override
|
||||
else: return initial_type if initial_type else ""
|
||||
elif role == Qt.EditRole:
|
||||
if column == self.COL_TARGET_ASSET: return item.target_asset_name_override if item.target_asset_name_override is not None else "" # Return string or ""
|
||||
elif column == self.COL_ITEM_TYPE: return item.item_type_override # Return string or None
|
||||
return None # Default for FileRule
|
||||
if column == self.COL_TARGET_ASSET: return item.target_asset_name_override if item.target_asset_name_override is not None else ""
|
||||
elif column == self.COL_ITEM_TYPE: return item.item_type_override
|
||||
return None
|
||||
|
||||
return None # Default return if role/item combination not handled
|
||||
return None
|
||||
|
||||
def setData(self, index: QModelIndex, value, role: int = Qt.EditRole) -> bool:
|
||||
"""Sets the role data for the item at index to value."""
|
||||
@@ -335,119 +349,8 @@ class UnifiedViewModel(QAbstractItemModel):
|
||||
old_value = item.target_asset_name_override # Store old value for potential revert/comparison
|
||||
item.target_asset_name_override = new_value
|
||||
changed = True
|
||||
|
||||
# --- Start: New Direct Model Restructuring Logic ---
|
||||
old_parent_asset = getattr(item, 'parent_asset', None)
|
||||
if old_parent_asset: # Ensure we have the old parent
|
||||
source_rule = getattr(old_parent_asset, 'parent_source', None)
|
||||
if source_rule: # Ensure we have the grandparent
|
||||
new_target_name = new_value # Can be None or a string
|
||||
|
||||
# Get old parent index and source row
|
||||
try:
|
||||
grandparent_row = self._source_rules.index(source_rule)
|
||||
old_parent_row = source_rule.assets.index(old_parent_asset)
|
||||
source_row = old_parent_asset.files.index(item)
|
||||
old_parent_index = self.createIndex(old_parent_row, 0, old_parent_asset)
|
||||
grandparent_index = self.createIndex(grandparent_row, 0, source_rule) # Needed for insert/remove parent
|
||||
except ValueError:
|
||||
print("Error: Could not find item, parent, or grandparent in model structure during setData.")
|
||||
item.target_asset_name_override = old_value # Revert data change
|
||||
return False # Indicate failure
|
||||
|
||||
target_parent_asset = None
|
||||
target_parent_index = QModelIndex()
|
||||
target_parent_row = -1 # Row within source_rule.assets
|
||||
target_row = -1 # Row within target_parent_asset.files
|
||||
move_occurred = False # Flag to track if a move happened
|
||||
|
||||
# 1. Find existing target parent
|
||||
if new_target_name: # Only search if a specific target is given
|
||||
for i, asset in enumerate(source_rule.assets):
|
||||
if asset.asset_name == new_target_name:
|
||||
target_parent_asset = asset
|
||||
target_parent_row = i
|
||||
target_parent_index = self.createIndex(target_parent_row, 0, target_parent_asset)
|
||||
break
|
||||
|
||||
# 2. Handle Move/Creation
|
||||
if target_parent_asset:
|
||||
# --- Move to Existing Parent ---
|
||||
if target_parent_asset != old_parent_asset: # Don't move if target is the same as old parent
|
||||
target_row = len(target_parent_asset.files) # Append to the end
|
||||
# print(f"DEBUG: Moving {Path(item.file_path).name} from {old_parent_asset.asset_name} ({source_row}) to {target_parent_asset.asset_name} ({target_row})")
|
||||
self.beginMoveRows(old_parent_index, source_row, source_row, target_parent_index, target_row)
|
||||
# Restructure internal data
|
||||
old_parent_asset.files.pop(source_row)
|
||||
target_parent_asset.files.append(item)
|
||||
item.parent_asset = target_parent_asset # Update parent reference
|
||||
self.endMoveRows()
|
||||
move_occurred = True
|
||||
else:
|
||||
# Target is the same as the old parent. No move needed.
|
||||
pass
|
||||
|
||||
elif new_target_name: # Only create if a *new* specific target name was given
|
||||
# --- Create New Parent and Move ---
|
||||
# print(f"DEBUG: Creating new parent '{new_target_name}' and moving {Path(item.file_path).name}")
|
||||
# Create new AssetRule
|
||||
new_asset_rule = AssetRule(asset_name=new_target_name)
|
||||
new_asset_rule.asset_type = old_parent_asset.asset_type # Copy type from old parent
|
||||
new_asset_rule.asset_type_override = old_parent_asset.asset_type_override # Copy override too
|
||||
new_asset_rule.parent_source = source_rule # Set parent reference
|
||||
|
||||
# Determine insertion row for the new parent (e.g., append)
|
||||
new_parent_row = len(source_rule.assets)
|
||||
# print(f"DEBUG: Inserting new parent at row {new_parent_row} under {Path(source_rule.input_path).name}")
|
||||
|
||||
# Emit signals for inserting the new parent row
|
||||
self.beginInsertRows(grandparent_index, new_parent_row, new_parent_row)
|
||||
source_rule.assets.insert(new_parent_row, new_asset_rule) # Insert into data structure
|
||||
self.endInsertRows()
|
||||
|
||||
# Get index for the newly inserted parent
|
||||
target_parent_index = self.createIndex(new_parent_row, 0, new_asset_rule)
|
||||
target_row = 0 # Insert file at the beginning of the new parent (for signal)
|
||||
|
||||
# Emit signals for moving the file row
|
||||
# print(f"DEBUG: Moving {Path(item.file_path).name} from {old_parent_asset.asset_name} ({source_row}) to new {new_asset_rule.asset_name} ({target_row})")
|
||||
self.beginMoveRows(old_parent_index, source_row, source_row, target_parent_index, target_row)
|
||||
# Restructure internal data
|
||||
old_parent_asset.files.pop(source_row)
|
||||
new_asset_rule.files.append(item) # Append is fine, target_row=0 was for signal
|
||||
item.parent_asset = new_asset_rule # Update parent reference
|
||||
self.endMoveRows()
|
||||
move_occurred = True
|
||||
|
||||
# Update target_parent_asset for potential cleanup check later
|
||||
target_parent_asset = new_asset_rule
|
||||
|
||||
else: # new_target_name is None or empty
|
||||
# No move happens when the override is simply cleared.
|
||||
pass
|
||||
|
||||
# 3. Cleanup Empty Old Parent (only if a move occurred and old parent is empty)
|
||||
if move_occurred and not old_parent_asset.files:
|
||||
# print(f"DEBUG: Removing empty old parent {old_parent_asset.asset_name}")
|
||||
try:
|
||||
# Find the row of the old parent again, as it might have shifted
|
||||
old_parent_row_for_removal = source_rule.assets.index(old_parent_asset)
|
||||
# print(f"DEBUG: Removing parent at row {old_parent_row_for_removal} under {Path(source_rule.input_path).name}")
|
||||
self.beginRemoveRows(grandparent_index, old_parent_row_for_removal, old_parent_row_for_removal)
|
||||
source_rule.assets.pop(old_parent_row_for_removal)
|
||||
self.endRemoveRows()
|
||||
except ValueError:
|
||||
print(f"Error: Could not find old parent '{old_parent_asset.asset_name}' for removal.")
|
||||
# Log error, but continue
|
||||
else:
|
||||
print("Error: Could not find grandparent SourceRule during setData restructuring.")
|
||||
item.target_asset_name_override = old_value # Revert
|
||||
return False
|
||||
else:
|
||||
print("Error: Could not find parent AssetRule during setData restructuring.")
|
||||
item.target_asset_name_override = old_value # Revert
|
||||
return False
|
||||
# --- End: New Direct Model Restructuring Logic ---
|
||||
# Emit signal that the override changed, let handler deal with restructuring
|
||||
self.targetAssetOverrideChanged.emit(index, new_value)
|
||||
elif column == self.COL_ITEM_TYPE: # Item-Type Override
|
||||
# Delegate provides string value (e.g., "MAP_COL") or None
|
||||
new_value = str(value) if value is not None else None
|
||||
@@ -515,15 +418,15 @@ class UnifiedViewModel(QAbstractItemModel):
|
||||
item = index.internalPointer()
|
||||
column = index.column()
|
||||
|
||||
# Always use detailed mode editability logic
|
||||
can_edit = False
|
||||
# Determine editability based on item type and column
|
||||
if isinstance(item, SourceRule): # If SourceRule is displayed/editable
|
||||
if column == self.COL_SUPPLIER: can_edit = True # Supplier is editable
|
||||
if isinstance(item, SourceRule):
|
||||
if column == self.COL_SUPPLIER: can_edit = True
|
||||
elif isinstance(item, AssetRule):
|
||||
if column == self.COL_ASSET_TYPE: can_edit = True # Asset Type is editable
|
||||
if column == self.COL_ASSET_TYPE: can_edit = True
|
||||
elif isinstance(item, FileRule):
|
||||
if column == self.COL_TARGET_ASSET: can_edit = True # Target Asset is editable
|
||||
if column == self.COL_ITEM_TYPE: can_edit = True # Item Type is editable
|
||||
if column == self.COL_TARGET_ASSET: can_edit = True
|
||||
if column == self.COL_ITEM_TYPE: can_edit = True
|
||||
|
||||
if can_edit:
|
||||
return default_flags | Qt.ItemIsEditable
|
||||
@@ -548,98 +451,316 @@ class UnifiedViewModel(QAbstractItemModel):
|
||||
if item: # Ensure internal pointer is not None
|
||||
return item
|
||||
return None # Return None for invalid index or None pointer
|
||||
# --- Method to update model based on LLM predictions ---
|
||||
def update_rules_for_sources(self, source_rules: List[SourceRule]):
|
||||
# --- Method to update model based on prediction results, preserving overrides ---
|
||||
def update_rules_for_sources(self, new_source_rules: List[SourceRule]):
|
||||
"""
|
||||
Updates the model's internal data based on a list of SourceRule objects,
|
||||
typically containing predictions for one or more source directories.
|
||||
Updates the model's internal data based on a list of new SourceRule objects
|
||||
(typically from prediction results), merging them with existing data while
|
||||
preserving user overrides.
|
||||
|
||||
Args:
|
||||
source_rules: A list of SourceRule objects containing the new structure.
|
||||
new_source_rules: A list of SourceRule objects containing the new structure.
|
||||
"""
|
||||
if not source_rules:
|
||||
print("UnifiedViewModel: update_rules_for_sources called with empty list.")
|
||||
if not new_source_rules:
|
||||
log.warning("UnifiedViewModel: update_rules_for_sources called with empty list.")
|
||||
return
|
||||
|
||||
# --- Important: Model Change Signaling ---
|
||||
# Using Option 2 (per-source update) as it's generally more efficient.
|
||||
print(f"UnifiedViewModel: Updating rules for {len(source_rules)} source(s).")
|
||||
log.info(f"UnifiedViewModel: Updating rules for {len(new_source_rules)} source(s).")
|
||||
|
||||
# --- Node Class Placeholders ---
|
||||
# Ensure these match your actual node implementation if different.
|
||||
# These might be imported from another module or defined within this model.
|
||||
# Example: from .your_node_module import SourceNode, AssetNode, FileNode
|
||||
# For now, we assume they are available in the scope.
|
||||
for new_source_rule in new_source_rules:
|
||||
source_path = new_source_rule.input_path
|
||||
existing_source_rule = None
|
||||
existing_source_row = -1
|
||||
|
||||
for rule in source_rules:
|
||||
source_path = rule.input_path # Use input_path as per SourceRule definition
|
||||
# --- Find the corresponding SourceRule in the model's internal list ---
|
||||
# This replaces the placeholder _find_source_node_by_path logic
|
||||
# We need the *object* and its *index* in self._source_rules
|
||||
source_rule_obj = None
|
||||
source_rule_row = -1
|
||||
for i, existing_rule in enumerate(self._source_rules):
|
||||
if existing_rule.input_path == source_path:
|
||||
source_rule_obj = existing_rule
|
||||
source_rule_row = i
|
||||
# 1. Find existing SourceRule in the model
|
||||
for i, rule in enumerate(self._source_rules):
|
||||
if rule.input_path == source_path:
|
||||
existing_source_rule = rule
|
||||
existing_source_row = i
|
||||
break
|
||||
|
||||
if source_rule_obj is None:
|
||||
# --- ADD NEW RULE LOGIC ---
|
||||
log.debug(f"No existing rule found for '{source_path}'. Adding new rule to model.")
|
||||
# Ensure parent references are set within the new rule
|
||||
for asset_rule in rule.assets:
|
||||
asset_rule.parent_source = rule # Set parent to the rule being added
|
||||
if existing_source_rule is None:
|
||||
# 2. Add New SourceRule if not found
|
||||
log.debug(f"Adding new SourceRule for '{source_path}'")
|
||||
# Ensure parent references are set within the new rule hierarchy
|
||||
for asset_rule in new_source_rule.assets:
|
||||
asset_rule.parent_source = new_source_rule
|
||||
for file_rule in asset_rule.files:
|
||||
file_rule.parent_asset = asset_rule
|
||||
|
||||
# Add to model's internal list and emit signal
|
||||
current_row_count = len(self._source_rules)
|
||||
self.beginInsertRows(QModelIndex(), current_row_count, current_row_count)
|
||||
self._source_rules.append(rule) # Append the new rule
|
||||
insert_row = len(self._source_rules)
|
||||
self.beginInsertRows(QModelIndex(), insert_row, insert_row)
|
||||
self._source_rules.append(new_source_rule)
|
||||
self.endInsertRows()
|
||||
continue # Skip the rest of the loop for this rule as it's newly added
|
||||
# --- END ADD NEW RULE LOGIC ---
|
||||
continue # Process next new_source_rule
|
||||
|
||||
# Get the QModelIndex corresponding to the source_rule_obj
|
||||
# This index represents the parent for layout changes.
|
||||
source_index = self.createIndex(source_rule_row, 0, source_rule_obj)
|
||||
# 3. Merge Existing SourceRule
|
||||
log.debug(f"Merging SourceRule for '{source_path}'")
|
||||
existing_source_index = self.createIndex(existing_source_row, 0, existing_source_rule)
|
||||
if not existing_source_index.isValid():
|
||||
log.error(f"Could not create valid index for existing SourceRule: {source_path}. Skipping.")
|
||||
continue
|
||||
|
||||
if not source_index.isValid():
|
||||
print(f"Warning: Could not create valid QModelIndex for SourceRule: {source_path}. Skipping update.")
|
||||
continue
|
||||
# Update non-override SourceRule fields (e.g., supplier identifier if needed)
|
||||
if existing_source_rule.supplier_identifier != new_source_rule.supplier_identifier:
|
||||
# Only update if override is not set, or if you want prediction to always update base identifier
|
||||
if existing_source_rule.supplier_override is None:
|
||||
existing_source_rule.supplier_identifier = new_source_rule.supplier_identifier
|
||||
# Emit dataChanged for the supplier column if it's displayed/editable at source level
|
||||
supplier_col_index = self.createIndex(existing_source_row, self.COL_SUPPLIER, existing_source_rule)
|
||||
self.dataChanged.emit(supplier_col_index, supplier_col_index, [Qt.DisplayRole, Qt.EditRole])
|
||||
|
||||
# --- Signal layout change for the specific source node ---
|
||||
# We are changing the children (AssetRules) of this SourceRule.
|
||||
# Emit with parent index list and orientation.
|
||||
self.layoutAboutToBeChanged.emit() # Emit without arguments
|
||||
|
||||
# --- Clear existing children (AssetRules) ---
|
||||
# Directly modify the assets list of the found SourceRule object
|
||||
source_rule_obj.assets.clear() # Clear the list in place
|
||||
# --- Merge AssetRules ---
|
||||
existing_assets_dict = {asset.asset_name: asset for asset in existing_source_rule.assets}
|
||||
new_assets_dict = {asset.asset_name: asset for asset in new_source_rule.assets}
|
||||
processed_asset_names = set()
|
||||
|
||||
# --- Rebuild children based on the new rule ---
|
||||
for asset_rule in rule.assets:
|
||||
# Add the new AssetRule object directly
|
||||
source_rule_obj.assets.append(asset_rule)
|
||||
# Set the parent reference on the new asset rule
|
||||
asset_rule.parent_source = source_rule_obj
|
||||
# Iterate through new assets to update existing or add new ones
|
||||
for asset_name, new_asset in new_assets_dict.items():
|
||||
processed_asset_names.add(asset_name)
|
||||
existing_asset = existing_assets_dict.get(asset_name)
|
||||
|
||||
# Set parent references for the FileRules within the new AssetRule
|
||||
for file_rule in asset_rule.files:
|
||||
file_rule.parent_asset = asset_rule
|
||||
if existing_asset:
|
||||
# --- Update Existing AssetRule ---
|
||||
log.debug(f" Merging AssetRule: {asset_name}")
|
||||
existing_asset_row = existing_source_rule.assets.index(existing_asset)
|
||||
existing_asset_index = self.createIndex(existing_asset_row, 0, existing_asset)
|
||||
|
||||
# --- Signal layout change completion ---
|
||||
self.layoutChanged.emit() # Emit without arguments
|
||||
print(f"UnifiedViewModel: Updated children for SourceRule: {source_path}")
|
||||
# Update non-override fields (e.g., asset_type)
|
||||
if existing_asset.asset_type != new_asset.asset_type and existing_asset.asset_type_override is None:
|
||||
existing_asset.asset_type = new_asset.asset_type
|
||||
asset_type_col_index = self.createIndex(existing_asset_row, self.COL_ASSET_TYPE, existing_asset)
|
||||
self.dataChanged.emit(asset_type_col_index, asset_type_col_index, [Qt.DisplayRole, Qt.EditRole, Qt.BackgroundRole]) # Include BackgroundRole for color
|
||||
|
||||
# --- Merge FileRules within the AssetRule ---
|
||||
self._merge_file_rules(existing_asset, new_asset, existing_asset_index)
|
||||
|
||||
else:
|
||||
# --- Add New AssetRule ---
|
||||
log.debug(f" Adding new AssetRule: {asset_name}")
|
||||
new_asset.parent_source = existing_source_rule # Set parent
|
||||
# Ensure file parents are set
|
||||
for file_rule in new_asset.files:
|
||||
file_rule.parent_asset = new_asset
|
||||
|
||||
insert_row = len(existing_source_rule.assets)
|
||||
self.beginInsertRows(existing_source_index, insert_row, insert_row)
|
||||
existing_source_rule.assets.append(new_asset)
|
||||
self.endInsertRows()
|
||||
|
||||
# --- Remove Old AssetRules ---
|
||||
# Find assets in existing but not in new, and remove them in reverse order
|
||||
assets_to_remove = []
|
||||
for i, existing_asset in reversed(list(enumerate(existing_source_rule.assets))):
|
||||
if existing_asset.asset_name not in processed_asset_names:
|
||||
assets_to_remove.append((i, existing_asset.asset_name)) # Store index and name
|
||||
|
||||
for row_index, asset_name_to_remove in assets_to_remove:
|
||||
log.debug(f" Removing old AssetRule: {asset_name_to_remove}")
|
||||
self.beginRemoveRows(existing_source_index, row_index, row_index)
|
||||
existing_source_rule.assets.pop(row_index)
|
||||
self.endRemoveRows()
|
||||
|
||||
|
||||
def _merge_file_rules(self, existing_asset: AssetRule, new_asset: AssetRule, parent_asset_index: QModelIndex):
|
||||
"""Helper method to merge FileRules for a given AssetRule."""
|
||||
existing_files_dict = {file.file_path: file for file in existing_asset.files}
|
||||
new_files_dict = {file.file_path: file for file in new_asset.files}
|
||||
processed_file_paths = set()
|
||||
|
||||
# Iterate through new files to update existing or add new ones
|
||||
for file_path, new_file in new_files_dict.items():
|
||||
processed_file_paths.add(file_path)
|
||||
existing_file = existing_files_dict.get(file_path)
|
||||
|
||||
if existing_file:
|
||||
# --- Update Existing FileRule ---
|
||||
log.debug(f" Merging FileRule: {Path(file_path).name}")
|
||||
existing_file_row = existing_asset.files.index(existing_file)
|
||||
existing_file_index = self.createIndex(existing_file_row, 0, existing_file) # Index relative to parent_asset_index
|
||||
|
||||
# Update non-override fields (item_type, standard_map_type)
|
||||
changed_roles = []
|
||||
if existing_file.item_type != new_file.item_type and existing_file.item_type_override is None:
|
||||
existing_file.item_type = new_file.item_type
|
||||
changed_roles.extend([Qt.DisplayRole, Qt.EditRole, Qt.BackgroundRole]) # Include BackgroundRole for color
|
||||
|
||||
# Update standard_map_type (assuming it's derived/set during prediction)
|
||||
# Check if standard_map_type exists on both objects before comparing
|
||||
new_standard_type = getattr(new_file, 'standard_map_type', None)
|
||||
old_standard_type = getattr(existing_file, 'standard_map_type', None)
|
||||
if old_standard_type != new_standard_type:
|
||||
# Update only if item_type_override is not set, as override dictates standard type
|
||||
if existing_file.item_type_override is None:
|
||||
existing_file.standard_map_type = new_standard_type
|
||||
# standard_map_type might not directly affect display, but item_type change covers it
|
||||
if Qt.DisplayRole not in changed_roles: # Avoid duplicates
|
||||
changed_roles.extend([Qt.DisplayRole, Qt.EditRole])
|
||||
|
||||
|
||||
# Emit dataChanged only if something actually changed
|
||||
if changed_roles:
|
||||
# Emit for all relevant columns potentially affected by type changes
|
||||
for col in [self.COL_ITEM_TYPE]: # Add other cols if needed
|
||||
col_index = self.createIndex(existing_file_row, col, existing_file)
|
||||
self.dataChanged.emit(col_index, col_index, changed_roles)
|
||||
|
||||
else:
|
||||
# --- Add New FileRule ---
|
||||
log.debug(f" Adding new FileRule: {Path(file_path).name}")
|
||||
new_file.parent_asset = existing_asset # Set parent
|
||||
insert_row = len(existing_asset.files)
|
||||
self.beginInsertRows(parent_asset_index, insert_row, insert_row)
|
||||
existing_asset.files.append(new_file)
|
||||
self.endInsertRows()
|
||||
|
||||
# --- Remove Old FileRules ---
|
||||
files_to_remove = []
|
||||
for i, existing_file in reversed(list(enumerate(existing_asset.files))):
|
||||
if existing_file.file_path not in processed_file_paths:
|
||||
files_to_remove.append((i, Path(existing_file.file_path).name))
|
||||
|
||||
for row_index, file_name_to_remove in files_to_remove:
|
||||
log.debug(f" Removing old FileRule: {file_name_to_remove}")
|
||||
self.beginRemoveRows(parent_asset_index, row_index, row_index)
|
||||
existing_asset.files.pop(row_index)
|
||||
self.endRemoveRows()
|
||||
|
||||
|
||||
# --- Dedicated Model Restructuring Methods ---
|
||||
|
||||
def moveFileRule(self, source_file_index: QModelIndex, target_parent_asset_index: QModelIndex):
    """Relocate a FileRule to a different parent AssetRule.

    Args:
        source_file_index: Index of the FileRule being moved.
        target_parent_asset_index: Index of the AssetRule that should receive it.

    Returns:
        True when the move succeeded (or was a no-op because source and
        target parent are the same asset); False on any validation failure.
    """
    # Guard clauses: bail out early on anything we cannot resolve.
    if not (source_file_index.isValid() and target_parent_asset_index.isValid()):
        log.error("moveFileRule: Invalid source or target index provided.")
        return False

    moving_file = source_file_index.internalPointer()
    dest_asset = target_parent_asset_index.internalPointer()
    if not (isinstance(moving_file, FileRule) and isinstance(dest_asset, AssetRule)):
        log.error("moveFileRule: Invalid item types for source or target.")
        return False

    origin_asset = getattr(moving_file, 'parent_asset', None)
    if not origin_asset:
        log.error(f"moveFileRule: Source file '{Path(moving_file.file_path).name}' has no parent asset.")
        return False

    if origin_asset == dest_asset:
        log.debug("moveFileRule: Source and target parent are the same. No move needed.")
        return True  # Nothing to do, but not a failure.

    # Resolve the old parent's model index so Qt knows where rows leave from.
    owning_source = getattr(origin_asset, 'parent_source', None)
    if not owning_source:
        log.error(f"moveFileRule: Could not find SourceRule parent for old asset '{origin_asset.asset_name}'.")
        return False

    try:
        origin_index = self.createIndex(owning_source.assets.index(origin_asset), 0, origin_asset)
        source_row = origin_asset.files.index(moving_file)
    except ValueError:
        log.error("moveFileRule: Could not find old parent or source file within their respective lists.")
        return False

    # Always append at the end of the destination asset's file list.
    target_row = len(dest_asset.files)

    log.debug(f"Moving file '{Path(moving_file.file_path).name}' from '{origin_asset.asset_name}' (row {source_row}) to '{dest_asset.asset_name}' (row {target_row})")

    # Qt contract: the data mutation happens between begin/endMoveRows.
    self.beginMoveRows(origin_index, source_row, source_row, target_parent_asset_index, target_row)
    origin_asset.files.pop(source_row)
    dest_asset.files.append(moving_file)
    moving_file.parent_asset = dest_asset  # Keep the back-reference consistent.
    self.endMoveRows()
    return True
|
||||
|
||||
def createAssetRule(self, source_rule: SourceRule, new_asset_name: str, copy_from_asset: AssetRule = None) -> QModelIndex:
    """Create a new AssetRule under *source_rule* and return its model index.

    If an asset named *new_asset_name* already exists under the source, the
    existing asset's index is returned instead of creating a duplicate.

    Args:
        source_rule: Parent SourceRule that will own the new asset.
        new_asset_name: Name for the asset; must be non-empty.
        copy_from_asset: Optional AssetRule whose type information is copied
            onto the new asset.

    Returns:
        The QModelIndex of the (new or pre-existing) AssetRule, or an
        invalid QModelIndex on failure.
    """
    if not isinstance(source_rule, SourceRule) or not new_asset_name:
        log.error("createAssetRule: Invalid SourceRule or empty asset name provided.")
        return QModelIndex()

    # Reuse an existing asset with the same name rather than duplicating it.
    # enumerate() yields the row directly; the previous list.index() re-scan
    # (and its unreachable ValueError handler) was dead code.
    for existing_row, asset in enumerate(source_rule.assets):
        if asset.asset_name == new_asset_name:
            log.warning(f"createAssetRule: Asset '{new_asset_name}' already exists under '{Path(source_rule.input_path).name}'.")
            return self.createIndex(existing_row, 0, asset)

    log.debug(f"Creating new AssetRule '{new_asset_name}' under '{Path(source_rule.input_path).name}'")
    new_asset_rule = AssetRule(asset_name=new_asset_name)
    new_asset_rule.parent_source = source_rule  # Back-reference for tree navigation.

    # Optionally inherit type information from a template asset.
    if isinstance(copy_from_asset, AssetRule):
        new_asset_rule.asset_type = copy_from_asset.asset_type
        new_asset_rule.asset_type_override = copy_from_asset.asset_type_override

    # Locate the parent SourceRule's index so Qt knows where the row is inserted.
    try:
        grandparent_row = self._source_rules.index(source_rule)
    except ValueError:
        log.error(f"createAssetRule: Could not find SourceRule '{Path(source_rule.input_path).name}' in the model's root list.")
        return QModelIndex()
    grandparent_index = self.createIndex(grandparent_row, 0, source_rule)

    # Append the new asset as the last child of the source rule.
    new_parent_row = len(source_rule.assets)
    self.beginInsertRows(grandparent_index, new_parent_row, new_parent_row)
    source_rule.assets.insert(new_parent_row, new_asset_rule)
    self.endInsertRows()

    return self.createIndex(new_parent_row, 0, new_asset_rule)
|
||||
|
||||
|
||||
def removeAssetRule(self, asset_rule_to_remove: AssetRule):
    """Remove an empty AssetRule from its parent SourceRule.

    Only assets with no remaining FileRules are removed; non-empty assets
    are deliberately left untouched.

    Args:
        asset_rule_to_remove: The AssetRule to delete.

    Returns:
        True if the asset was removed, False otherwise.
    """
    if not isinstance(asset_rule_to_remove, AssetRule):
        log.error("removeAssetRule: Invalid AssetRule provided.")
        return False

    if asset_rule_to_remove.files:
        log.warning(f"removeAssetRule: Asset '{asset_rule_to_remove.asset_name}' is not empty. Removal aborted.")
        return False  # Never auto-remove assets that still own files.

    source_rule = getattr(asset_rule_to_remove, 'parent_source', None)
    if not source_rule:
        log.error(f"removeAssetRule: Could not find parent SourceRule for asset '{asset_rule_to_remove.asset_name}'.")
        return False

    # Resolve the parent SourceRule's index and the asset's row within it.
    try:
        grandparent_row = self._source_rules.index(source_rule)
        grandparent_index = self.createIndex(grandparent_row, 0, source_rule)
        asset_row_for_removal = source_rule.assets.index(asset_rule_to_remove)
    except ValueError:
        log.error(f"removeAssetRule: Could not find parent SourceRule or the AssetRule within its parent's list.")
        return False

    # BUG FIX: this removal block was previously orphaned after two unrelated
    # getter methods spliced into the middle of removeAssetRule, so the asset
    # was located but never actually removed. It now runs as intended.
    log.debug(f"Removing empty AssetRule '{asset_rule_to_remove.asset_name}' at row {asset_row_for_removal} under '{Path(source_rule.input_path).name}'")
    self.beginRemoveRows(grandparent_index, asset_row_for_removal, asset_row_for_removal)
    source_rule.assets.pop(asset_row_for_removal)
    self.endRemoveRows()
    return True

def get_asset_type_keys(self) -> List[str]:
    """Return the cached list of asset type keys."""
    return self._asset_type_keys

def get_file_type_keys(self) -> List[str]:
    """Return the cached list of file type keys."""
    return self._file_type_keys
|
||||
|
||||
# --- Placeholder for node finding method (Original Request - Replaced by direct list search above) ---
|
||||
# Kept for reference, but the logic above directly searches self._source_rules
|
||||
# def _find_source_node_by_path(self, path: str) -> 'SourceRule | None':
|
||||
# """Placeholder: Finds a top-level SourceRule by its input_path."""
|
||||
# # This assumes the model uses separate node objects, which it doesn't.
|
||||
# # The current implementation uses the Rule objects directly.
|
||||
# for i, rule in enumerate(self._source_rules):
|
||||
# if rule.input_path == path:
|
||||
# return rule # Return the SourceRule object itself
|
||||
# return None
|
||||
|
||||
# Kept for reference, but the logic above directly searches self._source_rules
|
||||