Alternative LLM Predictor Implementation

This commit is contained in:
2025-05-02 13:38:42 +02:00
parent 1ac23eb252
commit 667f119c61
15 changed files with 1454 additions and 100 deletions

View File

@@ -0,0 +1,351 @@
import os
import json
import requests
from PySide6.QtCore import QObject, Signal, Slot, QThread
from typing import List, Dict, Any
# Assuming rule_structure defines SourceRule, AssetRule, FileRule etc.
# Adjust the import path if necessary based on project structure
from rule_structure import SourceRule, AssetRule, FileRule # Ensure AssetRule and FileRule are imported
# Assuming configuration loads app_settings.json
# Adjust the import path if necessary
# Removed Configuration import, will use load_base_config if needed or passed settings
# from configuration import Configuration
from configuration import load_base_config # Keep this for now if needed elsewhere, or remove if settings are always passed
class LLMPredictionHandler(QObject):
"""
Handles the interaction with an LLM for predicting asset structures
based on a directory's file list. Designed to run in a QThread.
"""
# Signal emitted when prediction for a directory is complete
# Arguments: directory_path (str), results (List[SourceRule])
prediction_ready = Signal(str, list)
# Signal emitted on error
# Arguments: directory_path (str), error_message (str)
prediction_error = Signal(str, str)
# Signal to update status message in the GUI
status_update = Signal(str)
def __init__(self, input_path_str: str, file_list: list, llm_settings: dict, parent: QObject = None): # Accept input_path_str and file_list
"""
Initializes the handler.
Args:
input_path_str: The absolute path to the original input source (directory or archive).
file_list: A list of relative file paths extracted from the input source.
llm_settings: A dictionary containing necessary LLM configuration.
parent: The parent QObject.
"""
super().__init__(parent)
self.input_path_str = input_path_str # Store original input path
self.file_list = file_list # Store the provided file list
self.llm_settings = llm_settings # Store the settings dictionary
self.endpoint_url = self.llm_settings.get('llm_endpoint_url')
self.api_key = self.llm_settings.get('llm_api_key')
self._is_cancelled = False
@Slot()
def run(self):
"""
The main execution method to be called when the thread starts.
Orchestrates the prediction process for the given directory.
"""
# Directory check is no longer needed here, input path is just for context
# File list is provided via __init__
try:
self.status_update.emit(f"Preparing LLM input for {os.path.basename(self.input_path_str)}...")
if self._is_cancelled: return
# Use the file list passed during initialization
if not self.file_list:
self.prediction_ready.emit(self.input_path_str, []) # Emit empty list if no files
return
if self._is_cancelled: return
prompt = self._prepare_prompt(self.file_list) # Use self.file_list
if self._is_cancelled: return
self.status_update.emit(f"Calling LLM for {os.path.basename(self.input_path_str)}...")
llm_response_json_str = self._call_llm(prompt)
if self._is_cancelled: return
self.status_update.emit(f"Parsing LLM response for {os.path.basename(self.input_path_str)}...")
predicted_rules = self._parse_llm_response(llm_response_json_str)
if self._is_cancelled: return
self.prediction_ready.emit(self.input_path_str, predicted_rules) # Use input_path_str
self.status_update.emit(f"LLM interpretation complete for {os.path.basename(self.input_path_str)}.")
except Exception as e:
error_msg = f"Error during LLM prediction for {self.input_path_str}: {e}"
print(error_msg) # Log the full error
self.prediction_error.emit(self.input_path_str, f"An error occurred: {e}") # Use input_path_str
finally:
# Ensure thread cleanup or final signals if needed
pass
@Slot()
def cancel(self):
"""
Sets the cancellation flag.
"""
self._is_cancelled = True
self.status_update.emit(f"Cancellation requested for {os.path.basename(self.input_path_str)}...") # Use input_path_str
# Removed _get_file_list method as file list is now passed in __init__
def _prepare_prompt(self, file_list: List[str]) -> str:
"""
Prepares the full prompt string to send to the LLM using stored settings.
"""
# Access settings from the stored dictionary
prompt_template = self.llm_settings.get('prompt_template_content')
if not prompt_template:
# Attempt to fall back to reading the default file path if content is missing
default_template_path = 'llm_prototype/prompt_template.txt'
print(f"Warning: 'prompt_template_content' missing in llm_settings. Falling back to reading default file: {default_template_path}")
try:
with open(default_template_path, 'r', encoding='utf-8') as f:
prompt_template = f.read()
except FileNotFoundError:
raise ValueError(f"LLM predictor prompt template content missing in settings and default file not found at: {default_template_path}")
except Exception as e:
raise ValueError(f"Error reading default LLM prompt template file {default_template_path}: {e}")
if not prompt_template: # Final check after potential fallback
raise ValueError("LLM predictor prompt template content is empty or could not be loaded.")
# Access definitions and examples from the settings dictionary
asset_defs = json.dumps(self.llm_settings.get('asset_types', {}), indent=4)
file_defs = json.dumps(self.llm_settings.get('file_types', {}), indent=4)
examples = json.dumps(self.llm_settings.get('examples', []), indent=2)
# Format file list as a single string with newlines
file_list_str = "\n".join(file_list)
# Replace placeholders
prompt = prompt_template.replace('{ASSET_TYPE_DEFINITIONS}', asset_defs)
prompt = prompt.replace('{FILE_TYPE_DEFINITIONS}', file_defs)
prompt = prompt.replace('{EXAMPLE_INPUT_OUTPUT_PAIRS}', examples)
prompt = prompt.replace('{FILE_LIST}', file_list_str)
return prompt
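# Illustrative sketch only (the real llm_prototype/prompt_template.txt is not part of this diff):
# the template is assumed to contain the four placeholders verbatim, e.g.
#
#   You group raw file listings into assets.
#   Asset type definitions: {ASSET_TYPE_DEFINITIONS}
#   File type definitions: {FILE_TYPE_DEFINITIONS}
#   Examples: {EXAMPLE_INPUT_OUTPUT_PAIRS}
#   Files to classify:
#   {FILE_LIST}
#   Respond with JSON only.
#
# A missing placeholder simply leaves that section of the prompt unreplaced.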
def _call_llm(self, prompt: str) -> str:
"""
Calls the configured LLM API endpoint with the prepared prompt.
Args:
prompt: The complete prompt string.
Returns:
The content string from the LLM response, expected to be JSON.
Raises:
ConnectionError: If the request fails due to network issues or timeouts.
ValueError: If the endpoint URL is not configured or the response is invalid.
requests.exceptions.RequestException: For other request-related errors.
"""
if not self.endpoint_url:
raise ValueError("LLM endpoint URL is not configured in settings.")
headers = {
"Content-Type": "application/json",
}
if self.api_key:
headers["Authorization"] = f"Bearer {self.api_key}"
# Construct payload based on OpenAI Chat Completions format
payload = {
# Use configured model name, default to 'local-model'
"model": self.llm_settings.get("llm_model_name", "local-model"),
"messages": [{"role": "user", "content": prompt}],
# Use configured temperature, default to 0.5
"temperature": self.llm_settings.get("llm_temperature", 0.5),
# Add max_tokens if needed/configurable:
# "max_tokens": self.llm_settings.get("llm_max_tokens", 1024),
# Ensure the LLM is instructed to return JSON in the prompt itself
# Some models/endpoints support a specific json mode:
# "response_format": { "type": "json_object" } # If supported by endpoint
}
self.status_update.emit(f"Sending request to LLM at {self.endpoint_url}...")
print(f"--- Calling LLM API: {self.endpoint_url} ---")
# print(f"--- Payload Preview ---\n{json.dumps(payload, indent=2)[:500]}...\n--- END Payload Preview ---")
try:
# Make the POST request with a timeout (e.g., 120 seconds for potentially long LLM responses)
response = requests.post(
self.endpoint_url,
headers=headers,
json=payload,
# Make the POST request with configured timeout, default to 120
timeout=self.llm_settings.get("llm_request_timeout", 120)
)
response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
except requests.exceptions.Timeout:
error_msg = f"LLM request timed out after {self.llm_settings.get('llm_request_timeout', 120)} seconds."
print(error_msg)
raise ConnectionError(error_msg)
except requests.exceptions.RequestException as e:
error_msg = f"LLM request failed: {e}"
print(error_msg)
# Attempt to get more detail from response if available
try:
if e.response is not None:
print(f"LLM Response Status Code: {e.response.status_code}")
print(f"LLM Response Text: {e.response.text[:500]}...") # Log partial response text
error_msg += f" (Status: {e.response.status_code})"
except Exception:
pass # Ignore errors during error reporting enhancement
raise ConnectionError(error_msg) # Raise a more generic error for the GUI
# Parse the JSON response
try:
response_data = response.json()
# print(f"--- LLM Raw Response ---\n{json.dumps(response_data, indent=2)}\n--- END Raw Response ---") # Debugging
# Extract content - structure depends on the API (OpenAI format assumed)
if "choices" in response_data and len(response_data["choices"]) > 0:
message = response_data["choices"][0].get("message", {})
content = message.get("content")
if content:
# The content itself should be the JSON string we asked for
print("--- LLM Response Content Extracted Successfully ---")
return content.strip()
else:
raise ValueError("LLM response missing 'content' in choices[0].message.")
else:
raise ValueError("LLM response missing 'choices' array or it's empty.")
except json.JSONDecodeError:
error_msg = f"Failed to decode LLM JSON response. Response text: {response.text[:500]}..."
print(error_msg)
raise ValueError(error_msg)
except Exception as e:
# Capture the potentially problematic response_data in the error message
response_data_str = "Not available"
try:
response_data_str = json.dumps(response_data) if 'response_data' in locals() else response.text[:500] + "..."
except Exception:
pass # Avoid errors during error reporting
error_msg = f"Error parsing LLM response structure: {e}. Response data: {response_data_str}"
print(error_msg)
raise ValueError(error_msg)
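# For reference, a minimal OpenAI-style chat completion response that the extraction above
# navigates (illustrative; only the fields read here are shown):
# {
#   "choices": [
#     {"message": {"role": "assistant", "content": "{ \"predicted_assets\": [ ... ] }"}}
#   ]
# }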
def _parse_llm_response(self, llm_response_json_str: str) -> List[SourceRule]:
"""
Parses the LLM's JSON response string into a list of SourceRule objects.
"""
# Strip potential markdown code fences before parsing
clean_json_str = llm_response_json_str.strip()
if clean_json_str.startswith("```json"):
clean_json_str = clean_json_str[7:] # Remove leading ```json marker (trailing whitespace/newline handled by strip below)
if clean_json_str.endswith("```"):
clean_json_str = clean_json_str[:-3] # Remove ```
clean_json_str = clean_json_str.strip() # Remove any extra whitespace
try:
response_data = json.loads(clean_json_str)
except json.JSONDecodeError as e:
# Log the full cleaned string that caused the error for better debugging
error_detail = f"Failed to decode LLM JSON response: {e}\nFull Cleaned Response:\n{clean_json_str}"
print(f"ERROR: {error_detail}") # Print full error detail to console
raise ValueError(error_detail) # Raise the error with full detail
if "predicted_assets" not in response_data or not isinstance(response_data["predicted_assets"], list):
raise ValueError("Invalid LLM response format: 'predicted_assets' key missing or not a list.")
source_rules = []
# We assume one SourceRule per input source processed by this handler instance
source_rule = SourceRule(input_path=self.input_path_str) # Use input_path_str
# Access valid types from the settings dictionary
valid_asset_types = list(self.llm_settings.get('asset_types', {}).keys())
valid_file_types = list(self.llm_settings.get('file_types', {}).keys())
for asset_data in response_data["predicted_assets"]:
if not isinstance(asset_data, dict):
print(f"Warning: Skipping invalid asset data (not a dict): {asset_data}")
continue
asset_name = asset_data.get("suggested_asset_name", "Unnamed_Asset")
asset_type = asset_data.get("predicted_asset_type")
if asset_type not in valid_asset_types:
print(f"Warning: Invalid predicted_asset_type '{asset_type}' for asset '{asset_name}'. Defaulting or skipping.")
# Decide handling: default to a generic type or skip? For now, skip.
continue # Or assign a default like 'Unknown' if defined
# --- MODIFIED LINES for AssetRule ---
# Create the AssetRule instance first
asset_rule = AssetRule(asset_name=asset_name, asset_type=asset_type)
source_rule.assets.append(asset_rule) # Append to the list
if "files" not in asset_data or not isinstance(asset_data["files"], list):
print(f"Warning: 'files' key missing or not a list in asset '{asset_name}'. Skipping files for this asset.")
continue
for file_data in asset_data["files"]:
if not isinstance(file_data, dict):
print(f"Warning: Skipping invalid file data (not a dict) in asset '{asset_name}': {file_data}")
continue
file_path_rel = file_data.get("file_path")
file_type = file_data.get("predicted_file_type")
if not file_path_rel:
print(f"Warning: Missing 'file_path' in file data for asset '{asset_name}'. Skipping file.")
continue
# Convert relative path from LLM (using '/') back to absolute OS-specific path
# Note: LLM gets relative paths, so we join with the handler's base input path
file_path_abs = os.path.join(self.input_path_str, file_path_rel.replace('/', os.sep)) # Use input_path_str
if file_type not in valid_file_types:
print(f"Warning: Invalid predicted_file_type '{file_type}' for file '{file_path_rel}'. Defaulting to EXTRA.")
file_type = "EXTRA" # Default to EXTRA if invalid type from LLM
# --- MODIFIED LINES for FileRule ---
# Create the FileRule instance first
file_rule = FileRule(file_path=file_path_abs, item_type=file_type) # Use correct field names
asset_rule.files.append(file_rule) # Append to the list
source_rules.append(source_rule)
return source_rules
# Example of how this might be used in MainWindow (conceptual)
# class MainWindow(QMainWindow):
# # ... other methods ...
# def _start_llm_prediction(self, directory_path):
# self.llm_thread = QThread()
# self.llm_handler = LLMPredictionHandler(directory_path, file_list, llm_settings)  # matches the new __init__ signature
# self.llm_handler.moveToThread(self.llm_thread)
#
# # Connect signals
# self.llm_handler.prediction_ready.connect(self._on_llm_prediction_ready)
# self.llm_handler.prediction_error.connect(self._on_llm_prediction_error)
# self.llm_handler.status_update.connect(self.statusBar().showMessage)
# self.llm_thread.started.connect(self.llm_handler.run)
# self.llm_thread.finished.connect(self.llm_thread.deleteLater)
# self.llm_handler.prediction_ready.connect(self.llm_thread.quit) # Quit thread on success
# self.llm_handler.prediction_error.connect(self.llm_thread.quit) # Quit thread on error
#
# self.llm_thread.start()
#
# @Slot(str, list)
# def _on_llm_prediction_ready(self, directory_path, results):
# print(f"LLM Prediction ready for {directory_path}: {len(results)} source rules found.")
# # Process results, update model, etc.
# # Make sure to clean up thread/handler references if needed
# self.llm_handler.deleteLater() # Schedule handler for deletion
#
# @Slot(str, str)
# def _on_llm_prediction_error(self, directory_path, error_message):
# print(f"LLM Prediction error for {directory_path}: {error_message}")
# # Show error to user, clean up thread/handler
# self.llm_handler.deleteLater()

View File

@@ -17,8 +17,8 @@ from PySide6.QtWidgets import (
QFormLayout, QGroupBox, QAbstractItemView, QSizePolicy, # Added more layout/widget items
QMenuBar, QMenu # Added for menu
)
from PySide6.QtCore import Qt, QThread, Slot, Signal, QObject, QModelIndex # Added Signal, QObject, QModelIndex
from PySide6.QtGui import QColor, QAction, QPalette # Add QColor import, QAction, QPalette
from PySide6.QtCore import Qt, QThread, Slot, Signal, QObject, QModelIndex, QItemSelectionModel, QPoint # Added Signal, QObject, QModelIndex, QItemSelectionModel, QPoint
from PySide6.QtGui import QColor, QAction, QPalette, QClipboard # Add QColor import, QAction, QPalette, QClipboard
# --- Backend Imports for Data Structures ---
from rule_structure import SourceRule, AssetRule, FileRule # Import Rule Structures
@@ -31,8 +31,9 @@ from rule_structure import SourceRule, AssetRule, FileRule # Import Rule Structu
from gui.unified_view_model import UnifiedViewModel # Import the new unified model
from gui.delegates import LineEditDelegate, ComboBoxDelegate, SupplierSearchDelegate # Import delegates
from gui.delegates import LineEditDelegate, ComboBoxDelegate # Import delegates
# --- Backend Imports ---
from .llm_prediction_handler import LLMPredictionHandler # Added for LLM integration
# --- Backend Imports ---
script_dir = Path(__file__).parent
project_root = script_dir.parent
if str(project_root) not in sys.path:
@@ -179,7 +180,13 @@ class MainWindow(QMainWindow):
self._source_file_lists = {} # Store {input_path: [file_list]} for context
# Removed: self.rule_hierarchy_model = RuleHierarchyModel()
# Removed: self._current_source_rule = None # The new model will hold the data
# Removed the problematic instantiation of Configuration without a preset.
# self.config_manager will be set when a specific preset is loaded,
# or LLM settings will be loaded directly via load_base_config().
self.config_manager = None # Initialize as None
# self.llm_reinterpret_queue = [] # Removed, using unified queue
self.llm_processing_queue = [] # Unified queue for initial adds and re-interpretations
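# Queue entries are (input_path_str, file_list) tuples; file_list is None for re-interpret
# requests and is extracted lazily in _start_llm_prediction.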
# --- Editor State ---
self.current_editing_preset_path = None
self.editor_unsaved_changes = False
@@ -190,6 +197,8 @@ class MainWindow(QMainWindow):
# self.processing_handler = None # REMOVED Obsolete Handler
self.prediction_thread = None
self.prediction_handler = None
self.llm_prediction_thread = None # Added for LLM
self.llm_prediction_handler = None # Added for LLM
self.setup_threads()
# --- Preview Area (Table) Setup --- REMOVED ---
@@ -442,6 +451,13 @@ class MainWindow(QMainWindow):
# Add the Unified View to the main layout
main_layout.addWidget(self.unified_view, 1) # Give it stretch factor 1
# Connect selection change signal for LLM button state
self.unified_view.selectionModel().selectionChanged.connect(self._update_llm_reinterpret_button_state)
# Enable custom context menu
self.unified_view.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)
self.unified_view.customContextMenuRequested.connect(self._show_unified_view_context_menu)
# --- REMOVED Old Hierarchy/Rule/Preview Splitter and Contents ---
# --- Progress Bar ---
@@ -531,6 +547,15 @@ class MainWindow(QMainWindow):
bottom_controls_layout.addWidget(self.workers_label)
bottom_controls_layout.addWidget(self.workers_spinbox)
bottom_controls_layout.addStretch(1)
# --- LLM Re-interpret Button ---
self.llm_reinterpret_button = QPushButton("Re-interpret Selected with LLM")
self.llm_reinterpret_button.setToolTip("Re-run LLM interpretation on the selected source items.")
self.llm_reinterpret_button.setEnabled(False) # Initially disabled
self.llm_reinterpret_button.clicked.connect(self._on_llm_reinterpret_clicked)
bottom_controls_layout.addWidget(self.llm_reinterpret_button)
# --- End LLM Button ---
self.clear_queue_button = QPushButton("Clear Queue") # Added Clear button
self.start_button = QPushButton("Start Processing")
self.cancel_button = QPushButton("Cancel")
@@ -565,6 +590,12 @@ class MainWindow(QMainWindow):
self.editor_preset_list.addItem(placeholder_item)
log.debug("Added '--- Select a Preset ---' placeholder item.")
# Add LLM Option
llm_item = QListWidgetItem("- LLM Interpretation -")
llm_item.setData(Qt.ItemDataRole.UserRole, "__LLM__") # Special identifier
self.editor_preset_list.addItem(llm_item)
log.debug("Added '- LLM Interpretation -' item.")
if not PRESETS_DIR.is_dir():
msg = f"Error: Presets directory not found at {PRESETS_DIR}"
self.statusBar().showMessage(msg)
@@ -680,11 +711,62 @@ class MainWindow(QMainWindow):
# --- Trigger prediction for newly added paths ---
current_editor_item = self.editor_preset_list.currentItem()
is_placeholder = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__"
selected_preset = current_editor_item.text() if current_editor_item and not is_placeholder else None
is_llm = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__LLM__"
selected_preset_text = current_editor_item.text() if current_editor_item and not is_placeholder else None
if selected_preset:
log.info(f"Preset '{selected_preset}' selected. Triggering prediction for {len(newly_added_paths)} new paths.")
# Ensure the prediction thread is running before emitting signals
if is_llm:
# --- LLM Prediction Path ---
log.info(f"LLM Interpretation selected. Queueing LLM prediction for {len(newly_added_paths)} new paths.")
llm_requests_added = 0
for input_path_str in newly_added_paths:
file_list = self._extract_file_list(input_path_str)
if file_list is not None: # Check if extraction was successful
log.info(f"Extracted {len(file_list)} files for LLM prediction from: {input_path_str}")
# Store file list and mark as pending before adding to queue
self._source_file_lists[input_path_str] = file_list
self._pending_predictions.add(input_path_str) # Use the same pending set for now
# --- Queue the initial LLM request ---
# --- Queue the unified LLM request ---
self.llm_processing_queue.append((input_path_str, file_list))
log.debug(f"Queued LLM request for '{input_path_str}'. Queue size: {len(self.llm_processing_queue)}")
llm_requests_added += 1
# --- End Queue ---
else:
log.warning(f"Skipping LLM prediction queuing for {input_path_str} due to extraction error.")
# --- Trigger queue processing *after* the loop ---
if llm_requests_added > 0:
# Check if an LLM thread is already running (from re-interpret or previous add)
is_llm_running = False
try:
if self.llm_prediction_thread is not None:
is_llm_running = self.llm_prediction_thread.isRunning()
except RuntimeError:
is_llm_running = False # Treat as not running if deleted
if not is_llm_running:
log.info("No LLM thread running. Starting unified queue processing.")
self._process_next_llm_item() # Start processing the unified queue
else:
log.info("LLM thread already running. Queue will be processed when current task finishes.")
# --- Trigger queue processing *after* the loop ---
if llm_requests_added > 0:
# Check if an LLM thread is already running (from re-interpret or previous add)
is_llm_running = False
try:
if self.llm_prediction_thread is not None:
is_llm_running = self.llm_prediction_thread.isRunning()
except RuntimeError:
is_llm_running = False # Treat as not running if deleted
if not is_llm_running:
log.info("No LLM thread running. Starting initial add queue processing.")
self._process_next_llm_initial_add() # Start processing the queue
else:
log.info("LLM thread already running. Queue will be processed when current task finishes.")
elif selected_preset_text:
# --- Existing Rule-Based Prediction Path ---
log.info(f"Preset '{selected_preset_text}' selected. Triggering prediction for {len(newly_added_paths)} new paths.")
# Ensure the prediction thread is running before emitting signals
if self.prediction_thread and not self.prediction_thread.isRunning():
log.debug("Starting prediction thread from add_input_paths.")
self.prediction_thread.start()
@@ -697,12 +779,12 @@ class MainWindow(QMainWindow):
self._source_file_lists[input_path_str] = file_list
self._pending_predictions.add(input_path_str)
log.debug(f"Added '{input_path_str}' to pending predictions. Current pending: {self._pending_predictions}")
self.start_prediction_signal.emit(input_path_str, file_list, selected_preset)
self.start_prediction_signal.emit(input_path_str, file_list, selected_preset_text)
else:
log.warning(f"Skipping prediction for {input_path_str} due to extraction error.")
else:
log.warning(f"Added {added_count} asset(s), but no valid preset selected. Prediction not triggered.")
self.statusBar().showMessage(f"Added {added_count} asset(s). Select a preset to generate preview.", 3000)
self.statusBar().showMessage(f"Added {added_count} asset(s). Select a preset or LLM to generate preview.", 3000)
# --- REMOVED call to self.update_preview() ---
# The preview update is now triggered per-item via the signal emission above,
@@ -902,63 +984,107 @@ class MainWindow(QMainWindow):
# Get preset from editor list
current_editor_item = self.editor_preset_list.currentItem()
# Check if the selected item is the placeholder
# Check if the selected item is the placeholder or LLM
is_placeholder = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__"
is_llm = current_editor_item and current_editor_item.data(Qt.ItemDataRole.UserRole) == "__LLM__"
if is_placeholder:
log.debug("Update preview called with placeholder preset selected. Clearing unified view.")
self.unified_model.clear_data() # Clear the new model if placeholder selected
self.statusBar().showMessage("Select a preset from the list on the left to update preview.", 3000)
# No placeholder label to manage for unified view
return # Stop prediction as no valid preset is selected
# Existing logic to get selected_preset text and proceed
selected_preset = current_editor_item.text() if current_editor_item else None
if not selected_preset:
log.debug("Update preview called with no preset selected in the editor list.")
self.unified_model.clear_data() # Clear the new model if no preset selected
self.statusBar().showMessage("Select a preset from the list on the left to update preview.", 3000)
return
# Get asset paths
if not hasattr(self, 'current_asset_paths') or not self.current_asset_paths:
log.debug("Update preview called with no assets tracked.")
self.unified_model.clear_data() # Clear the new model if no assets
return
input_paths = list(self.current_asset_paths)
# --- Handle LLM Mode ---
if is_llm:
log.info(f"[{time.time():.4f}] LLM mode selected. Triggering LLM prediction for {len(input_paths)} assets.")
self.statusBar().showMessage(f"Starting LLM interpretation for assets...", 0)
# --- Reset Accumulation State (might not be strictly needed for LLM, but good practice) ---
log.debug("Clearing accumulated rules and pending predictions for LLM batch.")
self._accumulated_rules.clear()
self._pending_predictions.clear() # Clear pending standard predictions
# --- Queue all current assets for LLM processing ---
llm_requests_added = 0
if input_paths:
log.info(f"Queueing LLM prediction for {len(input_paths)} existing assets.")
for input_path_str in input_paths:
# Check if already in queue to avoid duplicates if user clicks quickly
is_in_queue = any(item[0] == input_path_str for item in self.llm_processing_queue)
if is_in_queue:
log.debug(f"Skipping duplicate add to LLM queue for existing asset: {input_path_str}")
continue
file_list = self._extract_file_list(input_path_str)
if file_list is not None:
log.debug(f"Extracted {len(file_list)} files for LLM prediction from existing asset: {input_path_str}")
# Store file list and mark as pending before adding to queue
self._source_file_lists[input_path_str] = file_list
# self._pending_predictions.add(input_path_str) # Pending is handled by the queue process itself now
self.llm_processing_queue.append((input_path_str, file_list))
log.debug(f"Queued LLM request for existing asset '{input_path_str}'. Queue size: {len(self.llm_processing_queue)}")
llm_requests_added += 1
else:
log.warning(f"Skipping LLM prediction queuing for existing asset {input_path_str} due to extraction error.")
else:
log.warning("LLM selected, but no input paths currently in view to process.")
self.statusBar().showMessage("LLM selected, but no assets are loaded.", 3000)
# --- Trigger queue processing if items were added and it's not running ---
if llm_requests_added > 0:
is_llm_running = False
try:
if self.llm_prediction_thread is not None:
is_llm_running = self.llm_prediction_thread.isRunning()
except RuntimeError:
is_llm_running = False # Treat as not running if deleted
if not is_llm_running:
log.info("LLM thread not running. Starting unified queue processing from update_preview.")
self._process_next_llm_item() # Start processing the unified queue
else:
log.info("LLM thread already running. Queue will be processed when current task finishes.")
# --- End Trigger ---
# Do not return here; let the function exit normally after handling LLM case.
# The standard prediction path below will be skipped because is_llm is True.
# --- Handle Standard Preset Mode ---
selected_preset = current_editor_item.text() if current_editor_item else None
if not selected_preset: # Should not happen if placeholder/LLM checks passed, but safety check
log.error("Update preview called with invalid state (no preset, not placeholder, not LLM).")
self.unified_model.clear_data()
return
log.info(f"[{time.time():.4f}] Requesting background preview update for {len(input_paths)} items using Preset='{selected_preset}'")
self.statusBar().showMessage(f"Updating preview for '{selected_preset}'...", 0)
# --- Reset Accumulation State for this batch ---
log.debug("Clearing accumulated rules for new preview batch.")
# --- Reset Accumulation State for standard prediction batch ---
log.debug("Clearing accumulated rules for new standard preview batch.")
self._accumulated_rules.clear()
# Reset pending predictions to only include paths in this update request
self._pending_predictions = set(input_paths)
log.debug(f"Reset pending predictions for batch: {self._pending_predictions}")
# Keep _source_file_lists, it might contain lists for paths already processed
self._pending_predictions = set(input_paths) # Reset pending standard predictions
log.debug(f"Reset pending standard predictions for batch: {self._pending_predictions}")
# Clearing is handled by model's set_data now, no need to clear table view directly
# Trigger standard prediction handler
if self.prediction_thread and self.prediction_handler:
# Commented-out code moved to Deprecated/Old-Code/gui_main_window_py_old_placeholder_sourcerule_creation_line_922.py
# Start the prediction thread
# The thread should already be running or started once. Don't restart it here.
# log.debug(f"[{time.time():.4f}] Starting prediction thread...")
self.prediction_thread.start() # Ensure thread is running
# log.debug(f"[{time.time():.4f}] Prediction thread start requested.")
# Iterate through all current paths, extract files, and emit signal for each
log.debug(f"[{time.time():.4f}] Iterating through {len(input_paths)} paths to extract files and emit signals.")
log.debug(f"[{time.time():.4f}] Iterating through {len(input_paths)} paths to extract files and emit standard prediction signals.")
for input_path_str in input_paths:
file_list = self._extract_file_list(input_path_str)
if file_list is not None: # Check if extraction was successful
if file_list is not None:
log.debug(f"[{time.time():.4f}] Emitting start_prediction_signal for: {input_path_str} with {len(file_list)} files.")
self.start_prediction_signal.emit(input_path_str, file_list, selected_preset)
else:
log.warning(f"[{time.time():.4f}] Skipping prediction signal for {input_path_str} due to extraction error during preview update.")
log.warning(f"[{time.time():.4f}] Skipping standard prediction signal for {input_path_str} due to extraction error.")
else:
log.error(f"[{time.time():.4f}][T:{thread_id}] Failed to trigger prediction: Thread or handler not initialized.")
self.statusBar().showMessage("Error: Failed to initialize prediction thread.", 5000)
log.error(f"[{time.time():.4f}][T:{thread_id}] Failed to trigger standard prediction: Thread or handler not initialized.")
self.statusBar().showMessage("Error: Failed to initialize standard prediction thread.", 5000)
log.info(f"[{time.time():.4f}][T:{thread_id}] <-- Exiting update_preview.")
@@ -993,6 +1119,20 @@ class MainWindow(QMainWindow):
# self.prediction_thread = None # Keep references alive
# self.prediction_handler = None # Keep references alive
@Slot()
def _reset_llm_thread_references(self):
"""Resets LLM thread and handler references after the thread finishes."""
log.debug("--> Entered _reset_llm_thread_references")
log.debug("Resetting LLM prediction thread and handler references.")
self.llm_prediction_thread = None
self.llm_prediction_handler = None
# Update button state now that thread is confirmed finished
log.debug("Calling _update_llm_reinterpret_button_state...")
self._update_llm_reinterpret_button_state()
# --- Process next item now that the previous thread is fully finished ---
log.debug("Previous LLM thread finished. Triggering processing for next item by calling _process_next_llm_item...")
self._process_next_llm_item()
log.debug("<-- Exiting _reset_llm_thread_references")
@Slot(int, int)
def update_progress_bar(self, current_count, total_count):
if total_count > 0:
@@ -1018,11 +1158,11 @@ class MainWindow(QMainWindow):
self._pending_predictions.discard(input_path)
# Check if this was the last pending item after an error
if not self._pending_predictions:
log.info("Prediction finished, and no more predictions are pending (potentially due to error). Finalizing model update.")
self._finalize_model_update()
log.info("Prediction finished, and no more predictions are pending (potentially due to error).")
# self._finalize_model_update() # Removed call to obsolete method
else:
# Update status about remaining items
completed_count = len(self._accumulated_rules)
completed_count = len(self._accumulated_rules) # Note: _accumulated_rules might not be accurate if prediction failed
pending_count = len(self._pending_predictions)
# total_count = completed_count + pending_count # This might be slightly off if some failed without rules
# We don't have the total count of *requested* predictions here easily,
@@ -1110,6 +1250,105 @@ class MainWindow(QMainWindow):
def _browse_for_materials_blend(self):
self._browse_for_blend_file(self.materials_blend_path_input)
def _start_llm_prediction(self, input_path_str: str, file_list: list = None):
"""Starts the LLM prediction process in a separate thread.
If file_list is not provided, it will be extracted.
"""
# Extract file list if not provided (needed for re-interpretation calls)
if file_list is None:
log.debug(f"File list not provided for {input_path_str}, extracting...")
file_list = self._extract_file_list(input_path_str)
if file_list is None:
log.error(f"Failed to extract file list for {input_path_str} in _start_llm_prediction.")
self.statusBar().showMessage(f"Error extracting files for {os.path.basename(input_path_str)}", 5000)
# If called as part of a queue, we need to process the next item
self._process_next_llm_item() # Continue with the next item in the unified queue
return
# Input path validation is now done before calling this function
input_path_obj = Path(input_path_str) # Still needed for basename
if not file_list:
self.statusBar().showMessage(f"LLM Error: No files extracted for {input_path_str}", 5000)
log.error(f"LLM Error: Received empty file list for {input_path_str}")
# Ensure path is removed from pending if we error out here
self._pending_predictions.discard(input_path_str)
log.debug(f"Removed '{input_path_str}' from pending predictions due to empty file list.")
return
# --- Load Base Config for LLM Settings ---
if load_base_config is None: # Check if function was imported successfully
log.critical("LLM Error: load_base_config function not available.")
self.statusBar().showMessage("LLM Error: Cannot load base configuration.", 5000)
return
try:
base_config = load_base_config()
if not base_config:
raise ConfigurationError("Failed to load base configuration (app_settings.json).")
# Extract necessary LLM settings
llm_settings = {
"llm_endpoint_url": base_config.get('llm_endpoint_url'), # Add the endpoint URL
"api_key": base_config.get('llm_api_key'),
"model_name": base_config.get('llm_model_name', 'gemini-pro'),
"prompt_template_content": base_config.get('llm_predictor_prompt'), # Get the prompt content directly
"asset_types": base_config.get('ASSET_TYPE_DEFINITIONS', {}),
"file_types": base_config.get('FILE_TYPE_DEFINITIONS', {}),
"examples": base_config.get('llm_predictor_examples', [])
}
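# Illustrative sketch of the app_settings.json keys this block assumes (values are placeholders):
# {
#   "llm_endpoint_url": "http://localhost:1234/v1/chat/completions",
#   "llm_api_key": "",
#   "llm_model_name": "local-model",
#   "llm_predictor_prompt": "<full prompt template text>",
#   "ASSET_TYPE_DEFINITIONS": {"SURFACE": "..."},
#   "FILE_TYPE_DEFINITIONS": {"COLOR": "..."},
#   "llm_predictor_examples": []
# }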
# Validate essential settings
# Removed check for empty API key to support local LLMs without keys
except ConfigurationError as e:
log.error(f"LLM Configuration Error: {e}")
self.statusBar().showMessage(f"LLM Config Error: {e}", 5000)
QMessageBox.warning(self, "LLM Configuration Error", f"Could not load necessary LLM settings from app_settings.json:\n\n{e}")
return
except Exception as e: # Catch other potential errors during loading
log.exception(f"Unexpected error loading LLM configuration: {e}")
self.statusBar().showMessage(f"LLM Config Error: {e}", 5000)
QMessageBox.critical(self, "LLM Configuration Error", f"An unexpected error occurred while loading LLM settings:\n\n{e}")
return
# --- End Config Loading ---
# Clean up previous thread/handler if any are still running (basic cleanup)
if self.llm_prediction_thread and self.llm_prediction_thread.isRunning():
log.warning("Warning: Previous LLM prediction thread still running. Attempting to cancel/quit.")
# Add more robust cleanup if needed (e.g., wait loop, force quit)
if self.llm_prediction_handler:
self.llm_prediction_handler.cancel() # Request cancellation
self.llm_prediction_thread.quit()
# self.llm_prediction_thread.wait(1000) # Optional wait
log.info(f"Starting LLM prediction for source: {input_path_str} with {len(file_list)} files.")
self.statusBar().showMessage(f"Starting LLM interpretation for {os.path.basename(input_path_str)}...")
self.llm_prediction_thread = QThread(self)
# Pass the input path (for context), the file list, and settings to the handler
self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, llm_settings) # Pass input_path_str, file_list, settings
self.llm_prediction_handler.moveToThread(self.llm_prediction_thread)
# Connect signals from handler to slots in MainWindow
self.llm_prediction_handler.prediction_ready.connect(self._on_llm_prediction_ready)
self.llm_prediction_handler.prediction_error.connect(self._on_llm_prediction_error)
self.llm_prediction_handler.status_update.connect(self.statusBar().showMessage) # Connect status updates
# Connect thread signals
self.llm_prediction_thread.started.connect(self.llm_prediction_handler.run)
# Clean up thread and handler when finished
# --- Connect thread finished signal to cleanup slot ---
self.llm_prediction_thread.finished.connect(self._reset_llm_thread_references)
# --- End Connect ---
self.llm_prediction_thread.finished.connect(self.llm_prediction_handler.deleteLater)
self.llm_prediction_thread.finished.connect(self.llm_prediction_thread.deleteLater)
# Also ensure thread quits when handler signals completion/error
self.llm_prediction_handler.prediction_ready.connect(self.llm_prediction_thread.quit)
self.llm_prediction_handler.prediction_error.connect(self.llm_prediction_thread.quit)
# UI disabling is now handled by the calling function (_on_llm_reinterpret_clicked)
# when the queue processing starts.
self.llm_prediction_thread.start()
# --- Preset Editor Methods (Adapted from PresetEditorDialog) ---
def _editor_add_list_item(self, list_widget: QListWidget):
@@ -1295,8 +1534,9 @@ class MainWindow(QMainWindow):
"""Loads the preset currently selected in the editor list."""
log.debug(f"currentItemChanged signal triggered. current_item: {current_item.text() if current_item else 'None'}, previous_item: {previous_item.text() if previous_item else 'None'}")
# Check if the selected item is the placeholder
# Check if the selected item is the placeholder or LLM
is_placeholder = current_item and current_item.data(Qt.ItemDataRole.UserRole) == "__PLACEHOLDER__"
is_llm = current_item and current_item.data(Qt.ItemDataRole.UserRole) == "__LLM__" # Added check
if self._check_editor_unsaved_changes():
# If user cancels, revert selection
@@ -1315,15 +1555,31 @@ class MainWindow(QMainWindow):
self.start_button.setEnabled(False) # Disable start button
return # Stop processing as no real preset is selected
if is_llm: # Added block
log.debug("LLM Interpretation item selected. Clearing editor and triggering preview.")
self._clear_editor() # Clear editor fields
self._set_editor_enabled(False) # Disable editor fields for LLM mode
self.start_button.setEnabled(False) # Disable start processing button for LLM mode
self._update_llm_reinterpret_button_state() # Update re-interpret button state
self.update_preview() # Trigger preview update (which will handle LLM if assets exist)
return # Stop processing here, don't load as preset
# Existing logic for handling real preset items starts here
if current_item:
if current_item: # This will now only run for actual preset files
log.debug(f"Loading preset for editing: {current_item.text()}")
preset_path = current_item.data(Qt.ItemDataRole.UserRole)
self._load_preset_for_editing(preset_path)
self.start_button.setEnabled(True) # Enable start button
# --- Trigger preview update after loading editor ---
self.update_preview()
# --- End Trigger ---
# Ensure preset_path is actually a Path object before calling _load_preset_for_editing
if isinstance(preset_path, Path):
self._load_preset_for_editing(preset_path)
self.start_button.setEnabled(True) # Enable start button for presets
self._update_llm_reinterpret_button_state() # Update re-interpret button state
# --- Trigger preview update after loading editor ---
self.update_preview()
# --- End Trigger ---
else:
log.error(f"Invalid data type for preset path: {type(preset_path)}. Expected Path object. Clearing editor.")
self._clear_editor()
self.start_button.setEnabled(False)
# No placeholder/table view visibility to manage
else:
@@ -1737,19 +1993,425 @@ class MainWindow(QMainWindow):
self.statusBar().showMessage(status_msg, 5000)
log.debug(status_msg)
@Slot(str, list)
def _on_llm_prediction_ready(self, directory_path, source_rules):
"""Handles the successful LLM prediction result and processes the next item in the queue."""
log.info(f"Received LLM prediction for {directory_path}. {len(source_rules)} source rule(s) found.")
self.statusBar().showMessage(f"LLM interpretation complete for {os.path.basename(directory_path)}.", 5000)
# --- Update the model ---
if source_rules:
try:
# Assuming the model has a method like this:
# It should intelligently update/replace rules only for the sources
# contained within the source_rules list (which should correspond
# to the directory_path processed by the handler).
log.info(f"Updating model with rules for sources: {[rule.input_path for rule in source_rules]}") # Corrected source_path to input_path for logging
# --- DIAGNOSTIC LOGGING ---
log.debug(f"DIAGNOSTIC: Type of self.unified_model: {type(self.unified_model)}")
log.debug(f"DIAGNOSTIC: hasattr(self.unified_model, 'update_rules_for_sources'): {hasattr(self.unified_model, 'update_rules_for_sources')}")
# --- END DIAGNOSTIC ---
# Ensure the model method exists and handles the update correctly.
# This might involve finding existing rules for the source_path and replacing them,
# or adding new ones if they don't exist.
self.unified_model.update_rules_for_sources(source_rules)
log.info("Model update call successful.")
# --- Expand items after model update ---
self.unified_view.expandToDepth(1) # Expand Source -> Asset level
# --- End Expand ---
except AttributeError as e: # Capture the exception object
# Log the specific attribute error message
error_msg = f"AttributeError: {e}. Attempted to call 'update_rules_for_sources' on object of type {type(self.unified_model)}."
log.error(error_msg)
self.statusBar().showMessage(error_msg, 8000)
# Consider showing a QMessageBox critical error here
except Exception as e:
error_msg = f"Error updating model with LLM results: {e}"
log.exception(error_msg) # Use log.exception to include traceback
self.statusBar().showMessage(error_msg, 8000)
# Consider showing a QMessageBox critical error here
else:
log.info(f"No source rules returned by LLM for {directory_path}. Model not updated.")
# UI re-enabling is handled by _process_next_llm_item when the queue is empty.
# Clean up references (optional, as deleteLater is connected to finished)
# self.llm_prediction_handler = None # Keep references until queue is done? No, handler/thread are per-item.
# self.llm_prediction_thread = None
# --- Process next item in queue (MOVED TO _reset_llm_thread_references) ---
# self._process_next_llm_item() # Ensure this calls the correct unified method
# Explicitly update button state after successful prediction (handled in _reset_llm_thread_references now)
# self._update_llm_reinterpret_button_state() # Moved to _reset_llm_thread_references
@Slot(str, str)
def _on_llm_prediction_error(self, directory_path, error_message):
"""Handles errors reported by the LLM prediction handler."""
log.debug(f"--> Entered _on_llm_prediction_error for: {directory_path}")
log.error(f"LLM Prediction Error for {directory_path}: {error_message}")
# Simplify status bar message
simple_error_msg = f"LLM Error ({os.path.basename(directory_path)}): Request failed (see log)."
self.statusBar().showMessage(simple_error_msg, 8000)
# Optionally show a QMessageBox to the user
# QMessageBox.critical(self, "LLM Prediction Error", f"Failed to get LLM prediction for {directory_path}:\n{error_message}")
log.debug(f"<-- Exiting _on_llm_prediction_error for: {directory_path}")
# UI re-enabling is handled by _process_next_llm_item when the queue is empty.
# Clean up references (optional, as deleteLater is connected to finished)
# self.llm_prediction_handler = None
# self.llm_prediction_thread = None
# --- Process next item in queue ---
# Even on error, try to process the next directory in the queue (MOVED TO _reset_llm_thread_references)
# self._process_next_llm_item() # Ensure this calls the correct unified method
# Explicitly update button state after prediction error (handled in _reset_llm_thread_references now)
# self._update_llm_reinterpret_button_state() # Moved to _reset_llm_thread_references
# REMOVED _finalize_model_update method as it's no longer needed
# def _finalize_model_update(self):
# """Combines accumulated rules and updates the UI model and view."""
# ... (old code removed) ...
# --- Slots for LLM Re-interpretation ---
@Slot()
def _update_llm_reinterpret_button_state(self):
"""Enables/disables the LLM re-interpret button based on selection in the unified view."""
if hasattr(self, 'llm_reinterpret_button') and hasattr(self, 'unified_view'):
# Check if the selection model exists and has a selection
selection_model = self.unified_view.selectionModel()
has_selection = selection_model is not None and selection_model.hasSelection()
# Also check if LLM processing is currently running (safely)
is_llm_running = False
try:
# Check if thread exists and hasn't been deleted yet before calling isRunning
if self.llm_prediction_thread is not None:
is_llm_running = self.llm_prediction_thread.isRunning()
except RuntimeError:
# Handle the case where the C++ object is deleted between checks
log.debug("_update_llm_reinterpret_button_state: Caught RuntimeError checking isRunning (thread likely deleted).")
is_llm_running = False # Treat as not running if deleted
# Enable only if there's a selection AND LLM is not currently running
self.llm_reinterpret_button.setEnabled(has_selection and not is_llm_running)
elif hasattr(self, 'llm_reinterpret_button'):
# Ensure button is disabled if view/model isn't ready
self.llm_reinterpret_button.setEnabled(False)
@Slot()
def _on_llm_reinterpret_clicked(self):
"""Handles the click of the 'Re-interpret Selected with LLM' button."""
selected_indexes = self.unified_view.selectionModel().selectedIndexes()
if not selected_indexes:
return
if self.llm_prediction_thread and self.llm_prediction_thread.isRunning():
QMessageBox.warning(self, "Busy", "LLM prediction is already in progress. Please wait.")
return
unique_source_dirs = set()
try:
# --- Get unique source directories for selected items ---
log.debug(f"Finding unique source directories from {len(selected_indexes)} selected indexes for LLM re-interpretation.")
processed_source_paths = set() # Track processed source paths to avoid duplicates
for index in selected_indexes:
if not index.isValid(): continue
# Get the node associated with the index
item_node = index.internalPointer() # Use internalPointer() for tree models
if not item_node: continue # Skip if node is invalid
# Traverse up to find the SourceRule node
current_node = item_node
source_node = None
while current_node is not None:
if isinstance(current_node, SourceRule):
source_node = current_node
break
# Traverse using parent attributes (adjust if model structure differs)
if hasattr(current_node, 'parent_asset'):
current_node = getattr(current_node, 'parent_asset', None)
if hasattr(current_node, 'parent_source'):
current_node = getattr(current_node, 'parent_source', None)
else: # Should not happen if structure is consistent
current_node = None
elif hasattr(current_node, 'parent_source'):
current_node = getattr(current_node, 'parent_source', None)
else: # Reached top or unexpected node type
current_node = None
if source_node and hasattr(source_node, 'input_path') and source_node.input_path:
source_path_str = source_node.input_path
# Check if this source path has already been processed for this selection
if source_path_str in processed_source_paths:
continue # Skip if already added
# Ensure it's a directory path suitable for processing (or zip)
source_path_obj = Path(source_path_str)
if source_path_obj.is_dir() or (source_path_obj.is_file() and source_path_obj.suffix.lower() == '.zip'):
unique_source_dirs.add(source_path_str)
processed_source_paths.add(source_path_str) # Mark this source path as processed
else:
# Handle archives if needed, or just log/ignore
log.warning(f"Skipping non-directory/zip source for re-interpretation: {source_path_str}")
else:
log.warning(f"Could not determine valid SourceRule or input_path for selected index: {index.row()},{index.column()} (Item type: {type(item_node).__name__})")
except Exception as e:
log.exception(f"Error getting source directories for LLM re-interpretation: {e}")
QMessageBox.warning(self, "Error", f"Could not determine source directories for selected items: {e}")
return
if not unique_source_dirs:
self.statusBar().showMessage("No valid source directories found for selected items.", 5000)
return
# --- Queue directories and start processing ---
# Add directories to the unified queue, checking for duplicates
items_added_to_queue = 0
for source_dir in unique_source_dirs:
# Check if the source_dir is already in the queue (avoids duplicate processing requests)
# Note: This checks only the path, assuming file_list is None for re-interpret requests
is_in_queue = any(item[0] == source_dir for item in self.llm_processing_queue)
if not is_in_queue:
# Re-interpretation needs to extract file list again, so pass None for file_list
self.llm_processing_queue.append((source_dir, None))
items_added_to_queue += 1
else:
log.debug(f"Skipping duplicate add to LLM queue for: {source_dir}")
if items_added_to_queue > 0:
log.info(f"Added {items_added_to_queue} unique directories to LLM processing queue. Queue size: {len(self.llm_processing_queue)}")
else:
log.info(f"No new unique directories added to LLM queue (already present or none selected). Queue size: {len(self.llm_processing_queue)}")
# Start processing if not already running
is_llm_running = False
try:
# Safely check if thread exists and is running
if self.llm_prediction_thread is not None:
is_llm_running = self.llm_prediction_thread.isRunning()
except RuntimeError:
log.debug("RuntimeError checking llm_prediction_thread.isRunning() in _on_llm_reinterpret_clicked (likely deleted).")
is_llm_running = False
if not is_llm_running:
if self.llm_processing_queue: # Only start if queue is not empty
log.info("LLM thread not running. Starting unified queue processing.")
# --- Disable UI ---
self.llm_reinterpret_button.setEnabled(False)
self.editor_preset_list.setEnabled(False) # Keep preset list disabled
# --- End Disable ---
self._process_next_llm_item() # Start processing the first item
else:
log.info("LLM thread not running, but queue is empty. Nothing to start.")
else:
log.info(f"LLM thread already running. Added {items_added_to_queue} directories to queue.")
if items_added_to_queue > 0:
self.statusBar().showMessage(f"Added {items_added_to_queue} directories to running LLM queue.", 3000)
def _process_next_llm_item(self):
"""Processes the next directory in the unified LLM processing queue."""
log.debug(f"--> Entered _process_next_llm_item. Queue size: {len(self.llm_processing_queue)}")
if not self.llm_processing_queue:
log.info("LLM processing queue is empty. Finishing.")
self.statusBar().showMessage("LLM processing complete.", 5000)
# --- Re-enable UI ---
log.debug("Re-enabling UI controls.")
self._update_llm_reinterpret_button_state() # Update based on selection/state
self.editor_preset_list.setEnabled(True) # Re-enable preset list
# --- End Re-enable ---
log.debug("<-- Exiting _process_next_llm_item (queue empty)")
return
# Check if already running - crucial for unified queue
is_llm_running = False
try:
# Safely check if thread exists and is running
if self.llm_prediction_thread is not None:
is_llm_running = self.llm_prediction_thread.isRunning()
except RuntimeError:
log.debug("RuntimeError checking llm_prediction_thread.isRunning() in _process_next_llm_item (likely deleted).")
is_llm_running = False
if is_llm_running:
log.info("LLM processing already running. Waiting for current item to finish.")
# Do not pop from queue if already running, wait for _on_llm_prediction_ready/error to call this again
return
# Ensure UI is disabled while processing starts/continues
# (Might be redundant if called correctly, but good safety)
self.llm_reinterpret_button.setEnabled(False)
self.editor_preset_list.setEnabled(False)
# Get next item *without* removing it yet, in case _start_llm_prediction fails immediately
if not self.llm_processing_queue: # Double check queue isn't empty after potential wait
log.warning("_process_next_llm_item: Queue became empty unexpectedly.")
# Re-enable UI just in case
self._update_llm_reinterpret_button_state()
self.editor_preset_list.setEnabled(True)
return
next_item = self.llm_processing_queue[0] # Peek at the first item
next_dir, file_list = next_item # Unpack the tuple (file_list might be None)
# --- Calculate approximate progress ---
total_in_queue_now = len(self.llm_processing_queue)
status_msg = f"LLM Processing {os.path.basename(next_dir)} (Approx. {total_in_queue_now} remaining)..."
self.statusBar().showMessage(status_msg)
log.info(status_msg)
# --- Start Prediction (which might fail) ---
try:
# Pass the potentially None file_list. _start_llm_prediction handles extraction if None.
self._start_llm_prediction(next_dir, file_list=file_list)
# --- Pop item *after* successfully starting prediction ---
self.llm_processing_queue.pop(0)
log.debug(f"Successfully started LLM prediction for {next_dir} and removed from queue.")
except Exception as e:
log.exception(f"Error occurred *during* _start_llm_prediction call for {next_dir}: {e}")
self.statusBar().showMessage(f"Error starting LLM for {os.path.basename(next_dir)}: {e}", 8000)
# --- Remove the failed item from the queue ---
try:
failed_item = self.llm_processing_queue.pop(0)
log.warning(f"Removed failed item {failed_item} from LLM queue.")
except IndexError:
log.error("Attempted to pop failed item from already empty LLM queue.")
# --- Attempt to process the *next* item ---
# Use QTimer.singleShot to avoid deep recursion if many items fail quickly
from PySide6.QtCore import QTimer
QTimer.singleShot(100, self._process_next_llm_item) # Try next item after a short delay
# --- Context Menu for Unified View ---
@Slot(QPoint)
def _show_unified_view_context_menu(self, point: QPoint):
"""Shows the context menu for the unified view."""
index = self.unified_view.indexAt(point)
if not index.isValid():
return # Clicked on empty area
# Determine the type of item clicked (Source, Asset, File)
item_node = index.internalPointer()
is_source_item = isinstance(item_node, SourceRule)
menu = QMenu(self)
# --- Add "Copy Source Files" action only for SourceRule items ---
if is_source_item:
# Renamed action
copy_llm_example_action = QAction("Copy LLM Example to Clipboard", self)
copy_llm_example_action.setToolTip("Copies a JSON structure representing the input files and predicted output, suitable for LLM examples.")
# Pass the index to the slot using functools.partial or a lambda
copy_llm_example_action.triggered.connect(lambda: self._copy_llm_example_to_clipboard(index)) # Renamed slot
menu.addAction(copy_llm_example_action)
menu.addSeparator() # Add separator if other actions might be added
# --- Add other potential actions here based on item_node type ---
# Example:
# if isinstance(item_node, AssetRule):
# asset_action = QAction("Asset Action...", self)
# menu.addAction(asset_action)
# Show the menu if any actions were added
if not menu.isEmpty():
menu.exec(self.unified_view.viewport().mapToGlobal(point))
@Slot(QModelIndex)
def _copy_llm_example_to_clipboard(self, index: QModelIndex):
"""Copies a JSON structure for the selected source item to the clipboard,
matching the LLM predictor example format."""
if not index.isValid():
log.warning("Copy LLM example called with invalid index.")
return
item_node = index.internalPointer()
if not isinstance(item_node, SourceRule):
log.warning(f"Copy LLM example called on non-SourceRule item: {type(item_node)}")
self.statusBar().showMessage("Please right-click directly on the Source item.", 3000)
return
source_rule: SourceRule = item_node
log.info(f"Attempting to generate LLM example JSON for source: {source_rule.input_path}")
all_file_paths = []
predicted_assets_data = []
# Iterate through assets and files to gather data
for asset_rule in source_rule.assets:
asset_files_data = []
for file_rule in asset_rule.files:
if file_rule.file_path:
# Add to the overall list for the "input" field
all_file_paths.append(file_rule.file_path)
# Add to the specific asset's file list for the "output" field
asset_files_data.append({
"file_path": file_rule.file_path,
# Use item_type as the predicted file type
"predicted_file_type": file_rule.item_type or "UNKNOWN" # Use UNKNOWN if None
})
# Sort files within the asset for consistency
asset_files_data.sort(key=lambda x: x['file_path'])
# Add the asset data to the list
predicted_assets_data.append({
# Use asset_name as the suggested name
"suggested_asset_name": asset_rule.asset_name or "UnnamedAsset", # Use default if None
# Use asset_type as the predicted asset type
"predicted_asset_type": asset_rule.asset_type or "UNKNOWN", # Use UNKNOWN if None
"files": asset_files_data
})
# Sort assets by name for consistency
predicted_assets_data.sort(key=lambda x: x['suggested_asset_name'])
# Sort all file paths for the input field
all_file_paths.sort()
if not all_file_paths:
log.warning(f"No file paths found for source: {source_rule.input_path}. Cannot generate example.")
self.statusBar().showMessage(f"No files found for source '{os.path.basename(source_rule.input_path)}'.", 3000)
return
# Construct the final dictionary
llm_example = {
"input": "\n".join(all_file_paths),
"output": {
"predicted_assets": predicted_assets_data
}
}
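# Illustrative shape of llm_example (file names and type labels below are hypothetical):
# {
#   "input": "brick_albedo.png\nbrick_normal.png",
#   "output": {
#     "predicted_assets": [
#       {
#         "suggested_asset_name": "Brick",
#         "predicted_asset_type": "Surface",
#         "files": [
#           {"file_path": "brick_albedo.png", "predicted_file_type": "ALBEDO"},
#           {"file_path": "brick_normal.png", "predicted_file_type": "NORMAL"}
#         ]
#       }
#     ]
#   }
# }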
# Serialize to JSON string
try:
json_string = json.dumps(llm_example, indent=2) # Set indent=2 for matching format
except Exception as e:
log.exception(f"Error serializing LLM example data to JSON for source {source_rule.input_path}: {e}")
self.statusBar().showMessage(f"Error generating JSON: {e}", 5000)
return
# Copy to clipboard
try:
clipboard = QApplication.clipboard()
if clipboard:
clipboard.setText(json_string)
log.info(f"Copied LLM example JSON to clipboard for source: {source_rule.input_path}")
self.statusBar().showMessage("Copied LLM example JSON to clipboard.", 3000)
else:
log.error("Failed to get system clipboard.")
self.statusBar().showMessage("Error: Could not access clipboard.", 5000)
except Exception as e:
log.exception(f"Error copying LLM example JSON to clipboard: {e}")
self.statusBar().showMessage(f"Error copying to clipboard: {e}", 5000)
# --- Main Execution ---
def run_gui():
"""Initializes and runs the Qt application."""
print("--- Reached run_gui() ---")
app = QApplication(sys.argv)
app.setStyle('Fusion')
# Set a custom palette to override default Fusion colors
palette = app.palette()

View File

@@ -9,13 +9,21 @@ from PySide6.QtCore import Signal, Slot, QObject
# from rule_structure import SourceRule, AssetRule, FileRule # Assuming direct import is possible
class RuleEditorWidget(QWidget):
"""
A widget to display and edit hierarchical processing rules (Source, Asset, File).
"""
rule_updated = Signal(object) # Signal emitted when a rule is updated
"""
A widget to display and edit hierarchical processing rules (Source, Asset, File).
"""
rule_updated = Signal(object) # Signal emitted when a rule is updated
def __init__(self, asset_types: list[str] | None = None, parent=None):
"""
Initializes the RuleEditorWidget.
Args:
asset_types (list[str] | None): A list of available asset type names. Defaults to None.
parent: The parent widget.
"""
super().__init__(parent)
self.asset_types = asset_types if asset_types else [] # Store asset types
self.current_rule_type = None
self.current_rule_object = None
@@ -62,7 +70,17 @@ class RuleEditorWidget(QWidget):
"""
Creates an appropriate editor widget based on the attribute type.
"""
# --- Special Handling for Asset Type Dropdown ---
if self.current_rule_type == 'AssetRule' and attr_name == 'asset_type' and self.asset_types:
widget = QComboBox()
widget.addItems(self.asset_types)
if attr_value in self.asset_types:
widget.setCurrentText(attr_value)
elif self.asset_types: # Select first item if current value is invalid
widget.setCurrentIndex(0)
return widget
# --- Standard Type Handling ---
elif isinstance(attr_value, bool):
widget = QCheckBox()
widget.setChecked(attr_value)
return widget
@@ -80,7 +98,7 @@ class RuleEditorWidget(QWidget):
widget = QLineEdit()
widget.setText(str(attr_value) if attr_value is not None else "")
return widget
# Add more types as needed
# elif isinstance(attr_value, list):
# # Example for a simple list of strings
# widget = QLineEdit()
@@ -103,6 +121,9 @@ class RuleEditorWidget(QWidget):
editor_widget.valueChanged.connect(lambda value: self._update_rule_attribute(attr_name, value))
elif isinstance(editor_widget, QDoubleSpinBox):
editor_widget.valueChanged.connect(lambda value: self._update_rule_attribute(attr_name, value))
elif isinstance(editor_widget, QComboBox):
# Use currentTextChanged to get the string value directly
editor_widget.currentTextChanged.connect(lambda text: self._update_rule_attribute(attr_name, text))
# Add connections for other widget types
def _update_rule_attribute(self, attr_name, value):
@@ -164,7 +185,9 @@ if __name__ == '__main__':
file_setting_x: int = 789
file_setting_y: str = "default_file_string"
# Example usage: Provide asset types during instantiation
asset_types_from_config = ["Surface", "Model", "Decal", "Atlas", "UtilityMap"] # Example list
editor = RuleEditorWidget(asset_types=asset_types_from_config)
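# (In the running application this list would presumably be supplied from the loaded
# configuration rather than hard-coded; the literal list above is only for this test block.)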
# Test loading different rule types
source_rule = SourceRule()

View File

@@ -6,6 +6,7 @@ from PySide6.QtGui import QColor # Added for background role
from pathlib import Path # Added for file_name extraction
from rule_structure import SourceRule, AssetRule, FileRule # Removed AssetType, ItemType import
from configuration import load_base_config # Import load_base_config
from typing import List # Added for type hinting
class UnifiedViewModel(QAbstractItemModel):
# --- Color Constants for Row Backgrounds ---
@@ -547,4 +548,98 @@ class UnifiedViewModel(QAbstractItemModel):
if item: # Ensure internal pointer is not None
return item
return None # Return None for invalid index or None pointer
# --- Method to update model based on LLM predictions ---
def update_rules_for_sources(self, source_rules: List[SourceRule]):
"""
Updates the model's internal data based on a list of SourceRule objects,
typically containing predictions for one or more source directories.
Args:
source_rules: A list of SourceRule objects containing the new structure.
"""
if not source_rules:
print("UnifiedViewModel: update_rules_for_sources called with empty list.")
return
# --- Important: Model Change Signaling ---
# Each source is updated individually rather than via a full model reset,
# which is generally more efficient and leaves unrelated sources untouched.
print(f"UnifiedViewModel: Updating rules for {len(source_rules)} source(s).")
# --- Internal Data Structure ---
# The model works directly on the Rule objects (SourceRule, AssetRule, FileRule)
# stored in self._source_rules; no separate node/wrapper classes are involved.
for rule in source_rules:
source_path = rule.input_path # Use input_path as per SourceRule definition
# --- Find the corresponding SourceRule in the model's internal list ---
# This replaces the placeholder _find_source_node_by_path logic
# We need the *object* and its *index* in self._source_rules
source_rule_obj = None
source_rule_row = -1
for i, existing_rule in enumerate(self._source_rules):
if existing_rule.input_path == source_path:
source_rule_obj = existing_rule
source_rule_row = i
break
if source_rule_obj is None:
# --- ADD NEW RULE LOGIC ---
log.debug(f"No existing rule found for '{source_path}'. Adding new rule to model.")
# Ensure parent references are set within the new rule
for asset_rule in rule.assets:
asset_rule.parent_source = rule # Set parent to the rule being added
for file_rule in asset_rule.files:
file_rule.parent_asset = asset_rule
# Add to model's internal list and emit signal
current_row_count = len(self._source_rules)
self.beginInsertRows(QModelIndex(), current_row_count, current_row_count)
self._source_rules.append(rule) # Append the new rule
self.endInsertRows()
continue # Skip the rest of the loop for this rule as it's newly added
# --- END ADD NEW RULE LOGIC ---
# Get the QModelIndex for source_rule_obj (used here as a sanity check that
# the source is still addressable before its children are replaced)
source_index = self.createIndex(source_rule_row, 0, source_rule_obj)
if not source_index.isValid():
print(f"Warning: Could not create valid QModelIndex for SourceRule: {source_path}. Skipping update.")
continue
# --- Signal layout change for the specific source node ---
# The children (AssetRules) of this SourceRule are about to be replaced.
self.layoutAboutToBeChanged.emit() # Emitted without arguments, i.e. for the whole model
# --- Clear existing children (AssetRules) ---
# Directly modify the assets list of the found SourceRule object
source_rule_obj.assets.clear() # Clear the list in place
# --- Rebuild children based on the new rule ---
for asset_rule in rule.assets:
# Add the new AssetRule object directly
source_rule_obj.assets.append(asset_rule)
# Set the parent reference on the new asset rule
asset_rule.parent_source = source_rule_obj
# Set parent references for the FileRules within the new AssetRule
for file_rule in asset_rule.files:
file_rule.parent_asset = asset_rule
# --- Signal layout change completion ---
self.layoutChanged.emit() # Emit without arguments
print(f"UnifiedViewModel: Updated children for SourceRule: {source_path}")
# --- Placeholder for node finding method (Original Request - Replaced by direct list search above) ---
# Kept for reference, but the logic above directly searches self._source_rules
# def _find_source_node_by_path(self, path: str) -> 'SourceRule | None':
# """Placeholder: Finds a top-level SourceRule by its input_path."""
# # This assumes the model uses separate node objects, which it doesn't.
# # The current implementation uses the Rule objects directly.
# for i, rule in enumerate(self._source_rules):
# if rule.input_path == path:
# return rule # Return the SourceRule object itself
# return None
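# Illustrative call site (a sketch, not part of this commit; variable names are
# hypothetical, and it assumes the Rule classes accept no-argument construction,
# as SourceRule does in the editor test block above):
#
#   rule = SourceRule()
#   rule.input_path = "/projects/bricks_src"
#   asset = AssetRule()
#   asset.asset_name = "Brick"
#   asset.asset_type = "Surface"
#   file_rule = FileRule()
#   file_rule.file_path = "brick_albedo.png"
#   file_rule.item_type = "ALBEDO"
#   asset.files.append(file_rule)
#   rule.assets.append(asset)
#   model.update_rules_for_sources([rule])  # adds the source or replaces its assets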