LLM GUI updates and tests

This commit is contained in:
2025-05-04 14:33:18 +02:00
parent 336d698f9b
commit 6b704c561a
11 changed files with 813 additions and 196 deletions

View File

@@ -65,6 +65,7 @@ class BasePredictionHandler(QObject, ABC, metaclass=QtABCMeta):
Main execution slot intended to be connected to QThread.started.
Handles the overall process: setup, execution, error handling, signaling.
"""
log.debug(f"--> Entered BasePredictionHandler.run() for {self.input_source_identifier}") # ADDED DEBUG LOG
if self._is_running:
log.warning(f"Handler for '{self.input_source_identifier}' is already running. Aborting.")
return

318
gui/llm_editor_widget.py Normal file
View File

@@ -0,0 +1,318 @@
# gui/llm_editor_widget.py
import json
import logging
from PySide6.QtWidgets import (
QWidget, QVBoxLayout, QTabWidget, QPlainTextEdit, QGroupBox,
QHBoxLayout, QPushButton, QFormLayout, QLineEdit, QDoubleSpinBox,
QSpinBox, QMessageBox, QTextEdit
)
from PySide6.QtCore import Slot as pyqtSlot, Signal as pyqtSignal # Use PySide6 equivalents
# Assuming configuration module exists and has relevant functions later
from configuration import save_llm_config, ConfigurationError # Import necessary items
# For now, define path directly for initial structure
LLM_CONFIG_PATH = "config/llm_settings.json"
logger = logging.getLogger(__name__)
class LLMEditorWidget(QWidget):
    """
    Editor widget for the LLM settings persisted in config/llm_settings.json.

    Presents two tabs — prompt text plus per-example sub-tabs, and API
    parameters — tracks dirty state, and writes changes back via
    save_llm_config().  Emits ``settings_saved`` after a successful save.
    """

    # Emitted once the settings file has been written successfully.
    settings_saved = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self._unsaved_changes = False  # dirty flag driving the save button
        self._init_ui()
        self._connect_signals()
        self.save_button.setEnabled(False)  # nothing to save until edited

    # ------------------------------------------------------------------ UI --

    def _init_ui(self):
        """Initialize the user interface components."""
        root = QVBoxLayout(self)

        self.tab_widget = QTabWidget()
        root.addWidget(self.tab_widget)

        self._build_prompt_tab()
        self._build_api_tab()

        # Save button row, right-aligned under the tabs.
        button_row = QHBoxLayout()
        root.addLayout(button_row)
        button_row.addStretch()
        self.save_button = QPushButton("Save LLM Settings")
        button_row.addWidget(self.save_button)

        self.setLayout(root)

    def _build_prompt_tab(self):
        """Create the 'Prompt Settings' tab: main prompt plus example tabs."""
        self.tab_prompt = QWidget()
        layout = QVBoxLayout(self.tab_prompt)
        self.tab_widget.addTab(self.tab_prompt, "Prompt Settings")

        self.prompt_editor = QPlainTextEdit()
        self.prompt_editor.setPlaceholderText("Enter the main LLM predictor prompt here...")
        layout.addWidget(self.prompt_editor)

        examples_box = QGroupBox("Examples")
        examples_box_layout = QVBoxLayout(examples_box)
        layout.addWidget(examples_box)

        self.examples_tab_widget = QTabWidget()
        self.examples_tab_widget.setTabsClosable(True)
        examples_box_layout.addWidget(self.examples_tab_widget)

        buttons = QHBoxLayout()
        examples_box_layout.addLayout(buttons)
        self.add_example_button = QPushButton("Add Example")
        buttons.addWidget(self.add_example_button)
        self.delete_example_button = QPushButton("Delete Current Example")
        buttons.addWidget(self.delete_example_button)
        buttons.addStretch()

    def _build_api_tab(self):
        """Create the 'API Settings' tab with endpoint/model/timing fields."""
        self.tab_api = QWidget()
        form = QFormLayout(self.tab_api)
        self.tab_widget.addTab(self.tab_api, "API Settings")

        self.endpoint_url_edit = QLineEdit()
        form.addRow("Endpoint URL:", self.endpoint_url_edit)

        self.api_key_edit = QLineEdit()
        self.api_key_edit.setEchoMode(QLineEdit.Password)  # never display the key
        form.addRow("API Key:", self.api_key_edit)

        self.model_name_edit = QLineEdit()
        form.addRow("Model Name:", self.model_name_edit)

        self.temperature_spinbox = QDoubleSpinBox()
        self.temperature_spinbox.setRange(0.0, 2.0)
        self.temperature_spinbox.setSingleStep(0.1)
        self.temperature_spinbox.setDecimals(2)
        form.addRow("Temperature:", self.temperature_spinbox)

        self.timeout_spinbox = QSpinBox()
        self.timeout_spinbox.setRange(1, 600)
        self.timeout_spinbox.setSuffix(" s")
        form.addRow("Request Timeout:", self.timeout_spinbox)

    # ------------------------------------------------------------- wiring --

    def _connect_signals(self):
        """Wire widget signals to their slots."""
        self.save_button.clicked.connect(self._save_settings)

        # Any edit to these fields marks the settings dirty.
        dirty_signals = (
            self.prompt_editor.textChanged,
            self.endpoint_url_edit.textChanged,
            self.api_key_edit.textChanged,
            self.model_name_edit.textChanged,
            self.temperature_spinbox.valueChanged,
            self.timeout_spinbox.valueChanged,
        )
        for signal in dirty_signals:
            signal.connect(self._mark_unsaved)

        # Example management buttons and tab close requests.
        self.add_example_button.clicked.connect(self._add_example_tab)
        self.delete_example_button.clicked.connect(self._delete_current_example_tab)
        self.examples_tab_widget.tabCloseRequested.connect(self._remove_example_tab)
        # textChanged for the per-example editors is connected where those
        # editors are created (load_settings and _add_example_tab).

    # ---------------------------------------------------------- load/save --

    @pyqtSlot()
    def load_settings(self):
        """Load settings from the JSON file and populate the UI."""
        logger.info(f"Attempting to load LLM settings from {LLM_CONFIG_PATH}")
        self.setEnabled(True)  # re-enable in case a previous load disabled us

        # Drop any example tabs left over from a previous load.
        while self.examples_tab_widget.count() > 0:
            self.examples_tab_widget.removeTab(0)

        try:
            with open(LLM_CONFIG_PATH, 'r', encoding='utf-8') as f:
                settings = json.load(f)

            # Prompt tab.
            self.prompt_editor.setPlainText(settings.get("llm_predictor_prompt", ""))

            # One tab per example, each shown as pretty-printed JSON.
            # NOTE(review): examples are stored under "llm_predictor_examples"
            # here; confirm every reader of llm_settings.json uses this key.
            for i, example in enumerate(settings.get("llm_predictor_examples", [])):
                try:
                    example_text = json.dumps(example, indent=4)
                except TypeError as e:  # non-serializable example entry
                    logger.error(f"Error formatting example {i+1}: {e}. Skipping.")
                    QMessageBox.warning(self, "Load Error", f"Could not format example {i+1}. It might be invalid.\nError: {e}")
                    continue
                example_editor = QTextEdit()
                example_editor.setPlainText(example_text)
                example_editor.textChanged.connect(self._mark_unsaved)
                self.examples_tab_widget.addTab(example_editor, f"Example {i+1}")

            # API tab.
            self.endpoint_url_edit.setText(settings.get("llm_endpoint_url", ""))
            self.api_key_edit.setText(settings.get("llm_api_key", ""))  # plain-text key; consider security implications
            self.model_name_edit.setText(settings.get("llm_model_name", ""))
            self.temperature_spinbox.setValue(settings.get("llm_temperature", 0.7))
            self.timeout_spinbox.setValue(settings.get("llm_request_timeout", 120))
            logger.info("LLM settings loaded successfully.")
        except FileNotFoundError:
            logger.warning(f"LLM settings file not found: {LLM_CONFIG_PATH}. Using defaults and disabling editor.")
            QMessageBox.warning(self, "Load Error",
                                f"LLM settings file not found:\n{LLM_CONFIG_PATH}\n\nPlease ensure the file exists. Using default values.")
            # Fall back to empty fields / default numeric values; keep the
            # editor enabled so the user can create the settings from scratch.
            self.prompt_editor.clear()
            self.endpoint_url_edit.clear()
            self.api_key_edit.clear()
            self.model_name_edit.clear()
            self.temperature_spinbox.setValue(0.7)
            self.timeout_spinbox.setValue(120)
        except json.JSONDecodeError as e:
            logger.error(f"Error decoding JSON from {LLM_CONFIG_PATH}: {e}")
            QMessageBox.critical(self, "Load Error",
                                 f"Failed to parse LLM settings file:\n{LLM_CONFIG_PATH}\n\nError: {e}\n\nPlease check the file for syntax errors. Editor will be disabled.")
            self.setEnabled(False)  # unusable until the file is fixed
        except Exception as e:  # any other failure while loading/populating
            logger.error(f"An unexpected error occurred loading LLM settings: {e}", exc_info=True)
            QMessageBox.critical(self, "Load Error",
                                 f"An unexpected error occurred while loading settings:\n{e}\n\nEditor will be disabled.")
            self.setEnabled(False)

        # Programmatic population must not count as user edits.
        self.save_button.setEnabled(False)
        self._unsaved_changes = False

    @pyqtSlot()
    def _mark_unsaved(self):
        """Mark settings as having unsaved changes and enable the save button."""
        if self._unsaved_changes:
            return  # already dirty; nothing to do
        self._unsaved_changes = True
        self.save_button.setEnabled(True)
        logger.debug("Unsaved changes marked.")

    @pyqtSlot()
    def _save_settings(self):
        """Gather data from UI, save to JSON file, and handle errors."""
        logger.info("Attempting to save LLM settings...")

        settings_dict = {
            "llm_endpoint_url": self.endpoint_url_edit.text().strip(),
            "llm_api_key": self.api_key_edit.text(),  # intentionally not stripped
            "llm_model_name": self.model_name_edit.text().strip(),
            "llm_temperature": self.temperature_spinbox.value(),
            "llm_request_timeout": self.timeout_spinbox.value(),
            "llm_predictor_prompt": self.prompt_editor.toPlainText().strip(),
        }

        # Parse every example tab; abort the save if any contains invalid JSON.
        parsed_examples = []
        has_errors = False
        for i in range(self.examples_tab_widget.count()):
            editor = self.examples_tab_widget.widget(i)
            if not isinstance(editor, QTextEdit):
                logger.warning(f"Widget at index {i} in examples tab is not a QTextEdit. Skipping.")
                continue
            example_text = editor.toPlainText().strip()
            if not example_text:
                continue  # empty tabs are skipped silently
            try:
                parsed_examples.append(json.loads(example_text))
            except json.JSONDecodeError as e:
                has_errors = True
                tab_name = self.examples_tab_widget.tabText(i)
                logger.warning(f"Invalid JSON in '{tab_name}': {e}. Skipping example.")
                QMessageBox.warning(self, "Invalid Example",
                                    f"The content in '{tab_name}' is not valid JSON and will not be saved.\n\nError: {e}\n\nPlease correct it or remove the tab.")

        if has_errors:
            logger.warning("LLM settings not saved due to invalid JSON in examples.")
            # Dirty state is left untouched so the user can fix and retry.
            return

        settings_dict["llm_predictor_examples"] = parsed_examples

        try:
            save_llm_config(settings_dict)
        except ConfigurationError as e:
            logger.error(f"Failed to save LLM settings: {e}")
            QMessageBox.critical(self, "Save Error", f"Could not save LLM settings.\n\nError: {e}")
            self.save_button.setEnabled(True)  # save failed; keep retry possible
            self._unsaved_changes = True
        except Exception as e:  # unexpected failure during the write
            logger.error(f"An unexpected error occurred during LLM settings save: {e}", exc_info=True)
            QMessageBox.critical(self, "Save Error", f"An unexpected error occurred while saving settings:\n{e}")
            self.save_button.setEnabled(True)
            self._unsaved_changes = True
        else:
            QMessageBox.information(self, "Save Successful", f"LLM settings saved to:\n{LLM_CONFIG_PATH}")
            self.save_button.setEnabled(False)
            self._unsaved_changes = False
            self.settings_saved.emit()  # notify MainWindow or others
            logger.info("LLM settings saved successfully.")

    # --- Example Management Slots ---

    @pyqtSlot()
    def _add_example_tab(self):
        """Add a new, empty tab for an LLM example."""
        logger.debug("Adding new example tab.")
        editor = QTextEdit()
        editor.setPlaceholderText("Enter example JSON here...")
        editor.textChanged.connect(self._mark_unsaved)
        label = f"Example {self.examples_tab_widget.count() + 1}"
        index = self.examples_tab_widget.addTab(editor, label)
        self.examples_tab_widget.setCurrentIndex(index)  # bring the new tab forward
        editor.setFocus()
        self._mark_unsaved()  # adding a tab is itself an edit

    @pyqtSlot()
    def _delete_current_example_tab(self):
        """Delete the currently selected example tab."""
        current_index = self.examples_tab_widget.currentIndex()
        if current_index == -1:  # no tab selected
            logger.debug("Delete current example tab called, but no tab is selected.")
            return
        logger.debug(f"Deleting current example tab at index {current_index}.")
        self._remove_example_tab(current_index)  # reuse the removal logic

    @pyqtSlot(int)
    def _remove_example_tab(self, index):
        """Remove the example tab at the given index."""
        if not (0 <= index < self.examples_tab_widget.count()):
            logger.warning(f"Attempted to remove example tab at invalid index {index}.")
            return
        widget_to_remove = self.examples_tab_widget.widget(index)
        self.examples_tab_widget.removeTab(index)
        if widget_to_remove:
            widget_to_remove.deleteLater()  # schedule Qt-side cleanup
        logger.debug(f"Removed example tab at index {index}.")
        # Keep tab labels sequential after the removal.
        for i in range(index, self.examples_tab_widget.count()):
            self.examples_tab_widget.setTabText(i, f"Example {i+1}")
        self._mark_unsaved()

View File

@@ -1,4 +1,5 @@
import os
import json # Added for direct config loading
import logging
from pathlib import Path
@@ -8,18 +9,23 @@ from PySide6.QtCore import QObject, Signal, QThread, Slot, QTimer
# Assuming these might be needed based on MainWindow's usage
try:
# Removed load_base_config import
from configuration import Configuration, ConfigurationError
# Removed Configuration import as we load manually now
from configuration import ConfigurationError # Keep error class
from .llm_prediction_handler import LLMPredictionHandler # Backend handler
from rule_structure import SourceRule # For signal emission type hint
except ImportError as e:
logging.getLogger(__name__).critical(f"Failed to import backend modules for LLMInteractionHandler: {e}")
LLMPredictionHandler = None
load_base_config = None
# load_base_config = None # Removed
ConfigurationError = Exception
SourceRule = None # Define as None if import fails
Configuration = None # Define as None if import fails
# Configuration = None # Removed
log = logging.getLogger(__name__)
# Define config file paths relative to this handler's location
CONFIG_DIR = Path(__file__).parent.parent / "config"
APP_SETTINGS_PATH = CONFIG_DIR / "app_settings.json"
LLM_SETTINGS_PATH = CONFIG_DIR / "llm_settings.json"
class LLMInteractionHandler(QObject):
"""
@@ -55,6 +61,22 @@ class LLMInteractionHandler(QObject):
log.debug(f"LLM Handler processing state changed to: {processing}")
self.llm_processing_state_changed.emit(processing)
def force_reset_state(self):
    """Forces the processing state to False. Use with caution."""
    log.warning("Forcing LLMInteractionHandler state reset.")
    thread = self.llm_prediction_thread
    if thread and thread.isRunning():
        log.warning("Force reset called while thread is running. Attempting to stop thread.")
        # Ask the event loop to exit before resorting to terminate().
        thread.quit()
        if not thread.wait(500):  # half-second grace period
            log.warning("LLM thread did not quit gracefully after force reset. Terminating.")
            thread.terminate()
            thread.wait()  # block until termination completes
    # Drop references and clear the processing flag regardless of thread state.
    self.llm_prediction_thread = None
    self.llm_prediction_handler = None
    self._set_processing_state(False)
    # Do NOT clear the queue here, let the user decide via Clear Queue button
@Slot(str, list)
def queue_llm_request(self, input_path: str, file_list: list | None):
"""Adds a request to the LLM processing queue."""
@@ -75,6 +97,7 @@ class LLMInteractionHandler(QObject):
def queue_llm_requests_batch(self, requests: list[tuple[str, list | None]]):
"""Adds multiple requests to the LLM processing queue."""
added_count = 0
log.debug(f"Queueing batch. Current queue content: {self.llm_processing_queue}") # ADDED DEBUG LOG
for input_path, file_list in requests:
is_in_queue = any(item[0] == input_path for item in self.llm_processing_queue)
if not is_in_queue:
@@ -99,10 +122,10 @@ class LLMInteractionHandler(QObject):
self.llm_prediction_thread = None
self.llm_prediction_handler = None
# --- Process next item now that the previous thread is fully finished ---
log.debug("Previous LLM thread finished. Triggering processing for next item by calling _process_next_llm_item...")
self._set_processing_state(False) # Mark processing as finished *before* trying next item
# Use QTimer.singleShot to yield control briefly before starting next item
QTimer.singleShot(0, self._process_next_llm_item)
log.debug("Previous LLM thread finished. Setting processing state to False.")
self._set_processing_state(False) # Mark processing as finished
# The next item will be processed when _handle_llm_result or _handle_llm_error
# calls _process_next_llm_item after popping the completed item.
log.debug("<-- Exiting LLMInteractionHandler._reset_llm_thread_references")
@@ -140,64 +163,143 @@ class LLMInteractionHandler(QObject):
self.llm_prediction_error.emit(input_path_str, error_msg)
return
# --- Get Configuration Object ---
if not hasattr(self.main_window, 'config') or not isinstance(self.main_window.config, Configuration):
error_msg = "LLM Error: Main window does not have a valid Configuration object."
# --- Load Required Settings Directly ---
llm_settings = {}
try:
log.debug(f"Loading LLM settings from: {LLM_SETTINGS_PATH}")
with open(LLM_SETTINGS_PATH, 'r') as f:
llm_data = json.load(f)
# Extract required fields with defaults
llm_settings['endpoint_url'] = llm_data.get('llm_endpoint_url')
llm_settings['api_key'] = llm_data.get('llm_api_key') # Can be None
llm_settings['model_name'] = llm_data.get('llm_model_name', 'local-model')
llm_settings['temperature'] = llm_data.get('llm_temperature', 0.5)
llm_settings['request_timeout'] = llm_data.get('llm_request_timeout', 120)
llm_settings['predictor_prompt'] = llm_data.get('llm_predictor_prompt', '')
llm_settings['examples'] = llm_data.get('llm_examples', [])
log.debug(f"Loading App settings from: {APP_SETTINGS_PATH}")
with open(APP_SETTINGS_PATH, 'r') as f:
app_data = json.load(f)
# Extract required fields
llm_settings['asset_type_definitions'] = app_data.get('ASSET_TYPE_DEFINITIONS', {})
llm_settings['file_type_definitions'] = app_data.get('FILE_TYPE_DEFINITIONS', {})
# Validate essential settings
if not llm_settings['endpoint_url']:
raise ValueError("LLM endpoint URL is missing in llm_settings.json")
if not llm_settings['predictor_prompt']:
raise ValueError("LLM predictor prompt is missing in llm_settings.json")
log.debug("LLM and App settings loaded successfully for LLMInteractionHandler.")
except FileNotFoundError as e:
error_msg = f"LLM Error: Configuration file not found: {e.filename}"
log.critical(error_msg)
self.llm_status_update.emit("LLM Error: Cannot access application configuration.")
self.llm_status_update.emit("LLM Error: Cannot load configuration file.")
self.llm_prediction_error.emit(input_path_str, error_msg)
return
except json.JSONDecodeError as e:
error_msg = f"LLM Error: Failed to parse configuration file: {e}"
log.critical(error_msg)
self.llm_status_update.emit("LLM Error: Cannot parse configuration file.")
self.llm_prediction_error.emit(input_path_str, error_msg)
return
except ValueError as e: # Catch validation errors
error_msg = f"LLM Error: Invalid configuration - {e}"
log.critical(error_msg)
self.llm_status_update.emit("LLM Error: Invalid configuration.")
self.llm_prediction_error.emit(input_path_str, error_msg)
return
except Exception as e: # Catch other potential errors
error_msg = f"LLM Error: Unexpected error loading configuration: {e}"
log.critical(error_msg, exc_info=True)
self.llm_status_update.emit("LLM Error: Cannot load application configuration.")
self.llm_prediction_error.emit(input_path_str, error_msg)
return
config = self.main_window.config # Get the config object
# --- Wrap thread/handler setup and start in try...except ---
try:
# --- Check if Handler Class is Available ---
if LLMPredictionHandler is None:
# Raise ValueError to be caught below
raise ValueError("LLMPredictionHandler class not available.")
# --- Check if Handler Class is Available ---
if LLMPredictionHandler is None:
log.critical("LLMPredictionHandler class not available.")
self.llm_status_update.emit("LLM Error: Prediction handler component missing.")
self.llm_prediction_error.emit(input_path_str, "LLMPredictionHandler class not available.")
return
# --- Clean up previous thread/handler if necessary ---
# (Keep this cleanup logic as it handles potential stale threads)
if self.llm_prediction_thread and self.llm_prediction_thread.isRunning():
log.warning("Warning: Previous LLM prediction thread still running when trying to start new one. Attempting cleanup.")
if self.llm_prediction_handler:
if hasattr(self.llm_prediction_handler, 'cancel'):
self.llm_prediction_handler.cancel()
self.llm_prediction_thread.quit()
if not self.llm_prediction_thread.wait(1000): # Wait 1 sec
log.warning("LLM thread did not quit gracefully. Forcing termination.")
self.llm_prediction_thread.terminate()
self.llm_prediction_thread.wait() # Wait after terminate
self.llm_prediction_thread = None
self.llm_prediction_handler = None
# --- Clean up previous thread/handler if necessary ---
if self.llm_prediction_thread and self.llm_prediction_thread.isRunning():
log.warning("Warning: Previous LLM prediction thread still running when trying to start new one. Attempting cleanup.")
log.info(f"Starting LLM prediction thread for source: {input_path_str} with {len(file_list)} files.")
self.llm_status_update.emit(f"Starting LLM interpretation for {input_path_obj.name}...")
# --- Create Thread and Handler ---
self.llm_prediction_thread = QThread(self) # Parent thread to self
# Pass the loaded settings dictionary
self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, llm_settings)
self.llm_prediction_handler.moveToThread(self.llm_prediction_thread)
# Connect signals from handler to *internal* slots or directly emit signals
self.llm_prediction_handler.prediction_ready.connect(self._handle_llm_result)
self.llm_prediction_handler.prediction_error.connect(self._handle_llm_error)
self.llm_prediction_handler.status_update.connect(self.llm_status_update) # Pass status through
# Connect thread signals
self.llm_prediction_thread.started.connect(self.llm_prediction_handler.run)
# Clean up thread and handler when finished
self.llm_prediction_thread.finished.connect(self._reset_llm_thread_references)
self.llm_prediction_thread.finished.connect(self.llm_prediction_handler.deleteLater)
self.llm_prediction_thread.finished.connect(self.llm_prediction_thread.deleteLater)
# Also ensure thread quits when handler signals completion/error
self.llm_prediction_handler.prediction_ready.connect(self.llm_prediction_thread.quit)
self.llm_prediction_handler.prediction_error.connect(self.llm_prediction_thread.quit)
# TODO: Add a logging.debug statement at the very beginning of LLMPredictionHandler.run()
# to confirm if the method is being reached. Example:
# log.debug(f"--> Entered LLMPredictionHandler.run() for {self.input_path}")
self.llm_prediction_thread.start()
log.debug(f"LLM prediction thread start() called for {input_path_str}. Is running: {self.llm_prediction_thread.isRunning()}") # ADDED DEBUG LOG
# Log success *after* start() is called successfully
log.debug(f"Successfully initiated LLM prediction thread for {input_path_str}.") # MOVED/REWORDED LOG
except Exception as e:
# --- Handle errors during setup/start ---
log.exception(f"Critical error during LLM thread setup/start for {input_path_str}: {e}")
error_msg = f"Error initializing LLM task for {input_path_obj.name}: {e}"
self.llm_status_update.emit(error_msg)
self.llm_prediction_error.emit(input_path_str, error_msg) # Signal the error
# --- Crucially, reset processing state if setup fails ---
log.warning("Resetting processing state due to thread setup/start error.")
self._set_processing_state(False)
# Clean up potentially partially created objects
if self.llm_prediction_handler:
if hasattr(self.llm_prediction_handler, 'cancel'):
self.llm_prediction_handler.cancel()
self.llm_prediction_thread.quit()
if not self.llm_prediction_thread.wait(1000): # Wait 1 sec
log.warning("LLM thread did not quit gracefully. Forcing termination.")
self.llm_prediction_thread.terminate()
self.llm_prediction_thread.wait() # Wait after terminate
self.llm_prediction_thread = None
self.llm_prediction_handler = None
self.llm_prediction_handler.deleteLater()
self.llm_prediction_handler = None
if self.llm_prediction_thread:
if self.llm_prediction_thread.isRunning():
self.llm_prediction_thread.quit()
self.llm_prediction_thread.wait(500)
self.llm_prediction_thread.terminate() # Force if needed
self.llm_prediction_thread.wait()
self.llm_prediction_thread.deleteLater()
self.llm_prediction_thread = None
log.info(f"Starting LLM prediction thread for source: {input_path_str} with {len(file_list)} files.")
self.llm_status_update.emit(f"Starting LLM interpretation for {input_path_obj.name}...")
# --- Create Thread and Handler ---
self.llm_prediction_thread = QThread(self) # Parent thread to self
# Pass the Configuration object directly
self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, config)
self.llm_prediction_handler.moveToThread(self.llm_prediction_thread)
# Connect signals from handler to *internal* slots or directly emit signals
self.llm_prediction_handler.prediction_ready.connect(self._handle_llm_result)
self.llm_prediction_handler.prediction_error.connect(self._handle_llm_error)
self.llm_prediction_handler.status_update.connect(self.llm_status_update) # Pass status through
# Connect thread signals
self.llm_prediction_thread.started.connect(self.llm_prediction_handler.run)
# Clean up thread and handler when finished
self.llm_prediction_thread.finished.connect(self._reset_llm_thread_references)
self.llm_prediction_thread.finished.connect(self.llm_prediction_handler.deleteLater)
self.llm_prediction_thread.finished.connect(self.llm_prediction_thread.deleteLater)
# Also ensure thread quits when handler signals completion/error
self.llm_prediction_handler.prediction_ready.connect(self.llm_prediction_thread.quit)
self.llm_prediction_handler.prediction_error.connect(self.llm_prediction_thread.quit)
self.llm_prediction_thread.start()
log.debug(f"LLM prediction thread started for {input_path_str}.")
# Do NOT automatically try the next item here, as the error might be persistent.
# Let the error signal handle popping the item and trying the next one.
# The error signal (_handle_llm_error) will pop the item and call _process_next_llm_item.
def is_processing(self) -> bool:
@@ -254,10 +356,11 @@ class LLMInteractionHandler(QObject):
try:
# Pass the potentially None file_list. _start_llm_prediction handles extraction if needed.
self._start_llm_prediction(next_dir, file_list=file_list)
# --- Pop item *after* successfully starting prediction ---
self.llm_processing_queue.pop(0)
log.debug(f"Successfully started LLM prediction for {next_dir} and removed from queue.")
# --- DO NOT pop item here. Item is popped in _handle_llm_result or _handle_llm_error ---
# Log message moved into the try block of _start_llm_prediction
# log.debug(f"Successfully started LLM prediction thread for {next_dir}. Item remains in queue until finished.")
except Exception as e:
# This block now catches errors from _start_llm_prediction itself
log.exception(f"Error occurred *during* _start_llm_prediction call for {next_dir}: {e}")
error_msg = f"Error starting LLM for {os.path.basename(next_dir)}: {e}"
self.llm_status_update.emit(error_msg)
@@ -277,19 +380,37 @@ class LLMInteractionHandler(QObject):
# --- Internal Slots to Handle Results/Errors from LLMPredictionHandler ---
@Slot(str, list)
def _handle_llm_result(self, input_path: str, source_rules: list):
"""Internal slot to receive results and emit the public signal."""
log.debug(f"LLM Handler received result for {input_path}. Emitting llm_prediction_ready.")
"""Internal slot to receive results, pop item, and emit the public signal."""
log.debug(f"LLM Handler received result for {input_path}. Removing from queue and emitting llm_prediction_ready.")
# Remove the completed item from the queue
try:
# Find and remove the item by input_path
self.llm_processing_queue = [item for item in self.llm_processing_queue if item[0] != input_path]
log.debug(f"Removed '{input_path}' from LLM queue after successful prediction. New size: {len(self.llm_processing_queue)}")
except Exception as e:
log.error(f"Error removing '{input_path}' from LLM queue after success: {e}")
self.llm_prediction_ready.emit(input_path, source_rules)
# Note: The thread's finished signal calls _reset_llm_thread_references,
# which then calls _process_next_llm_item.
# Process the next item in the queue
QTimer.singleShot(0, self._process_next_llm_item)
@Slot(str, str)
def _handle_llm_error(self, input_path: str, error_message: str):
"""Internal slot to receive errors and emit the public signal."""
log.debug(f"LLM Handler received error for {input_path}: {error_message}. Emitting llm_prediction_error.")
"""Internal slot to receive errors, pop item, and emit the public signal."""
log.debug(f"LLM Handler received error for {input_path}: {error_message}. Removing from queue and emitting llm_prediction_error.")
# Remove the failed item from the queue
try:
# Find and remove the item by input_path
self.llm_processing_queue = [item for item in self.llm_processing_queue if item[0] != input_path]
log.debug(f"Removed '{input_path}' from LLM queue after error. New size: {len(self.llm_processing_queue)}")
except Exception as e:
log.error(f"Error removing '{input_path}' from LLM queue after error: {e}")
self.llm_prediction_error.emit(input_path, error_message)
# Note: The thread's finished signal calls _reset_llm_thread_references,
# which then calls _process_next_llm_item.
# Process the next item in the queue
QTimer.singleShot(0, self._process_next_llm_item)
def clear_queue(self):
"""Clears the LLM processing queue."""

View File

@@ -14,8 +14,8 @@ from rule_structure import SourceRule, AssetRule, FileRule # Ensure AssetRule an
# Assuming configuration loads app_settings.json
# Adjust the import path if necessary
# Import Configuration class
from configuration import Configuration
# Removed Configuration import
# from configuration import Configuration
# from configuration import load_base_config # No longer needed here
from .base_prediction_handler import BasePredictionHandler # Import base class
@@ -28,7 +28,8 @@ class LLMPredictionHandler(BasePredictionHandler):
"""
# Signals (prediction_ready, prediction_error, status_update) are inherited
def __init__(self, input_source_identifier: str, file_list: list, config: Configuration, parent: QObject = None):
# Changed 'config: Configuration' to 'settings: dict'
def __init__(self, input_source_identifier: str, file_list: list, settings: dict, parent: QObject = None):
"""
Initializes the LLM handler.
@@ -36,15 +37,14 @@ class LLMPredictionHandler(BasePredictionHandler):
input_source_identifier: The unique identifier for the input source (e.g., file path).
file_list: A list of *relative* file paths extracted from the input source.
(LLM expects relative paths based on the prompt template).
config: The loaded Configuration object containing all settings.
settings: A dictionary containing required LLM and App settings.
parent: The parent QObject.
"""
super().__init__(input_source_identifier, parent)
# input_source_identifier is stored by the base class as self.input_source_identifier
self.file_list = file_list # Store the provided relative file list
self.config = config # Store the Configuration object
# Access LLM settings via self.config properties when needed
# e.g., self.config.llm_endpoint_url, self.config.llm_api_key
self.settings = settings # Store the settings dictionary
# Access LLM settings via self.settings['key']
# _is_running and _is_cancelled are handled by the base class
# The run() and cancel() slots are provided by the base class.
@@ -64,6 +64,7 @@ class LLMPredictionHandler(BasePredictionHandler):
ConnectionError: If the LLM API call fails due to network issues or timeouts.
Exception: For other errors during prompt preparation, API call, or parsing.
"""
log.debug(f"--> Entered LLMPredictionHandler._perform_prediction() for {self.input_source_identifier}")
log.info(f"Performing LLM prediction for: {self.input_source_identifier}")
base_name = Path(self.input_source_identifier).name
@@ -127,17 +128,25 @@ class LLMPredictionHandler(BasePredictionHandler):
"""
Prepares the full prompt string to send to the LLM using stored settings.
"""
# Access settings via the Configuration object
prompt_template = self.config.llm_predictor_prompt
# Access settings via the settings dictionary
prompt_template = self.settings.get('predictor_prompt')
if not prompt_template:
# Config object should handle defaults or raise error during init if critical prompt is missing
raise ValueError("LLM predictor prompt template content is empty or could not be loaded from configuration.")
raise ValueError("LLM predictor prompt template content is empty or missing in settings.")
# Access definitions and examples via Configuration object methods/properties
asset_defs = json.dumps(self.config.get_asset_type_definitions(), indent=4)
file_defs = json.dumps(self.config.get_file_type_definitions_with_examples(), indent=4)
examples = json.dumps(self.config.get_llm_examples(), indent=2)
# Access definitions and examples directly from the settings dictionary
asset_defs = json.dumps(self.settings.get('asset_type_definitions', {}), indent=4)
# Combine file type defs and examples (mirrors the structure previously built by the Configuration class)
file_type_defs_combined = {}
file_type_defs = self.settings.get('file_type_definitions', {})
for key, definition in file_type_defs.items():
# Add examples if they exist within the definition structure
file_type_defs_combined[key] = {
"description": definition.get("description", ""),
"examples": definition.get("examples", [])
}
file_defs = json.dumps(file_type_defs_combined, indent=4)
examples = json.dumps(self.settings.get('examples', []), indent=2)
# Format *relative* file list as a single string with newlines
file_list_str = "\n".join(relative_file_list)
@@ -165,26 +174,26 @@ class LLMPredictionHandler(BasePredictionHandler):
ValueError: If the endpoint URL is not configured or the response is invalid.
requests.exceptions.RequestException: For other request-related errors.
"""
endpoint_url = self.config.llm_endpoint_url # Get from config
endpoint_url = self.settings.get('endpoint_url') # Get from settings dict
if not endpoint_url:
raise ValueError("LLM endpoint URL is not configured in settings.")
headers = {
"Content-Type": "application/json",
}
api_key = self.config.llm_api_key # Get from config
api_key = self.settings.get('api_key') # Get from settings dict
if api_key:
headers["Authorization"] = f"Bearer {api_key}"
# Construct payload based on OpenAI Chat Completions format
payload = {
# Use configured model name, default to 'local-model'
"model": self.config.llm_model_name or "local-model", # Use config property, fallback
# Use configured model name from settings dict
"model": self.settings.get('model_name', 'local-model'),
"messages": [{"role": "user", "content": prompt}],
# Use configured temperature, default to 0.5
"temperature": self.config.llm_temperature, # Use config property (has default)
# Use configured temperature from settings dict
"temperature": self.settings.get('temperature', 0.5),
# Add max_tokens if needed/configurable:
# "max_tokens": self.config.llm_max_tokens, # Example if added to config
# "max_tokens": self.settings.get('max_tokens'), # Example if added to settings
# Ensure the LLM is instructed to return JSON in the prompt itself
# Some models/endpoints support a specific json mode:
# "response_format": { "type": "json_object" } # If supported by endpoint
@@ -203,7 +212,7 @@ class LLMPredictionHandler(BasePredictionHandler):
endpoint_url,
headers=headers,
json=payload,
timeout=self.config.llm_request_timeout # Use config property (has default)
timeout=self.settings.get('request_timeout', 120) # Use settings dict (with default)
)
response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
@@ -318,8 +327,9 @@ class LLMPredictionHandler(BasePredictionHandler):
# --- Prepare for Rule Creation ---
source_rule = SourceRule(input_path=self.input_source_identifier)
valid_asset_types = self.config.get_asset_type_keys() # Use config method
valid_file_types = self.config.get_file_type_keys() # Use config method
# Get valid types directly from the settings dictionary
valid_asset_types = list(self.settings.get('asset_type_definitions', {}).keys())
valid_file_types = list(self.settings.get('file_type_definitions', {}).keys())
asset_rules_map: Dict[str, AssetRule] = {} # Maps group_name to AssetRule
# --- Process Individual Files and Build Rules ---

View File

@@ -11,7 +11,7 @@ log.info(f"sys.path: {sys.path}")
from PySide6.QtWidgets import (
QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QSplitter, QTableView, # Added QSplitter, QTableView
QPushButton, QComboBox, QTableWidget, QTableWidgetItem, QHeaderView,
QPushButton, QComboBox, QTableWidget, QTableWidgetItem, QHeaderView, QStackedWidget, # Added QStackedWidget
QProgressBar, QLabel, QFrame, QCheckBox, QSpinBox, QListWidget, QTextEdit, # Added QListWidget, QTextEdit
QLineEdit, QMessageBox, QFileDialog, QInputDialog, QListWidgetItem, QTabWidget, # Added more widgets
QFormLayout, QGroupBox, QAbstractItemView, QSizePolicy, # Added more layout/widget items
@@ -21,9 +21,10 @@ from PySide6.QtCore import Qt, QThread, Slot, Signal, QObject, QModelIndex, QIte
from PySide6.QtGui import QColor, QAction, QPalette, QClipboard # Add QColor import, QAction, QPalette, QClipboard
# --- Local GUI Imports ---
from .preset_editor_widget import PresetEditorWidget # Import the new widget
from .log_console_widget import LogConsoleWidget # Import the log console widget
from .main_panel_widget import MainPanelWidget # Import the new main panel widget
from .preset_editor_widget import PresetEditorWidget
from .llm_editor_widget import LLMEditorWidget # Import the new LLM editor
from .log_console_widget import LogConsoleWidget
from .main_panel_widget import MainPanelWidget
# --- Backend Imports for Data Structures ---
from rule_structure import SourceRule, AssetRule, FileRule # Import Rule Structures
@@ -158,13 +159,30 @@ class MainWindow(QMainWindow):
self.restructure_handler = AssetRestructureHandler(self.unified_model, self) # Instantiate the restructure handler
# --- Create Panels ---
self.preset_editor_widget = PresetEditorWidget() # Instantiate the preset editor
self.preset_editor_widget = PresetEditorWidget()
self.llm_editor_widget = LLMEditorWidget() # Instantiate the LLM editor
# Instantiate MainPanelWidget, passing the model and self (MainWindow) for context
self.main_panel_widget = MainPanelWidget(self.unified_model, self)
self.log_console = LogConsoleWidget(self) # Instantiate the log console
self.log_console = LogConsoleWidget(self)
self.splitter.addWidget(self.preset_editor_widget) # Add the preset editor
self.splitter.addWidget(self.main_panel_widget) # Add the new main panel widget
# --- Create Left Pane with Static Selector and Stacked Editor ---
self.left_pane_widget = QWidget()
left_pane_layout = QVBoxLayout(self.left_pane_widget)
left_pane_layout.setContentsMargins(0, 0, 0, 0)
left_pane_layout.setSpacing(0) # No space between selector and stack
# Add the selector part from PresetEditorWidget
left_pane_layout.addWidget(self.preset_editor_widget.selector_container)
# Create the stacked widget for swappable editors
self.editor_stack = QStackedWidget()
self.editor_stack.addWidget(self.preset_editor_widget.json_editor_container) # Page 0: Preset JSON Editor
self.editor_stack.addWidget(self.llm_editor_widget) # Page 1: LLM Editor
left_pane_layout.addWidget(self.editor_stack)
# Add the new left pane and the main panel to the splitter
self.splitter.addWidget(self.left_pane_widget)
self.splitter.addWidget(self.main_panel_widget)
# --- Setup UI Elements ---
# Main panel UI is handled internally by MainPanelWidget
@@ -198,6 +216,8 @@ class MainWindow(QMainWindow):
# --- Connect Model Signals ---
self.unified_model.targetAssetOverrideChanged.connect(self.restructure_handler.handle_target_asset_override)
# --- Connect LLM Editor Signals ---
self.llm_editor_widget.settings_saved.connect(self._on_llm_settings_saved) # Connect save signal
# --- Adjust Splitter ---
self.splitter.setSizes([400, 800]) # Initial size ratio
@@ -633,8 +653,8 @@ class MainWindow(QMainWindow):
# Check if rule-based prediction is already running (optional, handler might manage internally)
# Note: QueuedConnection on the signal helps, but check anyway for immediate feedback/logging
# TODO: Add is_running() method to RuleBasedPredictionHandler if needed for this check
if self.prediction_handler and hasattr(self.prediction_handler, 'is_running') and self.prediction_handler.is_running():
# NOTE: RuleBasedPredictionHandler.is_running is a property now — access it without calling
if self.prediction_handler and hasattr(self.prediction_handler, 'is_running') and self.prediction_handler.is_running: # Removed ()
log.warning("Rule-based prediction is already running. Queuing re-interpretation request.")
# Proceed, relying on QueuedConnection
@@ -1180,9 +1200,34 @@ class MainWindow(QMainWindow):
# --- Slot for Preset Editor Selection Changes ---
@Slot(str, str)
def _on_preset_selection_changed(self, mode: str, preset_name: str | None):
"""Handles changes in the preset editor selection (preset, LLM, placeholder)."""
"""
Handles changes in the preset editor selection (preset, LLM, placeholder).
Switches between PresetEditorWidget and LLMEditorWidget.
"""
log.info(f"Preset selection changed: mode='{mode}', preset_name='{preset_name}'")
# --- Editor Stack Switching ---
if mode == "llm":
log.debug("Switching editor stack to LLM Editor Widget.")
# Force reset the LLM handler state in case it got stuck
if hasattr(self, 'llm_interaction_handler'):
self.llm_interaction_handler.force_reset_state()
self.editor_stack.setCurrentWidget(self.llm_editor_widget)
# Load settings *after* switching the stack
try:
self.llm_editor_widget.load_settings()
except Exception as e:
log.exception(f"Error loading LLM settings in _on_preset_selection_changed: {e}")
QMessageBox.critical(self, "LLM Settings Error", f"Failed to load LLM settings:\n{e}")
elif mode == "preset":
log.debug("Switching editor stack to Preset JSON Editor Widget.")
self.editor_stack.setCurrentWidget(self.preset_editor_widget.json_editor_container)
else: # "placeholder"
log.debug("Switching editor stack to Preset JSON Editor Widget (placeholder selected).")
self.editor_stack.setCurrentWidget(self.preset_editor_widget.json_editor_container)
# The PresetEditorWidget's internal logic handles disabling/clearing the editor fields.
# --- End Editor Stack Switching ---
# Update window title based on selection
if mode == "preset" and preset_name:
# Check for unsaved changes *within the editor widget*
@@ -1212,6 +1257,17 @@ class MainWindow(QMainWindow):
# update_preview will now respect the mode set above
self.update_preview()
@Slot()
def _on_llm_settings_saved(self):
"""Slot called when LLM settings are saved successfully."""
log.info("LLM settings saved signal received by MainWindow.")
self.statusBar().showMessage("LLM settings saved successfully.", 3000)
# Optionally, trigger a reload of configuration if needed elsewhere,
# or update the LLMInteractionHandler if it caches settings.
# For now, just show a status message.
# If the LLM handler uses the config directly, no action needed here.
# If it caches, we might need: self.llm_interaction_handler.reload_settings()
# --- Slot for LLM Processing State Changes from Handler ---
@Slot(bool)
def _on_llm_processing_state_changed(self, is_processing: bool):

View File

@@ -58,15 +58,19 @@ class PresetEditorWidget(QWidget):
def _init_ui(self):
"""Initializes the UI elements for the preset editor."""
editor_layout = QVBoxLayout(self)
editor_layout.setContentsMargins(5, 5, 5, 5) # Reduce margins
main_layout = QVBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0) # Let containers manage margins
main_layout.setSpacing(0) # No space between selector and editor containers
# Preset List and Controls
list_layout = QVBoxLayout()
list_layout.addWidget(QLabel("Presets:"))
self.selector_container = QWidget()
selector_layout = QVBoxLayout(self.selector_container)
selector_layout.setContentsMargins(5, 5, 5, 5) # Margins for selector area
selector_layout.addWidget(QLabel("Presets:"))
self.editor_preset_list = QListWidget()
self.editor_preset_list.currentItemChanged.connect(self._load_selected_preset_for_editing)
list_layout.addWidget(self.editor_preset_list)
selector_layout.addWidget(self.editor_preset_list) # Corrected: Add to selector_layout
list_button_layout = QHBoxLayout()
self.editor_new_button = QPushButton("New")
@@ -75,10 +79,14 @@ class PresetEditorWidget(QWidget):
self.editor_delete_button.clicked.connect(self._delete_selected_preset)
list_button_layout.addWidget(self.editor_new_button)
list_button_layout.addWidget(self.editor_delete_button)
list_layout.addLayout(list_button_layout)
editor_layout.addLayout(list_layout, 1) # Allow list to stretch
selector_layout.addLayout(list_button_layout)
main_layout.addWidget(self.selector_container) # Add selector container to main layout
# Editor Tabs
self.json_editor_container = QWidget()
editor_layout = QVBoxLayout(self.json_editor_container)
editor_layout.setContentsMargins(5, 0, 5, 5) # Margins for editor area (no top margin)
self.editor_tab_widget = QTabWidget()
self.editor_tab_general_naming = QWidget()
self.editor_tab_mapping_rules = QWidget()
@@ -86,7 +94,7 @@ class PresetEditorWidget(QWidget):
self.editor_tab_widget.addTab(self.editor_tab_mapping_rules, "Mapping & Rules")
self._create_editor_general_tab()
self._create_editor_mapping_tab()
editor_layout.addWidget(self.editor_tab_widget, 3) # Allow tabs to stretch more
editor_layout.addWidget(self.editor_tab_widget, 1) # Allow tabs to stretch
# Save Buttons
save_button_layout = QHBoxLayout()
@@ -100,6 +108,8 @@ class PresetEditorWidget(QWidget):
save_button_layout.addWidget(self.editor_save_as_button)
editor_layout.addLayout(save_button_layout)
main_layout.addWidget(self.json_editor_container) # Add editor container to main layout
def _create_editor_general_tab(self):
"""Creates the widgets and layout for the 'General & Naming' editor tab."""
layout = QVBoxLayout(self.editor_tab_general_naming)
@@ -347,9 +357,10 @@ class PresetEditorWidget(QWidget):
def _set_editor_enabled(self, enabled: bool):
"""Enables or disables all editor widgets."""
self.editor_tab_widget.setEnabled(enabled)
# Target the container holding the tabs and save buttons
self.json_editor_container.setEnabled(enabled)
# Save button state still depends on unsaved changes, but only if container is enabled
self.editor_save_button.setEnabled(enabled and self.editor_unsaved_changes)
self.editor_save_as_button.setEnabled(enabled) # Save As is always possible if editor is enabled
def _clear_editor(self):
"""Clears the editor fields and resets state."""