import os
import logging
from pathlib import Path

from PySide6.QtCore import QObject, Signal, QThread, Slot, QTimer

# --- Backend Imports ---
# Assuming these might be needed based on MainWindow's usage
try:
    # Removed load_base_config import
    from configuration import Configuration, ConfigurationError
    from .llm_prediction_handler import LLMPredictionHandler  # Backend handler
    from rule_structure import SourceRule  # For signal emission type hint
except ImportError as e:
    logging.getLogger(__name__).critical(f"Failed to import backend modules for LLMInteractionHandler: {e}")
    LLMPredictionHandler = None
    ConfigurationError = Exception
    SourceRule = None  # Define as None if import fails
    Configuration = None  # Define as None if import fails

log = logging.getLogger(__name__)


class LLMInteractionHandler(QObject):
    """
    Handles the logic for interacting with the LLM prediction service,
    including managing the queue, thread, and communication.
    """

    # Signals to communicate results/status back to MainWindow or other components
    llm_prediction_ready = Signal(str, list)      # input_path, List[SourceRule]
    llm_prediction_error = Signal(str, str)       # input_path, error_message
    llm_status_update = Signal(str)               # status_message
    llm_processing_state_changed = Signal(bool)   # is_processing (True when busy, False when idle)

    def __init__(self, main_window_ref, parent=None):
        """
        Initializes the handler.

        Args:
            main_window_ref: A reference to the MainWindow instance for accessing
                             shared components like the status bar or models if needed.
            parent: The parent QObject.
        """
        super().__init__(parent)
        self.main_window = main_window_ref  # Store reference if needed for status updates etc.
        self.llm_processing_queue = []  # Unified queue for initial adds and re-interpretations
        self.llm_prediction_thread = None
        self.llm_prediction_handler = None
        self._is_processing = False  # Internal flag to track processing state

    def _set_processing_state(self, processing: bool):
        """Updates the internal processing state and emits a signal."""
        if self._is_processing != processing:
            self._is_processing = processing
            log.debug(f"LLM Handler processing state changed to: {processing}")
            self.llm_processing_state_changed.emit(processing)

    @Slot(str, list)
    def queue_llm_request(self, input_path: str, file_list: list | None):
        """Adds a request to the LLM processing queue."""
        log.debug(f"Queueing LLM request for '{input_path}'. Current queue size: {len(self.llm_processing_queue)}")
        # Avoid duplicates: skip paths that are already queued
        is_in_queue = any(item[0] == input_path for item in self.llm_processing_queue)
        if not is_in_queue:
            self.llm_processing_queue.append((input_path, file_list))
            log.info(f"Added '{input_path}' to LLM queue. New size: {len(self.llm_processing_queue)}")
            # If not currently processing, start the queue
            if not self._is_processing:
                # Use QTimer.singleShot to avoid immediate processing if called rapidly
                QTimer.singleShot(0, self._process_next_llm_item)
        else:
            log.debug(f"Skipping duplicate add to LLM queue for: {input_path}")
    @Slot(list)
    def queue_llm_requests_batch(self, requests: list[tuple[str, list | None]]):
        """Adds multiple requests to the LLM processing queue."""
        added_count = 0
        for input_path, file_list in requests:
            is_in_queue = any(item[0] == input_path for item in self.llm_processing_queue)
            if not is_in_queue:
                self.llm_processing_queue.append((input_path, file_list))
                added_count += 1
            else:
                log.debug(f"Skipping duplicate add to LLM queue for: {input_path}")
        if added_count > 0:
            log.info(f"Added {added_count} requests to LLM queue. New size: {len(self.llm_processing_queue)}")
            # If not currently processing, start the queue
            if not self._is_processing:
                QTimer.singleShot(0, self._process_next_llm_item)

    # --- Methods moved from MainWindow ---

    @Slot()
    def _reset_llm_thread_references(self):
        """Resets LLM thread and handler references after the thread finishes."""
        log.debug("--> Entered LLMInteractionHandler._reset_llm_thread_references")
        log.debug("Resetting LLM prediction thread and handler references.")
        self.llm_prediction_thread = None
        self.llm_prediction_handler = None
        # --- Process the next item now that the previous thread is fully finished ---
        log.debug("Previous LLM thread finished. Triggering processing for the next item by calling _process_next_llm_item...")
        self._set_processing_state(False)  # Mark processing as finished *before* trying the next item
        # Use QTimer.singleShot to yield control briefly before starting the next item
        QTimer.singleShot(0, self._process_next_llm_item)
        log.debug("<-- Exiting LLMInteractionHandler._reset_llm_thread_references")

    def _start_llm_prediction(self, input_path_str: str, file_list: list | None = None):
        """
        Sets up and starts the LLMPredictionHandler in a separate thread.
        Emits signals for results, errors, or status updates.
        If file_list is not provided, it will be extracted.
        """
        log.debug(f"Attempting to start LLM prediction for: {input_path_str}")

        # Extract the file list if not provided (needed for re-interpretation calls)
        if file_list is None:
            log.debug(f"File list not provided for {input_path_str}, extracting...")
            if hasattr(self.main_window, '_extract_file_list'):
                file_list = self.main_window._extract_file_list(input_path_str)
                if file_list is None:
                    error_msg = f"Failed to extract file list for {input_path_str} in _start_llm_prediction."
                    log.error(error_msg)
                    self.llm_status_update.emit(f"Error extracting files for {os.path.basename(input_path_str)}")
                    self.llm_prediction_error.emit(input_path_str, error_msg)  # Signal the error
                    return  # Stop if extraction failed
            else:
                error_msg = "MainWindow reference does not have an _extract_file_list method."
                log.error(error_msg)
                self.llm_status_update.emit(f"Internal Error: Cannot extract files for {os.path.basename(input_path_str)}")
                self.llm_prediction_error.emit(input_path_str, error_msg)
                return  # Stop

        input_path_obj = Path(input_path_str)  # Still needed for basename

        if not file_list:
            error_msg = f"LLM Error: No files found/extracted for {input_path_str}"
            log.error(error_msg)
            self.llm_status_update.emit(f"LLM Error: No files found for {input_path_obj.name}")
            self.llm_prediction_error.emit(input_path_str, error_msg)
            return

        # --- Get Configuration Object ---
        # Guard against Configuration being None (backend import failure) before calling isinstance.
        if Configuration is None or not hasattr(self.main_window, 'config') or not isinstance(self.main_window.config, Configuration):
            error_msg = "LLM Error: Main window does not have a valid Configuration object."
            log.critical(error_msg)
            self.llm_status_update.emit("LLM Error: Cannot access application configuration.")
            self.llm_prediction_error.emit(input_path_str, error_msg)
            return
        config = self.main_window.config  # Get the config object
        # --- Check if Handler Class is Available ---
        if LLMPredictionHandler is None:
            log.critical("LLMPredictionHandler class not available.")
            self.llm_status_update.emit("LLM Error: Prediction handler component missing.")
            self.llm_prediction_error.emit(input_path_str, "LLMPredictionHandler class not available.")
            return

        # --- Clean up previous thread/handler if necessary ---
        if self.llm_prediction_thread and self.llm_prediction_thread.isRunning():
            log.warning("Previous LLM prediction thread still running when trying to start a new one. Attempting cleanup.")
            if self.llm_prediction_handler and hasattr(self.llm_prediction_handler, 'cancel'):
                self.llm_prediction_handler.cancel()
            self.llm_prediction_thread.quit()
            if not self.llm_prediction_thread.wait(1000):  # Wait 1 sec
                log.warning("LLM thread did not quit gracefully. Forcing termination.")
                self.llm_prediction_thread.terminate()
                self.llm_prediction_thread.wait()  # Wait after terminate
            self.llm_prediction_thread = None
            self.llm_prediction_handler = None

        log.info(f"Starting LLM prediction thread for source: {input_path_str} with {len(file_list)} files.")
        self.llm_status_update.emit(f"Starting LLM interpretation for {input_path_obj.name}...")

        # --- Create Thread and Handler ---
        self.llm_prediction_thread = QThread(self)  # Parent the thread to self
        # Pass the Configuration object directly
        self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, config)
        self.llm_prediction_handler.moveToThread(self.llm_prediction_thread)

        # Connect signals from the handler to internal slots, or pass them straight through
        self.llm_prediction_handler.prediction_ready.connect(self._handle_llm_result)
        self.llm_prediction_handler.prediction_error.connect(self._handle_llm_error)
        self.llm_prediction_handler.status_update.connect(self.llm_status_update)  # Pass status through

        # Connect thread signals
        self.llm_prediction_thread.started.connect(self.llm_prediction_handler.run)
        # Clean up the thread and handler when finished
        self.llm_prediction_thread.finished.connect(self._reset_llm_thread_references)
        self.llm_prediction_thread.finished.connect(self.llm_prediction_handler.deleteLater)
        self.llm_prediction_thread.finished.connect(self.llm_prediction_thread.deleteLater)
        # Also ensure the thread quits when the handler signals completion/error
        self.llm_prediction_handler.prediction_ready.connect(self.llm_prediction_thread.quit)
        self.llm_prediction_handler.prediction_error.connect(self.llm_prediction_thread.quit)

        self.llm_prediction_thread.start()
        log.debug(f"LLM prediction thread started for {input_path_str}.")
    def is_processing(self) -> bool:
        """Safely checks whether the LLM prediction thread is currently running."""
        # Use the internal flag, which is more reliable than checking the thread
        # directly due to potential race conditions during cleanup.
        # The thread check below is only a safeguard.
        is_running_flag = self._is_processing
        try:
            is_thread_alive = self.llm_prediction_thread is not None and self.llm_prediction_thread.isRunning()
            if is_running_flag != is_thread_alive:
                # This might indicate the flag wasn't updated correctly; log it.
                log.warning(f"LLM Handler processing flag ({is_running_flag}) mismatches thread state ({is_thread_alive}). Flag is primary.")
            return is_running_flag
        except RuntimeError:
            log.debug("is_processing: Caught RuntimeError checking isRunning (thread likely deleted).")
            # If the thread died unexpectedly, the flag might be stale. Reset it.
            if self._is_processing:
                self._set_processing_state(False)
            return False

    def _process_next_llm_item(self):
        """Processes the next directory in the unified LLM processing queue."""
        log.debug(f"--> Entered _process_next_llm_item. Queue size: {len(self.llm_processing_queue)}")
        if self.is_processing():
            log.info("LLM processing already running. Waiting for the current item to finish.")
            # Do not pop from the queue while a task is running;
            # _reset_llm_thread_references will call this method again.
            return

        if not self.llm_processing_queue:
            log.info("LLM processing queue is empty. Finishing.")
            self.llm_status_update.emit("LLM processing complete.")
            self._set_processing_state(False)  # Ensure the state is set to idle
            log.debug("<-- Exiting _process_next_llm_item (queue empty)")
            return

        # Set the state to busy *before* starting
        self._set_processing_state(True)

        # Peek at the first item *without* removing it yet
        next_item = self.llm_processing_queue[0]
        next_dir, file_list = next_item  # Unpack the tuple

        # --- Update Status/Progress ---
        total_in_queue_now = len(self.llm_processing_queue)
        status_msg = f"LLM Processing {os.path.basename(next_dir)} ({total_in_queue_now} remaining)..."
        self.llm_status_update.emit(status_msg)
        log.info(status_msg)

        # --- Start Prediction (which might fail) ---
        try:
            # Pass the potentially None file_list; _start_llm_prediction handles extraction if needed.
            self._start_llm_prediction(next_dir, file_list=file_list)
            # Pop the item *after* successfully starting the prediction
            self.llm_processing_queue.pop(0)
            log.debug(f"Successfully started LLM prediction for {next_dir} and removed it from the queue.")
        except Exception as e:
            log.exception(f"Error occurred *during* _start_llm_prediction call for {next_dir}: {e}")
            error_msg = f"Error starting LLM for {os.path.basename(next_dir)}: {e}"
            self.llm_status_update.emit(error_msg)
            self.llm_prediction_error.emit(next_dir, error_msg)  # Signal the error
            # --- Remove the failed item from the queue ---
            try:
                failed_item = self.llm_processing_queue.pop(0)
                log.warning(f"Removed failed item {failed_item} from LLM queue due to start error.")
            except IndexError:
                log.error("Attempted to pop failed item from an already empty LLM queue after a start error.")
            # --- Attempt to process the *next* item ---
            # Reset the processing state, since this item failed before the
            # thread's finished signal could fire.
            self._set_processing_state(False)
            # Use QTimer.singleShot to avoid deep recursion; try the next item after a short delay.
            QTimer.singleShot(100, self._process_next_llm_item)

    # --- Internal Slots to Handle Results/Errors from LLMPredictionHandler ---

    @Slot(str, list)
    def _handle_llm_result(self, input_path: str, source_rules: list):
        """Internal slot to receive results and emit the public signal."""
        log.debug(f"LLM Handler received result for {input_path}. Emitting llm_prediction_ready.")
        self.llm_prediction_ready.emit(input_path, source_rules)
        # Note: The thread's finished signal calls _reset_llm_thread_references,
        # which then calls _process_next_llm_item.
    @Slot(str, str)
    def _handle_llm_error(self, input_path: str, error_message: str):
        """Internal slot to receive errors and emit the public signal."""
        log.debug(f"LLM Handler received error for {input_path}: {error_message}. Emitting llm_prediction_error.")
        self.llm_prediction_error.emit(input_path, error_message)
        # Note: The thread's finished signal calls _reset_llm_thread_references,
        # which then calls _process_next_llm_item.

    def clear_queue(self):
        """Clears the LLM processing queue."""
        log.info(f"Clearing LLM processing queue ({len(self.llm_processing_queue)} items).")
        self.llm_processing_queue.clear()
        # TODO: Should we also attempt to cancel any *currently* running LLM task?
        # That might be complex. For now, this only clears the queue of pending items.
        if self.is_processing():
            log.warning("LLM queue cleared, but a task is currently running. It will complete.")
        else:
            self.llm_status_update.emit("LLM queue cleared.")
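
# ---------------------------------------------------------------------------
# Example wiring: a minimal, hedged sketch, not exercised by the application.
# It shows how a host window might connect this handler's signals and queue a
# request. `_StubWindow`, the example path, and the connected callbacks are
# hypothetical stand-ins; a real MainWindow supplies a valid Configuration
# object and its own _extract_file_list implementation.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    from PySide6.QtCore import QCoreApplication

    app = QCoreApplication(sys.argv)

    class _StubWindow:
        """Hypothetical stand-in for MainWindow (demonstration only)."""

        config = None  # A real Configuration object is required for actual predictions.

        def _extract_file_list(self, input_path):
            # Trivial stand-in; the real method would walk the source directory.
            return [input_path]

    handler = LLMInteractionHandler(_StubWindow())
    handler.llm_prediction_ready.connect(
        lambda path, rules: print(f"Rules ready for {path}: {rules}"))
    handler.llm_prediction_error.connect(
        lambda path, err: print(f"LLM error for {path}: {err}"))
    handler.llm_status_update.connect(print)

    # With no valid Configuration, this surfaces an error through
    # llm_prediction_error instead of starting a prediction thread.
    handler.queue_llm_request("/tmp/example_source", None)

    QTimer.singleShot(500, app.quit)
    sys.exit(app.exec())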