Dedicated LLM settings file - UNTESTED!

2025-05-04 13:24:10 +02:00
parent 01c8f68ea0
commit 336d698f9b
7 changed files with 403 additions and 381 deletions

@@ -7,7 +7,8 @@ from PySide6.QtCore import QObject, Signal, QThread, Slot, QTimer
# --- Backend Imports ---
# Assuming these might be needed based on MainWindow's usage
try:
from configuration import Configuration, ConfigurationError, load_base_config
# Removed load_base_config import
from configuration import Configuration, ConfigurationError
from .llm_prediction_handler import LLMPredictionHandler # Backend handler
from rule_structure import SourceRule # For signal emission type hint
except ImportError as e:
@@ -16,6 +17,7 @@ except ImportError as e:
load_base_config = None
ConfigurationError = Exception
SourceRule = None # Define as None if import fails
Configuration = None # Define as None if import fails
log = logging.getLogger(__name__)
@@ -114,16 +116,6 @@ class LLMInteractionHandler(QObject):
# Extract file list if not provided (needed for re-interpretation calls)
if file_list is None:
log.debug(f"File list not provided for {input_path_str}, extracting...")
# Need access to MainWindow's _extract_file_list or reimplement
# For now, assume MainWindow provides it or pass it during queueing
# Let's assume file_list is always provided correctly for now.
# If extraction fails before queueing, it won't reach here.
# If extraction needs to happen here, MainWindow ref is needed.
# Re-evaluating: MainWindow._extract_file_list is complex.
# It's better if the caller (MainWindow) extracts and passes the list.
# We'll modify queue_llm_request to require a non-None list eventually,
# or pass the main_window ref to call its extraction method.
# Let's pass main_window ref for now.
if hasattr(self.main_window, '_extract_file_list'):
file_list = self.main_window._extract_file_list(input_path_str)
if file_list is None:
@@ -131,11 +123,6 @@ class LLMInteractionHandler(QObject):
log.error(error_msg)
self.llm_status_update.emit(f"Error extracting files for {os.path.basename(input_path_str)}")
self.llm_prediction_error.emit(input_path_str, error_msg) # Signal error
# If called as part of a queue, we need to ensure the next item is processed.
# _reset_llm_thread_references handles this via the finished signal,
# but if the thread never starts, we need to trigger manually.
# This case should ideally be caught before calling _start_llm_prediction.
# We'll assume the queue logic handles failed extraction before calling this.
return # Stop if extraction failed
else:
error_msg = f"MainWindow reference does not have _extract_file_list method."
@@ -153,52 +140,27 @@ class LLMInteractionHandler(QObject):
self.llm_prediction_error.emit(input_path_str, error_msg)
return
# --- Load Base Config for LLM Settings ---
if load_base_config is None:
log.critical("LLM Error: load_base_config function not available.")
self.llm_status_update.emit("LLM Error: Cannot load base configuration.")
self.llm_prediction_error.emit(input_path_str, "load_base_config function not available.")
return
try:
base_config = load_base_config()
if not base_config:
raise ConfigurationError("Failed to load base configuration (app_settings.json).")
llm_settings = {
"llm_endpoint_url": base_config.get('llm_endpoint_url'),
"api_key": base_config.get('llm_api_key'),
"model_name": base_config.get('llm_model_name', 'gemini-pro'),
"prompt_template_content": base_config.get('llm_predictor_prompt'),
"asset_types": base_config.get('ASSET_TYPE_DEFINITIONS', {}),
"file_types": base_config.get('FILE_TYPE_DEFINITIONS', {}),
"examples": base_config.get('llm_predictor_examples', [])
}
except ConfigurationError as e:
log.error(f"LLM Configuration Error: {e}")
self.llm_status_update.emit(f"LLM Config Error: {e}")
self.llm_prediction_error.emit(input_path_str, f"LLM Configuration Error: {e}")
# Optionally show a QMessageBox via main_window ref if critical
# self.main_window.show_critical_error("LLM Config Error", str(e))
# --- Get Configuration Object ---
if not hasattr(self.main_window, 'config') or not isinstance(self.main_window.config, Configuration):
error_msg = "LLM Error: Main window does not have a valid Configuration object."
log.critical(error_msg)
self.llm_status_update.emit("LLM Error: Cannot access application configuration.")
self.llm_prediction_error.emit(input_path_str, error_msg)
return
except Exception as e:
log.exception(f"Unexpected error loading LLM configuration: {e}")
self.llm_status_update.emit(f"LLM Config Error: {e}")
self.llm_prediction_error.emit(input_path_str, f"Unexpected error loading LLM config: {e}")
return
# --- End Config Loading ---
config = self.main_window.config # Get the config object
# --- Check if Handler Class is Available ---
if LLMPredictionHandler is None:
log.critical("LLMPredictionHandler class not available.")
self.llm_status_update.emit("LLM Error: Prediction handler component missing.")
self.llm_prediction_error.emit(input_path_str, "LLMPredictionHandler class not available.")
return
# Clean up previous thread/handler if any exist (should not happen if queue logic is correct)
# --- Clean up previous thread/handler if necessary ---
if self.llm_prediction_thread and self.llm_prediction_thread.isRunning():
log.warning("Warning: Previous LLM prediction thread still running when trying to start new one. This indicates a potential logic error.")
# Attempt graceful shutdown (might need more robust handling)
log.warning("Warning: Previous LLM prediction thread still running when trying to start new one. Attempting cleanup.")
if self.llm_prediction_handler:
# Assuming LLMPredictionHandler has a cancel method or similar
if hasattr(self.llm_prediction_handler, 'cancel'):
self.llm_prediction_handler.cancel()
self.llm_prediction_thread.quit()
@@ -206,7 +168,6 @@ class LLMInteractionHandler(QObject):
log.warning("LLM thread did not quit gracefully. Forcing termination.")
self.llm_prediction_thread.terminate()
self.llm_prediction_thread.wait() # Wait after terminate
# Reset references after ensuring termination
self.llm_prediction_thread = None
self.llm_prediction_handler = None
@@ -214,8 +175,10 @@ class LLMInteractionHandler(QObject):
log.info(f"Starting LLM prediction thread for source: {input_path_str} with {len(file_list)} files.")
self.llm_status_update.emit(f"Starting LLM interpretation for {input_path_obj.name}...")
self.llm_prediction_thread = QThread(self.main_window) # Parent thread to main window's thread? Or self? Let's try self.
self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, llm_settings)
# --- Create Thread and Handler ---
self.llm_prediction_thread = QThread(self) # Parent thread to self
# Pass the Configuration object directly
self.llm_prediction_handler = LLMPredictionHandler(input_path_str, file_list, config)
self.llm_prediction_handler.moveToThread(self.llm_prediction_thread)
# Connect signals from handler to *internal* slots or directly emit signals
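
Taken together, the hunks above replace the hand-assembled llm_settings dict with the Configuration object that MainWindow already holds. A minimal sketch of the resulting start-up path, assuming MainWindow exposes config and _extract_file_list as the diff implies (signals, queueing, and error paths omitted):

from PySide6.QtCore import QObject, QThread
from .llm_prediction_handler import LLMPredictionHandler

class LLMInteractionHandler(QObject):
    # Heavily abbreviated; the full method is in the diff above.
    def __init__(self, main_window, parent=None):
        super().__init__(parent)
        self.main_window = main_window
        self.llm_prediction_thread = None
        self.llm_prediction_handler = None

    def _start_llm_prediction(self, input_path_str, file_list=None):
        # The caller (MainWindow) is expected to extract and pass file_list;
        # fall back to its helper only when it was omitted.
        if file_list is None and hasattr(self.main_window, '_extract_file_list'):
            file_list = self.main_window._extract_file_list(input_path_str)
        # Reuse the Configuration object the main window already loaded
        # instead of re-reading app_settings.json into a settings dict.
        config = self.main_window.config
        self.llm_prediction_thread = QThread(self)
        self.llm_prediction_handler = LLMPredictionHandler(
            input_path_str, file_list, config)
        self.llm_prediction_handler.moveToThread(self.llm_prediction_thread)
        self.llm_prediction_thread.start()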

@@ -14,8 +14,8 @@ from rule_structure import SourceRule, AssetRule, FileRule # Ensure AssetRule an
# Assuming configuration loads app_settings.json
# Adjust the import path if necessary
# Removed Configuration import, will use load_base_config if needed or passed settings
# from configuration import Configuration
# Import Configuration class
from configuration import Configuration
# from configuration import load_base_config # No longer needed here
from .base_prediction_handler import BasePredictionHandler # Import base class
@@ -28,7 +28,7 @@ class LLMPredictionHandler(BasePredictionHandler):
"""
# Signals (prediction_ready, prediction_error, status_update) are inherited
def __init__(self, input_source_identifier: str, file_list: list, llm_settings: dict, parent: QObject = None):
def __init__(self, input_source_identifier: str, file_list: list, config: Configuration, parent: QObject = None):
"""
Initializes the LLM handler.
@@ -36,16 +36,15 @@ class LLMPredictionHandler(BasePredictionHandler):
input_source_identifier: The unique identifier for the input source (e.g., file path).
file_list: A list of *relative* file paths extracted from the input source.
(LLM expects relative paths based on the prompt template).
llm_settings: A dictionary containing necessary LLM configuration
(endpoint_url, api_key, prompt_template_content, etc.).
config: The loaded Configuration object containing all settings.
parent: The parent QObject.
"""
super().__init__(input_source_identifier, parent)
# input_source_identifier is stored by the base class as self.input_source_identifier
self.file_list = file_list # Store the provided relative file list
self.llm_settings = llm_settings # Store the settings dictionary
self.endpoint_url = self.llm_settings.get('llm_endpoint_url')
self.api_key = self.llm_settings.get('llm_api_key')
self.file_list = file_list # Store the provided relative file list
self.config = config # Store the Configuration object
# Access LLM settings via self.config properties when needed
# e.g., self.config.llm_endpoint_url, self.config.llm_api_key
# _is_running and _is_cancelled are handled by the base class
# The run() and cancel() slots are provided by the base class.
@@ -128,28 +127,17 @@ class LLMPredictionHandler(BasePredictionHandler):
"""
Prepares the full prompt string to send to the LLM using stored settings.
"""
# Access settings from the stored dictionary
prompt_template = self.llm_settings.get('prompt_template_content')
# Access settings via the Configuration object
prompt_template = self.config.llm_predictor_prompt
if not prompt_template:
# Attempt to fall back to reading the default file path if content is missing
default_template_path = 'llm_prototype/prompt_template.txt'
print(f"Warning: 'prompt_template_content' missing in llm_settings. Falling back to reading default file: {default_template_path}")
try:
with open(default_template_path, 'r', encoding='utf-8') as f:
prompt_template = f.read()
except FileNotFoundError:
raise ValueError(f"LLM predictor prompt template content missing in settings and default file not found at: {default_template_path}")
except Exception as e:
raise ValueError(f"Error reading default LLM prompt template file {default_template_path}: {e}")
if not prompt_template: # Final check after potential fallback
raise ValueError("LLM predictor prompt template content is empty or could not be loaded.")
# Config object should handle defaults, or raise an error during init if the critical prompt is missing
raise ValueError("LLM predictor prompt template content is empty or could not be loaded from configuration.")
# Access definitions and examples from the settings dictionary
asset_defs = json.dumps(self.llm_settings.get('asset_types', {}), indent=4)
file_defs = json.dumps(self.llm_settings.get('file_types', {}), indent=4)
examples = json.dumps(self.llm_settings.get('examples', []), indent=2)
# Access definitions and examples via Configuration object methods/properties
asset_defs = json.dumps(self.config.get_asset_type_definitions(), indent=4)
file_defs = json.dumps(self.config.get_file_type_definitions_with_examples(), indent=4)
examples = json.dumps(self.config.get_llm_examples(), indent=2)
# Format *relative* file list as a single string with newlines
file_list_str = "\n".join(relative_file_list)
@@ -177,32 +165,34 @@ class LLMPredictionHandler(BasePredictionHandler):
ValueError: If the endpoint URL is not configured or the response is invalid.
requests.exceptions.RequestException: For other request-related errors.
"""
if not self.endpoint_url:
endpoint_url = self.config.llm_endpoint_url # Get from config
if not endpoint_url:
raise ValueError("LLM endpoint URL is not configured in settings.")
headers = {
"Content-Type": "application/json",
}
if self.api_key:
headers["Authorization"] = f"Bearer {self.api_key}"
api_key = self.config.llm_api_key # Get from config
if api_key:
headers["Authorization"] = f"Bearer {api_key}"
# Construct payload based on OpenAI Chat Completions format
payload = {
# Use configured model name, default to 'local-model'
"model": self.llm_settings.get("llm_model_name", "local-model"),
"model": self.config.llm_model_name or "local-model", # Use config property, fallback
"messages": [{"role": "user", "content": prompt}],
# Use configured temperature, default to 0.5
"temperature": self.llm_settings.get("llm_temperature", 0.5),
"temperature": self.config.llm_temperature, # Use config property (has default)
# Add max_tokens if needed/configurable:
# "max_tokens": self.llm_settings.get("llm_max_tokens", 1024),
# "max_tokens": self.config.llm_max_tokens, # Example if added to config
# Ensure the LLM is instructed to return JSON in the prompt itself
# Some models/endpoints support a specific json mode:
# "response_format": { "type": "json_object" } # If supported by endpoint
}
# Status update emitted by _perform_prediction before calling this
# self.status_update.emit(f"Sending request to LLM at {self.endpoint_url}...")
print(f"--- Calling LLM API: {self.endpoint_url} ---")
# self.status_update.emit(f"Sending request to LLM at {endpoint_url}...")
print(f"--- Calling LLM API: {endpoint_url} ---")
# print(f"--- Payload Preview ---\n{json.dumps(payload, indent=2)[:500]}...\n--- END Payload Preview ---")
# Note: Exceptions raised here (Timeout, RequestException, ValueError)
@@ -210,10 +200,10 @@ class LLMPredictionHandler(BasePredictionHandler):
# Make the POST request with a timeout
response = requests.post(
self.endpoint_url,
endpoint_url,
headers=headers,
json=payload,
timeout=self.llm_settings.get("llm_request_timeout", 120)
timeout=self.config.llm_request_timeout # Use config property (has default)
)
response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
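
For orientation, this is roughly how _call_llm_api reads after the commit, stitched together from the hunks above; error handling and response parsing are abbreviated, and every config property name is taken from the diff:

import requests

class LLMPredictionHandler:  # abbreviated; the real class extends BasePredictionHandler
    def __init__(self, config):
        self.config = config

    def _call_llm_api(self, prompt: str) -> dict:
        endpoint_url = self.config.llm_endpoint_url
        if not endpoint_url:
            raise ValueError("LLM endpoint URL is not configured in settings.")
        headers = {"Content-Type": "application/json"}
        api_key = self.config.llm_api_key
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"
        # OpenAI Chat Completions payload, as in the diff
        payload = {
            "model": self.config.llm_model_name or "local-model",
            "messages": [{"role": "user", "content": prompt}],
            "temperature": self.config.llm_temperature,
        }
        response = requests.post(endpoint_url, headers=headers, json=payload,
                                 timeout=self.config.llm_request_timeout)
        response.raise_for_status()  # HTTPError on 4xx/5xx responses
        return response.json()
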
@@ -328,8 +318,8 @@ class LLMPredictionHandler(BasePredictionHandler):
# --- Prepare for Rule Creation ---
source_rule = SourceRule(input_path=self.input_source_identifier)
valid_asset_types = list(self.llm_settings.get('asset_types', {}).keys())
valid_file_types = list(self.llm_settings.get('file_types', {}).keys())
valid_asset_types = self.config.get_asset_type_keys() # Use config method
valid_file_types = self.config.get_file_type_keys() # Use config method
asset_rules_map: Dict[str, AssetRule] = {} # Maps group_name to AssetRule
# --- Process Individual Files and Build Rules ---
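
The new code leans on Configuration accessors (llm_endpoint_url, llm_model_name, get_asset_type_keys(), and so on) whose implementations this commit does not show. A hypothetical sketch of that surface, inferred only from the call sites above; the real class in configuration.py may well differ:

class Configuration:
    """Hypothetical accessor surface implied by the call sites in this
    commit; defaults mirror the ones the old llm_settings dict used."""

    def __init__(self, settings: dict):
        self._settings = settings

    @property
    def llm_endpoint_url(self):
        return self._settings.get('llm_endpoint_url')

    @property
    def llm_api_key(self):
        return self._settings.get('llm_api_key')

    @property
    def llm_model_name(self):
        return self._settings.get('llm_model_name')

    @property
    def llm_temperature(self):
        return self._settings.get('llm_temperature', 0.5)

    @property
    def llm_request_timeout(self):
        return self._settings.get('llm_request_timeout', 120)

    @property
    def llm_predictor_prompt(self):
        return self._settings.get('llm_predictor_prompt')

    def get_asset_type_definitions(self) -> dict:
        return self._settings.get('ASSET_TYPE_DEFINITIONS', {})

    def get_file_type_definitions_with_examples(self) -> dict:
        return self._settings.get('FILE_TYPE_DEFINITIONS', {})

    def get_llm_examples(self) -> list:
        return self._settings.get('llm_predictor_examples', [])

    def get_asset_type_keys(self) -> list:
        return list(self.get_asset_type_definitions().keys())

    def get_file_type_keys(self) -> list:
        return list(self.get_file_type_definitions_with_examples().keys())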