diff --git a/autobot-backend/chat_workflow/tool_handler.py b/autobot-backend/chat_workflow/tool_handler.py index eff84bad8..04ddb1bcd 100644 --- a/autobot-backend/chat_workflow/tool_handler.py +++ b/autobot-backend/chat_workflow/tool_handler.py @@ -8,12 +8,14 @@ and approval workflows. """ +from __future__ import annotations + import asyncio import html import json import logging import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any from async_chat_workflow import WorkflowMessage from utils.errors import RepairableException @@ -184,8 +186,8 @@ def _match_repairable_error( def _create_execution_result( - command: str, host: str, result: Dict[str, Any], approved: bool = False -) -> Dict[str, Any]: + command: str, host: str, result: dict[str, Any], approved: bool = False +) -> dict[str, Any]: """Create standardized execution result record (Issue #315: extracted). Args: @@ -210,10 +212,10 @@ def _create_execution_result( async def _try_mcp_dispatch( tool_name: str, - tool_call: Dict[str, Any], - execution_results: List[Dict[str, Any]], + tool_call: dict[str, Any], + execution_results: list[dict[str, Any]], role: str = "user", -) -> Optional[WorkflowMessage]: +) -> WorkflowMessage | None: """Attempt to dispatch tool_name via the MCP registry. Issue #2513. Args: @@ -294,7 +296,7 @@ def _init_terminal_tool(self): def _parse_tool_calls( self, text: str, is_first_iteration: bool = False - ) -> List[Dict[str, Any]]: + ) -> list[dict[str, Any]]: """ Parse tool calls from LLM response using XML-style markers. @@ -357,7 +359,7 @@ def _should_defer_for_planning( def _extract_tool_calls_from_text( self, text: str - ) -> tuple[List[Dict[str, Any]], int]: + ) -> tuple[list[dict[str, Any]], int]: """Extract tool calls using regex pattern. 
Issue #650, #620.""" tool_calls = [] match_count = 0 @@ -393,7 +395,7 @@ def _extract_tool_calls_from_text( def _log_parsing_result( self, - tool_calls: List, + tool_calls: list, match_count: int, has_tool_call: bool, is_first_iteration: bool, @@ -418,7 +420,7 @@ def _log_parsing_result( async def _execute_terminal_command( self, session_id: str, command: str, host: str = "main", description: str = "" - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Execute terminal command via terminal tool. @@ -493,7 +495,7 @@ def _check_empty_command_history(self, elapsed_time: float) -> tuple: def _check_command_mismatch( self, command: str, - last_command: Dict[str, Any], + last_command: dict[str, Any], elapsed_time: float, max_wait_time: float, ) -> tuple | None: @@ -513,8 +515,8 @@ def _check_command_mismatch( return None, None, False def _build_approval_status_msg( - self, last_command: Dict[str, Any] - ) -> Dict[str, Any]: + self, last_command: dict[str, Any] + ) -> dict[str, Any]: """Build approval status message from command history. 
Issue #620.""" approval_status = "approved" if last_command.get("approved_by") else "denied" comment = last_command.get("approval_comment") or last_command.get( @@ -524,7 +526,7 @@ def _build_approval_status_msg( def _check_approval_completion( self, - session_info: Dict[str, Any], + session_info: dict[str, Any], command: str, elapsed_time: float, max_wait_time: float, @@ -560,7 +562,7 @@ def _build_approval_request_message( self, session_id: str, command: str, - result: Dict[str, Any], + result: dict[str, Any], terminal_session_id: str, description: str, ) -> WorkflowMessage: @@ -580,7 +582,7 @@ def _build_approval_request_message( ) def _build_waiting_message( - self, command: str, result: Dict[str, Any] + self, command: str, result: dict[str, Any] ) -> WorkflowMessage: """Build the waiting for approval WorkflowMessage.""" return WorkflowMessage( @@ -594,7 +596,7 @@ def _build_waiting_message( ) def _log_polling_status( - self, poll_count: int, session_info: Dict[str, Any] | None, elapsed_time: float + self, poll_count: int, session_info: dict[str, Any] | None, elapsed_time: float ) -> None: """Log periodic polling status updates. Issue #620.""" if poll_count % 20 != 0: @@ -651,7 +653,7 @@ async def _handle_pending_approval( self, session_id: str, command: str, - result: Dict[str, Any], + result: dict[str, Any], terminal_session_id: str, description: str, ): @@ -701,7 +703,7 @@ async def _handle_approved_command( self, command: str, host: str, - approval_result: Dict[str, Any], + approval_result: dict[str, Any], ollama_endpoint: str, selected_model: str, session_id: str = "", @@ -756,7 +758,7 @@ async def _handle_approved_command( yield (exec_result, additional_text) def _handle_approval_failure( - self, command: str, approval_result: Dict[str, Any] | None + self, command: str, approval_result: dict[str, Any] | None ) -> tuple[WorkflowMessage, str]: """Issue #665: Extracted from _handle_approval_workflow to reduce function length. 
@@ -790,7 +792,7 @@ async def _handle_approval_workflow( session_id: str, command: str, host: str, - result: Dict[str, Any], + result: dict[str, Any], terminal_session_id: str, description: str, ollama_endpoint: str, @@ -832,7 +834,7 @@ async def _handle_direct_execution( self, command: str, host: str, - result: Dict[str, Any], + result: dict[str, Any], ollama_endpoint: str, selected_model: str, session_id: str = "", @@ -874,14 +876,14 @@ async def _handle_direct_execution( yield (exec_result, f"\n\n{interpretation}") async def _collect_workflow_results( - self, workflow_gen, execution_results: List, additional_response_parts: List + self, workflow_gen, execution_results: list, additional_response_parts: list ): """Collect results from workflow generator (Issue #315: extracted). Args: workflow_gen: Async generator from workflow handler - execution_results: List to append exec results to - additional_response_parts: List to append text parts to + execution_results: list to append exec results to + additional_response_parts: list to append text parts to Yields: WorkflowMessage items from the generator @@ -902,12 +904,12 @@ async def _handle_pending_approval_command( terminal_session_id: str, command: str, host: str, - result: Dict[str, Any], + result: dict[str, Any], description: str, ollama_endpoint: str, selected_model: str, - execution_results: List, - additional_response_parts: List, + execution_results: list, + additional_response_parts: list, ): """Handle command requiring approval workflow. Issue #620.""" if not terminal_session_id: @@ -938,11 +940,11 @@ async def _handle_successful_command( self, command: str, host: str, - result: Dict[str, Any], + result: dict[str, Any], ollama_endpoint: str, selected_model: str, - execution_results: List, - additional_response_parts: List, + execution_results: list, + additional_response_parts: list, session_id: str = "", ): """Handle successful direct command execution. 
Issue #620.""" @@ -955,7 +957,7 @@ async def _handle_successful_command( yield msg def _extract_command_params( - self, tool_call: Dict[str, Any] + self, tool_call: dict[str, Any] ) -> tuple[str, str, str]: """Extract command parameters from tool call dict. Issue #620.""" command = tool_call["params"].get("command") @@ -970,12 +972,12 @@ async def _dispatch_command_by_status( terminal_session_id: str, command: str, host: str, - result: Dict[str, Any], + result: dict[str, Any], description: str, ollama_endpoint: str, selected_model: str, - execution_results: List, - additional_response_parts: List, + execution_results: list, + additional_response_parts: list, ): """Dispatch command handling based on execution status. Issue #620.""" if status == "pending_approval": @@ -1012,13 +1014,13 @@ async def _dispatch_command_by_status( async def _process_single_command( self, - tool_call: Dict[str, Any], + tool_call: dict[str, Any], session_id: str, terminal_session_id: str, ollama_endpoint: str, selected_model: str, - execution_results: List, - additional_response_parts: List, + execution_results: list, + additional_response_parts: list, ): """Process a single execute_command tool call. Issue #620. @@ -1052,8 +1054,8 @@ async def _process_single_command( async def _handle_command_error( self, command: str, - result: Dict[str, Any], - additional_response_parts: List, + result: dict[str, Any], + additional_response_parts: list, ): """Handle command execution error (Issue #665: extracted helper). 
@@ -1062,7 +1064,7 @@ async def _handle_command_error( Args: command: The command that failed result: Execution result dict with error/stderr - additional_response_parts: List to append context to + additional_response_parts: list to append context to Yields: WorkflowMessage with error details @@ -1136,7 +1138,7 @@ def _classify_command_error( ) def _handle_respond_tool( - self, tool_call: Dict[str, Any] + self, tool_call: dict[str, Any] ) -> tuple[WorkflowMessage, bool, str]: """ Handle the 'respond' tool for explicit task completion. @@ -1170,7 +1172,7 @@ def _handle_respond_tool( return message, break_loop_requested, respond_content def _handle_delegate_tool( - self, tool_call: Dict[str, Any], execution_results: List[Dict[str, Any]] + self, tool_call: dict[str, Any], execution_results: list[dict[str, Any]] ) -> WorkflowMessage: """ Handle the 'delegate' tool for subordinate agent delegation. @@ -1212,7 +1214,7 @@ def _handle_delegate_tool( ) def _validate_browser_params( - self, tool_name: str, params: Dict[str, Any] + self, tool_name: str, params: dict[str, Any] ) -> str | None: """Validate browser tool params. Returns error message or None. #1368.""" from api.browser_mcp import is_script_safe, is_url_allowed @@ -1225,8 +1227,8 @@ def _validate_browser_params( async def _handle_browser_tool( self, - tool_call: Dict[str, Any], - execution_results: List[Dict[str, Any]], + tool_call: dict[str, Any], + execution_results: list[dict[str, Any]], ): """Execute a browser tool call via browser_mcp. Issue #1368. @@ -1295,8 +1297,8 @@ async def _handle_browser_tool( def _format_browser_result( self, tool_name: str, - params: Dict[str, Any], - result: Dict[str, Any], + params: dict[str, Any], + result: dict[str, Any], ) -> str: """Format browser tool result as text for LLM context. Issue #1368. 
@@ -1354,8 +1356,8 @@ def _format_browser_result( async def _handle_web_search_tool( self, - tool_call: Dict[str, Any], - execution_results: List[Dict[str, Any]], + tool_call: dict[str, Any], + execution_results: list[dict[str, Any]], ): """Execute a web search via browser VM. Issue #2306. @@ -1483,7 +1485,7 @@ async def _web_search_via_browser_vm(self, query: str) -> str: return f"No search results found for: {query}" def _build_execution_summary( - self, execution_results: List[Dict[str, Any]] + self, execution_results: list[dict[str, Any]] ) -> WorkflowMessage: """Build execution summary message from results. Issue #620.""" return WorkflowMessage( @@ -1501,8 +1503,8 @@ def _build_execution_summary( def _build_unknown_tool_error( self, tool_name: str, - ctx: Optional["LLMIterationContext"], - execution_results: List[Dict[str, Any]], + ctx: "LLMIterationContext" | None, + execution_results: list[dict[str, Any]], ) -> WorkflowMessage: """Build error message for an unknown tool call (#2305, #2310).""" known_tools = sorted( @@ -1529,14 +1531,14 @@ def _build_unknown_tool_error( async def _dispatch_tool_call( self, - tool_call: Dict[str, Any], + tool_call: dict[str, Any], session_id: str, terminal_session_id: str, ollama_endpoint: str, selected_model: str, - execution_results: List[Dict[str, Any]], - additional_response_parts: List[str], - ctx: Optional["LLMIterationContext"] = None, + execution_results: list[dict[str, Any]], + additional_response_parts: list[str], + ctx: "LLMIterationContext" | None = None, role: str = "user", ): """Dispatch a single tool call to appropriate handler. Issue #620. 
@@ -1610,12 +1612,12 @@ async def _dispatch_tool_call( async def _process_tool_calls( self, - tool_calls: List[Dict[str, Any]], + tool_calls: list[dict[str, Any]], session_id: str, terminal_session_id: str, ollama_endpoint: str, selected_model: str, - ctx: Optional["LLMIterationContext"] = None, + ctx: "LLMIterationContext" | None = None, ): """Process all tool calls from LLM response. diff --git a/autobot-backend/code_analysis/src/env_analyzer.py b/autobot-backend/code_analysis/src/env_analyzer.py index 036b26f9d..4d6ecfa27 100644 --- a/autobot-backend/code_analysis/src/env_analyzer.py +++ b/autobot-backend/code_analysis/src/env_analyzer.py @@ -6,6 +6,8 @@ Analyzes codebase for hardcoded values that should be environment variables """ +from __future__ import annotations + import ast import asyncio import json @@ -15,7 +17,7 @@ import time from dataclasses import dataclass from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple +from typing import Any from autobot_shared.ssot_config import QUALITY_MODEL @@ -29,9 +31,10 @@ sys.path.insert(0, str(_project_root)) try: - from autobot_shared.redis_client import get_redis_client from config import UnifiedConfig + from autobot_shared.redis_client import get_redis_client + _REDIS_AVAILABLE = True _CONFIG_AVAILABLE = True except ImportError: @@ -299,7 +302,7 @@ class HardcodedValue: file_path: str line_number: int - variable_name: Optional[str] + variable_name: str | None value: str value_type: str # path, url, port, key, etc. context: str # surrounding code context @@ -316,7 +319,7 @@ class ConfigRecommendation: default_value: str description: str category: str # database, api, security, paths, etc. 
- affected_files: List[str] + affected_files: list[str] priority: str # high, medium, low @@ -386,8 +389,8 @@ def __init__(self, redis_client=None): logger.info("Environment Analyzer initialized") async def analyze_codebase( - self, root_path: str = ".", patterns: List[str] = None - ) -> Dict[str, Any]: + self, root_path: str = ".", patterns: list[str] = None + ) -> dict[str, Any]: """Analyze entire codebase for hardcoded values""" start_time = time.time() @@ -450,8 +453,8 @@ async def analyze_codebase( return results async def _scan_for_hardcoded_values( - self, root_path: str, patterns: List[str] - ) -> List[HardcodedValue]: + self, root_path: str, patterns: list[str] + ) -> list[HardcodedValue]: """Scan files for hardcoded values (Issue #340 - refactored)""" hardcoded_values = [] root = Path(root_path) @@ -463,7 +466,7 @@ async def _scan_for_hardcoded_values( return hardcoded_values async def _process_file_for_values( - self, file_path: Path, hardcoded_values: List[HardcodedValue] + self, file_path: Path, hardcoded_values: list[HardcodedValue] ) -> None: """Process a single file for hardcoded values (Issue #340 - extracted)""" if not file_path.is_file() or self._should_skip_file(file_path): @@ -503,7 +506,7 @@ def _should_skip_file(self, file_path: Path) -> bool: async def _scan_file_for_hardcoded_values( self, file_path: str - ) -> List[HardcodedValue]: + ) -> list[HardcodedValue]: """Scan a single file for hardcoded values""" hardcoded_values = [] @@ -534,8 +537,8 @@ async def _scan_file_for_hardcoded_values( return hardcoded_values async def _scan_ast_for_hardcoded_values( - self, file_path: str, tree: ast.AST, lines: List[str] - ) -> List[HardcodedValue]: + self, file_path: str, tree: ast.AST, lines: list[str] + ) -> list[HardcodedValue]: """Issue #632: Scan AST for hardcoded values (docstring + line-level filtering)""" hardcoded_values = [] @@ -634,8 +637,8 @@ def _is_config_access_line(self, line: str) -> bool: return False def 
_extract_hardcoded_from_node( - self, node: ast.AST, file_path: str, lines: List[str] - ) -> Optional[HardcodedValue]: + self, node: ast.AST, file_path: str, lines: list[str] + ) -> HardcodedValue | None: """Extract hardcoded value from AST node (Issue #340 - extracted)""" # String literals if isinstance(node, ast.Str): @@ -649,8 +652,8 @@ def _extract_hardcoded_from_node( return None def _extract_from_str_node( - self, node: ast.Str, file_path: str, lines: List[str] - ) -> Optional[HardcodedValue]: + self, node: ast.Str, file_path: str, lines: list[str] + ) -> HardcodedValue | None: """Extract from string literal node (Issue #340 - extracted)""" value = node.s if not self._is_potentially_configurable(value): @@ -658,8 +661,8 @@ def _extract_from_str_node( return self._create_hardcoded_value(file_path, node.lineno, None, value, lines) def _extract_from_num_node( - self, node: ast.Num, file_path: str, lines: List[str] - ) -> Optional[HardcodedValue]: + self, node: ast.Num, file_path: str, lines: list[str] + ) -> HardcodedValue | None: """Extract from numeric node (Issue #340 - extracted, Issue #630 - context filtering)""" value = str(node.n) if not self._is_numeric_config_candidate(value): @@ -695,8 +698,8 @@ def _is_non_config_numeric_context(self, line: str) -> bool: return False def _extract_from_assign_node( - self, node: ast.Assign, file_path: str, lines: List[str] - ) -> Optional[HardcodedValue]: + self, node: ast.Assign, file_path: str, lines: list[str] + ) -> HardcodedValue | None: """Extract from assignment node (Issue #340 - extracted)""" for target in node.targets: hv = self._try_extract_named_value( @@ -712,8 +715,8 @@ def _try_extract_named_value( value_node: ast.AST, file_path: str, lineno: int, - lines: List[str], - ) -> Optional[HardcodedValue]: + lines: list[str], + ) -> HardcodedValue | None: """Try to extract a named hardcoded value (Issue #340 - extracted)""" if not isinstance(target, ast.Name): return None @@ -732,7 +735,7 @@ def 
_try_extract_named_value( return self._create_hardcoded_value(file_path, lineno, var_name, value, lines) @staticmethod - def _extract_match_value(match: "re.Match") -> Optional[str]: + def _extract_match_value(match: "re.Match") -> str | None: """Return the first non-None captured group, or the full match. Issue #1183: Extracted from _regex_scan_file() to reduce function length. @@ -744,8 +747,8 @@ def _extract_match_value(match: "re.Match") -> Optional[str]: return match.group(0) async def _regex_scan_file( - self, file_path: str, content: str, lines: List[str] - ) -> List[HardcodedValue]: + self, file_path: str, content: str, lines: list[str] + ) -> list[HardcodedValue]: """Issue #632: Scan file using regex with smart filtering (aligned with shell script). Applies: @@ -808,10 +811,10 @@ def _create_hardcoded_value( self, file_path: str, line_num: int, - var_name: Optional[str], + var_name: str | None, value: str, - lines: List[str], - category: Optional[str] = None, + lines: list[str], + category: str | None = None, ) -> HardcodedValue: """Create a HardcodedValue object with analysis""" @@ -1008,8 +1011,8 @@ def _is_numeric_config_candidate(self, value: str) -> bool: return False def _classify_value( - self, value: str, category: Optional[str], context: str - ) -> Tuple[str, str]: + self, value: str, category: str | None, context: str + ) -> tuple[str, str]: """Issue #632: Classify value and severity (aligned with shell script priorities)""" # Guard against None values @@ -1041,7 +1044,7 @@ def _classify_value( # MEDIUM severity: URLs and hostnames (not example domains) return self._classify_non_numeric_value(value, category, _WEB_PROTOCOL_PREFIXES) - def _classify_numeric_value(self, num: int) -> Tuple[str, str]: + def _classify_numeric_value(self, num: int) -> tuple[str, str]: """Classify numeric values by severity. Helper for _classify_value (#632). 
@@ -1070,8 +1073,8 @@ def _classify_numeric_value(self, num: int) -> Tuple[str, str]: return "numeric", "low" def _classify_non_numeric_value( - self, value: str, category: Optional[str], web_prefixes: tuple - ) -> Tuple[str, str]: + self, value: str, category: str | None, web_prefixes: tuple + ) -> tuple[str, str]: """Classify non-numeric string values by severity. Helper for _classify_value (#632). @@ -1094,7 +1097,7 @@ def _classify_non_numeric_value( return category or "string", "low" def _suggest_env_var_name( - self, var_name: Optional[str], value: str, value_type: str, file_path: str + self, var_name: str | None, value: str, value_type: str, file_path: str ) -> str: """Suggest an environment variable name""" @@ -1132,8 +1135,8 @@ def _suggest_env_var_name( ) async def _categorize_values( - self, hardcoded_values: List[HardcodedValue] - ) -> Dict[str, List[HardcodedValue]]: + self, hardcoded_values: list[HardcodedValue] + ) -> dict[str, list[HardcodedValue]]: """Categorize hardcoded values by type""" categories = {} @@ -1145,8 +1148,8 @@ async def _categorize_values( return categories async def _generate_recommendations( - self, categorized: Dict[str, List[HardcodedValue]] - ) -> List[ConfigRecommendation]: + self, categorized: dict[str, list[HardcodedValue]] + ) -> list[ConfigRecommendation]: """Generate configuration recommendations""" recommendations = [] @@ -1203,9 +1206,9 @@ def _map_to_config_category(self, value_type: str) -> str: def _calculate_env_metrics( self, - hardcoded_values: List[HardcodedValue], - recommendations: List[ConfigRecommendation], - ) -> Dict[str, Any]: + hardcoded_values: list[HardcodedValue], + recommendations: list[ConfigRecommendation], + ) -> dict[str, Any]: """Calculate environment analysis metrics""" severity_counts = { @@ -1230,7 +1233,7 @@ def _calculate_env_metrics( "configuration_complexity": len(category_counts), } - def _serialize_hardcoded_value(self, value: HardcodedValue) -> Dict[str, Any]: + def 
_serialize_hardcoded_value(self, value: HardcodedValue) -> dict[str, Any]: """Serialize hardcoded value for output with SSOT mapping (Issue #642)""" result = { "file": value.file_path, @@ -1269,7 +1272,7 @@ def _serialize_hardcoded_value(self, value: HardcodedValue) -> Dict[str, Any]: return result - def _serialize_recommendation(self, rec: ConfigRecommendation) -> Dict[str, Any]: + def _serialize_recommendation(self, rec: ConfigRecommendation) -> dict[str, Any]: """Serialize configuration recommendation for output""" return { "env_var_name": rec.env_var_name, @@ -1280,7 +1283,7 @@ def _serialize_recommendation(self, rec: ConfigRecommendation) -> Dict[str, Any] "priority": rec.priority, } - async def _cache_results(self, results: Dict[str, Any]): + async def _cache_results(self, results: dict[str, Any]): """Cache analysis results in Redis""" if self.redis_client: try: @@ -1307,7 +1310,7 @@ async def _clear_cache(self): except Exception as e: logger.warning(f"Failed to clear cache: {e}") - async def get_cached_results(self) -> Optional[Dict[str, Any]]: + async def get_cached_results(self) -> dict[str, Any] | None: """Get cached analysis results""" if self.redis_client: try: @@ -1324,11 +1327,11 @@ async def get_cached_results(self) -> Optional[Dict[str, Any]]: async def llm_filter_hardcoded( self, - hardcoded_values: List[HardcodedValue], + hardcoded_values: list[HardcodedValue], model: str = QUALITY_MODEL, batch_size: int = 100, - priority_filter: Optional[str] = None, - ) -> List[HardcodedValue]: + priority_filter: str | None = None, + ) -> list[HardcodedValue]: """Use LLM to filter false positives. Issue #633.""" import os @@ -1357,9 +1360,9 @@ async def llm_filter_hardcoded( def _select_llm_candidates( self, - hardcoded_values: List[HardcodedValue], - priority_filter: Optional[str], - ) -> List[HardcodedValue]: + hardcoded_values: list[HardcodedValue], + priority_filter: str | None, + ) -> list[HardcodedValue]: """Select candidates for LLM filtering. 
Helper for llm_filter_hardcoded (#633). @@ -1395,11 +1398,11 @@ async def _check_ollama_health(self, host: str, port: str) -> bool: async def _process_llm_batches( self, - candidates: List[HardcodedValue], + candidates: list[HardcodedValue], ollama_url: str, model: str, batch_size: int, - ) -> List[HardcodedValue]: + ) -> list[HardcodedValue]: """Process candidates through LLM in batches. Helper for llm_filter_hardcoded (#633). @@ -1441,14 +1444,14 @@ def _log_llm_reduction(self, original: int, filtered: int) -> None: f"({pct:.1f}% reduction)" ) - def _build_llm_filter_prompt(self, batch: List[HardcodedValue]) -> str: + def _build_llm_filter_prompt(self, batch: list[HardcodedValue]) -> str: """ Build LLM prompt for filtering a batch of hardcoded value candidates. Issue #633: Simple yes/no classification prompt optimized for small models. Args: - batch: List of HardcodedValue candidates to evaluate + batch: list of HardcodedValue candidates to evaluate Returns: Formatted prompt string @@ -1503,8 +1506,8 @@ async def _call_ollama_filter( url: str, model: str, prompt: str, - session: Optional[Any] = None, - ) -> List[int]: + session: Any | None = None, + ) -> list[int]: """ Call Ollama API for filtering and parse response. @@ -1552,7 +1555,7 @@ async def _call_ollama_filter( if should_close: await session.close() - def _parse_llm_filter_response(self, response_text: str) -> List[int]: + def _parse_llm_filter_response(self, response_text: str) -> list[int]: """ Parse LLM response to extract line numbers of true issues. @@ -1589,10 +1592,10 @@ def _parse_llm_filter_response(self, response_text: str) -> List[int]: async def analyze_codebase_with_llm_filter( self, root_path: str = ".", - patterns: List[str] = None, + patterns: list[str] = None, llm_model: str = QUALITY_MODEL, - filter_priority: Optional[str] = "high", - ) -> Dict[str, Any]: + filter_priority: str | None = "high", + ) -> dict[str, Any]: """Analyze codebase with LLM filtering. 
Issue #633.""" results = await self.analyze_codebase(root_path, patterns) original_count = results.get("total_hardcoded_values", 0) @@ -1618,8 +1621,8 @@ async def analyze_codebase_with_llm_filter( return results def _deserialize_hardcoded_details( - self, details: List[Dict[str, Any]] - ) -> List[HardcodedValue]: + self, details: list[dict[str, Any]] + ) -> list[HardcodedValue]: """Reconstruct HardcodedValue objects from serialized data. Helper for analyze_codebase_with_llm_filter (#633). @@ -1641,11 +1644,11 @@ def _deserialize_hardcoded_details( def _apply_filtered_results( self, - results: Dict[str, Any], - filtered_values: List[HardcodedValue], + results: dict[str, Any], + filtered_values: list[HardcodedValue], original_count: int, llm_model: str, - filter_priority: Optional[str], + filter_priority: str | None, ) -> None: """Update results dict with LLM-filtered data. @@ -1675,8 +1678,8 @@ def _build_llm_metadata( model: str, original: int, filtered: int, - priority: Optional[str], - ) -> Dict[str, Any]: + priority: str | None, + ) -> dict[str, Any]: """Build LLM filtering metadata dict. Helper for analyze_codebase_with_llm_filter (#633). diff --git a/autobot-backend/llm_interface_pkg/interface.py b/autobot-backend/llm_interface_pkg/interface.py index 753ed948d..ea1d21f7a 100644 --- a/autobot-backend/llm_interface_pkg/interface.py +++ b/autobot-backend/llm_interface_pkg/interface.py @@ -8,22 +8,24 @@ This simplified class delegates to specialized provider modules. 
""" +from __future__ import annotations + import asyncio import logging import os import re import time import uuid -from typing import Any, Dict, List, Optional +from typing import Any import aiohttp import xxhash +from config import ConfigManager from constants.model_constants import ModelConstants from autobot_shared.error_boundaries import error_boundary, get_error_boundary_manager from autobot_shared.http_client import get_http_client from autobot_shared.tracing import get_tracer -from config import ConfigManager # Issue #1403: Adapter registry from .adapters.registry import get_adapter_registry @@ -101,7 +103,7 @@ class LLMInterface: - Structured request/response handling """ - def __init__(self, settings: Optional[LLMSettings] = None): + def __init__(self, settings: LLMSettings | None = None): """ Initialize LLM interface with optional settings and configure providers. @@ -192,13 +194,13 @@ def _init_async_components(self) -> None: Initialize async components including HTTP client and caching. """ self._http_client = get_http_client() - self._models_cache: Optional[List[str]] = None + self._models_cache: list[str] | None = None self._models_cache_time: float = 0 self._lock = asyncio.Lock() # Issue #551: L1/L2 dual-tier caching system self._response_cache = get_llm_cache() - def _init_metrics(self) -> Dict[str, Any]: + def _init_metrics(self) -> dict[str, Any]: """ Issue #665: Extracted from __init__ to reduce function length. @@ -423,7 +425,7 @@ def _apply_tiered_routing( lightweight models and complex requests to capable models. Args: - messages: List of message dicts to analyze + messages: list of message dicts to analyze provider: Current provider name current_model: Currently selected model name @@ -460,7 +462,7 @@ def _apply_tiered_routing( async def switch_provider( self, provider: str, model: str = "", validate: bool = False - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """Switch the active LLM provider at runtime (#536). 
Args: @@ -491,7 +493,7 @@ async def switch_provider( logger.info("Switched LLM provider to %s (model=%s)", provider, model) return {"success": True, "provider": provider, "model": model} - async def get_all_provider_status(self) -> Dict[str, Any]: + async def get_all_provider_status(self) -> dict[str, Any]: """Get status of all configured providers (#536). Returns: @@ -635,7 +637,7 @@ def base_url(self) -> str: """Get Ollama base URL.""" return f"http://{self.settings.ollama_host}:{self.settings.ollama_port}" - async def _generate_cache_key(self, messages: List[ChatMessage], **params) -> str: + async def _generate_cache_key(self, messages: list[ChatMessage], **params) -> str: """Generate cache key with high-performance hashing.""" key_data = ( tuple((m.role, m.content) for m in messages), @@ -753,7 +755,7 @@ async def _check_cache( request_id: str, start_time: float, **kwargs, - ) -> tuple[Optional[LLMResponse], Optional[str]]: + ) -> tuple[LLMResponse | None, str | None]: """ Check L1/L2 cache for existing response. @@ -812,7 +814,7 @@ async def _prepare_request_context( Prepare request context with provider, model, and optimizations. Issue #620. Args: - messages: List of message dicts + messages: list of message dicts llm_type: Type of LLM **kwargs: Additional parameters @@ -834,17 +836,17 @@ async def _finalize_response( messages: list, model_name: str, provider: str, - cache_key: Optional[str], + cache_key: str | None, request_id: str, start_time: float, - session_id: Optional[str], + session_id: str | None, ) -> LLMResponse: """ Finalize response with metrics, caching, and usage tracking. Issue #620. Args: response: LLM response object - messages: List of message dicts + messages: list of message dicts model_name: Model name used provider: Provider name cache_key: Cache key if applicable @@ -907,7 +909,7 @@ async def _execute_chat_request( Execute chat request with caching and fallback. Issue #620. 
Args: - messages: List of message dicts + messages: list of message dicts llm_type: Type of LLM request_id: Request identifier start_time: Request start time @@ -956,7 +958,7 @@ async def chat_completion( Issue #665: Refactored to use helper methods for reduced complexity. Args: - messages: List of message dicts + messages: list of message dicts llm_type: Type of LLM ("orchestrator", "task", "chat", etc.) **kwargs: Additional parameters (provider, model_name, etc.) @@ -1010,7 +1012,7 @@ def _mark_fallback_response( ) def _build_all_providers_failed_response( - self, request_id: str, last_error: Optional[str] + self, request_id: str, last_error: str | None ) -> LLMResponse: """Build error response when all providers fail. Issue #620.""" logger.error(f"All providers failed. Last error: {last_error}") @@ -1204,7 +1206,7 @@ async def _track_llm_usage( model: str, response: LLMResponse, processing_time: float, - session_id: Optional[str] = None, + session_id: str | None = None, ): """Track LLM usage for cost optimization analysis (Issue #229).""" if not PATTERN_ANALYZER_AVAILABLE: @@ -1269,7 +1271,7 @@ async def _handle_local_request(self, request: LLMRequest) -> LLMResponse: return await self._local_handler.chat_completion(request) # Utility methods - async def get_available_models(self, provider: str = "ollama") -> List[str]: + async def get_available_models(self, provider: str = "ollama") -> list[str]: """Get available models for a provider.""" if provider == "ollama": ollama_host = os.getenv("AUTOBOT_OLLAMA_HOST") @@ -1292,7 +1294,7 @@ async def get_available_models(self, provider: str = "ollama") -> List[str]: return [] - def get_metrics(self) -> Dict[str, Any]: + def get_metrics(self) -> dict[str, Any]: """Get performance metrics including cache and optimization statistics.""" metrics = self._metrics.copy() # Issue #551: Include L1/L2 cache metrics @@ -1311,7 +1313,7 @@ def get_metrics(self) -> Dict[str, Any]: metrics["tiered_routing"] = 
self._tier_router.get_metrics() return metrics - def get_cache_metrics(self) -> Dict[str, Any]: + def get_cache_metrics(self) -> dict[str, Any]: """ Get detailed cache performance metrics. @@ -1322,7 +1324,7 @@ def get_cache_metrics(self) -> Dict[str, Any]: """ return self._response_cache.get_metrics() - async def clear_cache(self, l1: bool = True, l2: bool = True) -> Dict[str, int]: + async def clear_cache(self, l1: bool = True, l2: bool = True) -> dict[str, int]: """ Clear LLM response cache. @@ -1356,11 +1358,11 @@ async def chat_completion_optimized( agent_type: str, user_message: str, session_id: str, - user_name: Optional[str] = None, - user_role: Optional[str] = None, - available_tools: Optional[list] = None, - recent_context: Optional[str] = None, - additional_params: Optional[dict] = None, + user_name: str | None = None, + user_role: str | None = None, + available_tools: list | None = None, + recent_context: str | None = None, + additional_params: dict | None = None, **llm_params, ) -> LLMResponse: """Chat completion with vLLM-optimized prompts. Issue #620.""" diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.19/http_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.19/http_client.py index f9889905d..6f29db9ca 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.19/http_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.19/http_client.py @@ -6,10 +6,12 @@ Provides efficient aiohttp client session management to prevent resource exhaustion """ +from __future__ import annotations + import asyncio import logging import time -from typing import Any, Dict, Optional +from typing import Any import aiohttp from aiohttp import ClientSession, ClientTimeout, TCPConnector @@ -24,8 +26,8 @@ class HTTPClientManager: Prevents creating new ClientSession for each request which causes resource exhaustion. 
""" - _instance: Optional["HTTPClientManager"] = None - _session: Optional[ClientSession] = None + _instance: "HTTPClientManager" | None = None + _session: ClientSession | None = None _lock = asyncio.Lock() def __new__(cls): @@ -288,7 +290,7 @@ async def post(self, url: str, **kwargs) -> aiohttp.ClientResponse: """Convenience method for POST requests.""" return await self.request("POST", url, **kwargs) - async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: + async def get_json(self, url: str, **kwargs) -> dict[str, Any]: """ Make a GET request and return JSON response. @@ -304,8 +306,8 @@ async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: return await response.json() async def post_json( - self, url: str, json_data: Dict[str, Any], **kwargs - ) -> Dict[str, Any]: + self, url: str, json_data: dict[str, Any], **kwargs + ) -> dict[str, Any]: """ Make a POST request with JSON data and return JSON response. @@ -336,7 +338,7 @@ async def close(self): self._connector = None self._closed = True - def get_stats(self) -> Dict[str, Any]: + def get_stats(self) -> dict[str, Any]: """Get client usage statistics.""" utilization = ( self._active_requests / self._current_pool_size @@ -374,7 +376,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): # Global singleton instance (thread-safe) import threading -_http_client: Optional[HTTPClientManager] = None +_http_client: HTTPClientManager | None = None _http_client_lock = threading.Lock() diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.19/redis_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.19/redis_client.py index 703d79a28..cb15d8d81 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.19/redis_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.19/redis_client.py @@ -77,11 +77,13 @@ backward compatibility by re-exporting all classes. 
""" +from __future__ import annotations + import logging # Thread safety support for concurrent access patterns from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Dict, Optional, Union +from typing import Any, AsyncGenerator, Union import redis import redis.asyncio as async_redis @@ -154,7 +156,7 @@ # Lazy initialization prevents Redis connection errors during module imports # on dev machines where Redis VM (172.16.168.23) is unreachable. -_connection_manager: Optional[RedisConnectionManager] = None +_connection_manager: RedisConnectionManager | None = None def _get_connection_manager() -> RedisConnectionManager: @@ -253,27 +255,27 @@ def get_redis_client( # ============================================================================= -def get_knowledge_base_redis(**kwargs) -> Optional[redis.Redis]: +def get_knowledge_base_redis(**kwargs) -> redis.Redis | None: """Get Redis client for knowledge base data.""" return get_redis_client(database="knowledge", **kwargs) -def get_prompts_redis(**kwargs) -> Optional[redis.Redis]: +def get_prompts_redis(**kwargs) -> redis.Redis | None: """Get Redis client for prompt templates.""" return get_redis_client(database="prompts", **kwargs) -def get_agents_redis(**kwargs) -> Optional[redis.Redis]: +def get_agents_redis(**kwargs) -> redis.Redis | None: """Get Redis client for agent communication.""" return get_redis_client(database="agents", **kwargs) -def get_metrics_redis(**kwargs) -> Optional[redis.Redis]: +def get_metrics_redis(**kwargs) -> redis.Redis | None: """Get Redis client for performance metrics.""" return get_redis_client(database="metrics", **kwargs) -def get_main_redis(**kwargs) -> Optional[redis.Redis]: +def get_main_redis(**kwargs) -> redis.Redis | None: """Get Redis client for main application data.""" return get_redis_client(database="main", **kwargs) @@ -283,17 +285,17 @@ def get_main_redis(**kwargs) -> Optional[redis.Redis]: # 
============================================================================= -def get_redis_health() -> Dict[str, Any]: +def get_redis_health() -> dict[str, Any]: """Get Redis health status.""" return _get_connection_manager().get_health_status() -def get_redis_metrics(database: Optional[str] = None) -> Dict[str, Any]: +def get_redis_metrics(database: str | None = None) -> dict[str, Any]: """Get Redis connection metrics.""" return _get_connection_manager().get_metrics(database) -def get_connection_info(database: str = "main") -> Dict[str, Any]: +def get_connection_info(database: str = "main") -> dict[str, Any]: """ Get detailed connection status and info for a specific database. @@ -391,7 +393,7 @@ async def close_all_redis_connections(): # ============================================================================= -async def redis_get(key: str, database: str = "main") -> Optional[Any]: +async def redis_get(key: str, database: str = "main") -> Any | None: """ Async Redis GET operation with consolidated backend. @@ -414,7 +416,7 @@ async def redis_get(key: str, database: str = "main") -> Optional[Any]: async def redis_set( - key: str, value: Any, expire: Optional[int] = None, database: str = "main" + key: str, value: Any, expire: int | None = None, database: str = "main" ) -> bool: """ Async Redis SET operation with optional expiration. @@ -520,9 +522,7 @@ def __init__(self): "Use get_redis_client() from backend.utils.redis_client instead." 
) - def get_connection( - self, database: Union[RedisDatabase, str] - ) -> Optional[redis.Redis]: + def get_connection(self, database: Union[RedisDatabase, str]) -> redis.Redis | None: """Get synchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -531,7 +531,7 @@ def get_connection( async def get_async_connection( self, database: Union[RedisDatabase, str] - ) -> Optional[async_redis.Redis]: + ) -> async_redis.Redis | None: """Get asynchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -541,7 +541,7 @@ async def get_async_connection( # Global instance for backward compatibility (lazy-loaded) # Issue #665: Use lazy initialization to avoid deprecation warning at import time -_redis_db_manager_instance: Optional[RedisDatabaseManager] = None +_redis_db_manager_instance: RedisDatabaseManager | None = None def _get_redis_db_manager() -> RedisDatabaseManager: diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.20/http_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.20/http_client.py index f9889905d..6f29db9ca 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.20/http_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.20/http_client.py @@ -6,10 +6,12 @@ Provides efficient aiohttp client session management to prevent resource exhaustion """ +from __future__ import annotations + import asyncio import logging import time -from typing import Any, Dict, Optional +from typing import Any import aiohttp from aiohttp import ClientSession, ClientTimeout, TCPConnector @@ -24,8 +26,8 @@ class HTTPClientManager: Prevents creating new ClientSession for each request which causes resource exhaustion. 
""" - _instance: Optional["HTTPClientManager"] = None - _session: Optional[ClientSession] = None + _instance: "HTTPClientManager" | None = None + _session: ClientSession | None = None _lock = asyncio.Lock() def __new__(cls): @@ -288,7 +290,7 @@ async def post(self, url: str, **kwargs) -> aiohttp.ClientResponse: """Convenience method for POST requests.""" return await self.request("POST", url, **kwargs) - async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: + async def get_json(self, url: str, **kwargs) -> dict[str, Any]: """ Make a GET request and return JSON response. @@ -304,8 +306,8 @@ async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: return await response.json() async def post_json( - self, url: str, json_data: Dict[str, Any], **kwargs - ) -> Dict[str, Any]: + self, url: str, json_data: dict[str, Any], **kwargs + ) -> dict[str, Any]: """ Make a POST request with JSON data and return JSON response. @@ -336,7 +338,7 @@ async def close(self): self._connector = None self._closed = True - def get_stats(self) -> Dict[str, Any]: + def get_stats(self) -> dict[str, Any]: """Get client usage statistics.""" utilization = ( self._active_requests / self._current_pool_size @@ -374,7 +376,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): # Global singleton instance (thread-safe) import threading -_http_client: Optional[HTTPClientManager] = None +_http_client: HTTPClientManager | None = None _http_client_lock = threading.Lock() diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.20/redis_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.20/redis_client.py index 703d79a28..cb15d8d81 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.20/redis_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.20/redis_client.py @@ -77,11 +77,13 @@ backward compatibility by re-exporting all classes. 
""" +from __future__ import annotations + import logging # Thread safety support for concurrent access patterns from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Dict, Optional, Union +from typing import Any, AsyncGenerator, Union import redis import redis.asyncio as async_redis @@ -154,7 +156,7 @@ # Lazy initialization prevents Redis connection errors during module imports # on dev machines where Redis VM (172.16.168.23) is unreachable. -_connection_manager: Optional[RedisConnectionManager] = None +_connection_manager: RedisConnectionManager | None = None def _get_connection_manager() -> RedisConnectionManager: @@ -253,27 +255,27 @@ def get_redis_client( # ============================================================================= -def get_knowledge_base_redis(**kwargs) -> Optional[redis.Redis]: +def get_knowledge_base_redis(**kwargs) -> redis.Redis | None: """Get Redis client for knowledge base data.""" return get_redis_client(database="knowledge", **kwargs) -def get_prompts_redis(**kwargs) -> Optional[redis.Redis]: +def get_prompts_redis(**kwargs) -> redis.Redis | None: """Get Redis client for prompt templates.""" return get_redis_client(database="prompts", **kwargs) -def get_agents_redis(**kwargs) -> Optional[redis.Redis]: +def get_agents_redis(**kwargs) -> redis.Redis | None: """Get Redis client for agent communication.""" return get_redis_client(database="agents", **kwargs) -def get_metrics_redis(**kwargs) -> Optional[redis.Redis]: +def get_metrics_redis(**kwargs) -> redis.Redis | None: """Get Redis client for performance metrics.""" return get_redis_client(database="metrics", **kwargs) -def get_main_redis(**kwargs) -> Optional[redis.Redis]: +def get_main_redis(**kwargs) -> redis.Redis | None: """Get Redis client for main application data.""" return get_redis_client(database="main", **kwargs) @@ -283,17 +285,17 @@ def get_main_redis(**kwargs) -> Optional[redis.Redis]: # 
============================================================================= -def get_redis_health() -> Dict[str, Any]: +def get_redis_health() -> dict[str, Any]: """Get Redis health status.""" return _get_connection_manager().get_health_status() -def get_redis_metrics(database: Optional[str] = None) -> Dict[str, Any]: +def get_redis_metrics(database: str | None = None) -> dict[str, Any]: """Get Redis connection metrics.""" return _get_connection_manager().get_metrics(database) -def get_connection_info(database: str = "main") -> Dict[str, Any]: +def get_connection_info(database: str = "main") -> dict[str, Any]: """ Get detailed connection status and info for a specific database. @@ -391,7 +393,7 @@ async def close_all_redis_connections(): # ============================================================================= -async def redis_get(key: str, database: str = "main") -> Optional[Any]: +async def redis_get(key: str, database: str = "main") -> Any | None: """ Async Redis GET operation with consolidated backend. @@ -414,7 +416,7 @@ async def redis_get(key: str, database: str = "main") -> Optional[Any]: async def redis_set( - key: str, value: Any, expire: Optional[int] = None, database: str = "main" + key: str, value: Any, expire: int | None = None, database: str = "main" ) -> bool: """ Async Redis SET operation with optional expiration. @@ -520,9 +522,7 @@ def __init__(self): "Use get_redis_client() from backend.utils.redis_client instead." 
) - def get_connection( - self, database: Union[RedisDatabase, str] - ) -> Optional[redis.Redis]: + def get_connection(self, database: Union[RedisDatabase, str]) -> redis.Redis | None: """Get synchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -531,7 +531,7 @@ def get_connection( async def get_async_connection( self, database: Union[RedisDatabase, str] - ) -> Optional[async_redis.Redis]: + ) -> async_redis.Redis | None: """Get asynchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -541,7 +541,7 @@ async def get_async_connection( # Global instance for backward compatibility (lazy-loaded) # Issue #665: Use lazy initialization to avoid deprecation warning at import time -_redis_db_manager_instance: Optional[RedisDatabaseManager] = None +_redis_db_manager_instance: RedisDatabaseManager | None = None def _get_redis_db_manager() -> RedisDatabaseManager: diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.21/http_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.21/http_client.py index f9889905d..6f29db9ca 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.21/http_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.21/http_client.py @@ -6,10 +6,12 @@ Provides efficient aiohttp client session management to prevent resource exhaustion """ +from __future__ import annotations + import asyncio import logging import time -from typing import Any, Dict, Optional +from typing import Any import aiohttp from aiohttp import ClientSession, ClientTimeout, TCPConnector @@ -24,8 +26,8 @@ class HTTPClientManager: Prevents creating new ClientSession for each request which causes resource exhaustion. 
""" - _instance: Optional["HTTPClientManager"] = None - _session: Optional[ClientSession] = None + _instance: "HTTPClientManager" | None = None + _session: ClientSession | None = None _lock = asyncio.Lock() def __new__(cls): @@ -288,7 +290,7 @@ async def post(self, url: str, **kwargs) -> aiohttp.ClientResponse: """Convenience method for POST requests.""" return await self.request("POST", url, **kwargs) - async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: + async def get_json(self, url: str, **kwargs) -> dict[str, Any]: """ Make a GET request and return JSON response. @@ -304,8 +306,8 @@ async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: return await response.json() async def post_json( - self, url: str, json_data: Dict[str, Any], **kwargs - ) -> Dict[str, Any]: + self, url: str, json_data: dict[str, Any], **kwargs + ) -> dict[str, Any]: """ Make a POST request with JSON data and return JSON response. @@ -336,7 +338,7 @@ async def close(self): self._connector = None self._closed = True - def get_stats(self) -> Dict[str, Any]: + def get_stats(self) -> dict[str, Any]: """Get client usage statistics.""" utilization = ( self._active_requests / self._current_pool_size @@ -374,7 +376,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): # Global singleton instance (thread-safe) import threading -_http_client: Optional[HTTPClientManager] = None +_http_client: HTTPClientManager | None = None _http_client_lock = threading.Lock() diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.21/redis_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.21/redis_client.py index 703d79a28..cb15d8d81 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.21/redis_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.21/redis_client.py @@ -77,11 +77,13 @@ backward compatibility by re-exporting all classes. 
""" +from __future__ import annotations + import logging # Thread safety support for concurrent access patterns from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Dict, Optional, Union +from typing import Any, AsyncGenerator, Union import redis import redis.asyncio as async_redis @@ -154,7 +156,7 @@ # Lazy initialization prevents Redis connection errors during module imports # on dev machines where Redis VM (172.16.168.23) is unreachable. -_connection_manager: Optional[RedisConnectionManager] = None +_connection_manager: RedisConnectionManager | None = None def _get_connection_manager() -> RedisConnectionManager: @@ -253,27 +255,27 @@ def get_redis_client( # ============================================================================= -def get_knowledge_base_redis(**kwargs) -> Optional[redis.Redis]: +def get_knowledge_base_redis(**kwargs) -> redis.Redis | None: """Get Redis client for knowledge base data.""" return get_redis_client(database="knowledge", **kwargs) -def get_prompts_redis(**kwargs) -> Optional[redis.Redis]: +def get_prompts_redis(**kwargs) -> redis.Redis | None: """Get Redis client for prompt templates.""" return get_redis_client(database="prompts", **kwargs) -def get_agents_redis(**kwargs) -> Optional[redis.Redis]: +def get_agents_redis(**kwargs) -> redis.Redis | None: """Get Redis client for agent communication.""" return get_redis_client(database="agents", **kwargs) -def get_metrics_redis(**kwargs) -> Optional[redis.Redis]: +def get_metrics_redis(**kwargs) -> redis.Redis | None: """Get Redis client for performance metrics.""" return get_redis_client(database="metrics", **kwargs) -def get_main_redis(**kwargs) -> Optional[redis.Redis]: +def get_main_redis(**kwargs) -> redis.Redis | None: """Get Redis client for main application data.""" return get_redis_client(database="main", **kwargs) @@ -283,17 +285,17 @@ def get_main_redis(**kwargs) -> Optional[redis.Redis]: # 
============================================================================= -def get_redis_health() -> Dict[str, Any]: +def get_redis_health() -> dict[str, Any]: """Get Redis health status.""" return _get_connection_manager().get_health_status() -def get_redis_metrics(database: Optional[str] = None) -> Dict[str, Any]: +def get_redis_metrics(database: str | None = None) -> dict[str, Any]: """Get Redis connection metrics.""" return _get_connection_manager().get_metrics(database) -def get_connection_info(database: str = "main") -> Dict[str, Any]: +def get_connection_info(database: str = "main") -> dict[str, Any]: """ Get detailed connection status and info for a specific database. @@ -391,7 +393,7 @@ async def close_all_redis_connections(): # ============================================================================= -async def redis_get(key: str, database: str = "main") -> Optional[Any]: +async def redis_get(key: str, database: str = "main") -> Any | None: """ Async Redis GET operation with consolidated backend. @@ -414,7 +416,7 @@ async def redis_get(key: str, database: str = "main") -> Optional[Any]: async def redis_set( - key: str, value: Any, expire: Optional[int] = None, database: str = "main" + key: str, value: Any, expire: int | None = None, database: str = "main" ) -> bool: """ Async Redis SET operation with optional expiration. @@ -520,9 +522,7 @@ def __init__(self): "Use get_redis_client() from backend.utils.redis_client instead." 
) - def get_connection( - self, database: Union[RedisDatabase, str] - ) -> Optional[redis.Redis]: + def get_connection(self, database: Union[RedisDatabase, str]) -> redis.Redis | None: """Get synchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -531,7 +531,7 @@ def get_connection( async def get_async_connection( self, database: Union[RedisDatabase, str] - ) -> Optional[async_redis.Redis]: + ) -> async_redis.Redis | None: """Get asynchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -541,7 +541,7 @@ async def get_async_connection( # Global instance for backward compatibility (lazy-loaded) # Issue #665: Use lazy initialization to avoid deprecation warning at import time -_redis_db_manager_instance: Optional[RedisDatabaseManager] = None +_redis_db_manager_instance: RedisDatabaseManager | None = None def _get_redis_db_manager() -> RedisDatabaseManager: diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.22/http_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.22/http_client.py index f9889905d..6f29db9ca 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.22/http_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.22/http_client.py @@ -6,10 +6,12 @@ Provides efficient aiohttp client session management to prevent resource exhaustion """ +from __future__ import annotations + import asyncio import logging import time -from typing import Any, Dict, Optional +from typing import Any import aiohttp from aiohttp import ClientSession, ClientTimeout, TCPConnector @@ -24,8 +26,8 @@ class HTTPClientManager: Prevents creating new ClientSession for each request which causes resource exhaustion. 
""" - _instance: Optional["HTTPClientManager"] = None - _session: Optional[ClientSession] = None + _instance: "HTTPClientManager" | None = None + _session: ClientSession | None = None _lock = asyncio.Lock() def __new__(cls): @@ -288,7 +290,7 @@ async def post(self, url: str, **kwargs) -> aiohttp.ClientResponse: """Convenience method for POST requests.""" return await self.request("POST", url, **kwargs) - async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: + async def get_json(self, url: str, **kwargs) -> dict[str, Any]: """ Make a GET request and return JSON response. @@ -304,8 +306,8 @@ async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: return await response.json() async def post_json( - self, url: str, json_data: Dict[str, Any], **kwargs - ) -> Dict[str, Any]: + self, url: str, json_data: dict[str, Any], **kwargs + ) -> dict[str, Any]: """ Make a POST request with JSON data and return JSON response. @@ -336,7 +338,7 @@ async def close(self): self._connector = None self._closed = True - def get_stats(self) -> Dict[str, Any]: + def get_stats(self) -> dict[str, Any]: """Get client usage statistics.""" utilization = ( self._active_requests / self._current_pool_size @@ -374,7 +376,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): # Global singleton instance (thread-safe) import threading -_http_client: Optional[HTTPClientManager] = None +_http_client: HTTPClientManager | None = None _http_client_lock = threading.Lock() diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.22/redis_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.22/redis_client.py index 703d79a28..cb15d8d81 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.22/redis_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.22/redis_client.py @@ -77,11 +77,13 @@ backward compatibility by re-exporting all classes. 
""" +from __future__ import annotations + import logging # Thread safety support for concurrent access patterns from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Dict, Optional, Union +from typing import Any, AsyncGenerator, Union import redis import redis.asyncio as async_redis @@ -154,7 +156,7 @@ # Lazy initialization prevents Redis connection errors during module imports # on dev machines where Redis VM (172.16.168.23) is unreachable. -_connection_manager: Optional[RedisConnectionManager] = None +_connection_manager: RedisConnectionManager | None = None def _get_connection_manager() -> RedisConnectionManager: @@ -253,27 +255,27 @@ def get_redis_client( # ============================================================================= -def get_knowledge_base_redis(**kwargs) -> Optional[redis.Redis]: +def get_knowledge_base_redis(**kwargs) -> redis.Redis | None: """Get Redis client for knowledge base data.""" return get_redis_client(database="knowledge", **kwargs) -def get_prompts_redis(**kwargs) -> Optional[redis.Redis]: +def get_prompts_redis(**kwargs) -> redis.Redis | None: """Get Redis client for prompt templates.""" return get_redis_client(database="prompts", **kwargs) -def get_agents_redis(**kwargs) -> Optional[redis.Redis]: +def get_agents_redis(**kwargs) -> redis.Redis | None: """Get Redis client for agent communication.""" return get_redis_client(database="agents", **kwargs) -def get_metrics_redis(**kwargs) -> Optional[redis.Redis]: +def get_metrics_redis(**kwargs) -> redis.Redis | None: """Get Redis client for performance metrics.""" return get_redis_client(database="metrics", **kwargs) -def get_main_redis(**kwargs) -> Optional[redis.Redis]: +def get_main_redis(**kwargs) -> redis.Redis | None: """Get Redis client for main application data.""" return get_redis_client(database="main", **kwargs) @@ -283,17 +285,17 @@ def get_main_redis(**kwargs) -> Optional[redis.Redis]: # 
============================================================================= -def get_redis_health() -> Dict[str, Any]: +def get_redis_health() -> dict[str, Any]: """Get Redis health status.""" return _get_connection_manager().get_health_status() -def get_redis_metrics(database: Optional[str] = None) -> Dict[str, Any]: +def get_redis_metrics(database: str | None = None) -> dict[str, Any]: """Get Redis connection metrics.""" return _get_connection_manager().get_metrics(database) -def get_connection_info(database: str = "main") -> Dict[str, Any]: +def get_connection_info(database: str = "main") -> dict[str, Any]: """ Get detailed connection status and info for a specific database. @@ -391,7 +393,7 @@ async def close_all_redis_connections(): # ============================================================================= -async def redis_get(key: str, database: str = "main") -> Optional[Any]: +async def redis_get(key: str, database: str = "main") -> Any | None: """ Async Redis GET operation with consolidated backend. @@ -414,7 +416,7 @@ async def redis_get(key: str, database: str = "main") -> Optional[Any]: async def redis_set( - key: str, value: Any, expire: Optional[int] = None, database: str = "main" + key: str, value: Any, expire: int | None = None, database: str = "main" ) -> bool: """ Async Redis SET operation with optional expiration. @@ -520,9 +522,7 @@ def __init__(self): "Use get_redis_client() from backend.utils.redis_client instead." 
) - def get_connection( - self, database: Union[RedisDatabase, str] - ) -> Optional[redis.Redis]: + def get_connection(self, database: Union[RedisDatabase, str]) -> redis.Redis | None: """Get synchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -531,7 +531,7 @@ def get_connection( async def get_async_connection( self, database: Union[RedisDatabase, str] - ) -> Optional[async_redis.Redis]: + ) -> async_redis.Redis | None: """Get asynchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -541,7 +541,7 @@ async def get_async_connection( # Global instance for backward compatibility (lazy-loaded) # Issue #665: Use lazy initialization to avoid deprecation warning at import time -_redis_db_manager_instance: Optional[RedisDatabaseManager] = None +_redis_db_manager_instance: RedisDatabaseManager | None = None def _get_redis_db_manager() -> RedisDatabaseManager: diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.23/http_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.23/http_client.py index f9889905d..6f29db9ca 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.23/http_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.23/http_client.py @@ -6,10 +6,12 @@ Provides efficient aiohttp client session management to prevent resource exhaustion """ +from __future__ import annotations + import asyncio import logging import time -from typing import Any, Dict, Optional +from typing import Any import aiohttp from aiohttp import ClientSession, ClientTimeout, TCPConnector @@ -24,8 +26,8 @@ class HTTPClientManager: Prevents creating new ClientSession for each request which causes resource exhaustion. 
""" - _instance: Optional["HTTPClientManager"] = None - _session: Optional[ClientSession] = None + _instance: "HTTPClientManager" | None = None + _session: ClientSession | None = None _lock = asyncio.Lock() def __new__(cls): @@ -288,7 +290,7 @@ async def post(self, url: str, **kwargs) -> aiohttp.ClientResponse: """Convenience method for POST requests.""" return await self.request("POST", url, **kwargs) - async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: + async def get_json(self, url: str, **kwargs) -> dict[str, Any]: """ Make a GET request and return JSON response. @@ -304,8 +306,8 @@ async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: return await response.json() async def post_json( - self, url: str, json_data: Dict[str, Any], **kwargs - ) -> Dict[str, Any]: + self, url: str, json_data: dict[str, Any], **kwargs + ) -> dict[str, Any]: """ Make a POST request with JSON data and return JSON response. @@ -336,7 +338,7 @@ async def close(self): self._connector = None self._closed = True - def get_stats(self) -> Dict[str, Any]: + def get_stats(self) -> dict[str, Any]: """Get client usage statistics.""" utilization = ( self._active_requests / self._current_pool_size @@ -374,7 +376,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): # Global singleton instance (thread-safe) import threading -_http_client: Optional[HTTPClientManager] = None +_http_client: HTTPClientManager | None = None _http_client_lock = threading.Lock() diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.23/redis_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.23/redis_client.py index 703d79a28..cb15d8d81 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.23/redis_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.23/redis_client.py @@ -77,11 +77,13 @@ backward compatibility by re-exporting all classes. 
""" +from __future__ import annotations + import logging # Thread safety support for concurrent access patterns from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Dict, Optional, Union +from typing import Any, AsyncGenerator, Union import redis import redis.asyncio as async_redis @@ -154,7 +156,7 @@ # Lazy initialization prevents Redis connection errors during module imports # on dev machines where Redis VM (172.16.168.23) is unreachable. -_connection_manager: Optional[RedisConnectionManager] = None +_connection_manager: RedisConnectionManager | None = None def _get_connection_manager() -> RedisConnectionManager: @@ -253,27 +255,27 @@ def get_redis_client( # ============================================================================= -def get_knowledge_base_redis(**kwargs) -> Optional[redis.Redis]: +def get_knowledge_base_redis(**kwargs) -> redis.Redis | None: """Get Redis client for knowledge base data.""" return get_redis_client(database="knowledge", **kwargs) -def get_prompts_redis(**kwargs) -> Optional[redis.Redis]: +def get_prompts_redis(**kwargs) -> redis.Redis | None: """Get Redis client for prompt templates.""" return get_redis_client(database="prompts", **kwargs) -def get_agents_redis(**kwargs) -> Optional[redis.Redis]: +def get_agents_redis(**kwargs) -> redis.Redis | None: """Get Redis client for agent communication.""" return get_redis_client(database="agents", **kwargs) -def get_metrics_redis(**kwargs) -> Optional[redis.Redis]: +def get_metrics_redis(**kwargs) -> redis.Redis | None: """Get Redis client for performance metrics.""" return get_redis_client(database="metrics", **kwargs) -def get_main_redis(**kwargs) -> Optional[redis.Redis]: +def get_main_redis(**kwargs) -> redis.Redis | None: """Get Redis client for main application data.""" return get_redis_client(database="main", **kwargs) @@ -283,17 +285,17 @@ def get_main_redis(**kwargs) -> Optional[redis.Redis]: # 
============================================================================= -def get_redis_health() -> Dict[str, Any]: +def get_redis_health() -> dict[str, Any]: """Get Redis health status.""" return _get_connection_manager().get_health_status() -def get_redis_metrics(database: Optional[str] = None) -> Dict[str, Any]: +def get_redis_metrics(database: str | None = None) -> dict[str, Any]: """Get Redis connection metrics.""" return _get_connection_manager().get_metrics(database) -def get_connection_info(database: str = "main") -> Dict[str, Any]: +def get_connection_info(database: str = "main") -> dict[str, Any]: """ Get detailed connection status and info for a specific database. @@ -391,7 +393,7 @@ async def close_all_redis_connections(): # ============================================================================= -async def redis_get(key: str, database: str = "main") -> Optional[Any]: +async def redis_get(key: str, database: str = "main") -> Any | None: """ Async Redis GET operation with consolidated backend. @@ -414,7 +416,7 @@ async def redis_get(key: str, database: str = "main") -> Optional[Any]: async def redis_set( - key: str, value: Any, expire: Optional[int] = None, database: str = "main" + key: str, value: Any, expire: int | None = None, database: str = "main" ) -> bool: """ Async Redis SET operation with optional expiration. @@ -520,9 +522,7 @@ def __init__(self): "Use get_redis_client() from backend.utils.redis_client instead." 
) - def get_connection( - self, database: Union[RedisDatabase, str] - ) -> Optional[redis.Redis]: + def get_connection(self, database: Union[RedisDatabase, str]) -> redis.Redis | None: """Get synchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -531,7 +531,7 @@ def get_connection( async def get_async_connection( self, database: Union[RedisDatabase, str] - ) -> Optional[async_redis.Redis]: + ) -> async_redis.Redis | None: """Get asynchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -541,7 +541,7 @@ async def get_async_connection( # Global instance for backward compatibility (lazy-loaded) # Issue #665: Use lazy initialization to avoid deprecation warning at import time -_redis_db_manager_instance: Optional[RedisDatabaseManager] = None +_redis_db_manager_instance: RedisDatabaseManager | None = None def _get_redis_db_manager() -> RedisDatabaseManager: diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.24/http_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.24/http_client.py index f9889905d..6f29db9ca 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.24/http_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.24/http_client.py @@ -6,10 +6,12 @@ Provides efficient aiohttp client session management to prevent resource exhaustion """ +from __future__ import annotations + import asyncio import logging import time -from typing import Any, Dict, Optional +from typing import Any import aiohttp from aiohttp import ClientSession, ClientTimeout, TCPConnector @@ -24,8 +26,8 @@ class HTTPClientManager: Prevents creating new ClientSession for each request which causes resource exhaustion. 
""" - _instance: Optional["HTTPClientManager"] = None - _session: Optional[ClientSession] = None + _instance: "HTTPClientManager" | None = None + _session: ClientSession | None = None _lock = asyncio.Lock() def __new__(cls): @@ -288,7 +290,7 @@ async def post(self, url: str, **kwargs) -> aiohttp.ClientResponse: """Convenience method for POST requests.""" return await self.request("POST", url, **kwargs) - async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: + async def get_json(self, url: str, **kwargs) -> dict[str, Any]: """ Make a GET request and return JSON response. @@ -304,8 +306,8 @@ async def get_json(self, url: str, **kwargs) -> Dict[str, Any]: return await response.json() async def post_json( - self, url: str, json_data: Dict[str, Any], **kwargs - ) -> Dict[str, Any]: + self, url: str, json_data: dict[str, Any], **kwargs + ) -> dict[str, Any]: """ Make a POST request with JSON data and return JSON response. @@ -336,7 +338,7 @@ async def close(self): self._connector = None self._closed = True - def get_stats(self) -> Dict[str, Any]: + def get_stats(self) -> dict[str, Any]: """Get client usage statistics.""" utilization = ( self._active_requests / self._current_pool_size @@ -374,7 +376,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): # Global singleton instance (thread-safe) import threading -_http_client: Optional[HTTPClientManager] = None +_http_client: HTTPClientManager | None = None _http_client_lock = threading.Lock() diff --git a/autobot-slm-backend/ansible/autobot@172.16.168.24/redis_client.py b/autobot-slm-backend/ansible/autobot@172.16.168.24/redis_client.py index 703d79a28..cb15d8d81 100644 --- a/autobot-slm-backend/ansible/autobot@172.16.168.24/redis_client.py +++ b/autobot-slm-backend/ansible/autobot@172.16.168.24/redis_client.py @@ -77,11 +77,13 @@ backward compatibility by re-exporting all classes. 
""" +from __future__ import annotations + import logging # Thread safety support for concurrent access patterns from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Dict, Optional, Union +from typing import Any, AsyncGenerator, Union import redis import redis.asyncio as async_redis @@ -154,7 +156,7 @@ # Lazy initialization prevents Redis connection errors during module imports # on dev machines where Redis VM (172.16.168.23) is unreachable. -_connection_manager: Optional[RedisConnectionManager] = None +_connection_manager: RedisConnectionManager | None = None def _get_connection_manager() -> RedisConnectionManager: @@ -253,27 +255,27 @@ def get_redis_client( # ============================================================================= -def get_knowledge_base_redis(**kwargs) -> Optional[redis.Redis]: +def get_knowledge_base_redis(**kwargs) -> redis.Redis | None: """Get Redis client for knowledge base data.""" return get_redis_client(database="knowledge", **kwargs) -def get_prompts_redis(**kwargs) -> Optional[redis.Redis]: +def get_prompts_redis(**kwargs) -> redis.Redis | None: """Get Redis client for prompt templates.""" return get_redis_client(database="prompts", **kwargs) -def get_agents_redis(**kwargs) -> Optional[redis.Redis]: +def get_agents_redis(**kwargs) -> redis.Redis | None: """Get Redis client for agent communication.""" return get_redis_client(database="agents", **kwargs) -def get_metrics_redis(**kwargs) -> Optional[redis.Redis]: +def get_metrics_redis(**kwargs) -> redis.Redis | None: """Get Redis client for performance metrics.""" return get_redis_client(database="metrics", **kwargs) -def get_main_redis(**kwargs) -> Optional[redis.Redis]: +def get_main_redis(**kwargs) -> redis.Redis | None: """Get Redis client for main application data.""" return get_redis_client(database="main", **kwargs) @@ -283,17 +285,17 @@ def get_main_redis(**kwargs) -> Optional[redis.Redis]: # 
============================================================================= -def get_redis_health() -> Dict[str, Any]: +def get_redis_health() -> dict[str, Any]: """Get Redis health status.""" return _get_connection_manager().get_health_status() -def get_redis_metrics(database: Optional[str] = None) -> Dict[str, Any]: +def get_redis_metrics(database: str | None = None) -> dict[str, Any]: """Get Redis connection metrics.""" return _get_connection_manager().get_metrics(database) -def get_connection_info(database: str = "main") -> Dict[str, Any]: +def get_connection_info(database: str = "main") -> dict[str, Any]: """ Get detailed connection status and info for a specific database. @@ -391,7 +393,7 @@ async def close_all_redis_connections(): # ============================================================================= -async def redis_get(key: str, database: str = "main") -> Optional[Any]: +async def redis_get(key: str, database: str = "main") -> Any | None: """ Async Redis GET operation with consolidated backend. @@ -414,7 +416,7 @@ async def redis_get(key: str, database: str = "main") -> Optional[Any]: async def redis_set( - key: str, value: Any, expire: Optional[int] = None, database: str = "main" + key: str, value: Any, expire: int | None = None, database: str = "main" ) -> bool: """ Async Redis SET operation with optional expiration. @@ -520,9 +522,7 @@ def __init__(self): "Use get_redis_client() from backend.utils.redis_client instead." 
) - def get_connection( - self, database: Union[RedisDatabase, str] - ) -> Optional[redis.Redis]: + def get_connection(self, database: Union[RedisDatabase, str]) -> redis.Redis | None: """Get synchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -531,7 +531,7 @@ def get_connection( async def get_async_connection( self, database: Union[RedisDatabase, str] - ) -> Optional[async_redis.Redis]: + ) -> async_redis.Redis | None: """Get asynchronous Redis connection (DEPRECATED).""" db_name = ( database.name.lower() if isinstance(database, RedisDatabase) else database @@ -541,7 +541,7 @@ async def get_async_connection( # Global instance for backward compatibility (lazy-loaded) # Issue #665: Use lazy initialization to avoid deprecation warning at import time -_redis_db_manager_instance: Optional[RedisDatabaseManager] = None +_redis_db_manager_instance: RedisDatabaseManager | None = None def _get_redis_db_manager() -> RedisDatabaseManager: