|
444 | 444 | output_parse_pii: bool = False |
445 | 445 | ############################################# |
446 | 446 | from litellm.litellm_core_utils.get_model_cost_map import get_model_cost_map |
| 447 | +from .cost_calculator import completion_cost, cost_per_token, response_cost_calculator |
| 448 | +from litellm.litellm_core_utils.litellm_logging import Logging, modify_integration |
447 | 449 |
|
448 | 450 | model_cost = get_model_cost_map(url=model_cost_map_url) |
449 | 451 | cost_discount_config: Dict[str, float] = ( |
@@ -1058,9 +1060,49 @@ def add_known_models(): |
1058 | 1060 | from litellm.litellm_core_utils.core_helpers import remove_index_from_tool_calls |
1059 | 1061 | from litellm.litellm_core_utils.token_counter import get_modified_max_tokens |
1060 | 1062 | # client must be imported immediately as it's used as a decorator at function definition time |
1061 | | -from .utils import client |
1062 | | -# Note: Most other utils imports are lazy-loaded via __getattr__ to avoid loading utils.py |
1063 | | -# (which imports tiktoken) at import time |
| 1063 | +from .utils import ( |
| 1064 | + client, |
| 1065 | + exception_type, |
| 1066 | + get_optional_params, |
| 1067 | + get_response_string, |
| 1068 | + token_counter, |
| 1069 | + create_pretrained_tokenizer, |
| 1070 | + create_tokenizer, |
| 1071 | + supports_function_calling, |
| 1072 | + supports_web_search, |
| 1073 | + supports_url_context, |
| 1074 | + supports_response_schema, |
| 1075 | + supports_parallel_function_calling, |
| 1076 | + supports_vision, |
| 1077 | + supports_audio_input, |
| 1078 | + supports_audio_output, |
| 1079 | + supports_system_messages, |
| 1080 | + supports_reasoning, |
| 1081 | + get_litellm_params, |
| 1082 | + acreate, |
| 1083 | + get_max_tokens, |
| 1084 | + get_model_info, |
| 1085 | + register_prompt_template, |
| 1086 | + validate_environment, |
| 1087 | + check_valid_key, |
| 1088 | + register_model, |
| 1089 | + encode, |
| 1090 | + decode, |
| 1091 | + _calculate_retry_after, |
| 1092 | + _should_retry, |
| 1093 | + get_supported_openai_params, |
| 1094 | + get_api_base, |
| 1095 | + get_first_chars_messages, |
| 1096 | + ModelResponse, |
| 1097 | + ModelResponseStream, |
| 1098 | + EmbeddingResponse, |
| 1099 | + ImageResponse, |
| 1100 | + TranscriptionResponse, |
| 1101 | + TextCompletionResponse, |
| 1102 | + get_provider_fields, |
| 1103 | + ModelResponseListIterator, |
| 1104 | + get_valid_models, |
| 1105 | +) |
1064 | 1106 |
|
1065 | 1107 | from .llms.bytez.chat.transformation import BytezChatConfig |
1066 | 1108 | from .llms.custom_llm import CustomLLM |
@@ -1491,56 +1533,3 @@ def set_global_gitlab_config(config: Dict[str, Any]) -> None: |
1491 | 1533 | global_gitlab_config = config |
1492 | 1534 |
|
1493 | 1535 |
|
1494 | | -# Lazy loading system for heavy modules to reduce initial import time and memory usage |
1495 | | - |
1496 | | -if TYPE_CHECKING: |
1497 | | - cost_per_token: Callable[..., Tuple[float, float]] |
1498 | | - completion_cost: Callable[..., float] |
1499 | | - response_cost_calculator: Any |
1500 | | - modify_integration: Any |
1501 | | - |
1502 | | - |
1503 | | -def __getattr__(name: str) -> Any: |
1504 | | - """Lazy import handler for cost_calculator and litellm_logging functions.""" |
1505 | | - # Lazy load cost_calculator functions |
1506 | | - _cost_calculator_names = ( |
1507 | | - "completion_cost", |
1508 | | - "cost_per_token", |
1509 | | - "response_cost_calculator", |
1510 | | - ) |
1511 | | - if name in _cost_calculator_names: |
1512 | | - from ._lazy_imports import _lazy_import_cost_calculator |
1513 | | - return _lazy_import_cost_calculator(name) |
1514 | | - |
1515 | | - # Lazy load litellm_logging functions |
1516 | | - _litellm_logging_names = ( |
1517 | | - "Logging", |
1518 | | - "modify_integration", |
1519 | | - ) |
1520 | | - if name in _litellm_logging_names: |
1521 | | - from ._lazy_imports import _lazy_import_litellm_logging |
1522 | | - return _lazy_import_litellm_logging(name) |
1523 | | - |
1524 | | - # Lazy load utils functions |
1525 | | - _utils_names = ( |
1526 | | - "exception_type", "get_optional_params", "get_response_string", "token_counter", |
1527 | | - "create_pretrained_tokenizer", "create_tokenizer", "supports_function_calling", |
1528 | | - "supports_web_search", "supports_url_context", "supports_response_schema", |
1529 | | - "supports_parallel_function_calling", "supports_vision", "supports_audio_input", |
1530 | | - "supports_audio_output", "supports_system_messages", "supports_reasoning", |
1531 | | - "get_litellm_params", "acreate", "get_max_tokens", "get_model_info", |
1532 | | - "register_prompt_template", "validate_environment", "check_valid_key", |
1533 | | - "register_model", "encode", "decode", "_calculate_retry_after", "_should_retry", |
1534 | | - "get_supported_openai_params", "get_api_base", "get_first_chars_messages", |
1535 | | - "ModelResponse", "ModelResponseStream", "EmbeddingResponse", "ImageResponse", |
1536 | | - "TranscriptionResponse", "TextCompletionResponse", "get_provider_fields", |
1537 | | - "ModelResponseListIterator", "get_valid_models", |
1538 | | - ) |
1539 | | - if name in _utils_names: |
1540 | | - from ._lazy_imports import _lazy_import_utils |
1541 | | - return _lazy_import_utils(name) |
1542 | | - |
1543 | | - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") |
1544 | | - |
1545 | | - |
1546 | | -# ALL_LITELLM_RESPONSE_TYPES is lazy-loaded via __getattr__ to avoid loading utils at import time |