diff --git a/.github/workflows/docker-build-test.yml b/.github/workflows/docker-build-test.yml index 8c68f495047..328fcbeb22d 100644 --- a/.github/workflows/docker-build-test.yml +++ b/.github/workflows/docker-build-test.yml @@ -51,6 +51,7 @@ jobs: platforms: linux/amd64,linux/arm64 push: false target: aider-ce + cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:latest-bc - name: Build Docker images (Push) if: ${{ github.event_name != 'pull_request' }} @@ -62,3 +63,4 @@ jobs: push: true tags: ${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:dev target: aider-ce + cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:latest-bc diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index 63fa5650399..32add8d67ae 100644 --- a/.github/workflows/docker-release.yml +++ b/.github/workflows/docker-release.yml @@ -38,4 +38,6 @@ jobs: ${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:${{ github.ref_name }} ${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:latest target: aider-ce + cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:latest-bc + cache-to: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:latest-bc,mode=max diff --git a/.github/workflows/ubuntu-tests.yml b/.github/workflows/ubuntu-tests.yml index 02dcc53a772..ad79b78dec4 100644 --- a/.github/workflows/ubuntu-tests.yml +++ b/.github/workflows/ubuntu-tests.yml @@ -46,8 +46,14 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install pytest - pip install . 
+ pip install uv + uv pip install --system \ + pytest \ + -r requirements/requirements.in \ + -r requirements/requirements-browser.in \ + -r requirements/requirements-help.in \ + -r requirements/requirements-playwright.in \ + ".[browser,help,playwright]" - name: Run tests env: diff --git a/.github/workflows/windows-tests.yml b/.github/workflows/windows-tests.yml index f5734cf188b..25c41c39d36 100644 --- a/.github/workflows/windows-tests.yml +++ b/.github/workflows/windows-tests.yml @@ -41,8 +41,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install pytest - pip install . + pip install uv + uv pip install --system pytest -r requirements/requirements.in -r requirements/requirements-browser.in -r requirements/requirements-help.in -r requirements/requirements-playwright.in '.[browser,help,playwright]' - name: Run tests env: diff --git a/README.md b/README.md index 5e01b35aa58..36a659b4bf6 100644 --- a/README.md +++ b/README.md @@ -25,10 +25,27 @@ This project aims to be compatible with upstream Aider, but with priority commit * [Manually install necessary ubuntu dependencies](https://github.com/dwash96/aider-ce/issues/14) * [.gitignore updates](https://github.com/dwash96/aider-ce/commit/7c7e803fa63d1acd860eef1423e5a03220df6017) * [Experimental Context Compaction For Longer Running Generation Tasks](https://github.com/Aider-AI/aider/issues/6) +* [Fix Deepseek model configurations](https://github.com/Aider-AI/aider/commit/c839a6dd8964d702172cae007375e299732d3823) +* [Relax Version Pinning For Easier Distribution](https://github.com/dwash96/aider-ce/issues/18) ### Other Notes * [MCP Configuration](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/mcp.md) +### Installation Instructions +This project should be installable using the commands + +``` +pip install aider-ce +``` + +or + +``` +uv tool install aider-ce +``` + +The package exports an `aider-ce` command that accepts all of Aider's configuration options
diff --git a/aider/__init__.py b/aider/__init__.py index 76fec311e99..21c650060b5 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.87.2.dev" +__version__ = "0.87.4.dev" safe_version = __version__ try: diff --git a/aider/models.py b/aider/models.py index c14d31a8837..1b5d0208dfd 100644 --- a/aider/models.py +++ b/aider/models.py @@ -987,8 +987,6 @@ def send_completion( kwargs["tools"] = [dict(type="function", function=tool) for tool in effective_tools] else: kwargs["tools"] = effective_tools - else: - kwargs["tools"] = [] # Forcing a function call is for legacy style `functions` with a single function. # This is used by ArchitectCoder and not intended for NavigatorCoder's tools. diff --git a/aider/repomap.py b/aider/repomap.py index 77bdf7bf384..985b2ce159e 100644 --- a/aider/repomap.py +++ b/aider/repomap.py @@ -9,6 +9,7 @@ from importlib import resources from pathlib import Path +import tree_sitter from diskcache import Cache from grep_ast import TreeContext, filename_to_lang from pygments.lexers import guess_lexer_for_filename @@ -413,8 +414,13 @@ def get_tags_raw(self, fname, rel_fname): tree = parser.parse(bytes(code, "utf-8")) # Run the tags queries - query = language.query(query_scm) - captures = query.captures(tree.root_node) + if sys.version_info >= (3, 10): + query = tree_sitter.Query(language, query_scm) + cursor = tree_sitter.QueryCursor(query) + captures = cursor.captures(tree.root_node) + else: + query = language.query(query_scm) + captures = query.captures(tree.root_node) saw = set() if USING_TSL_PACK: diff --git a/aider/resources/model-metadata.json b/aider/resources/model-metadata.json index aff3b78cbe1..c3037edfc3a 100644 --- a/aider/resources/model-metadata.json +++ b/aider/resources/model-metadata.json @@ -1,7 +1,21 @@ { + "deepseek/deepseek-chat": { + "max_tokens": 8192, + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000027, + 
"input_cost_per_token_cache_hit": 0.00000007, + "cache_read_input_token_cost": 0.00000007, + "cache_creation_input_token_cost": 0.0, + "output_cost_per_token": 0.0000011, + "litellm_provider": "deepseek", + "mode": "chat", + "supports_assistant_prefill": true, + "supports_prompt_caching": true + }, "deepseek-reasoner": { "max_tokens": 8192, - "max_input_tokens": 64000, + "max_input_tokens": 128000, "max_output_tokens": 8192, "input_cost_per_token": 0.00000055, "input_cost_per_token_cache_hit": 0.00000014, @@ -10,41 +24,35 @@ "output_cost_per_token": 0.00000219, "litellm_provider": "deepseek", "mode": "chat", - //"supports_function_calling": true, "supports_assistant_prefill": true, - //"supports_tool_choice": true, "supports_prompt_caching": true }, "openrouter/deepseek/deepseek-r1:free": { "max_tokens": 8192, - "max_input_tokens": 64000, + "max_input_tokens": 128000, "max_output_tokens": 8192, "input_cost_per_token": 0.0, "input_cost_per_token_cache_hit": 0.0, - "cache_read_input_token_cost": 0.00, + "cache_read_input_token_cost": 0.0, "cache_creation_input_token_cost": 0.0, "output_cost_per_token": 0.0, "litellm_provider": "openrouter", "mode": "chat", - //"supports_function_calling": true, "supports_assistant_prefill": true, - //"supports_tool_choice": true, "supports_prompt_caching": true }, "openrouter/deepseek/deepseek-chat:free": { "max_tokens": 8192, - "max_input_tokens": 64000, + "max_input_tokens": 128000, "max_output_tokens": 8192, "input_cost_per_token": 0.0, "input_cost_per_token_cache_hit": 0.0, - "cache_read_input_token_cost": 0.00, + "cache_read_input_token_cost": 0.0, "cache_creation_input_token_cost": 0.0, "output_cost_per_token": 0.0, "litellm_provider": "openrouter", "mode": "chat", - //"supports_function_calling": true, "supports_assistant_prefill": true, - //"supports_tool_choice": true, "supports_prompt_caching": true }, "openrouter/deepseek/deepseek-chat-v3-0324": { @@ -58,9 +66,7 @@ "output_cost_per_token": 0.00000219, "litellm_provider": 
"openrouter", "mode": "chat", - //"supports_function_calling": true, "supports_assistant_prefill": true, - //"supports_tool_choice": true, "supports_prompt_caching": true }, "openrouter/deepseek/deepseek-chat-v3-0324:free": { @@ -181,9 +187,9 @@ "input_cost_per_video_per_second": 0, "input_cost_per_audio_per_second": 0, "input_cost_per_token": 0, - "input_cost_per_character": 0, - "input_cost_per_token_above_128k_tokens": 0, - "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_character_above_128k_tokens": 0, "input_cost_per_image_above_128k_tokens": 0, "input_cost_per_video_per_second_above_128k_tokens": 0, "input_cost_per_audio_per_second_above_128k_tokens": 0, @@ -191,7 +197,6 @@ "output_cost_per_character": 0, "output_cost_per_token_above_128k_tokens": 0, "output_cost_per_character_above_128k_tokens": 0, - //"litellm_provider": "vertex_ai-language-models", "litellm_provider": "gemini", "mode": "chat", "supports_system_messages": true, @@ -218,9 +223,9 @@ "input_cost_per_video_per_second": 0, "input_cost_per_audio_per_second": 0, "input_cost_per_token": 0, - "input_cost_per_character": 0, - "input_cost_per_token_above_128k_tokens": 0, - "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_character_above_128k_tokens": 0, "input_cost_per_image_above_128k_tokens": 0, "input_cost_per_video_per_second_above_128k_tokens": 0, "input_cost_per_audio_per_second_above_128k_tokens": 0, @@ -300,8 +305,15 @@ "supports_response_schema": true, "supports_audio_output": false, "supports_tool_choice": true, - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": 
"https://cloud.google.com/vertex-ai/generative-ai/pricing" }, "vertex_ai/gemini-2.5-flash": { @@ -327,8 +339,15 @@ "supports_response_schema": true, "supports_audio_output": false, "supports_tool_choice": true, - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" }, "openrouter/google/gemini-2.5-pro-preview-03-25": { @@ -508,7 +527,7 @@ "input_cost_per_token": 0.00000125, "input_cost_per_token_above_200k_tokens": 0.0000025, "output_cost_per_token": 0.00001, - "output_cost_per_token_above_200k_tokens": 0.000015, + "output_cost_per_token_above_200k_tokens": 0.000015, "litellm_provider": "vertex_ai-language-models", "mode": "chat", "supports_reasoning": true, @@ -518,9 +537,20 @@ "supports_response_schema": true, "supports_audio_output": false, "supports_tool_choice": true, - "supported_endpoints": ["/v1/chat/completions", "/v1/completions", "/v1/batch"], - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview" }, "gemini-2.5-pro-preview-06-05": { @@ -537,7 +567,7 @@ "input_cost_per_token": 0.00000125, "input_cost_per_token_above_200k_tokens": 0.0000025, "output_cost_per_token": 0.00001, - "output_cost_per_token_above_200k_tokens": 0.000015, + "output_cost_per_token_above_200k_tokens": 0.000015, "litellm_provider": "vertex_ai-language-models", "mode": "chat", "supports_reasoning": true, @@ -547,9 +577,20 @@ "supports_response_schema": true, "supports_audio_output": false, 
"supports_tool_choice": true, - "supported_endpoints": ["/v1/chat/completions", "/v1/completions", "/v1/batch"], - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview" }, "gemini/gemini-2.5-pro-preview-05-06": { @@ -564,9 +605,9 @@ "max_pdf_size_mb": 30, "input_cost_per_audio_token": 0.0000007, "input_cost_per_token": 0.00000125, - "input_cost_per_token_above_200k_tokens": 0.0000025, + "input_cost_per_token_above_200k_tokens": 0.0000025, "output_cost_per_token": 0.00001, - "output_cost_per_token_above_200k_tokens": 0.000015, + "output_cost_per_token_above_200k_tokens": 0.000015, "litellm_provider": "gemini", "mode": "chat", "rpm": 10000, @@ -577,8 +618,15 @@ "supports_response_schema": true, "supports_audio_output": false, "supports_tool_choice": true, - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview" }, "gemini/gemini-2.5-pro-preview-06-05": { @@ -593,9 +641,9 @@ "max_pdf_size_mb": 30, "input_cost_per_audio_token": 0.0000007, "input_cost_per_token": 0.00000125, - "input_cost_per_token_above_200k_tokens": 0.0000025, + "input_cost_per_token_above_200k_tokens": 0.0000025, "output_cost_per_token": 0.00001, - "output_cost_per_token_above_200k_tokens": 0.000015, + "output_cost_per_token_above_200k_tokens": 0.000015, "litellm_provider": "gemini", "mode": "chat", "rpm": 10000, @@ -606,8 +654,15 @@ "supports_response_schema": true, "supports_audio_output": false, 
"supports_tool_choice": true, - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview" }, "gemini/gemini-2.5-pro": { @@ -634,8 +689,15 @@ "supports_response_schema": true, "supports_audio_output": false, "supports_tool_choice": true, - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro" }, "gemini/gemini-2.5-flash": { @@ -661,8 +723,15 @@ "supports_response_schema": true, "supports_audio_output": false, "supports_tool_choice": true, - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash" }, "gemini/gemini-2.5-flash-lite-preview-06-17": { @@ -688,12 +757,19 @@ "supports_response_schema": true, "supports_audio_output": false, "supports_tool_choice": true, - "supported_modalities": ["text", "image", "audio", "video"], - "supported_output_modalities": ["text"], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash-lite" }, - "together_ai/Qwen/Qwen3-235B-A22B-fp8-tput": { + "together_ai/Qwen/Qwen3-235B-A22B-fp8-tput": { "input_cost_per_token": 0.0000002, "output_cost_per_token": 0.0000006, - } -} + } +} \ No newline at end of file diff --git a/aider/resources/model-settings.yml 
b/aider/resources/model-settings.yml index 7bcfe080654..33075ff6fca 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -597,7 +597,7 @@ reminder: sys examples_as_sys_msg: true extra_params: - max_tokens: 8192 + max_tokens: 128000 caches_by_default: true - name: openrouter/deepseek/deepseek-chat:free @@ -606,7 +606,7 @@ use_repo_map: true examples_as_sys_msg: true extra_params: - max_tokens: 8192 + max_tokens: 128000 caches_by_default: true use_temperature: false editor_model_name: openrouter/deepseek/deepseek-chat:free @@ -627,7 +627,7 @@ reminder: sys examples_as_sys_msg: true extra_params: - max_tokens: 8192 + max_tokens: 128000 - name: deepseek-coder edit_format: diff @@ -635,7 +635,7 @@ reminder: sys examples_as_sys_msg: true extra_params: - max_tokens: 8192 + max_tokens: 128000 caches_by_default: true - name: openrouter/deepseek/deepseek-coder @@ -995,7 +995,7 @@ use_repo_map: true edit_format: whole accepts_settings: - - reasoning_effort + - reasoning_effort - name: openai/o4-mini edit_format: diff @@ -1072,7 +1072,7 @@ use_repo_map: true edit_format: diff examples_as_sys_msg: true - + - name: gpt-4.1 edit_format: diff weak_model_name: gpt-4.1-mini @@ -1359,7 +1359,7 @@ system_prompt_prefix: "Formatting re-enabled. 
" accepts_settings: ["reasoning_effort"] examples_as_sys_msg: true - + - name: gemini/gemini-2.5-flash-preview-04-17 edit_format: diff use_repo_map: true @@ -1380,7 +1380,7 @@ edit_format: diff-fenced use_repo_map: true weak_model_name: openrouter/google/gemini-2.0-flash-001 - + - name: gemini/gemini-2.5-pro-preview-05-06 overeager: true edit_format: diff-fenced @@ -1487,7 +1487,7 @@ # top_p: 0.8 # top_k: 20 # min_p: 0.0 -# temperature: 0.7 +# temperature: 0.7 - name: claude-sonnet-4-20250514 diff --git a/aider/voice.py b/aider/voice.py index c9af7ae9983..63b9108d7bd 100644 --- a/aider/voice.py +++ b/aider/voice.py @@ -17,8 +17,20 @@ warnings.filterwarnings("ignore", category=SyntaxWarning) -from pydub import AudioSegment # noqa -from pydub.exceptions import CouldntDecodeError, CouldntEncodeError # noqa +try: + from pydub import AudioSegment # noqa + from pydub.exceptions import CouldntDecodeError, CouldntEncodeError # noqa + + PYDUB_AVAILABLE = True +except (ModuleNotFoundError, ImportError) as e: + if "audioop" in str(e) or "pyaudioop" in str(e): + # Handle missing audioop/pyaudioop dependency gracefully + PYDUB_AVAILABLE = False + AudioSegment = None + CouldntDecodeError = Exception + CouldntEncodeError = Exception + else: + raise try: import soundfile as sf @@ -152,11 +164,17 @@ def raw_record_and_transcribe(self, history, language): filename = temp_wav if use_audio_format != "wav": try: - new_filename = tempfile.mktemp(suffix=f".{use_audio_format}") - audio = AudioSegment.from_wav(temp_wav) - audio.export(new_filename, format=use_audio_format) - os.remove(temp_wav) - filename = new_filename + if not PYDUB_AVAILABLE: + print( + f"Warning: pydub not available, cannot convert to {use_audio_format}. Using" + " original WAV file." 
+ ) + else: + new_filename = tempfile.mktemp(suffix=f".{use_audio_format}") + audio = AudioSegment.from_wav(temp_wav) + audio.export(new_filename, format=use_audio_format) + os.remove(temp_wav) + filename = new_filename except (CouldntDecodeError, CouldntEncodeError) as e: print(f"Error converting audio: {e}") except (OSError, FileNotFoundError) as e: diff --git a/docker/Dockerfile b/docker/Dockerfile index 9de6e46f25a..2778dd7fcfe 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -21,6 +21,7 @@ WORKDIR /app # Create virtual environment RUN python -m venv /venv ENV PATH="/venv/bin:$PATH" +RUN /venv/bin/python -m pip install --no-cache-dir uv # Playwright browser settings ENV PLAYWRIGHT_BROWSERS_PATH=/home/appuser/pw-browsers @@ -42,18 +43,18 @@ ENV HOME=/app ######################### FROM base AS aider-ce -ENV AIDER_DOCKER_IMAGE=dustinwashington/aider +ENV AIDER_DOCKER_IMAGE=dustinwashington/aider-ce # Copy requirements files COPY requirements.txt /tmp/aider/ COPY requirements/ /tmp/aider/requirements/ # Install dependencies as root -RUN /venv/bin/python -m pip install --no-cache-dir -r /tmp/aider/requirements.txt && \ +RUN uv pip install --no-cache-dir -r /tmp/aider/requirements.txt && \ rm -rf /tmp/aider # Install playwright browsers -RUN /venv/bin/python -m pip install --no-cache-dir playwright && \ +RUN uv pip install --no-cache-dir playwright && \ /venv/bin/python -m playwright install --with-deps chromium # Fix site-packages permissions @@ -63,10 +64,10 @@ RUN find /venv/lib/python3.12/site-packages \( -type d -exec chmod a+rwx {} + \) COPY . /app/ # Install the application as a package -RUN /venv/bin/python -m pip install . && \ +RUN uv pip install . && \ find . 
-mindepth 1 -delete # Switch to appuser USER appuser -ENTRYPOINT ["/venv/bin/aider"] \ No newline at end of file +ENTRYPOINT ["/venv/bin/aider"] diff --git a/pyproject.toml b/pyproject.toml index 8efec605739..5daab07f281 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ classifiers = [ "Programming Language :: Python", "Topic :: Software Development", ] -requires-python = ">=3.10,<3.13" +requires-python = ">=3.10" dynamic = ["dependencies", "optional-dependencies", "version"] [project.urls] @@ -26,13 +26,13 @@ aider = "aider.main:main" aider-ce = "aider.main:main" [tool.setuptools.dynamic] -dependencies = { file = "requirements.txt" } +dependencies = { file = "requirements.in" } [tool.setuptools.dynamic.optional-dependencies] -dev = { file = "requirements/requirements-dev.txt" } -help = { file = "requirements/requirements-help.txt" } -browser = { file = "requirements/requirements-browser.txt" } -playwright = { file = "requirements/requirements-playwright.txt" } +dev = { file = "requirements/requirements-dev.in" } +help = { file = "requirements/requirements-help.in" } +browser = { file = "requirements/requirements-browser.in" } +playwright = { file = "requirements/requirements-playwright.in" } [tool.setuptools] include-package-data = true diff --git a/requirements.txt b/requirements.txt index 4a3a5321b1e..4c9a7a4a75a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -219,6 +219,7 @@ jsonschema==4.25.0 # -c requirements/common-constraints.txt # -r requirements/requirements.in # litellm + # mcp jsonschema-specifications==2025.4.1 # via # -c requirements/common-constraints.txt @@ -396,6 +397,10 @@ python-dotenv==1.1.1 # -c requirements/common-constraints.txt # litellm # pydantic-settings +python-multipart==0.0.20 + # via + # -c requirements/common-constraints.txt + # mcp pyyaml==6.0.2 # via # -c requirements/common-constraints.txt @@ -495,6 +500,7 @@ tqdm==4.67.1 # openai # via # -c requirements/common-constraints.txt + # -r 
requirements/requirements.in # tree-sitter-language-pack tree-sitter-c-sharp==0.23.1 # via @@ -562,4 +568,4 @@ zipp==3.23.0 # importlib-metadata tree-sitter==0.23.2; python_version < "3.10" -tree-sitter==0.24.0; python_version >= "3.10" +tree-sitter>=0.25.1; python_version >= "3.10" diff --git a/requirements/common-constraints.txt b/requirements/common-constraints.txt index 422442f9bc2..7740127b793 100644 --- a/requirements/common-constraints.txt +++ b/requirements/common-constraints.txt @@ -241,6 +241,7 @@ jsonschema==4.25.0 # -r requirements/requirements.in # altair # litellm + # mcp jsonschema-specifications==2025.4.1 # via jsonschema kiwisolver==1.4.8 @@ -311,6 +312,43 @@ numpy==1.26.4 # soundfile # streamlit # transformers +nvidia-cublas-cu12==12.8.4.1 + # via + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.8.90 + # via torch +nvidia-cuda-nvrtc-cu12==12.8.93 + # via torch +nvidia-cuda-runtime-cu12==12.8.90 + # via torch +nvidia-cudnn-cu12==9.10.2.21 + # via torch +nvidia-cufft-cu12==11.3.3.83 + # via torch +nvidia-cufile-cu12==1.13.1.3 + # via torch +nvidia-curand-cu12==10.3.9.90 + # via torch +nvidia-cusolver-cu12==11.7.3.90 + # via torch +nvidia-cusparse-cu12==12.5.8.93 + # via + # nvidia-cusolver-cu12 + # torch +nvidia-cusparselt-cu12==0.7.1 + # via torch +nvidia-nccl-cu12==2.27.3 + # via torch +nvidia-nvjitlink-cu12==12.8.93 + # via + # nvidia-cufft-cu12 + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 + # torch +nvidia-nvtx-cu12==12.8.90 + # via torch openai==1.99.1 # via litellm oslex==0.1.3 @@ -454,8 +492,11 @@ python-dateutil==2.9.0.post0 # pandas # posthog python-dotenv==1.1.1 - # via litellm + # via + # litellm # pydantic-settings +python-multipart==0.0.20 + # via mcp pytz==2025.2 # via pandas pyyaml==6.0.2 @@ -513,6 +554,7 @@ setuptools==80.9.0 # llama-index-core # pip-tools # torch + # triton shellingham==1.5.4 # via typer shtab==1.7.2 @@ -580,7 +622,9 @@ tqdm==4.67.1 transformers==4.55.0 # via 
sentence-transformers tree-sitter==0.25.1 - # via tree-sitter-language-pack + # via + # -r requirements/requirements.in + # tree-sitter-language-pack tree-sitter-c-sharp==0.23.1 # via tree-sitter-language-pack tree-sitter-embedded-template==0.23.2 @@ -589,6 +633,8 @@ tree-sitter-language-pack==0.9.0 # via grep-ast tree-sitter-yaml==0.7.1 # via tree-sitter-language-pack +triton==3.4.0 + # via torch typer==0.16.0 # via -r requirements/requirements-dev.in typing-extensions==4.14.1 @@ -620,7 +666,7 @@ typing-inspect==0.9.0 # dataclasses-json # llama-index-core typing-inspection==0.4.1 - # via + # via # pydantic # pydantic-settings tzdata==2025.2 @@ -637,6 +683,8 @@ uvicorn==0.34.2 # via mcp virtualenv==20.33.1 # via pre-commit +watchdog==6.0.0 + # via streamlit watchfiles==1.1.0 # via -r requirements/requirements.in wcwidth==0.2.13 diff --git a/requirements/requirements-browser.txt b/requirements/requirements-browser.txt index 5d7ade977f5..88c3f1f5013 100644 --- a/requirements/requirements-browser.txt +++ b/requirements/requirements-browser.txt @@ -153,3 +153,7 @@ urllib3==2.5.0 # via # -c requirements/common-constraints.txt # requests +watchdog==6.0.0 + # via + # -c requirements/common-constraints.txt + # streamlit diff --git a/requirements/requirements-help.in b/requirements/requirements-help.in index 8fdc9ddd6e3..b82009ef174 100644 --- a/requirements/requirements-help.in +++ b/requirements/requirements-help.in @@ -1,7 +1,7 @@ llama-index-embeddings-huggingface # Because sentence-transformers doesn't like >=2 -numpy<2 +numpy>=1.26.4 # Mac x86 only supports 2.2.2 # https://discuss.pytorch.org/t/why-no-macosx-x86-64-build-after-torch-2-2-2-cp39-none-macosx-10-9-x86-64-whl/204546/2 diff --git a/requirements/requirements-help.txt b/requirements/requirements-help.txt index d8413ee4874..3387f9f5508 100644 --- a/requirements/requirements-help.txt +++ b/requirements/requirements-help.txt @@ -192,6 +192,68 @@ numpy==1.26.4 # scikit-learn # scipy # transformers 
+nvidia-cublas-cu12==12.8.4.1 + # via + # -c requirements/common-constraints.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.8.90 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-cuda-nvrtc-cu12==12.8.93 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-cuda-runtime-cu12==12.8.90 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-cudnn-cu12==9.10.2.21 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-cufft-cu12==11.3.3.83 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-cufile-cu12==1.13.1.3 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-curand-cu12==10.3.9.90 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-cusolver-cu12==11.7.3.90 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-cusparse-cu12==12.5.8.93 + # via + # -c requirements/common-constraints.txt + # nvidia-cusolver-cu12 + # torch +nvidia-cusparselt-cu12==0.7.1 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-nccl-cu12==2.27.3 + # via + # -c requirements/common-constraints.txt + # torch +nvidia-nvjitlink-cu12==12.8.93 + # via + # -c requirements/common-constraints.txt + # nvidia-cufft-cu12 + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 + # torch +nvidia-nvtx-cu12==12.8.90 + # via + # -c requirements/common-constraints.txt + # torch packaging==25.0 # via # -c requirements/common-constraints.txt @@ -265,6 +327,7 @@ setuptools==80.9.0 # -c requirements/common-constraints.txt # llama-index-core # torch + # triton sniffio==1.3.1 # via # -c requirements/common-constraints.txt @@ -309,6 +372,10 @@ transformers==4.55.0 # via # -c requirements/common-constraints.txt # sentence-transformers +triton==3.4.0 + # via + # -c requirements/common-constraints.txt + # torch typing-extensions==4.14.1 # via # -c requirements/common-constraints.txt diff --git a/requirements/requirements.in 
b/requirements/requirements.in index 9e0a6234721..02bfc83d0f3 100644 --- a/requirements/requirements.in +++ b/requirements/requirements.in @@ -1,36 +1,36 @@ -pydub -configargparse -GitPython -jsonschema -rich +pydub>=0.25.1 +configargparse>=1.7.1 +GitPython>=3.1.45 +jsonschema>=4.25.0 +rich>=14.1.0 prompt_toolkit -backoff -pathspec -diskcache +backoff>=2.2.1 +pathspec>=0.12.1 +diskcache>=5.6.3 grep_ast -packaging -sounddevice -soundfile -beautifulsoup4 -PyYAML -diff-match-patch -pypandoc -litellm -flake8 +packaging>=25.0 +sounddevice>=0.5.2 +soundfile>=0.13.1 +beautifulsoup4>=4.13.4 +PyYAML>=6.0.2 +diff-match-patch>=20241021 +pypandoc>=1.15 +litellm>=1.75.0 +flake8>=7.3.0 importlib_resources -pyperclip -posthog -mixpanel -pexpect -json5 -psutil -watchfiles -socksio -pillow -shtab -oslex -google-generativeai -mcp==1.12.3 +pyperclip>=1.9.0 +posthog>=6.4.1 +mixpanel>=4.10.1 +pexpect>=4.9.0 +json5>=0.12.0 +psutil>=7.0.0 +watchfiles>=1.1.0 +socksio>=1.0.0 +pillow>=11.3.0 +shtab>=1.7.2 +oslex>=0.1.3 +google-generativeai>=0.8.5 +mcp>=1.12.3 # The proper dependency is networkx[default], but this brings # in matplotlib and a bunch of other deps @@ -38,16 +38,19 @@ mcp==1.12.3 # We really only need networkx itself and scipy for the repomap. # # >3.5 seems to not be available for py3.10 -networkx<3.5 +networkx>=3.4.2 # This is the one networkx dependency that we need. # Including it here explicitly because we # didn't specify networkx[default] above. 
# # 1.16 onwards only supports python3.11+ -scipy<1.16 +scipy>=1.15.3 # GitHub Release action failing on "KeyError: 'home-page'" # https://github.com/pypa/twine/blob/6fbf880ee60915cf1666348c4bdd78a10415f2ac/twine/__init__.py#L40 # Uses importlib-metadata -importlib-metadata<8.0.0 +importlib-metadata>=7.2.1 + +tree-sitter==0.23.2; python_version < "3.10" +tree-sitter>=0.25.1; python_version >= "3.10" \ No newline at end of file diff --git a/requirements/tree-sitter.in b/requirements/tree-sitter.in index eba2e6770cf..1d46f67e313 100644 --- a/requirements/tree-sitter.in +++ b/requirements/tree-sitter.in @@ -1,3 +1,3 @@ tree-sitter==0.23.2; python_version < "3.10" -tree-sitter==0.24.0; python_version >= "3.10" +tree-sitter>=0.25.1; python_version >= "3.10" diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 21073cb0c8a..73e13febb2a 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -442,12 +442,47 @@ def test_ollama_num_ctx_set_when_missing(self, mock_token_count, mock_completion model=model.name, messages=messages, stream=False, - tools=[], temperature=0, num_ctx=expected_ctx, timeout=600, ) + @patch("aider.models.litellm.completion") + def test_modern_tool_call_propagation(self, mock_completion): + # Test modern tool calling (used for MCP Server Tool Calls) + model = Model("gpt-4") + messages = [{"role": "user", "content": "Hello"}] + + model.send_completion( + messages, functions=None, stream=False, tools=[dict(type="function", function="test")] + ) + + mock_completion.assert_called_with( + model=model.name, + messages=messages, + stream=False, + tools=[dict(type="function", function="test")], + temperature=0, + timeout=600, + ) + + @patch("aider.models.litellm.completion") + def test_legacy_tool_call_propagation(self, mock_completion): + # Test legacy function calling (used for legacy server tool calling) + model = Model("gpt-4") + messages = [{"role": "user", "content": "Hello"}] + + model.send_completion(messages,
functions=["test"], stream=False) + + mock_completion.assert_called_with( + model=model.name, + messages=messages, + stream=False, + tools=[dict(type="function", function="test")], + temperature=0, + timeout=600, + ) + @patch("aider.models.litellm.completion") def test_ollama_uses_existing_num_ctx(self, mock_completion): model = Model("ollama/llama3") @@ -461,7 +496,6 @@ def test_ollama_uses_existing_num_ctx(self, mock_completion): model=model.name, messages=messages, stream=False, - tools=[], temperature=0, num_ctx=4096, timeout=600, @@ -480,7 +514,6 @@ def test_non_ollama_no_num_ctx(self, mock_completion): model=model.name, messages=messages, stream=False, - tools=[], temperature=0, timeout=600, ) @@ -512,7 +545,6 @@ def test_request_timeout_default(self, mock_completion): model=model.name, messages=messages, stream=False, - tools=[], temperature=0, timeout=600, # Default timeout ) @@ -528,7 +560,6 @@ def test_request_timeout_from_extra_params(self, mock_completion): model=model.name, messages=messages, stream=False, - tools=[], temperature=0, timeout=300, # From extra_params ) @@ -544,7 +575,6 @@ def test_use_temperature_in_send_completion(self, mock_completion): model=model.name, messages=messages, stream=False, - tools=[], temperature=0, timeout=600, ) @@ -564,7 +594,6 @@ def test_use_temperature_in_send_completion(self, mock_completion): mock_completion.assert_called_with( model=model.name, messages=messages, - tools=[], stream=False, temperature=0.7, timeout=600,