diff --git a/.flake8 b/.flake8
index 3963261f7b7..c923419b923 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,3 @@
[flake8]
ignore = E203,W503
-max-line-length = 100
+max-line-length = 120
diff --git a/.github/workflows/check_pypi_version.yml b/.github/workflows/check_pypi_version.yml
index b383e87be88..859020b239a 100644
--- a/.github/workflows/check_pypi_version.yml
+++ b/.github/workflows/check_pypi_version.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.10", "3.11", "3.12"]
+ python-version: ["3.12", "3.11", "3.10"]
steps:
- name: Set up Python ${{ matrix.python-version }}
@@ -23,14 +23,14 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- - name: Install aider-chat
- run: pip install aider-chat
+ - name: Install aider-ce
+ run: pip install aider-ce
- name: Get installed aider version
id: installed_version
run: |
set -x # Enable debugging output
- aider_version_output=$(aider --version)
+ aider_version_output=$(aider-ce --version)
if [ $? -ne 0 ]; then
echo "Error: 'aider --version' command failed."
exit 1
diff --git a/.github/workflows/docker-build-test.yml b/.github/workflows/docker-build-test.yml
index f5872ce8b8f..8c68f495047 100644
--- a/.github/workflows/docker-build-test.yml
+++ b/.github/workflows/docker-build-test.yml
@@ -50,7 +50,7 @@ jobs:
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: false
- target: aider
+ target: aider-ce
- name: Build Docker images (Push)
if: ${{ github.event_name != 'pull_request' }}
@@ -60,26 +60,5 @@ jobs:
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: true
- tags: ${{ secrets.DOCKERHUB_USERNAME }}/aider:dev
- target: aider
-
- - name: Build Docker full image (PR)
- if: ${{ github.event_name == 'pull_request' }}
- uses: docker/build-push-action@v5
- with:
- context: .
- file: ./docker/Dockerfile
- platforms: linux/amd64,linux/arm64
- push: false
- target: aider-full
-
- - name: Build Docker full image (Push)
- if: ${{ github.event_name != 'pull_request' }}
- uses: docker/build-push-action@v5
- with:
- context: .
- file: ./docker/Dockerfile
- platforms: linux/amd64,linux/arm64
- push: true
- tags: ${{ secrets.DOCKERHUB_USERNAME }}/aider-full:dev
- target: aider-full
+ tags: ${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:dev
+ target: aider-ce
diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml
index 9e7efc2f5eb..63fa5650399 100644
--- a/.github/workflows/docker-release.yml
+++ b/.github/workflows/docker-release.yml
@@ -35,18 +35,7 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
- ${{ secrets.DOCKERHUB_USERNAME }}/aider:${{ github.ref_name }}
- ${{ secrets.DOCKERHUB_USERNAME }}/aider:latest
- target: aider
+ ${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:${{ github.ref_name }}
+ ${{ secrets.DOCKERHUB_USERNAME }}/aider-ce:latest
+ target: aider-ce
- - name: Build and push Docker full image
- uses: docker/build-push-action@v5
- with:
- context: .
- file: ./docker/Dockerfile
- platforms: linux/amd64,linux/arm64
- push: true
- tags: |
- ${{ secrets.DOCKERHUB_USERNAME }}/aider-full:${{ github.ref_name }}
- ${{ secrets.DOCKERHUB_USERNAME }}/aider-full:latest
- target: aider-full
diff --git a/.github/workflows/issues.yml b/.github/workflows/issues.yml
index 29751ebfb79..e1326a88e8b 100644
--- a/.github/workflows/issues.yml
+++ b/.github/workflows/issues.yml
@@ -25,5 +25,5 @@ jobs:
- name: Run issues script
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }}
run: python scripts/issues.py --yes
diff --git a/.github/workflows/ubuntu-tests.yml b/.github/workflows/ubuntu-tests.yml
index 753470af066..02dcc53a772 100644
--- a/.github/workflows/ubuntu-tests.yml
+++ b/.github/workflows/ubuntu-tests.yml
@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.10", "3.11", "3.12"]
+ python-version: ["3.12", "3.11", "3.10"]
steps:
- name: Check out repository
diff --git a/.github/workflows/windows-tests.yml b/.github/workflows/windows-tests.yml
index f79f84b6640..f5734cf188b 100644
--- a/.github/workflows/windows-tests.yml
+++ b/.github/workflows/windows-tests.yml
@@ -25,7 +25,7 @@ jobs:
runs-on: windows-latest
strategy:
matrix:
- python-version: ["3.10", "3.11", "3.12"]
+ python-version: ["3.12", "3.11", "3.10"]
steps:
- name: Check out repository
diff --git a/.github/workflows/windows_check_pypi_version.yml b/.github/workflows/windows_check_pypi_version.yml
index 6bd48fdf644..6273feae843 100644
--- a/.github/workflows/windows_check_pypi_version.yml
+++ b/.github/workflows/windows_check_pypi_version.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: windows-latest
strategy:
matrix:
- python-version: ["3.10", "3.11", "3.12"]
+ python-version: ["3.12", "3.11", "3.10"]
defaults:
run:
shell: pwsh # Use PowerShell for all run steps
@@ -26,14 +26,14 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- - name: Install aider-chat
- run: pip install aider-chat
+ - name: Install aider-ce
+ run: pip install aider-ce
- name: Get installed aider version
id: installed_version
run: |
Write-Host "Running 'aider --version'..."
- $aider_version_output = aider --version
+ $aider_version_output = aider-ce --version
if ($LASTEXITCODE -ne 0) {
Write-Error "Error: 'aider --version' command failed."
exit 1
diff --git a/README.md b/README.md
index d9b54aa2663..5b0dc18aa86 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,39 @@
+## Fork Additions
+
+This project aims to be compatible with upstream Aider, but with priority commits merged in and with some opportunistic bug fixes and optimizations.
+
+### Merged PRs
+
+* [MCP: #3937](https://github.com/Aider-AI/aider/pull/3937)
+ * [MCP Multi Tool Response](https://github.com/quinlanjager/aider/pull/1)
+* [Navigator Mode: #3781](https://github.com/Aider-AI/aider/pull/3781)
+ * [Navigator Mode Large File Count](https://github.com/Aider-AI/aider/commit/b88a7bda649931798209945d9687718316c7427f)
+* [Qwen 3: #4383](https://github.com/Aider-AI/aider/pull/4383)
+* [Fuzzy Search: #4366](https://github.com/Aider-AI/aider/pull/4366)
+* [Map Cache Location Config: #2911](https://github.com/Aider-AI/aider/pull/2911)
+* [Enhanced System Prompts: #3804](https://github.com/Aider-AI/aider/pull/3804)
+* [Repo Map File Name Truncation Fix: #4320](https://github.com/Aider-AI/aider/pull/4320)
+
+### Other Updates
+
+* [Added Remote MCP Tool Calls With HTTP Streaming](https://github.com/Aider-AI/aider/commit/a86039f73579df7c32fee910967827c9fccdec0d)
+ * [Enforce single tool call at a time](https://github.com/Aider-AI/aider/commit/3346c3e6194096cef64b1899b017bde36a65f794)
+ * [Upgraded MCP dep to 1.12.3 for Remote MCP Tool Calls](https://github.com/dwash96/aider-ce/commit/a91ee1c03627a31093364fd2a09e654781b1b879)
+ * [Updated base Python version to 3.12 to better support navigator mode (might consider undoing this, if dependency list supports it)](https://github.com/dwash96/aider-ce/commit/9ed416d523c11362a3ba9fc4c02134e0e79d41fc)
+* [Suppress LiteLLM asyncio errors that clutter output](https://github.com/dwash96/aider-ce/issues/6)
+* [Updated Docker File Build Process](https://github.com/Aider-AI/aider/commit/cbab01458d0a35c03b30ac2f6347a74fc2b9f662)
+ * [Manually install necessary ubuntu dependencies](https://github.com/dwash96/aider-ce/issues/14)
+* [.gitignore updates](https://github.com/dwash96/aider-ce/commit/7c7e803fa63d1acd860eef1423e5a03220df6017)
+* [Experimental Context Compaction For Longer Running Generation Tasks](https://github.com/dwash96/aider-ce/issues/6)
+
+### Other Notes
+* [MCP Configuration](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/mcp.md)
+
+
AI Pair Programming in Your Terminal
diff --git a/aider/__init__.py b/aider/__init__.py
index 71b3e3f366b..76fec311e99 100644
--- a/aider/__init__.py
+++ b/aider/__init__.py
@@ -1,6 +1,6 @@
from packaging import version
-__version__ = "0.87.1.dev"
+__version__ = "0.87.2.dev"
safe_version = __version__
try:
diff --git a/aider/args.py b/aider/args.py
index ddc8c132d35..8607828b76b 100644
--- a/aider/args.py
+++ b/aider/args.py
@@ -305,6 +305,16 @@ def get_parser(default_config_files, git_root):
" minified JS files etc. (default: 100)"
),
)
+ group.add_argument(
+ "--map-cache-dir",
+ metavar="MAP_CACHE_DIR",
+ dest="map_cache_dir",
+ default=".",
+ help=(
+ "Directory for the repository map cache .aider.tags.cache.v3"
+ " (default: current directory)"
+ ),
+ )
##########
group = parser.add_argument_group("History Files")
diff --git a/aider/change_tracker.py b/aider/change_tracker.py
index f826c3975b5..48d38c21c3f 100644
--- a/aider/change_tracker.py
+++ b/aider/change_tracker.py
@@ -1,23 +1,24 @@
import time
import uuid
from collections import defaultdict
-from datetime import datetime
+
class ChangeTracker:
"""
Tracks changes made to files for the undo functionality.
This enables granular editing operations with the ability to undo specific changes.
"""
-
+
def __init__(self):
self.changes = {} # change_id -> change_info
self.files_changed = defaultdict(list) # file_path -> [change_ids]
-
- def track_change(self, file_path, change_type, original_content, new_content,
- metadata=None, change_id=None):
+
+ def track_change(
+ self, file_path, change_type, original_content, new_content, metadata=None, change_id=None
+ ):
"""
Record a change to enable future undo operations.
-
+
Parameters:
- file_path: Path to the file that was changed
- change_type: Type of change (e.g., 'replacetext', 'insertlines')
@@ -25,7 +26,7 @@ def track_change(self, file_path, change_type, original_content, new_content,
- new_content: New content after the change
- metadata: Additional information about the change (line numbers, positions, etc.)
- change_id: Optional custom ID for the change (if None, one will be generated)
-
+
Returns:
- change_id: Unique identifier for the change
"""
@@ -39,61 +40,60 @@ def track_change(self, file_path, change_type, original_content, new_content,
# Defensive check: Ensure the ID isn't literally the string 'False' or boolean False
# which might indicate an upstream issue or unexpected input.
- if current_change_id == 'False' or current_change_id is False:
- # Log a warning? For now, generate a new ID to prevent storing False.
- print(f"Warning: change_id evaluated to False for {file_path}. Generating new ID.")
- current_change_id = self._generate_change_id()
-
+ if current_change_id == "False" or current_change_id is False:
+ # Log a warning? For now, generate a new ID to prevent storing False.
+ print(f"Warning: change_id evaluated to False for {file_path}. Generating new ID.")
+ current_change_id = self._generate_change_id()
change = {
# Use the confirmed string ID here
- 'id': current_change_id,
- 'file_path': file_path,
- 'type': change_type,
- 'original': original_content,
- 'new': new_content,
- 'metadata': metadata or {},
- 'timestamp': time.time()
+ "id": current_change_id,
+ "file_path": file_path,
+ "type": change_type,
+ "original": original_content,
+ "new": new_content,
+ "metadata": metadata or {},
+ "timestamp": time.time(),
}
# Use the confirmed string ID for storage and return
self.changes[current_change_id] = change
self.files_changed[file_path].append(current_change_id)
return current_change_id
-
+
def undo_change(self, change_id):
"""
Get information needed to reverse a specific change by ID.
-
+
Parameters:
- change_id: ID of the change to undo
-
+
Returns:
- (success, message, change_info): Tuple with success flag, message, and change information
"""
if change_id not in self.changes:
return False, f"Change ID {change_id} not found", None
-
+
change = self.changes[change_id]
-
+
# Mark this change as undone by removing it from the tracking dictionaries
- self.files_changed[change['file_path']].remove(change_id)
- if not self.files_changed[change['file_path']]:
- del self.files_changed[change['file_path']]
-
+ self.files_changed[change["file_path"]].remove(change_id)
+ if not self.files_changed[change["file_path"]]:
+ del self.files_changed[change["file_path"]]
+
# Keep the change in the changes dict but mark it as undone
- change['undone'] = True
- change['undone_at'] = time.time()
-
+ change["undone"] = True
+ change["undone_at"] = time.time()
+
return True, f"Undid change {change_id} in {change['file_path']}", change
-
+
def get_last_change(self, file_path):
"""
Get the most recent change for a specific file.
-
+
Parameters:
- file_path: Path to the file
-
+
Returns:
- change_id or None if no changes found
"""
@@ -101,15 +101,15 @@ def get_last_change(self, file_path):
if not changes:
return None
return changes[-1]
-
+
def list_changes(self, file_path=None, limit=10):
"""
List recent changes, optionally filtered by file.
-
+
Parameters:
- file_path: Optional path to filter changes by file
- limit: Maximum number of changes to list
-
+
Returns:
- List of change dictionaries
"""
@@ -120,14 +120,14 @@ def list_changes(self, file_path=None, limit=10):
else:
# Get all changes
changes = list(self.changes.values())
-
+
# Filter out undone changes and sort by timestamp (most recent first)
- changes = [c for c in changes if not c.get('undone', False)]
- changes = sorted(changes, key=lambda c: c['timestamp'], reverse=True)
-
+ changes = [c for c in changes if not c.get("undone", False)]
+ changes = sorted(changes, key=lambda c: c["timestamp"], reverse=True)
+
# Apply limit
return changes[:limit]
-
+
def _generate_change_id(self):
"""Generate a unique ID for a change."""
return str(uuid.uuid4())[:8] # Short, readable ID
diff --git a/aider/coders/__init__.py b/aider/coders/__init__.py
index 138540c6124..648e36fb9a2 100644
--- a/aider/coders/__init__.py
+++ b/aider/coders/__init__.py
@@ -8,8 +8,8 @@
from .editor_editblock_coder import EditorEditBlockCoder
from .editor_whole_coder import EditorWholeFileCoder
from .help_coder import HelpCoder
-from .patch_coder import PatchCoder
from .navigator_coder import NavigatorCoder
+from .patch_coder import PatchCoder
from .udiff_coder import UnifiedDiffCoder
from .udiff_simple import UnifiedDiffSimpleCoder
from .wholefile_coder import WholeFileCoder
diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py
index 4e2d9d502e8..c033a121bd9 100755
--- a/aider/coders/base_coder.py
+++ b/aider/coders/base_coder.py
@@ -128,10 +128,12 @@ class Coder:
file_watcher = None
mcp_servers = None
mcp_tools = None
-
+
# Context management settings (for all modes)
context_management_enabled = False # Disabled by default except for navigator mode
- large_file_token_threshold = 25000 # Files larger than this will be truncated when context management is enabled
+ large_file_token_threshold = (
+ 25000 # Files larger than this will be truncated when context management is enabled
+ )
@classmethod
def create(
@@ -172,8 +174,7 @@ def create(
done_messages = from_coder.done_messages
if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:
try:
- done_messages = from_coder.summarizer.summarize_all(
- done_messages)
+ done_messages = from_coder.summarizer.summarize_all(done_messages)
except ValueError:
# If summarization fails, keep the original messages and warn the user
io.tool_warning(
@@ -231,8 +232,7 @@ def get_announcements(self):
else:
prefix = "Model"
- output = f"{prefix}: {main_model.name} with {
- self.edit_format} edit format"
+ output = f"{prefix}: {main_model.name} with {self.edit_format} edit format"
# Check for thinking token budget
thinking_tokens = main_model.get_thinking_tokens()
@@ -281,13 +281,11 @@ def get_announcements(self):
map_tokens = self.repo_map.max_map_tokens
if map_tokens > 0:
refresh = self.repo_map.refresh
- lines.append(
- f"Repo-map: using {map_tokens} tokens, {refresh} refresh")
+ lines.append(f"Repo-map: using {map_tokens} tokens, {refresh} refresh")
max_map_tokens = self.main_model.get_repo_map_tokens() * 2
if map_tokens > max_map_tokens:
lines.append(
- f"Warning: map-tokens > {
- max_map_tokens} is not recommended. Too much"
+ f"Warning: map-tokens > {max_map_tokens} is not recommended. Too much"
" irrelevant code can confuse LLMs."
)
else:
@@ -307,8 +305,7 @@ def get_announcements(self):
lines.append("Restored previous conversation history.")
if self.io.multiline_mode:
- lines.append(
- "Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text")
+ lines.append("Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text")
return lines
@@ -361,7 +358,11 @@ def __init__(
enable_context_compaction=False,
context_compaction_max_tokens=None,
context_compaction_summary_tokens=8192,
+ map_cache_dir=".",
):
+ # initialize from args.map_cache_dir
+ self.map_cache_dir = map_cache_dir
+
# Fill in a dummy Analytics if needed, but it is never .enable()'d
self.analytics = analytics if analytics is not None else Analytics()
@@ -391,7 +392,6 @@ def __init__(
self.mcp_servers = mcp_servers
self.enable_context_compaction = enable_context_compaction
-
self.context_compaction_max_tokens = context_compaction_max_tokens
self.context_compaction_summary_tokens = context_compaction_summary_tokens
@@ -478,13 +478,11 @@ def __init__(
for fname in fnames:
fname = Path(fname)
if self.repo and self.repo.git_ignored_file(fname) and not self.add_gitignore_files:
- self.io.tool_warning(
- f"Skipping {fname} that matches gitignore spec.")
+ self.io.tool_warning(f"Skipping {fname} that matches gitignore spec.")
continue
if self.repo and self.repo.ignored_file(fname):
- self.io.tool_warning(
- f"Skipping {fname} that matches aiderignore spec.")
+ self.io.tool_warning(f"Skipping {fname} that matches aiderignore spec.")
continue
if not fname.exists():
@@ -495,8 +493,7 @@ def __init__(
continue
if not fname.is_file():
- self.io.tool_warning(
- f"Skipping {fname} that is not a normal file.")
+ self.io.tool_warning(f"Skipping {fname} that is not a normal file.")
continue
fname = str(fname.resolve())
@@ -514,8 +511,7 @@ def __init__(
if os.path.exists(abs_fname):
self.abs_read_only_fnames.add(abs_fname)
else:
- self.io.tool_warning(
- f"Error: Read-only file {fname} does not exist. Skipping.")
+ self.io.tool_warning(f"Error: Read-only file {fname} does not exist. Skipping.")
if map_tokens is None:
use_repo_map = main_model.use_repo_map
@@ -525,13 +521,12 @@ def __init__(
max_inp_tokens = self.main_model.info.get("max_input_tokens") or 0
- has_map_prompt = hasattr(
- self, "gpt_prompts") and self.gpt_prompts.repo_content_prefix
+ has_map_prompt = hasattr(self, "gpt_prompts") and self.gpt_prompts.repo_content_prefix
if use_repo_map and self.repo and has_map_prompt:
self.repo_map = RepoMap(
map_tokens,
- self.root,
+ self.map_cache_dir,
self.main_model,
io,
self.gpt_prompts.repo_content_prefix,
@@ -554,8 +549,7 @@ def __init__(
if not self.done_messages and restore_chat_history:
history_md = self.io.read_text(self.io.chat_history_file)
if history_md:
- self.done_messages = utils.split_chat_history_markdown(
- history_md)
+ self.done_messages = utils.split_chat_history_markdown(history_md)
self.summarize_start()
# Linting and testing
@@ -640,8 +634,7 @@ def get_abs_fnames_content(self):
if content is None:
relative_fname = self.get_rel_fname(fname)
- self.io.tool_warning(
- f"Dropping {relative_fname} from the chat.")
+ self.io.tool_warning(f"Dropping {relative_fname} from the chat.")
self.abs_fnames.remove(fname)
else:
yield fname, content
@@ -690,27 +683,31 @@ def get_files_content(self, fnames=None):
if self.context_management_enabled:
# Calculate tokens for this file
file_tokens = self.main_model.token_count(content)
-
+
if file_tokens > self.large_file_token_threshold:
# Truncate the file content
lines = content.splitlines()
- total_lines = len(lines)
-
+
# Keep the first and last parts of the file with a marker in between
- keep_lines = self.large_file_token_threshold // 40 # Rough estimate of tokens per line
- first_chunk = lines[:keep_lines//2]
- last_chunk = lines[-(keep_lines//2):]
-
+ keep_lines = (
+ self.large_file_token_threshold // 40
+ ) # Rough estimate of tokens per line
+ first_chunk = lines[: keep_lines // 2]
+ last_chunk = lines[-(keep_lines // 2) :]
+
truncated_content = "\n".join(first_chunk)
- truncated_content += f"\n\n... [File truncated due to size ({file_tokens} tokens). Use /context-management to toggle truncation off] ...\n\n"
+ truncated_content += (
+ f"\n\n... [File truncated due to size ({file_tokens} tokens). Use"
+ " /context-management to toggle truncation off] ...\n\n"
+ )
truncated_content += "\n".join(last_chunk)
-
+
# Add message about truncation
self.io.tool_output(
f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). "
"Use /context-management to toggle truncation off if needed."
)
-
+
prompt += truncated_content
else:
prompt += content
@@ -730,38 +727,42 @@ def get_read_only_files_content(self):
prompt += "\n"
prompt += relative_fname
prompt += f"\n{self.fence[0]}\n"
-
+
# Apply context management if enabled for large files (same as get_files_content)
if self.context_management_enabled:
# Calculate tokens for this file
file_tokens = self.main_model.token_count(content)
-
+
if file_tokens > self.large_file_token_threshold:
# Truncate the file content
lines = content.splitlines()
- total_lines = len(lines)
-
+
# Keep the first and last parts of the file with a marker in between
- keep_lines = self.large_file_token_threshold // 40 # Rough estimate of tokens per line
- first_chunk = lines[:keep_lines//2]
- last_chunk = lines[-(keep_lines//2):]
-
+ keep_lines = (
+ self.large_file_token_threshold // 40
+ ) # Rough estimate of tokens per line
+ first_chunk = lines[: keep_lines // 2]
+ last_chunk = lines[-(keep_lines // 2) :]
+
truncated_content = "\n".join(first_chunk)
- truncated_content += f"\n\n... [File truncated due to size ({file_tokens} tokens). Use /context-management to toggle truncation off] ...\n\n"
+ truncated_content += (
+ f"\n\n... [File truncated due to size ({file_tokens} tokens). Use"
+ " /context-management to toggle truncation off] ...\n\n"
+ )
truncated_content += "\n".join(last_chunk)
-
+
# Add message about truncation
self.io.tool_output(
f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). "
"Use /context-management to toggle truncation off if needed."
)
-
+
prompt += truncated_content
else:
prompt += content
else:
prompt += content
-
+
prompt += f"{self.fence[1]}\n"
return prompt
@@ -813,12 +814,10 @@ def get_repo_map(self, force_refresh=False):
mentioned_fnames = self.get_file_mentions(cur_msg_text)
mentioned_idents = self.get_ident_mentions(cur_msg_text)
- mentioned_fnames.update(
- self.get_ident_filename_matches(mentioned_idents))
+ mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))
all_abs_files = set(self.get_all_abs_files())
- repo_abs_read_only_fnames = set(
- self.abs_read_only_fnames) & all_abs_files
+ repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files
chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames
other_files = all_abs_files - chat_files
@@ -882,8 +881,7 @@ def get_readonly_files_messages(self):
if images_message is not None:
readonly_messages += [
images_message,
- dict(role="assistant",
- content="Ok, I will use these images as references."),
+ dict(role="assistant", content="Ok, I will use these images as references."),
]
return readonly_messages
@@ -938,16 +936,14 @@ def get_images_message(self, fnames):
continue
with open(fname, "rb") as image_file:
- encoded_string = base64.b64encode(
- image_file.read()).decode("utf-8")
+ encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
image_url = f"data:{mime_type};base64,{encoded_string}"
rel_fname = self.get_rel_fname(fname)
if mime_type.startswith("image/") and supports_images:
image_messages += [
{"type": "text", "text": f"Image file: {rel_fname}"},
- {"type": "image_url", "image_url": {
- "url": image_url, "detail": "high"}},
+ {"type": "image_url", "image_url": {"url": image_url, "detail": "high"}},
]
elif mime_type == "application/pdf" and supports_pdfs:
image_messages += [
@@ -1002,8 +998,7 @@ def copy_context(self):
def get_input(self):
inchat_files = self.get_inchat_relative_files()
- read_only_files = [self.get_rel_fname(
- fname) for fname in self.abs_read_only_fnames]
+ read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]
all_files = sorted(set(inchat_files + read_only_files))
edit_format = "" if self.edit_format == self.main_model.edit_format else self.edit_format
return self.io.get_input(
@@ -1043,8 +1038,7 @@ def run_one(self, user_message, preproc):
break
if self.num_reflections >= self.max_reflections:
- self.io.tool_warning(
- f"Only {self.max_reflections} reflections allowed, stopping.")
+ self.io.tool_warning(f"Only {self.max_reflections} reflections allowed, stopping.")
return
self.num_reflections += 1
@@ -1147,8 +1141,10 @@ def compact_context_if_needed(self):
if not self.enable_context_compaction:
self.summarize_start()
return
-
- if not self.summarizer.check_max_tokens(self.done_messages, max_tokens=self.context_compaction_max_tokens):
+
+ if not self.summarizer.check_max_tokens(
+ self.done_messages, max_tokens=self.context_compaction_max_tokens
+ ):
return
self.io.tool_output("Compacting chat history to make room for new messages...")
@@ -1341,14 +1337,11 @@ def fmt_system_prompt(self, prompt):
platform_text = self.get_platform_info()
if self.suggest_shell_commands:
- shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(
- platform=platform_text)
- shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(
- platform=platform_text)
+ shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)
+ shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)
rename_with_shell = self.gpt_prompts.rename_with_shell
else:
- shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(
- platform=platform_text)
+ shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)
shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(
platform=platform_text
)
@@ -1424,8 +1417,7 @@ def format_chat_chunks(self):
]
if self.gpt_prompts.system_reminder:
- main_sys += "\n" + \
- self.fmt_system_prompt(self.gpt_prompts.system_reminder)
+ main_sys += "\n" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
chunks = ChatChunks()
@@ -1551,8 +1543,7 @@ def warm_cache_worker():
) or getattr(completion.usage, "cache_read_input_tokens", 0)
if self.verbose:
- self.io.tool_output(
- f"Warmed {format_tokens(cache_hit_tokens)} cached tokens.")
+ self.io.tool_output(f"Warmed {format_tokens(cache_hit_tokens)} cached tokens.")
self.cache_warming_thread = threading.Timer(0, warm_cache_worker)
self.cache_warming_thread.daemon = True
@@ -1567,14 +1558,11 @@ def check_tokens(self, messages):
if max_input_tokens and input_tokens >= max_input_tokens:
self.io.tool_error(
- f"Your estimated chat context of {
- input_tokens:,} tokens exceeds the"
- f" {max_input_tokens:,} token limit for {
- self.main_model.name}!"
+ f"Your estimated chat context of {input_tokens:,} tokens exceeds the"
+ f" {max_input_tokens:,} token limit for {self.main_model.name}!"
)
self.io.tool_output("To reduce the chat context:")
- self.io.tool_output(
- "- Use /drop to remove unneeded files from the chat")
+ self.io.tool_output("- Use /drop to remove unneeded files from the chat")
self.io.tool_output("- Use /clear to clear the chat history")
self.io.tool_output("- Break your code into smaller files")
self.io.tool_output(
@@ -1608,8 +1596,7 @@ def send_message(self, inp):
self.multi_response_content = ""
if self.show_pretty():
- self.waiting_spinner = WaitingSpinner(
- "Waiting for " + self.main_model.name)
+ self.waiting_spinner = WaitingSpinner("Waiting for " + self.main_model.name)
self.waiting_spinner.start()
if self.stream:
self.mdstream = self.io.get_assistant_mdstream()
@@ -1655,8 +1642,7 @@ def send_message(self, inp):
else:
self.io.tool_error(err_msg)
- self.io.tool_output(
- f"Retrying in {retry_delay:.1f} seconds...")
+ self.io.tool_output(f"Retrying in {retry_delay:.1f} seconds...")
time.sleep(retry_delay)
continue
except KeyboardInterrupt:
@@ -1674,13 +1660,11 @@ def send_message(self, inp):
messages[-1]["content"] = self.multi_response_content
else:
messages.append(
- dict(role="assistant",
- content=self.multi_response_content, prefix=True)
+ dict(role="assistant", content=self.multi_response_content, prefix=True)
)
except Exception as err:
self.mdstream = None
- lines = traceback.format_exception(
- type(err), err, err.__traceback__)
+ lines = traceback.format_exception(type(err), err, err.__traceback__)
self.io.tool_warning("".join(lines))
self.io.tool_error(str(err))
self.event("message_send_exception", exception=str(err))
@@ -1693,8 +1677,7 @@ def send_message(self, inp):
# Ensure any waiting spinner is stopped
self._stop_waiting_spinner()
- self.partial_response_content = self.get_multi_response_content_in_progress(
- True)
+ self.partial_response_content = self.get_multi_response_content_in_progress(True)
self.remove_reasoning_content()
self.multi_response_content = ""
@@ -1743,8 +1726,7 @@ def send_message(self, inp):
return
# Process any tools using MCP servers
- tool_call_response = litellm.stream_chunk_builder(
- self.partial_response_tool_call)
+ tool_call_response = litellm.stream_chunk_builder(self.partial_response_tool_call)
if self.process_tool_calls(tool_call_response):
self.num_tool_calls += 1
return self.run(with_message="Continue with tool call response", preproc=False)
@@ -1761,11 +1743,9 @@ def send_message(self, inp):
if self.cur_messages and self.cur_messages[-1]["role"] == "user":
self.cur_messages[-1]["content"] += "\n^C KeyboardInterrupt"
else:
- self.cur_messages += [dict(role="user",
- content="^C KeyboardInterrupt")]
+ self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")]
self.cur_messages += [
- dict(role="assistant",
- content="I see that you interrupted my previous reply.")
+ dict(role="assistant", content="I see that you interrupted my previous reply.")
]
return
@@ -1826,9 +1806,7 @@ def process_tool_calls(self, tool_call_response):
# If there are no arguments, or it's not a string that looks like it could
# be concatenated JSON, just add it and continue.
- if not args_string or not (
- args_string.startswith("{") or args_string.startswith("[")
- ):
+ if not args_string or not (args_string.startswith("{") or args_string.startswith("[")):
expanded_tool_calls.append(tool_call)
continue
@@ -1874,9 +1852,7 @@ def process_tool_calls(self, tool_call_response):
return True
elif self.num_tool_calls >= self.max_tool_calls:
- self.io.tool_warning(
- f"Only {self.max_tool_calls} tool calls allowed, stopping."
- )
+ self.io.tool_warning(f"Only {self.max_tool_calls} tool calls allowed, stopping.")
return False
@@ -1887,8 +1863,7 @@ def _print_tool_call_info(self, server_tool_calls):
for server, tool_calls in server_tool_calls.items():
for tool_call in tool_calls:
self.io.tool_output(f"Tool Call: {tool_call.function.name}")
- self.io.tool_output(
- f"Arguments: {tool_call.function.arguments}")
+ self.io.tool_output(f"Arguments: {tool_call.function.arguments}")
self.io.tool_output(f"MCP Server: {server.name}")
if self.verbose:
@@ -1942,7 +1917,9 @@ async def _exec_server_tools(server, tool_calls_list):
{
"role": "tool",
"tool_call_id": tool_call.id,
- "content": f"Coder does not support local tool: {tool_call.function.name}",
+ "content": (
+ f"Coder does not support local tool: {tool_call.function.name}"
+ ),
}
)
return error_responses
@@ -2003,7 +1980,8 @@ async def _exec_server_tools(server, tool_calls_list):
resource, "mimeType", "unknown mime type"
)
content_parts.append(
- f"[embedded binary resource: {name} ({mime_type})]"
+ "[embedded binary resource:"
+ f" {name} ({mime_type})]"
)
elif hasattr(item, "text"): # TextContent
content_parts.append(item.text)
@@ -2022,7 +2000,8 @@ async def _exec_server_tools(server, tool_calls_list):
except Exception as e:
tool_error = f"Error executing tool call {tool_call.function.name}: \n{e}"
self.io.tool_warning(
- f"Executing {tool_call.function.name} on {server.name} failed: \n Error: {e}\n"
+ f"Executing {tool_call.function.name} on {server.name} failed: \n "
+ f" Error: {e}\n"
)
tool_responses.append(
{"role": "tool", "tool_call_id": tool_call.id, "content": tool_error}
@@ -2050,7 +2029,21 @@ async def _execute_all_tool_calls():
# Run the async execution and collect results
if tool_calls:
- all_results = asyncio.run(_execute_all_tool_calls())
+ all_results = []
+ max_retries = 3
+ for i in range(max_retries):
+ try:
+ all_results = asyncio.run(_execute_all_tool_calls())
+ break
+ except asyncio.exceptions.CancelledError:
+ if i < max_retries - 1:
+ time.sleep(0.1) # Brief pause before retrying
+ else:
+ self.io.tool_warning(
+ "MCP tool execution failed after multiple retries due to cancellation."
+ )
+ all_results = []
+
# Flatten the results from all servers
for server_results in all_results:
tool_responses.extend(server_results)
@@ -2083,7 +2076,21 @@ async def get_all_server_tools():
return [result for result in results if result is not None]
if self.mcp_servers:
- tools = asyncio.run(get_all_server_tools())
+ # Retry initialization in case of CancelledError
+ max_retries = 3
+ for i in range(max_retries):
+ try:
+ tools = asyncio.run(get_all_server_tools())
+ break
+ except asyncio.exceptions.CancelledError:
+ if i < max_retries - 1:
+ time.sleep(0.1) # Brief pause before retrying
+ else:
+ self.io.tool_warning(
+ "MCP tool initialization failed after multiple retries due to"
+ " cancellation."
+ )
+ tools = []
if len(tools) > 0:
self.io.tool_output("MCP servers configured:")
diff --git a/aider/coders/base_prompts.py b/aider/coders/base_prompts.py
index 07f19381365..6a10f9cde07 100644
--- a/aider/coders/base_prompts.py
+++ b/aider/coders/base_prompts.py
@@ -73,12 +73,14 @@ class CoderPrompts:
go_ahead_tip = ""
compaction_prompt = """You are an expert at summarizing conversations.
-The user is going to provide you with a conversation that is getting too long to fit in the context window of a language model.
+The user is going to provide you with a conversation.
+This conversation is getting too long to fit in the context window of a language model.
You need to summarize the conversation to reduce its length, while retaining all the important information.
The summary should contain three parts:
- Overall Goal: What is the user trying to achieve with this conversation?
-- Next Steps: What are the next steps for the language model to take to help the user? Create a checklist of what has been done and what is left to do.
+- Next Steps: What are the next steps for the language model to take to help the user?
+ Create a checklist of what has been done and what is left to do.
- Active files: What files are currently in the context window?
Here is the conversation so far:
diff --git a/aider/coders/editor_editblock_coder.py b/aider/coders/editor_editblock_coder.py
index 98628ed77e1..f43258bc96f 100644
--- a/aider/coders/editor_editblock_coder.py
+++ b/aider/coders/editor_editblock_coder.py
@@ -4,5 +4,6 @@
class EditorEditBlockCoder(EditBlockCoder):
"A coder that uses search/replace blocks, focused purely on editing files."
+
edit_format = "editor-diff"
gpt_prompts = EditorEditBlockPrompts()
diff --git a/aider/coders/editor_whole_coder.py b/aider/coders/editor_whole_coder.py
index 9f37a3698b8..90c7e833247 100644
--- a/aider/coders/editor_whole_coder.py
+++ b/aider/coders/editor_whole_coder.py
@@ -4,5 +4,6 @@
class EditorWholeFileCoder(WholeFileCoder):
"A coder that operates on entire files, focused purely on editing files."
+
edit_format = "editor-whole"
gpt_prompts = EditorWholeFilePrompts()
diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py
index 535053cd88a..0232e9c3e83 100644
--- a/aider/coders/navigator_coder.py
+++ b/aider/coders/navigator_coder.py
@@ -1,90 +1,91 @@
import ast
+import asyncio
+import base64
import json
-import re
-import fnmatch
+import locale
import os
+import platform
+import re
import time
-import random
-import subprocess
import traceback
-import platform
-import locale
-import asyncio
-import base64
-from datetime import datetime
-from pathlib import Path
-import xml.etree.ElementTree as ET
-from xml.etree.ElementTree import ParseError
+
# Add necessary imports if not already present
from collections import defaultdict
-from types import SimpleNamespace
+from datetime import datetime
+from pathlib import Path
from litellm import experimental_mcp_client
-from .base_coder import Coder, ChatChunks
-from .editblock_coder import find_original_update_blocks, do_replace, find_similar_lines
-from .navigator_prompts import NavigatorPrompts
-from .navigator_legacy_prompts import NavigatorLegacyPrompts
-from aider.repo import ANY_GIT_ERROR
-from aider import urls
-from aider import utils
-# Import run_cmd for potentially interactive execution and run_cmd_subprocess for guaranteed non-interactive
-from aider.run_cmd import run_cmd, run_cmd_subprocess
+from aider import urls, utils
+
# Import the change tracker
from aider.change_tracker import ChangeTracker
-# Import tool functions
-from aider.tools.view_files_at_glob import execute_view_files_at_glob
-from aider.tools.view_files_matching import execute_view_files_matching
-from aider.tools.ls import execute_ls
-from aider.tools.view import execute_view
-from aider.tools.remove import _execute_remove
-from aider.tools.make_editable import _execute_make_editable
-from aider.tools.make_readonly import _execute_make_readonly
-from aider.tools.view_files_with_symbol import _execute_view_files_with_symbol
+from aider.mcp.server import LocalServer
+from aider.repo import ANY_GIT_ERROR
+
+# Import command-execution tools (non-interactive and interactive variants)
from aider.tools.command import _execute_command
from aider.tools.command_interactive import _execute_command_interactive
-from aider.tools.replace_text import _execute_replace_text
-from aider.tools.replace_all import _execute_replace_all
-from aider.tools.insert_block import _execute_insert_block
from aider.tools.delete_block import _execute_delete_block
-from aider.tools.replace_line import _execute_replace_line
-from aider.tools.replace_lines import _execute_replace_lines
-from aider.tools.indent_lines import _execute_indent_lines
from aider.tools.delete_line import _execute_delete_line
from aider.tools.delete_lines import _execute_delete_lines
-from aider.tools.undo_change import _execute_undo_change
-from aider.tools.list_changes import _execute_list_changes
from aider.tools.extract_lines import _execute_extract_lines
-from aider.tools.show_numbered_context import execute_show_numbered_context
from aider.tools.grep import _execute_grep
-from aider.mcp.server import LocalServer
+from aider.tools.indent_lines import _execute_indent_lines
+from aider.tools.insert_block import _execute_insert_block
+from aider.tools.list_changes import _execute_list_changes
+from aider.tools.ls import execute_ls
+from aider.tools.make_editable import _execute_make_editable
+from aider.tools.make_readonly import _execute_make_readonly
+from aider.tools.remove import _execute_remove
+from aider.tools.replace_all import _execute_replace_all
+from aider.tools.replace_line import _execute_replace_line
+from aider.tools.replace_lines import _execute_replace_lines
+from aider.tools.replace_text import _execute_replace_text
+from aider.tools.show_numbered_context import execute_show_numbered_context
+from aider.tools.undo_change import _execute_undo_change
+from aider.tools.view import execute_view
+
+# Import file-viewing tool functions
+from aider.tools.view_files_at_glob import execute_view_files_at_glob
+from aider.tools.view_files_matching import execute_view_files_matching
+from aider.tools.view_files_with_symbol import _execute_view_files_with_symbol
+
+from .base_coder import ChatChunks, Coder
+from .editblock_coder import do_replace, find_original_update_blocks, find_similar_lines
+from .navigator_legacy_prompts import NavigatorLegacyPrompts
+from .navigator_prompts import NavigatorPrompts
class NavigatorCoder(Coder):
"""Mode where the LLM autonomously manages which files are in context."""
edit_format = "navigator"
-
+
# TODO: We'll turn on granular editing by default once those tools stabilize
use_granular_editing = False
-
+
def __init__(self, *args, **kwargs):
# Initialize appropriate prompt set before calling parent constructor
# This needs to happen before super().__init__ so the parent class has access to gpt_prompts
- self.gpt_prompts = NavigatorPrompts() if self.use_granular_editing else NavigatorLegacyPrompts()
+ self.gpt_prompts = (
+ NavigatorPrompts() if self.use_granular_editing else NavigatorLegacyPrompts()
+ )
# Dictionary to track recently removed files
self.recently_removed = {}
# Configuration parameters
- self.max_tool_calls = 100 # Maximum number of tool calls per response
+ self.max_tool_calls = 100 # Maximum number of tool calls per response
# Context management parameters
- self.large_file_token_threshold = 25000 # Files larger than this in tokens are considered large
- self.max_files_per_glob = 50 # Maximum number of files to add at once via glob/grep
+ self.large_file_token_threshold = (
+ 25000 # Files larger than this in tokens are considered large
+ )
+ self.max_files_per_glob = 50 # Maximum number of files to add at once via glob/grep
# Enable context management by default only in navigator mode
- self.context_management_enabled = True # Enabled by default for navigator mode
+ self.context_management_enabled = True # Enabled by default for navigator mode
# Initialize change tracker for granular editing
self.change_tracker = ChangeTracker()
@@ -101,16 +102,16 @@ def __init__(self, *args, **kwargs):
# Enable enhanced context blocks by default
self.use_enhanced_context = True
-
- # Initialize empty token tracking dictionary and cache structures
+
+ # Initialize empty token tracking dictionary and cache structures
# but don't populate yet to avoid startup delay
self.context_block_tokens = {}
self.context_blocks_cache = {}
self.tokens_calculated = False
-
+
super().__init__(*args, **kwargs)
self.initialize_local_tools()
-
+
def initialize_local_tools(self):
if not self.use_granular_editing:
return
@@ -145,7 +146,10 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "pattern": {"type": "string", "description": "The glob pattern to match files."},
+ "pattern": {
+ "type": "string",
+ "description": "The glob pattern to match files.",
+ },
},
"required": ["pattern"],
},
@@ -159,9 +163,23 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "pattern": {"type": "string", "description": "The pattern to search for in file contents."},
- "file_pattern": {"type": "string", "description": "An optional glob pattern to filter which files are searched."},
- "regex": {"type": "boolean", "description": "Whether the pattern is a regular expression. Defaults to False."},
+ "pattern": {
+ "type": "string",
+ "description": "The pattern to search for in file contents.",
+ },
+ "file_pattern": {
+ "type": "string",
+ "description": (
+ "An optional glob pattern to filter which files are searched."
+ ),
+ },
+ "regex": {
+ "type": "boolean",
+ "description": (
+ "Whether the pattern is a regular expression. Defaults to"
+ " False."
+ ),
+ },
},
"required": ["pattern"],
},
@@ -175,7 +193,10 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "directory": {"type": "string", "description": "The directory to list."},
+ "directory": {
+ "type": "string",
+ "description": "The directory to list.",
+ },
},
"required": ["directory"],
},
@@ -189,7 +210,10 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "file_path": {"type": "string", "description": "The path to the file to view."},
+ "file_path": {
+ "type": "string",
+ "description": "The path to the file to view.",
+ },
},
"required": ["file_path"],
},
@@ -203,7 +227,10 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "file_path": {"type": "string", "description": "The path to the file to remove."},
+ "file_path": {
+ "type": "string",
+ "description": "The path to the file to remove.",
+ },
},
"required": ["file_path"],
},
@@ -217,7 +244,10 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "file_path": {"type": "string", "description": "The path to the file to make editable."},
+ "file_path": {
+ "type": "string",
+ "description": "The path to the file to make editable.",
+ },
},
"required": ["file_path"],
},
@@ -231,7 +261,10 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "file_path": {"type": "string", "description": "The path to the file to make read-only."},
+ "file_path": {
+ "type": "string",
+ "description": "The path to the file to make read-only.",
+ },
},
"required": ["file_path"],
},
@@ -241,11 +274,16 @@ def get_local_tool_schemas(self):
"type": "function",
"function": {
"name": "ViewFilesWithSymbol",
- "description": "View files that contain a specific symbol (e.g., class, function).",
+ "description": (
+ "View files that contain a specific symbol (e.g., class, function)."
+ ),
"parameters": {
"type": "object",
"properties": {
- "symbol": {"type": "string", "description": "The symbol to search for."},
+ "symbol": {
+ "type": "string",
+ "description": "The symbol to search for.",
+ },
},
"required": ["symbol"],
},
@@ -259,7 +297,10 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "command_string": {"type": "string", "description": "The shell command to execute."},
+ "command_string": {
+ "type": "string",
+ "description": "The shell command to execute.",
+ },
},
"required": ["command_string"],
},
@@ -273,7 +314,10 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "command_string": {"type": "string", "description": "The interactive shell command to execute."},
+ "command_string": {
+ "type": "string",
+ "description": "The interactive shell command to execute.",
+ },
},
"required": ["command_string"],
},
@@ -287,13 +331,41 @@ def get_local_tool_schemas(self):
"parameters": {
"type": "object",
"properties": {
- "pattern": {"type": "string", "description": "The pattern to search for."},
- "file_pattern": {"type": "string", "description": "Glob pattern for files to search. Defaults to '*'."},
- "directory": {"type": "string", "description": "Directory to search in. Defaults to '.'."},
- "use_regex": {"type": "boolean", "description": "Whether to use regex. Defaults to False."},
- "case_insensitive": {"type": "boolean", "description": "Whether to perform a case-insensitive search. Defaults to False."},
- "context_before": {"type": "integer", "description": "Number of lines to show before a match. Defaults to 5."},
- "context_after": {"type": "integer", "description": "Number of lines to show after a match. Defaults to 5."},
+ "pattern": {
+ "type": "string",
+ "description": "The pattern to search for.",
+ },
+ "file_pattern": {
+ "type": "string",
+ "description": "Glob pattern for files to search. Defaults to '*'.",
+ },
+ "directory": {
+ "type": "string",
+ "description": "Directory to search in. Defaults to '.'.",
+ },
+ "use_regex": {
+ "type": "boolean",
+ "description": "Whether to use regex. Defaults to False.",
+ },
+ "case_insensitive": {
+ "type": "boolean",
+ "description": (
+ "Whether to perform a case-insensitive search. Defaults to"
+ " False."
+ ),
+ },
+ "context_before": {
+ "type": "integer",
+ "description": (
+ "Number of lines to show before a match. Defaults to 5."
+ ),
+ },
+ "context_after": {
+ "type": "integer",
+ "description": (
+ "Number of lines to show after a match. Defaults to 5."
+ ),
+ },
},
"required": ["pattern"],
},
@@ -507,7 +579,9 @@ def get_local_tool_schemas(self):
"type": "function",
"function": {
"name": "ExtractLines",
- "description": "Extract lines from a source file and append them to a target file.",
+ "description": (
+ "Extract lines from a source file and append them to a target file."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -528,7 +602,9 @@ def get_local_tool_schemas(self):
"type": "function",
"function": {
"name": "ShowNumberedContext",
- "description": "Show numbered lines of context around a pattern or line number.",
+ "description": (
+ "Show numbered lines of context around a pattern or line number."
+ ),
"parameters": {
"type": "object",
"properties": {
@@ -643,7 +719,7 @@ async def _execute_local_tool_calls(self, tool_calls_list):
}
)
return tool_responses
-
+
def _execute_mcp_tool(self, server, tool_name, params):
"""Helper to execute a single MCP tool call, created from legacy format."""
@@ -701,52 +777,59 @@ def _calculate_context_block_tokens(self, force=False):
Calculate token counts for all enhanced context blocks.
This is the central method for calculating token counts,
ensuring they're consistent across all parts of the code.
-
+
This method populates the cache for context blocks and calculates tokens.
-
+
Args:
force: If True, recalculate tokens even if already calculated
"""
# Skip if already calculated and not forced
- if hasattr(self, 'tokens_calculated') and self.tokens_calculated and not force:
+ if hasattr(self, "tokens_calculated") and self.tokens_calculated and not force:
return
-
+
# Clear existing token counts
self.context_block_tokens = {}
-
+
# Initialize the cache for context blocks if needed
- if not hasattr(self, 'context_blocks_cache'):
+ if not hasattr(self, "context_blocks_cache"):
self.context_blocks_cache = {}
-
+
if not self.use_enhanced_context:
return
-
+
try:
# First, clear the cache to force regeneration of all blocks
self.context_blocks_cache = {}
-
+
# Generate all context blocks and calculate token counts
- block_types = ["environment_info", "directory_structure", "git_status", "symbol_outline"]
-
+ block_types = [
+ "environment_info",
+ "directory_structure",
+ "git_status",
+ "symbol_outline",
+ ]
+
for block_type in block_types:
block_content = self._generate_context_block(block_type)
if block_content:
- self.context_block_tokens[block_type] = self.main_model.token_count(block_content)
-
+ self.context_block_tokens[block_type] = self.main_model.token_count(
+ block_content
+ )
+
# Mark as calculated
self.tokens_calculated = True
- except Exception as e:
+ except Exception:
# Silently handle errors during calculation
# This prevents errors in token counting from breaking the main functionality
pass
-
+
def _generate_context_block(self, block_name):
"""
Generate a specific context block and cache it.
This is a helper method for get_cached_context_block.
"""
content = None
-
+
if block_name == "environment_info":
content = self.get_environment_info()
elif block_name == "directory_structure":
@@ -757,35 +840,35 @@ def _generate_context_block(self, block_name):
content = self.get_context_symbol_outline()
elif block_name == "context_summary":
content = self.get_context_summary()
-
+
# Cache the result if it's not None
if content is not None:
self.context_blocks_cache[block_name] = content
-
+
return content
-
+
def get_cached_context_block(self, block_name):
"""
Get a context block from the cache, or generate it if not available.
This should be used by format_chat_chunks to avoid regenerating blocks.
-
+
This will ensure tokens are calculated if they haven't been yet.
"""
# Make sure tokens have been calculated at least once
- if not hasattr(self, 'tokens_calculated') or not self.tokens_calculated:
+ if not hasattr(self, "tokens_calculated") or not self.tokens_calculated:
self._calculate_context_block_tokens()
-
+
# Return from cache if available
- if hasattr(self, 'context_blocks_cache') and block_name in self.context_blocks_cache:
+ if hasattr(self, "context_blocks_cache") and block_name in self.context_blocks_cache:
return self.context_blocks_cache[block_name]
-
+
# Otherwise generate and cache the block
return self._generate_context_block(block_name)
-
+
def set_granular_editing(self, enabled):
"""
Switch between granular editing tools and legacy search/replace.
-
+
Args:
enabled (bool): True to use granular editing tools, False to use legacy search/replace
"""
@@ -801,9 +884,12 @@ def get_context_symbol_outline(self):
return None
try:
- result = "\n"
+ result = '\n'
result += "## Symbol Outline (Current Context)\n\n"
- result += "Code definitions (classes, functions, methods, etc.) found in files currently in chat context.\n\n"
+ result += (
+ "Code definitions (classes, functions, methods, etc.) found in files currently in"
+ " chat context.\n\n"
+ )
files_to_outline = list(self.abs_fnames) + list(self.abs_read_only_fnames)
if not files_to_outline:
@@ -816,8 +902,8 @@ def get_context_symbol_outline(self):
# Use repo_map which should be initialized in BaseCoder
if not self.repo_map:
- self.io.tool_warning("RepoMap not initialized, cannot generate symbol outline.")
- return None # Or return a message indicating repo map is unavailable
+ self.io.tool_warning("RepoMap not initialized, cannot generate symbol outline.")
+ return None # Or return a message indicating repo map is unavailable
for abs_fname in sorted(files_to_outline):
rel_fname = self.get_rel_fname(abs_fname)
@@ -831,7 +917,7 @@ def get_context_symbol_outline(self):
self.io.tool_warning(f"Could not get symbols for {rel_fname}: {e}")
if not has_symbols:
- result += "No symbols found in the current context files.\n"
+ result += "No symbols found in the current context files.\n"
else:
for rel_fname in sorted(all_tags_by_file.keys()):
tags = sorted(all_tags_by_file[rel_fname], key=lambda t: (t.line, t.name))
@@ -841,7 +927,10 @@ def get_context_symbol_outline(self):
# Use specific_kind first if available, otherwise fall back to kind
kind_to_check = tag.specific_kind or tag.kind
# Check if the kind represents a definition using the set from RepoMap
- if kind_to_check and kind_to_check.lower() in self.repo_map.definition_kinds:
+ if (
+ kind_to_check
+ and kind_to_check.lower() in self.repo_map.definition_kinds
+ ):
definition_tags.append(tag)
if definition_tags:
@@ -851,12 +940,12 @@ def get_context_symbol_outline(self):
# Display line number if available
line_info = f", line {tag.line + 1}" if tag.line >= 0 else ""
# Display the specific kind (which we checked)
- kind_to_check = tag.specific_kind or tag.kind # Recalculate for safety
+ kind_to_check = tag.specific_kind or tag.kind # Recalculate for safety
result += f"- {tag.name} ({kind_to_check}{line_info})\n"
- result += "\n" # Add space between files
+ result += "\n" # Add space between files
result += ""
- return result.strip() # Remove trailing newline if any
+ return result.strip() # Remove trailing newline if any
except Exception as e:
self.io.tool_error(f"Error generating symbol outline: {str(e)}")
@@ -869,34 +958,34 @@ def format_chat_chunks(self):
"""
Override parent's format_chat_chunks to include enhanced context blocks with a
cleaner, more hierarchical structure for better organization.
-
+
Optimized for prompt caching by placing context blocks strategically:
1. Relatively static blocks (directory structure, environment info) before done_messages
2. Dynamic blocks (context summary, symbol outline, git status) after chat_files
-
+
This approach preserves prefix caching while providing fresh context information.
"""
# First get the normal chat chunks from the parent method without calling super
# We'll manually build the chunks to control placement of context blocks
chunks = self.format_chat_chunks_base()
-
+
# If enhanced context blocks are not enabled, just return the base chunks
if not self.use_enhanced_context:
return chunks
-
+
# Make sure token counts are updated - using centralized method
# This also populates the context block cache
self._calculate_context_block_tokens()
-
+
# Get blocks from cache to avoid regenerating them
env_context = self.get_cached_context_block("environment_info")
dir_structure = self.get_cached_context_block("directory_structure")
git_status = self.get_cached_context_block("git_status")
symbol_outline = self.get_cached_context_block("symbol_outline")
-
+
# Context summary needs special handling because it depends on other blocks
context_summary = self.get_context_summary()
-
+
# 1. Add relatively static blocks BEFORE done_messages
# These blocks change less frequently and can be part of the cacheable prefix
static_blocks = []
@@ -904,12 +993,12 @@ def format_chat_chunks(self):
static_blocks.append(dir_structure)
if env_context:
static_blocks.append(env_context)
-
+
if static_blocks:
static_message = "\n\n".join(static_blocks)
# Insert as a system message right before done_messages
chunks.done.insert(0, dict(role="system", content=static_message))
-
+
# 2. Add dynamic blocks AFTER chat_files
# These blocks change with the current files in context
dynamic_blocks = []
@@ -919,14 +1008,14 @@ def format_chat_chunks(self):
dynamic_blocks.append(symbol_outline)
if git_status:
dynamic_blocks.append(git_status)
-
+
if dynamic_blocks:
dynamic_message = "\n\n".join(dynamic_blocks)
# Append as a system message after chat_files
chunks.chat_files.append(dict(role="system", content=dynamic_message))
-
+
return chunks
-
+
def format_chat_chunks_base(self):
"""
Create base chat chunks without enhanced context blocks.
@@ -1051,20 +1140,19 @@ def get_context_summary(self):
"""
if not self.use_enhanced_context:
return None
-
+
# If context_summary is already in the cache, return it
- if hasattr(self, 'context_blocks_cache') and "context_summary" in self.context_blocks_cache:
+ if hasattr(self, "context_blocks_cache") and "context_summary" in self.context_blocks_cache:
return self.context_blocks_cache["context_summary"]
-
+
try:
# Make sure token counts are updated before generating the summary
- if not hasattr(self, 'context_block_tokens') or not self.context_block_tokens:
+ if not hasattr(self, "context_block_tokens") or not self.context_block_tokens:
self._calculate_context_block_tokens()
-
- result = "\n"
+
+ result = '\n'
result += "## Current Context Overview\n\n"
max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
- max_output_tokens = self.main_model.info.get("max_output_tokens") or 0
if max_input_tokens:
result += f"Model context limit: {max_input_tokens:,} tokens\n\n"
@@ -1084,11 +1172,20 @@ def get_context_summary(self):
tokens = self.main_model.token_count(content)
total_file_tokens += tokens
editable_tokens += tokens
- size_indicator = "🔴 Large" if tokens > 5000 else ("🟡 Medium" if tokens > 1000 else "🟢 Small")
- editable_files.append(f"- {rel_fname}: {tokens:,} tokens ({size_indicator})")
+ size_indicator = (
+ "🔴 Large"
+ if tokens > 5000
+ else ("🟡 Medium" if tokens > 1000 else "🟢 Small")
+ )
+ editable_files.append(
+ f"- {rel_fname}: {tokens:,} tokens ({size_indicator})"
+ )
if editable_files:
result += "\n".join(editable_files) + "\n\n"
- result += f"**Total editable: {len(editable_files)} files, {editable_tokens:,} tokens**\n\n"
+ result += (
+ f"**Total editable: {len(editable_files)} files,"
+ f" {editable_tokens:,} tokens**\n\n"
+ )
else:
result += "No editable files in context\n\n"
@@ -1102,11 +1199,20 @@ def get_context_summary(self):
tokens = self.main_model.token_count(content)
total_file_tokens += tokens
readonly_tokens += tokens
- size_indicator = "🔴 Large" if tokens > 5000 else ("🟡 Medium" if tokens > 1000 else "🟢 Small")
- readonly_files.append(f"- {rel_fname}: {tokens:,} tokens ({size_indicator})")
+ size_indicator = (
+ "🔴 Large"
+ if tokens > 5000
+ else ("🟡 Medium" if tokens > 1000 else "🟢 Small")
+ )
+ readonly_files.append(
+ f"- {rel_fname}: {tokens:,} tokens ({size_indicator})"
+ )
if readonly_files:
result += "\n".join(readonly_files) + "\n\n"
- result += f"**Total read-only: {len(readonly_files)} files, {readonly_tokens:,} tokens**\n\n"
+ result += (
+ f"**Total read-only: {len(readonly_files)} files,"
+ f" {readonly_tokens:,} tokens**\n\n"
+ )
else:
result += "No read-only files in context\n\n"
@@ -1122,20 +1228,20 @@ def get_context_summary(self):
result += f" ({percentage:.1f}% of limit)"
if percentage > 80:
result += "\n\n⚠️ **Context is getting full!** Remove non-essential files via:\n"
- result += "- `[tool_call(Remove, file_path=\"path/to/large_file.ext\")]`\n"
+ result += '- `[tool_call(Remove, file_path="path/to/large_file.ext")]`\n'
result += "- Keep only essential files in context for best performance"
result += "\n"
-
+
# Cache the result
- if not hasattr(self, 'context_blocks_cache'):
+ if not hasattr(self, "context_blocks_cache"):
self.context_blocks_cache = {}
self.context_blocks_cache["context_summary"] = result
-
+
return result
except Exception as e:
self.io.tool_error(f"Error generating context summary: {str(e)}")
return None
-
+
def get_environment_info(self):
"""
Generate an environment information context block with key system details.
@@ -1143,24 +1249,24 @@ def get_environment_info(self):
"""
if not self.use_enhanced_context:
return None
-
+
try:
# Get current date in ISO format
current_date = datetime.now().strftime("%Y-%m-%d")
-
+
# Get platform information
platform_info = platform.platform()
-
+
# Get language preference
language = self.chat_language or locale.getlocale()[0] or "en-US"
-
- result = "\n"
+
+ result = '\n'
result += "## Environment Information\n\n"
result += f"- Working directory: {self.root}\n"
result += f"- Current date: {current_date}\n"
result += f"- Platform: {platform_info}\n"
result += f"- Language preference: {language}\n"
-
+
# Add git repo information if available
if self.repo:
try:
@@ -1171,7 +1277,7 @@ def get_environment_info(self):
result += "- Git repository: active but details unavailable\n"
else:
result += "- Git repository: none\n"
-
+
# Add enabled features information
features = []
if self.context_management_enabled:
@@ -1180,21 +1286,21 @@ def get_environment_info(self):
features.append("enhanced context blocks")
if features:
result += f"- Enabled features: {', '.join(features)}\n"
-
+
result += ""
return result
except Exception as e:
self.io.tool_error(f"Error generating environment info: {str(e)}")
return None
-
+
def reply_completed(self):
"""Process the completed response from the LLM.
-
+
This is a key method that:
1. Processes any tool commands in the response (only after a '---' line)
2. Processes any SEARCH/REPLACE blocks in the response (only before the '---' line if one exists)
3. If tool commands were found, sets up for another automatic round
-
+
This enables the "auto-exploration" workflow where the LLM can
iteratively discover and analyze relevant files before providing
a final answer to the user's question.
@@ -1206,7 +1312,7 @@ def reply_completed(self):
content = self.partial_response_content
if not content or not content.strip():
return True
-
+
# Check for search/replace blocks
has_search = "<<<<<<< SEARCH" in content
has_divider = "=======" in content
@@ -1215,11 +1321,11 @@ def reply_completed(self):
self.io.tool_output("Detected edit blocks, applying changes...")
edited_files = self._apply_edits_from_response()
if self.reflected_message:
- return False # Trigger reflection if edits failed
-
+ return False # Trigger reflection if edits failed
+
# If edits were successful, we might want to reflect.
# For now, let's consider the turn complete.
-
+
# Since tool calls are handled earlier, we finalize the turn.
self.tool_call_count = 0
self.files_added_in_exploration = set()
@@ -1230,11 +1336,13 @@ def reply_completed(self):
content = self.partial_response_content
if not content or not content.strip():
return True
- original_content = content # Keep the original response
+ original_content = content # Keep the original response
# Process tool commands: returns content with tool calls removed, results, flag if any tool calls were found,
# and the content before the last '---' line
- processed_content, result_messages, tool_calls_found, content_before_last_separator = self._process_tool_commands(content)
+ processed_content, result_messages, tool_calls_found, content_before_last_separator = (
+ self._process_tool_commands(content)
+ )
# Since we are no longer suppressing, the partial_response_content IS the final content.
# We might want to update it to the processed_content (without tool calls) if we don't
@@ -1267,7 +1375,7 @@ def reply_completed(self):
# return False to trigger a reflection loop.
if self.reflected_message:
return False
-
+
# If edits were successfully applied and we haven't exceeded reflection limits,
# set up for another iteration (similar to tool calls)
if edited_files and self.num_reflections < self.max_reflections:
@@ -1279,8 +1387,10 @@ def reply_completed(self):
break
else:
# Default if no user message found
- original_question = "Please continue your exploration and provide a final answer."
-
+ original_question = (
+ "Please continue your exploration and provide a final answer."
+ )
+
# Construct the message for the next turn
next_prompt = (
"I have applied the edits you suggested. "
@@ -1288,7 +1398,7 @@ def reply_completed(self):
"Let me continue working on your request.\n\n"
f"Your original question was: {original_question}"
)
-
+
self.reflected_message = next_prompt
self.io.tool_output("Continuing after applying edits...")
return False # Indicate that we need another iteration
@@ -1300,7 +1410,7 @@ def reply_completed(self):
self.tool_call_count = 0
# Clear exploration files for the next round
self.files_added_in_exploration = set()
-
+
# Get the original user question from the most recent user message
if self.cur_messages and len(self.cur_messages) >= 1:
for msg in reversed(self.cur_messages):
@@ -1309,8 +1419,10 @@ def reply_completed(self):
break
else:
# Default if no user message found
- original_question = "Please continue your exploration and provide a final answer."
-
+ original_question = (
+ "Please continue your exploration and provide a final answer."
+ )
+
# Construct the message for the next turn, including tool results
next_prompt_parts = []
next_prompt_parts.append(
@@ -1322,9 +1434,15 @@ def reply_completed(self):
next_prompt_parts.append("\nResults from previous tool calls:")
# result_messages already have [Result (...): ...] format
next_prompt_parts.extend(result_messages)
- next_prompt_parts.append("\nBased on these results and the updated file context, I will proceed.")
+ next_prompt_parts.append(
+ "\nBased on these results and the updated file context, I will proceed."
+ )
else:
- next_prompt_parts.append("\nNo specific results were returned from the previous tool calls, but the file context may have been updated. I will proceed based on the current context.")
+ next_prompt_parts.append(
+ "\nNo specific results were returned from the previous tool calls, but the"
+ " file context may have been updated. I will proceed based on the current"
+ " context."
+ )
next_prompt_parts.append(f"\nYour original question was: {original_question}")
@@ -1336,32 +1454,34 @@ def reply_completed(self):
# Exploration finished for this turn.
# Append results to the content that will be stored in history.
if result_messages:
- results_block = "\n\n" + "\n".join(result_messages)
- # Append results to the cleaned content
- self.partial_response_content += results_block
+ results_block = "\n\n" + "\n".join(result_messages)
+ # Append results to the cleaned content
+ self.partial_response_content += results_block
# After applying edits OR determining no edits were needed (and no reflection needed),
# the turn is complete. Reset counters and finalize history.
self.tool_call_count = 0
self.files_added_in_exploration = set()
# Move cur_messages to done_messages
- self.move_back_cur_messages(None) # Pass None as we handled commit message earlier if needed
- return True # Indicate exploration is finished for this round
+ self.move_back_cur_messages(
+ None
+ ) # Pass None as we handled commit message earlier if needed
+ return True # Indicate exploration is finished for this round
def _process_tool_commands(self, content):
"""
Process tool commands in the `[tool_call(name, param=value)]` format within the content.
-
+
Rules:
1. Tool calls must appear after the LAST '---' line separator in the content
2. Any tool calls before this last separator are treated as text (not executed)
3. SEARCH/REPLACE blocks can only appear before this last separator
-
+
Returns processed content, result messages, and a flag indicating if any tool calls were found.
Also returns the content before the last separator for SEARCH/REPLACE block validation.
"""
result_messages = []
- modified_content = content # Start with original content
+ modified_content = content # Start with original content
tool_calls_found = False
call_count = 0
max_calls = self.max_tool_calls
@@ -1369,24 +1489,24 @@ def _process_tool_commands(self, content):
# Check if there's a '---' separator and only process tool calls after the LAST one
separator_marker = "---"
content_parts = content.split(separator_marker)
-
+
# If there's no separator, treat the entire content as before the separator
if len(content_parts) == 1:
# Return the original content with no tool calls processed, and the content itself as before_separator
return content, result_messages, False, content
-
+
# Take everything before the last separator (including intermediate separators)
content_before_separator = separator_marker.join(content_parts[:-1])
# Take only what comes after the last separator
content_after_separator = content_parts[-1]
-
+
# Find tool calls using a more robust method, but only in the content after separator
processed_content = content_before_separator + separator_marker
last_index = 0
-
+
# Support any [tool_...(...)] format
tool_call_pattern = re.compile(r"\[tool_.*?\(", re.DOTALL)
- end_marker = "]" # The parenthesis balancing finds the ')', we just need the final ']'
+ end_marker = "]" # The parenthesis balancing finds the ')', we just need the final ']'
while True:
match = tool_call_pattern.search(content_after_separator, last_index)
@@ -1401,14 +1521,16 @@ def _process_tool_commands(self, content):
# Count preceding backslashes to handle \\
backslashes = 0
p = start_pos - 1
- while p >= 0 and content_after_separator[p] == '\\':
+ while p >= 0 and content_after_separator[p] == "\\":
backslashes += 1
p -= 1
if backslashes % 2 == 1:
# Odd number of backslashes means it's escaped. Treat as text.
# We append up to the end of the marker and continue searching.
- processed_content += content_after_separator[last_index : start_pos + len(start_marker)]
+ processed_content += content_after_separator[
+ last_index : start_pos + len(start_marker)
+ ]
last_index = start_pos + len(start_marker)
continue
@@ -1428,15 +1550,15 @@ def _process_tool_commands(self, content):
if escaped:
escaped = False
- elif char == '\\':
+ elif char == "\\":
escaped = True
elif char == "'" and not in_double_quotes:
in_single_quotes = not in_single_quotes
elif char == '"' and not in_single_quotes:
in_double_quotes = not in_double_quotes
- elif char == '(' and not in_single_quotes and not in_double_quotes:
+ elif char == "(" and not in_single_quotes and not in_double_quotes:
paren_level += 1
- elif char == ')' and not in_single_quotes and not in_double_quotes:
+ elif char == ")" and not in_single_quotes and not in_double_quotes:
paren_level -= 1
if paren_level == 0:
end_paren_pos = i
@@ -1446,54 +1568,62 @@ def _process_tool_commands(self, content):
expected_end_marker_start = end_paren_pos + 1
actual_end_marker_start = -1
end_marker_found = False
- if end_paren_pos != -1: # Only search if we found a closing parenthesis
+ if end_paren_pos != -1: # Only search if we found a closing parenthesis
for j in range(expected_end_marker_start, len(content_after_separator)):
if not content_after_separator[j].isspace():
actual_end_marker_start = j
# Check if the found character is the end marker ']'
if content_after_separator[actual_end_marker_start] == end_marker:
end_marker_found = True
- break # Stop searching after first non-whitespace char
+ break # Stop searching after first non-whitespace char
if not end_marker_found:
# Try to extract the tool name for better error message
tool_name = "unknown"
try:
# Look for the first comma after the tool call start
- partial_content = content_after_separator[scan_start_pos:scan_start_pos+100] # Limit to avoid huge strings
- comma_pos = partial_content.find(',')
+ partial_content = content_after_separator[
+ scan_start_pos : scan_start_pos + 100
+ ] # Limit to avoid huge strings
+ comma_pos = partial_content.find(",")
if comma_pos > 0:
tool_name = partial_content[:comma_pos].strip()
else:
# If no comma, look for opening parenthesis or first whitespace
- space_pos = partial_content.find(' ')
- paren_pos = partial_content.find('(')
+ space_pos = partial_content.find(" ")
+ paren_pos = partial_content.find("(")
if space_pos > 0 and (paren_pos < 0 or space_pos < paren_pos):
tool_name = partial_content[:space_pos].strip()
elif paren_pos > 0:
tool_name = partial_content[:paren_pos].strip()
- except:
+ except Exception:
pass # Silently fail if we can't extract the name
-
+
# Malformed call: couldn't find matching ')' or the subsequent ']'
- self.io.tool_warning(f"Malformed tool call for '{tool_name}'. Missing closing parenthesis or bracket. Skipping.")
+ self.io.tool_warning(
+ f"Malformed tool call for '{tool_name}'. Missing closing parenthesis or"
+ " bracket. Skipping."
+ )
# Append the start marker itself to processed content so it's not lost
processed_content += start_marker
- last_index = scan_start_pos # Continue searching after the marker
+ last_index = scan_start_pos # Continue searching after the marker
continue
# Found a potential tool call
# Adjust full_match_str and last_index based on the actual end marker ']' position
- full_match_str = content_after_separator[start_pos : actual_end_marker_start + 1] # End marker ']' is 1 char
+ full_match_str = content_after_separator[
+ start_pos : actual_end_marker_start + 1
+ ] # End marker ']' is 1 char
inner_content = content_after_separator[scan_start_pos:end_paren_pos].strip()
- last_index = actual_end_marker_start + 1 # Move past the processed call (including ']')
-
+ last_index = actual_end_marker_start + 1 # Move past the processed call (including ']')
call_count += 1
if call_count > max_calls:
- self.io.tool_warning(f"Exceeded maximum tool calls ({max_calls}). Skipping remaining calls.")
+ self.io.tool_warning(
+ f"Exceeded maximum tool calls ({max_calls}). Skipping remaining calls."
+ )
# Don't append the skipped call to processed_content
- continue # Skip processing this call
+ continue # Skip processing this call
tool_calls_found = True
tool_name = None
@@ -1510,9 +1640,9 @@ def _process_tool_commands(self, content):
parts = inner_content.split(",", 1)
potential_tool_name = parts[0].strip()
- is_string = (potential_tool_name.startswith("'") and potential_tool_name.endswith(
- "'"
- )) or (potential_tool_name.startswith('"') and potential_tool_name.endswith('"'))
+ is_string = (
+ potential_tool_name.startswith("'") and potential_tool_name.endswith("'")
+ ) or (potential_tool_name.startswith('"') and potential_tool_name.endswith('"'))
if not potential_tool_name.isidentifier() and not is_string:
# It's not a valid identifier and not a string, so quote it.
@@ -1529,20 +1659,26 @@ def _process_tool_commands(self, content):
parsed_ast = ast.parse(parse_str)
# Validate AST structure
- if not isinstance(parsed_ast, ast.Module) or not parsed_ast.body or not isinstance(parsed_ast.body[0], ast.Expr):
+ if (
+ not isinstance(parsed_ast, ast.Module)
+ or not parsed_ast.body
+ or not isinstance(parsed_ast.body[0], ast.Expr)
+ ):
raise ValueError("Unexpected AST structure")
call_node = parsed_ast.body[0].value
if not isinstance(call_node, ast.Call):
- raise ValueError("Expected a Call node")
+ raise ValueError("Expected a Call node")
# Extract tool name (should be the first positional argument)
if not call_node.args:
raise ValueError("Tool name not found or invalid")
-
+
tool_name_node = call_node.args[0]
if isinstance(tool_name_node, ast.Name):
tool_name = tool_name_node.id
- elif isinstance(tool_name_node, ast.Constant) and isinstance(tool_name_node.value, str):
+ elif isinstance(tool_name_node, ast.Constant) and isinstance(
+ tool_name_node.value, str
+ ):
tool_name = tool_name_node.value
else:
raise ValueError("Tool name must be an identifier or a string literal")
@@ -1555,26 +1691,32 @@ def _process_tool_commands(self, content):
if isinstance(value_node, ast.Constant):
value = value_node.value
# Check if this is a multiline string and trim whitespace
- if isinstance(value, str) and '\n' in value:
+ if isinstance(value, str) and "\n" in value:
# Get the source line(s) for this node to check if it's a triple-quoted string
- lineno = value_node.lineno if hasattr(value_node, 'lineno') else 0
- end_lineno = value_node.end_lineno if hasattr(value_node, 'end_lineno') else lineno
+ lineno = value_node.lineno if hasattr(value_node, "lineno") else 0
+ end_lineno = (
+ value_node.end_lineno
+ if hasattr(value_node, "end_lineno")
+ else lineno
+ )
if end_lineno > lineno: # It's a multiline string
# Trim exactly one leading and one trailing newline if present
- if value.startswith('\n'):
+ if value.startswith("\n"):
value = value[1:]
- if value.endswith('\n'):
+ if value.endswith("\n"):
value = value[:-1]
- elif isinstance(value_node, ast.Name): # Handle unquoted values like True/False/None or variables
+ elif isinstance(
+ value_node, ast.Name
+ ): # Handle unquoted values like True/False/None or variables
id_val = value_node.id.lower()
- if id_val == 'true':
+ if id_val == "true":
value = True
- elif id_val == 'false':
+ elif id_val == "false":
value = False
- elif id_val == 'none':
+ elif id_val == "none":
value = None
else:
- value = value_node.id # Keep as string if it's something else
+ value = value_node.id # Keep as string if it's something else
# Add more types if needed (e.g., ast.List, ast.Dict)
else:
# Attempt to reconstruct the source for complex types, or raise error
@@ -1582,30 +1724,38 @@ def _process_tool_commands(self, content):
# Note: ast.unparse requires Python 3.9+
# If using older Python, might need a different approach or limit supported types
value = ast.unparse(value_node)
- except AttributeError: # Handle case where ast.unparse is not available
- raise ValueError(f"Unsupported argument type for key '{key}': {type(value_node)}")
- except Exception as ue:
- raise ValueError(f"Could not unparse value for key '{key}': {ue}")
-
+ except AttributeError: # Handle case where ast.unparse is not available
+ raise ValueError(
+ f"Unsupported argument type for key '{key}': {type(value_node)}"
+ )
+ except Exception as unparse_e:
+ raise ValueError(
+ f"Could not unparse value for key '{key}': {unparse_e}"
+ )
# Check for suppressed values (e.g., "...")
suppressed_arg_values = ["..."]
if isinstance(value, str) and value in suppressed_arg_values:
- self.io.tool_warning(f"Skipping suppressed argument value '{value}' for key '{key}' in tool '{tool_name}'")
+ self.io.tool_warning(
+ f"Skipping suppressed argument value '{value}' for key '{key}' in tool"
+ f" '{tool_name}'"
+ )
continue
params[key] = value
-
except (SyntaxError, ValueError) as e:
result_message = f"Error parsing tool call '{inner_content}': {e}"
self.io.tool_error(f"Failed to parse tool call: {full_match_str}\nError: {e}")
# Don't append the malformed call to processed_content
result_messages.append(f"[Result (Parse Error): {result_message}]")
- continue # Skip execution
- except Exception as e: # Catch any other unexpected parsing errors
+ continue # Skip execution
+ except Exception as e: # Catch any other unexpected parsing errors
result_message = f"Unexpected error parsing tool call '{inner_content}': {e}"
- self.io.tool_error(f"Unexpected error during parsing: {full_match_str}\nError: {e}\n{traceback.format_exc()}")
+ self.io.tool_error(
+ f"Unexpected error during parsing: {full_match_str}\nError:"
+ f" {e}\n{traceback.format_exc()}"
+ )
result_messages.append(f"[Result (Parse Error): {result_message}]")
continue
@@ -1614,53 +1764,55 @@ def _process_tool_commands(self, content):
# Normalize tool name for case-insensitive matching
norm_tool_name = tool_name.lower()
- if norm_tool_name == 'viewfilesatglob':
- pattern = params.get('pattern')
+ if norm_tool_name == "viewfilesatglob":
+ pattern = params.get("pattern")
if pattern is not None:
# Call the imported function
result_message = execute_view_files_at_glob(self, pattern)
else:
result_message = "Error: Missing 'pattern' parameter for ViewFilesAtGlob"
- elif norm_tool_name == 'viewfilesmatching':
- pattern = params.get('pattern')
- file_pattern = params.get('file_pattern') # Optional
- regex = params.get('regex', False) # Default to False if not provided
+ elif norm_tool_name == "viewfilesmatching":
+ pattern = params.get("pattern")
+ file_pattern = params.get("file_pattern") # Optional
+ regex = params.get("regex", False) # Default to False if not provided
if pattern is not None:
- result_message = execute_view_files_matching(self, pattern, file_pattern, regex)
+ result_message = execute_view_files_matching(
+ self, pattern, file_pattern, regex
+ )
else:
result_message = "Error: Missing 'pattern' parameter for ViewFilesMatching"
- elif norm_tool_name == 'ls':
- directory = params.get('directory')
+ elif norm_tool_name == "ls":
+ directory = params.get("directory")
if directory is not None:
result_message = execute_ls(self, directory)
else:
result_message = "Error: Missing 'directory' parameter for Ls"
- elif norm_tool_name == 'view':
- file_path = params.get('file_path')
+ elif norm_tool_name == "view":
+ file_path = params.get("file_path")
if file_path is not None:
result_message = execute_view(self, file_path)
else:
result_message = "Error: Missing 'file_path' parameter for View"
- elif norm_tool_name == 'remove':
- file_path = params.get('file_path')
+ elif norm_tool_name == "remove":
+ file_path = params.get("file_path")
if file_path is not None:
result_message = _execute_remove(self, file_path)
else:
result_message = "Error: Missing 'file_path' parameter for Remove"
- elif norm_tool_name == 'makeeditable':
- file_path = params.get('file_path')
+ elif norm_tool_name == "makeeditable":
+ file_path = params.get("file_path")
if file_path is not None:
result_message = _execute_make_editable(self, file_path)
else:
result_message = "Error: Missing 'file_path' parameter for MakeEditable"
- elif norm_tool_name == 'makereadonly':
- file_path = params.get('file_path')
+ elif norm_tool_name == "makereadonly":
+ file_path = params.get("file_path")
if file_path is not None:
result_message = _execute_make_readonly(self, file_path)
else:
result_message = "Error: Missing 'file_path' parameter for MakeReadonly"
- elif norm_tool_name == 'viewfileswithsymbol':
- symbol = params.get('symbol')
+ elif norm_tool_name == "viewfileswithsymbol":
+ symbol = params.get("symbol")
if symbol is not None:
# Call the imported function from the tools directory
result_message = _execute_view_files_with_symbol(self, symbol)
@@ -1668,228 +1820,331 @@ def _process_tool_commands(self, content):
result_message = "Error: Missing 'symbol' parameter for ViewFilesWithSymbol"
# Command tools
- elif norm_tool_name == 'command':
- command_string = params.get('command_string')
+ elif norm_tool_name == "command":
+ command_string = params.get("command_string")
if command_string is not None:
result_message = _execute_command(self, command_string)
else:
result_message = "Error: Missing 'command_string' parameter for Command"
- elif norm_tool_name == 'commandinteractive':
- command_string = params.get('command_string')
+ elif norm_tool_name == "commandinteractive":
+ command_string = params.get("command_string")
if command_string is not None:
result_message = _execute_command_interactive(self, command_string)
else:
- result_message = "Error: Missing 'command_string' parameter for CommandInteractive"
+ result_message = (
+ "Error: Missing 'command_string' parameter for CommandInteractive"
+ )
# Grep tool
- elif norm_tool_name == 'grep':
- pattern = params.get('pattern')
- file_pattern = params.get('file_pattern', '*') # Default to all files
- directory = params.get('directory', '.') # Default to current directory
- use_regex = params.get('use_regex', False) # Default to literal search
- case_insensitive = params.get('case_insensitive', False) # Default to case-sensitive
- context_before = params.get('context_before', 5)
- context_after = params.get('context_after', 5)
-
+ elif norm_tool_name == "grep":
+ pattern = params.get("pattern")
+ file_pattern = params.get("file_pattern", "*") # Default to all files
+ directory = params.get("directory", ".") # Default to current directory
+ use_regex = params.get("use_regex", False) # Default to literal search
+ case_insensitive = params.get(
+ "case_insensitive", False
+ ) # Default to case-sensitive
+ context_before = params.get("context_before", 5)
+ context_after = params.get("context_after", 5)
if pattern is not None:
# Import the function if not already imported (it should be)
from aider.tools.grep import _execute_grep
- result_message = _execute_grep(self, pattern, file_pattern, directory, use_regex, case_insensitive, context_before, context_after)
+
+ result_message = _execute_grep(
+ self,
+ pattern,
+ file_pattern,
+ directory,
+ use_regex,
+ case_insensitive,
+ context_before,
+ context_after,
+ )
else:
result_message = "Error: Missing required 'pattern' parameter for Grep"
# Granular editing tools
- elif norm_tool_name == 'replacetext':
- file_path = params.get('file_path')
- find_text = params.get('find_text')
- replace_text = params.get('replace_text')
- near_context = params.get('near_context')
- occurrence = params.get('occurrence', 1) # Default to first occurrence
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False) # Default to False
+ elif norm_tool_name == "replacetext":
+ file_path = params.get("file_path")
+ find_text = params.get("find_text")
+ replace_text = params.get("replace_text")
+ near_context = params.get("near_context")
+ occurrence = params.get("occurrence", 1) # Default to first occurrence
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False) # Default to False
if file_path is not None and find_text is not None and replace_text is not None:
result_message = _execute_replace_text(
- self, file_path, find_text, replace_text, near_context, occurrence, change_id, dry_run
+ self,
+ file_path,
+ find_text,
+ replace_text,
+ near_context,
+ occurrence,
+ change_id,
+ dry_run,
)
else:
- result_message = "Error: Missing required parameters for ReplaceText (file_path, find_text, replace_text)"
-
- elif norm_tool_name == 'replaceall':
- file_path = params.get('file_path')
- find_text = params.get('find_text')
- replace_text = params.get('replace_text')
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False) # Default to False
+ result_message = (
+ "Error: Missing required parameters for ReplaceText (file_path,"
+ " find_text, replace_text)"
+ )
+
+ elif norm_tool_name == "replaceall":
+ file_path = params.get("file_path")
+ find_text = params.get("find_text")
+ replace_text = params.get("replace_text")
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False) # Default to False
if file_path is not None and find_text is not None and replace_text is not None:
result_message = _execute_replace_all(
self, file_path, find_text, replace_text, change_id, dry_run
)
else:
- result_message = "Error: Missing required parameters for ReplaceAll (file_path, find_text, replace_text)"
-
- elif norm_tool_name == 'insertblock':
- file_path = params.get('file_path')
- content = params.get('content')
- after_pattern = params.get('after_pattern')
- before_pattern = params.get('before_pattern')
- occurrence = params.get('occurrence', 1) # Default 1
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False) # Default False
- position = params.get('position')
- auto_indent = params.get('auto_indent', True) # Default True
- use_regex = params.get('use_regex', False) # Default False
-
- if file_path is not None and content is not None and (after_pattern is not None or before_pattern is not None or position is not None):
+ result_message = (
+ "Error: Missing required parameters for ReplaceAll (file_path,"
+ " find_text, replace_text)"
+ )
+
+ elif norm_tool_name == "insertblock":
+ file_path = params.get("file_path")
+ content = params.get("content")
+ after_pattern = params.get("after_pattern")
+ before_pattern = params.get("before_pattern")
+ occurrence = params.get("occurrence", 1) # Default 1
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False) # Default False
+ position = params.get("position")
+ auto_indent = params.get("auto_indent", True) # Default True
+ use_regex = params.get("use_regex", False) # Default False
+
+ if (
+ file_path is not None
+ and content is not None
+ and (
+ after_pattern is not None
+ or before_pattern is not None
+ or position is not None
+ )
+ ):
result_message = _execute_insert_block(
- self, file_path, content, after_pattern, before_pattern, occurrence, change_id, dry_run, position, auto_indent, use_regex
+ self,
+ file_path,
+ content,
+ after_pattern,
+ before_pattern,
+ occurrence,
+ change_id,
+ dry_run,
+ position,
+ auto_indent,
+ use_regex,
)
else:
- result_message = "Error: Missing required parameters for InsertBlock (file_path, content, and either after_pattern or before_pattern)"
-
- elif norm_tool_name == 'deleteblock':
- file_path = params.get('file_path')
- start_pattern = params.get('start_pattern')
- end_pattern = params.get('end_pattern')
- line_count = params.get('line_count')
- near_context = params.get('near_context') # New
- occurrence = params.get('occurrence', 1) # New, default 1
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False) # New, default False
+ result_message = (
+ "Error: Missing required parameters for InsertBlock (file_path,"
+ " content, and either after_pattern or before_pattern)"
+ )
+
+ elif norm_tool_name == "deleteblock":
+ file_path = params.get("file_path")
+ start_pattern = params.get("start_pattern")
+ end_pattern = params.get("end_pattern")
+ line_count = params.get("line_count")
+ near_context = params.get("near_context") # New
+ occurrence = params.get("occurrence", 1) # New, default 1
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False) # New, default False
if file_path is not None and start_pattern is not None:
result_message = _execute_delete_block(
- self, file_path, start_pattern, end_pattern, line_count, near_context, occurrence, change_id, dry_run
+ self,
+ file_path,
+ start_pattern,
+ end_pattern,
+ line_count,
+ near_context,
+ occurrence,
+ change_id,
+ dry_run,
)
else:
- result_message = "Error: Missing required parameters for DeleteBlock (file_path, start_pattern)"
-
- elif norm_tool_name == 'replaceline':
- file_path = params.get('file_path')
- line_number = params.get('line_number')
- new_content = params.get('new_content')
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False) # New, default False
-
- if file_path is not None and line_number is not None and new_content is not None:
+ result_message = (
+ "Error: Missing required parameters for DeleteBlock (file_path,"
+ " start_pattern)"
+ )
+
+ elif norm_tool_name == "replaceline":
+ file_path = params.get("file_path")
+ line_number = params.get("line_number")
+ new_content = params.get("new_content")
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False) # New, default False
+
+ if (
+ file_path is not None
+ and line_number is not None
+ and new_content is not None
+ ):
result_message = _execute_replace_line(
self, file_path, line_number, new_content, change_id, dry_run
)
else:
- result_message = "Error: Missing required parameters for ReplaceLine (file_path, line_number, new_content)"
-
- elif norm_tool_name == 'replacelines':
- file_path = params.get('file_path')
- start_line = params.get('start_line')
- end_line = params.get('end_line')
- new_content = params.get('new_content')
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False) # New, default False
-
- if file_path is not None and start_line is not None and end_line is not None and new_content is not None:
+ result_message = (
+ "Error: Missing required parameters for ReplaceLine (file_path,"
+ " line_number, new_content)"
+ )
+
+ elif norm_tool_name == "replacelines":
+ file_path = params.get("file_path")
+ start_line = params.get("start_line")
+ end_line = params.get("end_line")
+ new_content = params.get("new_content")
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False) # New, default False
+
+ if (
+ file_path is not None
+ and start_line is not None
+ and end_line is not None
+ and new_content is not None
+ ):
result_message = _execute_replace_lines(
self, file_path, start_line, end_line, new_content, change_id, dry_run
)
else:
- result_message = "Error: Missing required parameters for ReplaceLines (file_path, start_line, end_line, new_content)"
-
- elif norm_tool_name == 'indentlines':
- file_path = params.get('file_path')
- start_pattern = params.get('start_pattern')
- end_pattern = params.get('end_pattern')
- line_count = params.get('line_count')
- indent_levels = params.get('indent_levels', 1) # Default to indent 1 level
- near_context = params.get('near_context') # New
- occurrence = params.get('occurrence', 1) # New, default 1
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False) # New, default False
+ result_message = (
+ "Error: Missing required parameters for ReplaceLines (file_path,"
+ " start_line, end_line, new_content)"
+ )
+
+ elif norm_tool_name == "indentlines":
+ file_path = params.get("file_path")
+ start_pattern = params.get("start_pattern")
+ end_pattern = params.get("end_pattern")
+ line_count = params.get("line_count")
+ indent_levels = params.get("indent_levels", 1) # Default to indent 1 level
+ near_context = params.get("near_context") # New
+ occurrence = params.get("occurrence", 1) # New, default 1
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False) # New, default False
if file_path is not None and start_pattern is not None:
result_message = _execute_indent_lines(
- self, file_path, start_pattern, end_pattern, line_count, indent_levels, near_context, occurrence, change_id, dry_run
+ self,
+ file_path,
+ start_pattern,
+ end_pattern,
+ line_count,
+ indent_levels,
+ near_context,
+ occurrence,
+ change_id,
+ dry_run,
)
else:
- result_message = "Error: Missing required parameters for IndentLines (file_path, start_pattern)"
+ result_message = (
+ "Error: Missing required parameters for IndentLines (file_path,"
+ " start_pattern)"
+ )
- elif norm_tool_name == 'deleteline':
- file_path = params.get('file_path')
- line_number = params.get('line_number')
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False)
+ elif norm_tool_name == "deleteline":
+ file_path = params.get("file_path")
+ line_number = params.get("line_number")
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False)
if file_path is not None and line_number is not None:
result_message = _execute_delete_line(
self, file_path, line_number, change_id, dry_run
)
else:
- result_message = "Error: Missing required parameters for DeleteLine (file_path, line_number)"
+ result_message = (
+ "Error: Missing required parameters for DeleteLine (file_path,"
+ " line_number)"
+ )
- elif norm_tool_name == 'deletelines':
- file_path = params.get('file_path')
- start_line = params.get('start_line')
- end_line = params.get('end_line')
- change_id = params.get('change_id')
- dry_run = params.get('dry_run', False)
+ elif norm_tool_name == "deletelines":
+ file_path = params.get("file_path")
+ start_line = params.get("start_line")
+ end_line = params.get("end_line")
+ change_id = params.get("change_id")
+ dry_run = params.get("dry_run", False)
if file_path is not None and start_line is not None and end_line is not None:
result_message = _execute_delete_lines(
self, file_path, start_line, end_line, change_id, dry_run
)
else:
- result_message = "Error: Missing required parameters for DeleteLines (file_path, start_line, end_line)"
+ result_message = (
+ "Error: Missing required parameters for DeleteLines (file_path,"
+ " start_line, end_line)"
+ )
+
+ elif norm_tool_name == "undochange":
+ change_id = params.get("change_id")
+ file_path = params.get("file_path")
- elif norm_tool_name == 'undochange':
- change_id = params.get('change_id')
- file_path = params.get('file_path')
-
result_message = _execute_undo_change(self, change_id, file_path)
-
- elif norm_tool_name == 'listchanges':
- file_path = params.get('file_path')
- limit = params.get('limit', 10)
-
+
+ elif norm_tool_name == "listchanges":
+ file_path = params.get("file_path")
+ limit = params.get("limit", 10)
+
result_message = _execute_list_changes(self, file_path, limit)
- elif norm_tool_name == 'extractlines':
- source_file_path = params.get('source_file_path')
- target_file_path = params.get('target_file_path')
- start_pattern = params.get('start_pattern')
- end_pattern = params.get('end_pattern')
- line_count = params.get('line_count')
- near_context = params.get('near_context')
- occurrence = params.get('occurrence', 1)
- dry_run = params.get('dry_run', False)
+ elif norm_tool_name == "extractlines":
+ source_file_path = params.get("source_file_path")
+ target_file_path = params.get("target_file_path")
+ start_pattern = params.get("start_pattern")
+ end_pattern = params.get("end_pattern")
+ line_count = params.get("line_count")
+ near_context = params.get("near_context")
+ occurrence = params.get("occurrence", 1)
+ dry_run = params.get("dry_run", False)
if source_file_path and target_file_path and start_pattern:
result_message = _execute_extract_lines(
- self, source_file_path, target_file_path, start_pattern, end_pattern,
- line_count, near_context, occurrence, dry_run
+ self,
+ source_file_path,
+ target_file_path,
+ start_pattern,
+ end_pattern,
+ line_count,
+ near_context,
+ occurrence,
+ dry_run,
)
else:
- result_message = "Error: Missing required parameters for ExtractLines (source_file_path, target_file_path, start_pattern)"
+ result_message = (
+ "Error: Missing required parameters for ExtractLines (source_file_path,"
+ " target_file_path, start_pattern)"
+ )
- elif norm_tool_name == 'shownumberedcontext':
- file_path = params.get('file_path')
- pattern = params.get('pattern')
- line_number = params.get('line_number')
- context_lines = params.get('context_lines', 3) # Default context
+ elif norm_tool_name == "shownumberedcontext":
+ file_path = params.get("file_path")
+ pattern = params.get("pattern")
+ line_number = params.get("line_number")
+ context_lines = params.get("context_lines", 3) # Default context
if file_path is not None and (pattern is not None or line_number is not None):
result_message = execute_show_numbered_context(
self, file_path, pattern, line_number, context_lines
)
else:
- result_message = "Error: Missing required parameters for ViewNumberedContext (file_path and either pattern or line_number)"
+ result_message = (
+ "Error: Missing required parameters for ViewNumberedContext (file_path"
+ " and either pattern or line_number)"
+ )
else:
result_message = f"Error: Unknown tool name '{tool_name}'"
if self.mcp_tools:
for server_name, server_tools in self.mcp_tools:
if any(
- t.get("function", {}).get("name") == tool_name
- for t in server_tools
+ t.get("function", {}).get("name") == tool_name for t in server_tools
):
server = next(
(s for s in self.mcp_servers if s.name == server_name), None
@@ -1906,7 +2161,9 @@ def _process_tool_commands(self, content):
except Exception as e:
result_message = f"Error executing {tool_name}: {str(e)}"
- self.io.tool_error(f"Error during {tool_name} execution: {e}\n{traceback.format_exc()}")
+ self.io.tool_error(
+ f"Error during {tool_name} execution: {e}\n{traceback.format_exc()}"
+ )
if result_message:
result_messages.append(f"[Result ({tool_name}): {result_message}]")
@@ -1948,7 +2205,7 @@ def _apply_edits_from_response(self):
# 2. Prepare edits (check permissions, commit dirty files)
prepared_edits = []
seen_paths = dict()
- self.need_commit_before_edits = set() # Reset before checking
+ self.need_commit_before_edits = set() # Reset before checking
for edit in edits:
path = edit[0]
@@ -1963,7 +2220,7 @@ def _apply_edits_from_response(self):
# Commit any dirty files identified by allowed_to_edit
self.dirty_commit()
- self.need_commit_before_edits = set() # Clear after commit
+ self.need_commit_before_edits = set() # Clear after commit
# 3. Apply edits (logic adapted from EditBlockCoder.apply_edits)
failed = []
@@ -1980,16 +2237,19 @@ def _apply_edits_from_response(self):
# Simplified cross-file patching check from EditBlockCoder
if not new_content and original.strip():
- for other_full_path in self.abs_fnames:
- if other_full_path == full_path: continue
- other_content = self.io.read_text(other_full_path)
- other_new_content = do_replace(other_full_path, other_content, original, updated, self.fence)
- if other_new_content:
- path = self.get_rel_fname(other_full_path)
- full_path = other_full_path
- new_content = other_new_content
- self.io.tool_warning(f"Applied edit intended for {edit[0]} to {path}")
- break
+ for other_full_path in self.abs_fnames:
+ if other_full_path == full_path:
+ continue
+ other_content = self.io.read_text(other_full_path)
+ other_new_content = do_replace(
+ other_full_path, other_content, original, updated, self.fence
+ )
+ if other_new_content:
+ path = self.get_rel_fname(other_full_path)
+ full_path = other_full_path
+ new_content = other_new_content
+ self.io.tool_warning(f"Applied edit intended for {edit[0]} to {path}")
+ break
if new_content:
if not self.dry_run:
@@ -1997,7 +2257,7 @@ def _apply_edits_from_response(self):
self.io.tool_output(f"Applied edit to {path}")
else:
self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
- passed.append((path, original, updated)) # Store path relative to root
+ passed.append((path, original, updated)) # Store path relative to root
else:
failed.append(edit)
@@ -2008,7 +2268,7 @@ def _apply_edits_from_response(self):
for edit in failed:
path, original, updated = edit
full_path = self.abs_root_path(path)
- content = self.io.read_text(full_path) # Read content again for context
+ content = self.io.read_text(full_path) # Read content again for context
error_message += f"""
## SearchReplaceNoExactMatch: This SEARCH block failed to exactly match lines in {path}
@@ -2027,13 +2287,13 @@ def _apply_edits_from_response(self):
"""
if updated in content and updated:
- error_message += f"""Are you sure you need this SEARCH/REPLACE block?
+ error_message += f"""Are you sure you need this SEARCH/REPLACE block?
The REPLACE lines are already in {path}!
"""
error_message += (
- "The SEARCH section must exactly match an existing block of lines including all white"
- " space, comments, indentation, docstrings, etc\n"
+ "The SEARCH section must exactly match an existing block of lines including all"
+ " white space, comments, indentation, docstrings, etc\n"
)
if passed:
pblocks = "block" if len(passed) == 1 else "blocks"
@@ -2046,34 +2306,34 @@ def _apply_edits_from_response(self):
# Set reflected_message to prompt LLM to fix the failed blocks
self.reflected_message = error_message
- edited_files = set(edit[0] for edit in passed) # Use relative paths stored in passed
+ edited_files = set(edit[0] for edit in passed) # Use relative paths stored in passed
# 4. Post-edit actions (commit, lint, test, shell commands)
if edited_files:
- self.aider_edited_files.update(edited_files) # Track edited files
- saved_message = self.auto_commit(edited_files)
+ self.aider_edited_files.update(edited_files) # Track edited files
+ self.auto_commit(edited_files)
# We don't use saved_message here as we are not moving history back
if self.auto_lint:
- lint_errors = self.lint_edited(edited_files)
- self.auto_commit(edited_files, context="Ran the linter")
- if lint_errors and not self.reflected_message: # Reflect only if no edit errors
- ok = self.io.confirm_ask("Attempt to fix lint errors?")
- if ok:
- self.reflected_message = lint_errors
+ lint_errors = self.lint_edited(edited_files)
+ self.auto_commit(edited_files, context="Ran the linter")
+ if lint_errors and not self.reflected_message: # Reflect only if no edit errors
+ ok = self.io.confirm_ask("Attempt to fix lint errors?")
+ if ok:
+ self.reflected_message = lint_errors
shared_output = self.run_shell_commands()
if shared_output:
- # Add shell output as a new user message? Or just display?
- # Let's just display for now to avoid complex history manipulation
- self.io.tool_output("Shell command output:\n" + shared_output)
+ # Add shell output as a new user message? Or just display?
+ # Let's just display for now to avoid complex history manipulation
+ self.io.tool_output("Shell command output:\n" + shared_output)
- if self.auto_test and not self.reflected_message: # Reflect only if no prior errors
- test_errors = self.commands.cmd_test(self.test_cmd)
- if test_errors:
- ok = self.io.confirm_ask("Attempt to fix test errors?")
- if ok:
- self.reflected_message = test_errors
+ if self.auto_test and not self.reflected_message: # Reflect only if no prior errors
+ test_errors = self.commands.cmd_test(self.test_cmd)
+ if test_errors:
+ ok = self.io.confirm_ask("Attempt to fix test errors?")
+ if ok:
+ self.reflected_message = test_errors
self.show_undo_hint()
@@ -2085,7 +2345,7 @@ def _apply_edits_from_response(self):
self.io.tool_output(urls.edit_errors)
self.io.tool_output()
self.io.tool_output(str(error_message))
- self.reflected_message = str(error_message) # Reflect parsing errors
+ self.reflected_message = str(error_message) # Reflect parsing errors
except ANY_GIT_ERROR as err:
self.io.tool_error(f"Git error during edit application: {str(err)}")
self.reflected_message = f"Git error during edit application: {str(err)}"
@@ -2097,10 +2357,6 @@ def _apply_edits_from_response(self):
return edited_files
-
-
-
-
def _add_file_to_context(self, file_path, explicit=False):
"""
Helper method to add a file to context as read-only.
@@ -2112,31 +2368,31 @@ def _add_file_to_context(self, file_path, explicit=False):
# Check if file exists
abs_path = self.abs_root_path(file_path)
rel_path = self.get_rel_fname(abs_path)
-
+
if not os.path.isfile(abs_path):
self.io.tool_output(f"⚠️ File '{file_path}' not found")
- return f"File not found"
-
+ return "File not found"
+
# Check if the file is already in context (either editable or read-only)
if abs_path in self.abs_fnames:
if explicit:
self.io.tool_output(f"📎 File '{file_path}' already in context as editable")
- return f"File already in context as editable"
- return f"File already in context as editable"
-
+ return "File already in context as editable"
+ return "File already in context as editable"
+
if abs_path in self.abs_read_only_fnames:
if explicit:
self.io.tool_output(f"📎 File '{file_path}' already in context as read-only")
- return f"File already in context as read-only"
- return f"File already in context as read-only"
-
+ return "File already in context as read-only"
+ return "File already in context as read-only"
+
# Add file to context as read-only
try:
# Check for large file and apply context management if enabled
content = self.io.read_text(abs_path)
if content is None:
return f"Error reading file: {file_path}"
-
+
# Check if file is very large and context management is enabled
if self.context_management_enabled:
file_tokens = self.main_model.token_count(content)
@@ -2145,7 +2401,7 @@ def _add_file_to_context(self, file_path, explicit=False):
f"⚠️ '{file_path}' is very large ({file_tokens} tokens). "
"Use /context-management to toggle truncation off if needed."
)
-
+
# Add to read-only files
self.abs_read_only_fnames.add(abs_path)
@@ -2155,49 +2411,43 @@ def _add_file_to_context(self, file_path, explicit=False):
# Inform user
if explicit:
self.io.tool_output(f"📎 Viewed '{file_path}' (added to context as read-only)")
- return f"Viewed file (added to context as read-only)"
+ return "Viewed file (added to context as read-only)"
else:
# For implicit adds (from ViewFilesAtGlob/ViewFilesMatching), just return success
- return f"Added file to context as read-only"
+ return "Added file to context as read-only"
except Exception as e:
self.io.tool_error(f"Error adding file '{file_path}' for viewing: {str(e)}")
return f"Error adding file for viewing: {str(e)}"
-
-
-
-
-
-
def _process_file_mentions(self, content):
"""
Process implicit file mentions in the content, adding files if they're not already in context.
-
+
This handles the case where the LLM mentions file paths without using explicit tool commands.
"""
# Extract file mentions using the parent class's method
mentioned_files = set(self.get_file_mentions(content, ignore_current=False))
current_files = set(self.get_inchat_relative_files())
-
+
# Get new files to add (not already in context)
- new_files = mentioned_files - current_files
-
+ mentioned_files - current_files
+
# In navigator mode, we *only* add files via explicit tool commands (`View`, `ViewFilesAtGlob`, etc.).
# Do nothing here for implicit mentions.
pass
-
def check_for_file_mentions(self, content):
"""
Override parent's method to use our own file processing logic.
Override parent's method to disable implicit file mention handling in navigator mode.
- Files should only be added via explicit tool commands (`View`, `ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`).
+ Files should only be added via explicit tool commands
+ (`View`, `ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`).
"""
# Do nothing - disable implicit file adds in navigator mode.
pass
-
+
def preproc_user_input(self, inp):
"""
Override parent's method to wrap user input in a context block.
@@ -2205,11 +2455,11 @@ def preproc_user_input(self, inp):
"""
# First apply the parent's preprocessing
inp = super().preproc_user_input(inp)
-
+
# If we still have input after preprocessing, wrap it in a context block
- if inp and not inp.startswith(""):
- inp = f"\n{inp}\n"
-
+ if inp and not inp.startswith(''):
+ inp = f'\n{inp}\n'
+
return inp
def get_directory_structure(self):
@@ -2219,95 +2469,103 @@ def get_directory_structure(self):
"""
if not self.use_enhanced_context:
return None
-
+
try:
# Start with the header
- result = "\n"
+ result = '\n'
result += "## Project File Structure\n\n"
- result += "Below is a snapshot of this project's file structure at the current time. It skips over .gitignore patterns.\n\n"
-
+ result += (
+ "Below is a snapshot of this project's file structure at the current time. It skips"
+ " over .gitignore patterns.\n\n"
+ )
+
# Get the root directory
- root_path = Path(self.root)
- root_str = str(root_path)
-
+ Path(self.root)
+
# Get all files in the repo (both tracked and untracked)
if self.repo:
# Get tracked files
tracked_files = self.repo.get_tracked_files()
-
+
# Get untracked files (files present in the working directory but not in git)
untracked_files = []
try:
# Run git status to get untracked files
- untracked_output = self.repo.repo.git.status('--porcelain')
+ untracked_output = self.repo.repo.git.status("--porcelain")
for line in untracked_output.splitlines():
- if line.startswith('??'):
+ if line.startswith("??"):
# Extract the filename (remove the '?? ' prefix)
untracked_file = line[3:]
if not self.repo.git_ignored_file(untracked_file):
untracked_files.append(untracked_file)
except Exception as e:
self.io.tool_warning(f"Error getting untracked files: {str(e)}")
-
+
# Combine tracked and untracked files
all_files = tracked_files + untracked_files
else:
# If no repo, get all files relative to root
all_files = []
- for path in Path(self.root).rglob('*'):
+ for path in Path(self.root).rglob("*"):
if path.is_file():
all_files.append(str(path.relative_to(self.root)))
-
+
# Sort files to ensure deterministic output
all_files = sorted(all_files)
# Filter out .aider files/dirs
- all_files = [f for f in all_files if not any(part.startswith('.aider') for part in f.split('/'))]
+ all_files = [
+ f for f in all_files if not any(part.startswith(".aider") for part in f.split("/"))
+ ]
# Build tree structure
tree = {}
for file in all_files:
- parts = file.split('/')
+ parts = file.split("/")
current = tree
for i, part in enumerate(parts):
if i == len(parts) - 1: # Last part (file)
- if '.' not in current:
- current['.'] = []
- current['.'].append(part)
+ if "." not in current:
+ current["."] = []
+ current["."].append(part)
else: # Directory
if part not in current:
current[part] = {}
current = current[part]
-
+
# Function to recursively print the tree
def print_tree(node, prefix="- ", indent=" ", path=""):
lines = []
# First print all directories
- dirs = sorted([k for k in node.keys() if k != '.'])
+ dirs = sorted([k for k in node.keys() if k != "."])
for i, dir_name in enumerate(dirs):
full_path = f"{path}/{dir_name}" if path else dir_name
lines.append(f"{prefix}{full_path}/")
- sub_lines = print_tree(node[dir_name], prefix=prefix, indent=indent, path=full_path)
+ sub_lines = print_tree(
+ node[dir_name], prefix=prefix, indent=indent, path=full_path
+ )
for sub_line in sub_lines:
lines.append(f"{indent}{sub_line}")
-
+
# Then print all files
- if '.' in node:
- for file_name in sorted(node['.']):
- lines.append(f"{prefix}{path}/{file_name}" if path else f"{prefix}{file_name}")
-
+ if "." in node:
+ for file_name in sorted(node["."]):
+ lines.append(
+ f"{prefix}{path}/{file_name}" if path else f"{prefix}{file_name}"
+ )
+
return lines
-
+
# Generate the tree starting from root
tree_lines = print_tree(tree, prefix="- ")
result += "\n".join(tree_lines)
result += "\n"
-
+
return result
except Exception as e:
self.io.tool_error(f"Error generating directory structure: {str(e)}")
return None
-
+
def get_git_status(self):
"""
Generate a git status context block for repository information.
@@ -2315,41 +2573,41 @@ def get_git_status(self):
"""
if not self.use_enhanced_context or not self.repo:
return None
-
+
try:
- result = "\n"
+ result = '\n'
result += "## Git Repository Status\n\n"
result += "This is a snapshot of the git status at the current time.\n"
-
+
# Get current branch
try:
current_branch = self.repo.repo.active_branch.name
result += f"Current branch: {current_branch}\n\n"
except Exception:
result += "Current branch: (detached HEAD state)\n\n"
-
+
# Get main/master branch
main_branch = None
try:
for branch in self.repo.repo.branches:
- if branch.name in ('main', 'master'):
+ if branch.name in ("main", "master"):
main_branch = branch.name
break
if main_branch:
result += f"Main branch (you will usually use this for PRs): {main_branch}\n\n"
except Exception:
pass
-
+
# Git status
result += "Status:\n"
try:
# Get modified files
- status = self.repo.repo.git.status('--porcelain')
-
+ status = self.repo.repo.git.status("--porcelain")
+
# Process and categorize the status output
if status:
- status_lines = status.strip().split('\n')
-
+ status_lines = status.strip().split("\n")
+
# Group by status type for better organization
staged_added = []
staged_modified = []
@@ -2357,34 +2615,34 @@ def get_git_status(self):
unstaged_modified = []
unstaged_deleted = []
untracked = []
-
+
for line in status_lines:
if len(line) < 4: # Ensure the line has enough characters
continue
-
+
status_code = line[:2]
file_path = line[3:]
# Skip .aider files/dirs
- if any(part.startswith('.aider') for part in file_path.split('/')):
+ if any(part.startswith(".aider") for part in file_path.split("/")):
continue
-
+
# Staged changes
- if status_code[0] == 'A':
+ if status_code[0] == "A":
staged_added.append(file_path)
- elif status_code[0] == 'M':
+ elif status_code[0] == "M":
staged_modified.append(file_path)
- elif status_code[0] == 'D':
+ elif status_code[0] == "D":
staged_deleted.append(file_path)
# Unstaged changes
- if status_code[1] == 'M':
+ if status_code[1] == "M":
unstaged_modified.append(file_path)
- elif status_code[1] == 'D':
+ elif status_code[1] == "D":
unstaged_deleted.append(file_path)
# Untracked files
- if status_code == '??':
+ if status_code == "??":
untracked.append(file_path)
-
+
# Output in a nicely formatted manner
if staged_added:
for file in staged_added:
@@ -2408,41 +2666,46 @@ def get_git_status(self):
result += "Working tree clean\n"
except Exception as e:
result += f"Unable to get modified files: {str(e)}\n"
-
+
# Recent commits
result += "\nRecent commits:\n"
try:
commits = list(self.repo.repo.iter_commits(max_count=5))
for commit in commits:
short_hash = commit.hexsha[:8]
- message = commit.message.strip().split('\n')[0] # First line only
+ message = commit.message.strip().split("\n")[0] # First line only
result += f"{short_hash} {message}\n"
except Exception:
result += "Unable to get recent commits\n"
-
+
result += ""
return result
except Exception as e:
self.io.tool_error(f"Error generating git status: {str(e)}")
return None
-
+
def cmd_context_blocks(self, args=""):
"""
Toggle enhanced context blocks feature.
"""
self.use_enhanced_context = not self.use_enhanced_context
-
+
if self.use_enhanced_context:
- self.io.tool_output("Enhanced context blocks are now ON - directory structure and git status will be included.")
+ self.io.tool_output(
+ "Enhanced context blocks are now ON - directory structure and git status will be"
+ " included."
+ )
# Mark tokens as needing calculation, but don't calculate yet (lazy calculation)
self.tokens_calculated = False
self.context_blocks_cache = {}
else:
- self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.")
+ self.io.tool_output(
+ "Enhanced context blocks are now OFF - directory structure and git status will not"
+ " be included."
+ )
# Clear token counts and cache when disabled
self.context_block_tokens = {}
self.context_blocks_cache = {}
self.tokens_calculated = False
-
- return True
+ return True
diff --git a/aider/coders/navigator_legacy_prompts.py b/aider/coders/navigator_legacy_prompts.py
index 538d03b43e2..5b95aa77f0a 100644
--- a/aider/coders/navigator_legacy_prompts.py
+++ b/aider/coders/navigator_legacy_prompts.py
@@ -6,12 +6,12 @@
class NavigatorLegacyPrompts(CoderPrompts):
"""
Prompt templates for the Navigator mode using search/replace instead of granular editing tools.
-
+
The NavigatorCoder uses these prompts to guide its behavior when exploring and modifying
a codebase using special tool commands like Glob, Grep, Add, etc. This version uses the legacy
search/replace editing method instead of granular editing tools.
"""
-
+
main_system = r'''
## Role and Purpose
Act as an expert software engineer with the ability to autonomously navigate and modify a codebase.
@@ -253,11 +253,12 @@ class NavigatorLegacyPrompts(CoderPrompts):
'''
- files_content_assistant_reply = (
- "I understand. I'll use these files to help with your request."
- )
+ files_content_assistant_reply = "I understand. I'll use these files to help with your request."
- files_no_full_files = "I don't have full contents of any files yet. I'll add them as needed using the tool commands."
+ files_no_full_files = (
+ "I don't have full contents of any files yet. I'll add them"
+ " as needed using the tool commands."
+ )
files_no_full_files_with_repo_map = """
I have access to a map of the repository with summary information about files, but I don't have the complete content of any files yet.
diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py
index 52f87324a17..d6730d9718b 100644
--- a/aider/coders/navigator_prompts.py
+++ b/aider/coders/navigator_prompts.py
@@ -6,12 +6,12 @@
class NavigatorPrompts(CoderPrompts):
"""
Prompt templates for the Navigator mode, which enables autonomous codebase exploration.
-
+
The NavigatorCoder uses these prompts to guide its behavior when exploring and modifying
a codebase using special tool commands like Glob, Grep, Add, etc. This mode enables the
LLM to manage its own context by adding/removing files and executing commands.
"""
-
+
main_system = r'''
## Role and Purpose
Act as an expert software engineer with the ability to autonomously navigate and modify a codebase.
@@ -442,11 +442,12 @@ def new_function(param1, param2):
'''
- files_content_assistant_reply = (
- "I understand. I'll use these files to help with your request."
- )
+ files_content_assistant_reply = "I understand. I'll use these files to help with your request."
- files_no_full_files = "I don't have full contents of any files yet. I'll add them as needed using the tool commands."
+ files_no_full_files = (
+ "I don't have full contents of any files yet. I'll add them"
+ " as needed using the tool commands."
+ )
files_no_full_files_with_repo_map = """
I have access to a map of the repository with summary information about files, but I don't have the complete content of any files yet.
diff --git a/aider/commands.py b/aider/commands.py
index 2bfdafb71fc..c9707697133 100644
--- a/aider/commands.py
+++ b/aider/commands.py
@@ -23,6 +23,7 @@
from aider.run_cmd import run_cmd
from aider.scrape import Scraper, install_playwright
from aider.utils import is_image_file, run_fzf
+
from .dump import dump # noqa: F401
@@ -196,7 +197,6 @@ def cmd_chat_mode(self, args):
elif ef == "ask":
summarize_from_coder = False
-
raise SwitchCoder(
edit_format=edit_format,
summarize_from_coder=summarize_from_coder,
@@ -483,18 +483,20 @@ def cmd_tokens(self, args):
res.append((tokens, "repository map", "use --map-tokens to resize"))
# Enhanced context blocks (only for navigator mode)
- if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context:
+ if hasattr(self.coder, "use_enhanced_context") and self.coder.use_enhanced_context:
# Force token calculation if it hasn't been done yet
- if hasattr(self.coder, '_calculate_context_block_tokens'):
- if not hasattr(self.coder, 'tokens_calculated') or not self.coder.tokens_calculated:
+ if hasattr(self.coder, "_calculate_context_block_tokens"):
+ if not hasattr(self.coder, "tokens_calculated") or not self.coder.tokens_calculated:
self.coder._calculate_context_block_tokens()
-
+
# Add enhanced context blocks to the display
- if hasattr(self.coder, 'context_block_tokens') and self.coder.context_block_tokens:
+ if hasattr(self.coder, "context_block_tokens") and self.coder.context_block_tokens:
for block_name, tokens in self.coder.context_block_tokens.items():
# Format the block name more nicely
- display_name = block_name.replace('_', ' ').title()
- res.append((tokens, f"{display_name} context block", "/context-blocks to toggle"))
+ display_name = block_name.replace("_", " ").title()
+ res.append(
+ (tokens, f"{display_name} context block", "/context-blocks to toggle")
+ )
fence = "`" * 3
@@ -502,17 +504,19 @@ def cmd_tokens(self, args):
# Process files with progress indication
total_editable_files = len(self.coder.abs_fnames)
total_readonly_files = len(self.coder.abs_read_only_fnames)
-
+
# Display progress for editable files
if total_editable_files > 0:
if total_editable_files > 20:
- self.io.tool_output(f"Calculating tokens for {total_editable_files} editable files...")
-
+ self.io.tool_output(
+ f"Calculating tokens for {total_editable_files} editable files..."
+ )
+
# Calculate tokens for editable files
for i, fname in enumerate(self.coder.abs_fnames):
if i > 0 and i % 20 == 0 and total_editable_files > 20:
self.io.tool_output(f"Processed {i}/{total_editable_files} editable files...")
-
+
relative_fname = self.coder.get_rel_fname(fname)
content = self.io.read_text(fname)
if is_image_file(relative_fname):
@@ -522,17 +526,19 @@ def cmd_tokens(self, args):
content = f"{relative_fname}\n{fence}\n" + content + "{fence}\n"
tokens = self.coder.main_model.token_count(content)
file_res.append((tokens, f"{relative_fname}", "/drop to remove"))
-
+
# Display progress for read-only files
if total_readonly_files > 0:
if total_readonly_files > 20:
- self.io.tool_output(f"Calculating tokens for {total_readonly_files} read-only files...")
-
+ self.io.tool_output(
+ f"Calculating tokens for {total_readonly_files} read-only files..."
+ )
+
# Calculate tokens for read-only files
for i, fname in enumerate(self.coder.abs_read_only_fnames):
if i > 0 and i % 20 == 0 and total_readonly_files > 20:
self.io.tool_output(f"Processed {i}/{total_readonly_files} read-only files...")
-
+
relative_fname = self.coder.get_rel_fname(fname)
content = self.io.read_text(fname)
if content is not None and not is_image_file(relative_fname):
@@ -945,10 +951,13 @@ def cmd_add(self, args):
fname = self.coder.get_rel_fname(abs_file_path)
self.io.tool_output(f"Added {fname} to the chat")
self.coder.check_added_files()
-
+
# Recalculate context block tokens if using navigator mode
- if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context:
- if hasattr(self.coder, '_calculate_context_block_tokens'):
+ if (
+ hasattr(self.coder, "use_enhanced_context")
+ and self.coder.use_enhanced_context
+ ):
+ if hasattr(self.coder, "_calculate_context_block_tokens"):
self.coder._calculate_context_block_tokens()
def completions_drop(self):
@@ -957,27 +966,27 @@ def completions_drop(self):
all_files = files + read_only_files
all_files = [self.quote_fname(fn) for fn in all_files]
return all_files
-
+
def completions_context_blocks(self):
"""Return available context block names for auto-completion."""
- if not hasattr(self.coder, 'use_enhanced_context') or not self.coder.use_enhanced_context:
+ if not hasattr(self.coder, "use_enhanced_context") or not self.coder.use_enhanced_context:
return []
-
+
# If the coder has context blocks available
- if hasattr(self.coder, 'context_block_tokens') and self.coder.context_block_tokens:
+ if hasattr(self.coder, "context_block_tokens") and self.coder.context_block_tokens:
# Get all block names from the tokens dictionary
block_names = list(self.coder.context_block_tokens.keys())
# Format them for display (convert snake_case to Title Case)
- formatted_blocks = [name.replace('_', ' ').title() for name in block_names]
+ formatted_blocks = [name.replace("_", " ").title() for name in block_names]
return formatted_blocks
-
+
# Standard blocks that are typically available
return [
- "Context Summary",
- "Directory Structure",
- "Environment Info",
- "Git Status",
- "Symbol Outline"
+ "Context Summary",
+ "Directory Structure",
+ "Environment Info",
+ "Git Status",
+ "Symbol Outline",
]
def cmd_drop(self, args=""):
@@ -991,16 +1000,16 @@ def cmd_drop(self, args=""):
else:
self.io.tool_output("Dropping all files from the chat session.")
self._drop_all_files()
-
+
# Recalculate context block tokens after dropping all files
- if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context:
- if hasattr(self.coder, '_calculate_context_block_tokens'):
+ if hasattr(self.coder, "use_enhanced_context") and self.coder.use_enhanced_context:
+ if hasattr(self.coder, "_calculate_context_block_tokens"):
self.coder._calculate_context_block_tokens()
return
filenames = parse_quoted_filenames(args)
files_changed = False
-
+
for word in filenames:
# Expand tilde in the path
expanded_word = os.path.expanduser(word)
@@ -1043,10 +1052,14 @@ def cmd_drop(self, args=""):
self.coder.abs_fnames.remove(abs_fname)
self.io.tool_output(f"Removed {matched_file} from the chat")
files_changed = True
-
+
# Recalculate context block tokens if any files were changed and using navigator mode
- if files_changed and hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context:
- if hasattr(self.coder, '_calculate_context_block_tokens'):
+ if (
+ files_changed
+ and hasattr(self.coder, "use_enhanced_context")
+ and self.coder.use_enhanced_context
+ ):
+ if hasattr(self.coder, "_calculate_context_block_tokens"):
self.coder._calculate_context_block_tokens()
def cmd_git(self, args):
@@ -1148,39 +1161,39 @@ def cmd_quit(self, args):
def cmd_context_management(self, args=""):
"Toggle context management for large files"
- if not hasattr(self.coder, 'context_management_enabled'):
+ if not hasattr(self.coder, "context_management_enabled"):
self.io.tool_error("Context management is only available in navigator mode.")
return
-
+
# Toggle the setting
self.coder.context_management_enabled = not self.coder.context_management_enabled
-
+
# Report the new state
if self.coder.context_management_enabled:
self.io.tool_output("Context management is now ON - large files may be truncated.")
else:
self.io.tool_output("Context management is now OFF - files will not be truncated.")
-
+
def cmd_context_blocks(self, args=""):
"Toggle enhanced context blocks or print a specific block"
- if not hasattr(self.coder, 'use_enhanced_context'):
+ if not hasattr(self.coder, "use_enhanced_context"):
self.io.tool_error("Enhanced context blocks are only available in navigator mode.")
return
-
+
# If an argument is provided, try to print that specific context block
if args.strip():
# Format block name to match internal naming conventions
block_name = args.strip().lower().replace(" ", "_")
-
+
# Check if the coder has the necessary method to get context blocks
- if hasattr(self.coder, '_generate_context_block'):
+ if hasattr(self.coder, "_generate_context_block"):
# Force token recalculation to ensure blocks are fresh
- if hasattr(self.coder, '_calculate_context_block_tokens'):
+ if hasattr(self.coder, "_calculate_context_block_tokens"):
self.coder._calculate_context_block_tokens(force=True)
-
+
# Try to get the requested block
block_content = self.coder._generate_context_block(block_name)
-
+
if block_content:
# Calculate token count
tokens = self.coder.main_model.token_count(block_content)
@@ -1190,50 +1203,64 @@ def cmd_context_blocks(self, args=""):
else:
# List available blocks if the requested one wasn't found
self.io.tool_error(f"Context block '{args.strip()}' not found or empty.")
- if hasattr(self.coder, 'context_block_tokens'):
+ if hasattr(self.coder, "context_block_tokens"):
available_blocks = list(self.coder.context_block_tokens.keys())
- formatted_blocks = [name.replace('_', ' ').title() for name in available_blocks]
+ formatted_blocks = [
+ name.replace("_", " ").title() for name in available_blocks
+ ]
self.io.tool_output(f"Available blocks: {', '.join(formatted_blocks)}")
return
else:
self.io.tool_error("This coder doesn't support generating context blocks.")
return
-
+
# If no argument, toggle the enhanced context setting
self.coder.use_enhanced_context = not self.coder.use_enhanced_context
-
+
# Report the new state
if self.coder.use_enhanced_context:
- self.io.tool_output("Enhanced context blocks are now ON - directory structure and git status will be included.")
- if hasattr(self.coder, 'context_block_tokens'):
+ self.io.tool_output(
+ "Enhanced context blocks are now ON - directory structure and git status will be"
+ " included."
+ )
+ if hasattr(self.coder, "context_block_tokens"):
available_blocks = list(self.coder.context_block_tokens.keys())
- formatted_blocks = [name.replace('_', ' ').title() for name in available_blocks]
+ formatted_blocks = [name.replace("_", " ").title() for name in available_blocks]
self.io.tool_output(f"Available blocks: {', '.join(formatted_blocks)}")
self.io.tool_output("Use '/context-blocks [block name]' to view a specific block.")
else:
- self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.")
-
+ self.io.tool_output(
+ "Enhanced context blocks are now OFF - directory structure and git status will not"
+ " be included."
+ )
+
def cmd_granular_editing(self, args=""):
"Toggle granular editing tools in navigator mode"
- if not hasattr(self.coder, 'use_granular_editing'):
+ if not hasattr(self.coder, "use_granular_editing"):
self.io.tool_error("Granular editing toggle is only available in navigator mode.")
return
-
+
# Toggle the setting using the navigator's method if available
new_state = not self.coder.use_granular_editing
-
- if hasattr(self.coder, 'set_granular_editing'):
+
+ if hasattr(self.coder, "set_granular_editing"):
self.coder.set_granular_editing(new_state)
else:
# Fallback if method doesn't exist
self.coder.use_granular_editing = new_state
-
+
# Report the new state
if self.coder.use_granular_editing:
- self.io.tool_output("Granular editing tools are now ON - navigator will use specific editing tools instead of search/replace.")
+ self.io.tool_output(
+ "Granular editing tools are now ON - navigator will use specific editing tools"
+ " instead of search/replace."
+ )
else:
- self.io.tool_output("Granular editing tools are now OFF - navigator will use search/replace blocks for editing.")
-
+ self.io.tool_output(
+ "Granular editing tools are now OFF - navigator will use search/replace blocks for"
+ " editing."
+ )
+
def cmd_ls(self, args):
"List all known files and indicate which are included in the chat session"
@@ -1351,7 +1378,7 @@ def completions_architect(self):
def completions_context(self):
raise CommandCompletionException()
-
+
def completions_navigator(self):
raise CommandCompletionException()
@@ -1370,14 +1397,14 @@ def cmd_architect(self, args):
def cmd_context(self, args):
"""Enter context mode to see surrounding code context. If no prompt provided, switches to context mode.""" # noqa
return self._generic_chat_command(args, "context", placeholder=args.strip() or None)
-
+
def cmd_navigator(self, args):
"""Enter navigator mode to autonomously discover and manage relevant files. If no prompt provided, switches to navigator mode.""" # noqa
# Enable context management when entering navigator mode
- if hasattr(self.coder, 'context_management_enabled'):
+ if hasattr(self.coder, "context_management_enabled"):
self.coder.context_management_enabled = True
self.io.tool_output("Context management enabled for large files")
-
+
return self._generic_chat_command(args, "navigator", placeholder=args.strip() or None)
def _generic_chat_command(self, args, edit_format, placeholder=None):
diff --git a/aider/history.py b/aider/history.py
index 8f9d0d754d7..ad4a3db34ce 100644
--- a/aider/history.py
+++ b/aider/history.py
@@ -149,9 +149,7 @@ def summarize_all_as_text(self, messages, prompt, max_tokens=None):
for model in self.models:
try:
- summary = model.simple_send_with_retries(
- summarize_messages, max_tokens=max_tokens
- )
+ summary = model.simple_send_with_retries(summarize_messages, max_tokens=max_tokens)
if summary is not None:
return summary
except Exception as e:
diff --git a/aider/io.py b/aider/io.py
index e087123a243..ca0092fbecf 100644
--- a/aider/io.py
+++ b/aider/io.py
@@ -128,13 +128,13 @@ def tokenize(self):
if self.tokenized:
return
self.tokenized = True
-
+
# Performance optimization for large file sets
if len(self.all_fnames) > 100:
# Skip tokenization for very large numbers of files to avoid input lag
self.tokenized = True
return
-
+
# Limit number of files to process to avoid excessive tokenization time
process_fnames = self.all_fnames
if len(process_fnames) > 50:
@@ -1177,7 +1177,9 @@ def append_chat_history(self, text, linebreak=False, blockquote=False, strip=Tru
if self.chat_history_file is not None:
try:
self.chat_history_file.parent.mkdir(parents=True, exist_ok=True)
- with self.chat_history_file.open("a", encoding=self.encoding or "utf-8", errors="ignore") as f:
+ with self.chat_history_file.open(
+ "a", encoding=self.encoding or "utf-8", errors="ignore"
+ ) as f:
f.write(text)
except (PermissionError, OSError) as err:
print(f"Warning: Unable to write to chat history file {self.chat_history_file}.")
@@ -1187,18 +1189,18 @@ def append_chat_history(self, text, linebreak=False, blockquote=False, strip=Tru
def format_files_for_input(self, rel_fnames, rel_read_only_fnames):
# Optimization for large number of files
total_files = len(rel_fnames) + len(rel_read_only_fnames or [])
-
+
# For very large numbers of files, use a summary display
if total_files > 50:
read_only_count = len(rel_read_only_fnames or [])
editable_count = len([f for f in rel_fnames if f not in (rel_read_only_fnames or [])])
-
+
summary = f"{editable_count} editable file(s)"
if read_only_count > 0:
summary += f", {read_only_count} read-only file(s)"
summary += " (use /ls to list all files)\n"
return summary
-
+
# Original implementation for reasonable number of files
if not self.pretty:
read_only_files = []
diff --git a/aider/main.py b/aider/main.py
index 9b7ffe7a728..361375a083b 100644
--- a/aider/main.py
+++ b/aider/main.py
@@ -1023,6 +1023,7 @@ def get_io(pretty):
enable_context_compaction=args.enable_context_compaction,
context_compaction_max_tokens=args.context_compaction_max_tokens,
context_compaction_summary_tokens=args.context_compaction_summary_tokens,
+ map_cache_dir=args.map_cache_dir,
)
except UnknownEditFormat as err:
io.tool_error(str(err))
@@ -1195,7 +1196,7 @@ def get_io(pretty):
# Disable cache warming for the new coder
kwargs["num_cache_warming_pings"] = 0
-
+
coder = Coder.create(**kwargs)
if switch.kwargs.get("show_announcements") is not False:
diff --git a/aider/mcp/__init__.py b/aider/mcp/__init__.py
index 0fcf3f003d3..0da5b6e232e 100644
--- a/aider/mcp/__init__.py
+++ b/aider/mcp/__init__.py
@@ -1,6 +1,6 @@
import json
-from aider.mcp.server import McpServer, HttpStreamingServer
+from aider.mcp.server import HttpStreamingServer, McpServer
def _parse_mcp_servers_from_json_string(json_string, io, verbose=False, mcp_transport="stdio"):
diff --git a/aider/mcp/server.py b/aider/mcp/server.py
index c21a709950f..5e5660a185e 100644
--- a/aider/mcp/server.py
+++ b/aider/mcp/server.py
@@ -76,6 +76,7 @@ async def disconnect(self):
except Exception as e:
logging.error(f"Error during cleanup of server {self.name}: {e}")
+
class HttpStreamingServer(McpServer):
async def connect(self):
if self.session is not None:
@@ -97,6 +98,7 @@ async def connect(self):
await self.disconnect()
raise
+
class LocalServer(McpServer):
"""
A dummy McpServer for executing local, in-process tools
@@ -115,4 +117,3 @@ async def connect(self):
async def disconnect(self):
"""Disconnect from the MCP server and clean up resources."""
self.session = None
-
diff --git a/aider/models.py b/aider/models.py
index 219ee3db4a0..c14d31a8837 100644
--- a/aider/models.py
+++ b/aider/models.py
@@ -954,7 +954,9 @@ class GitHubCopilotTokenError(Exception):
os.environ[openai_api_key] = token
- def send_completion(self, messages, functions, stream, temperature=None, tools=None, max_tokens=None):
+ def send_completion(
+ self, messages, functions, stream, temperature=None, tools=None, max_tokens=None
+ ):
if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
sanity_check_messages(messages)
@@ -985,6 +987,8 @@ def send_completion(self, messages, functions, stream, temperature=None, tools=N
kwargs["tools"] = [dict(type="function", function=tool) for tool in effective_tools]
else:
kwargs["tools"] = effective_tools
+ else:
+ kwargs["tools"] = []
# Forcing a function call is for legacy style `functions` with a single function.
# This is used by ArchitectCoder and not intended for NavigatorCoder's tools.
@@ -1029,7 +1033,6 @@ def send_completion(self, messages, functions, stream, temperature=None, tools=N
self.github_copilot_token_to_open_ai_key(kwargs["extra_headers"])
-
try:
res = litellm.completion(**kwargs)
except Exception as err:
diff --git a/aider/openrouter.py b/aider/openrouter.py
index 6517cb1526a..ea641c17fda 100644
--- a/aider/openrouter.py
+++ b/aider/openrouter.py
@@ -6,6 +6,7 @@
helper class that returns metadata for a given model in a format compatible
with litellm’s ``get_model_info``.
"""
+
from __future__ import annotations
import json
diff --git a/aider/repomap.py b/aider/repomap.py
index e1961f7d756..77bdf7bf384 100644
--- a/aider/repomap.py
+++ b/aider/repomap.py
@@ -17,19 +17,52 @@
from aider.dump import dump
from aider.special import filter_important_files
+from aider.tools.tool_utils import ToolError
from aider.waiting import Spinner
# tree_sitter is throwing a FutureWarning
warnings.simplefilter("ignore", category=FutureWarning)
from grep_ast.tsl import USING_TSL_PACK, get_language, get_parser # noqa: E402
+
# Define the Tag namedtuple with a default for specific_kind to maintain compatibility
# with cached entries that might have been created with the old definition
-class TagBase(namedtuple("TagBase", "rel_fname fname line name kind specific_kind start_line end_line start_byte end_byte")):
+class TagBase(
+ namedtuple(
+ "TagBase",
+ "rel_fname fname line name kind specific_kind start_line end_line start_byte end_byte",
+ )
+):
__slots__ = ()
- def __new__(cls, rel_fname, fname, line, name, kind, specific_kind=None, start_line=None, end_line=None, start_byte=None, end_byte=None):
+
+ def __new__(
+ cls,
+ rel_fname,
+ fname,
+ line,
+ name,
+ kind,
+ specific_kind=None,
+ start_line=None,
+ end_line=None,
+ start_byte=None,
+ end_byte=None,
+ ):
# Provide a default value for specific_kind to handle old cached objects
- return super(TagBase, cls).__new__(cls, rel_fname, fname, line, name, kind, specific_kind, start_line, end_line, start_byte, end_byte)
+ return super(TagBase, cls).__new__(
+ cls,
+ rel_fname,
+ fname,
+ line,
+ name,
+ kind,
+ specific_kind,
+ start_line,
+ end_line,
+ start_byte,
+ end_byte,
+ )
+
Tag = TagBase
@@ -52,18 +85,26 @@ class RepoMap:
# Define kinds that typically represent definitions across languages
# Used by NavigatorCoder to filter tags for the symbol outline
definition_kinds = {
- "class", "struct", "enum", "interface", "trait", # Structure definitions
- "function", "method", "constructor", # Function/method definitions
- "module", "namespace", # Module/namespace definitions
- "constant", "variable", # Top-level/class variable definitions (consider refining)
- "type", # Type definitions
+ "class",
+ "struct",
+ "enum",
+ "interface",
+ "trait", # Structure definitions
+ "function",
+ "method",
+ "constructor", # Function/method definitions
+ "module",
+ "namespace", # Module/namespace definitions
+ "constant",
+ "variable", # Top-level/class variable definitions (consider refining)
+ "type", # Type definitions
# Add more based on tree-sitter queries if needed
}
def __init__(
self,
map_tokens=1024,
- root=None,
+ map_cache_dir=".",
main_model=None,
io=None,
repo_content_prefix=None,
@@ -77,9 +118,8 @@ def __init__(
self.verbose = verbose
self.refresh = refresh
- if not root:
- root = os.getcwd()
- self.root = root
+ self.map_cache_dir = map_cache_dir
+ self.root = os.getcwd()
self.load_tags_cache()
self.cache_threshold = 0.95
@@ -104,6 +144,8 @@ def __init__(
self.io.tool_output(
f"RepoMap initialized with map_mul_no_files: {self.map_mul_no_files}"
)
+ self.io.tool_output(f"RepoMap initialized with map_cache_dir: {self.map_cache_dir}")
+ self.io.tool_output(f"RepoMap assumes repo root is: {self.root}")
def token_count(self, text):
len_text = len(text)
@@ -202,7 +244,7 @@ def tags_cache_error(self, original_error=None):
if isinstance(getattr(self, "TAGS_CACHE", None), dict):
return
- path = Path(self.root) / self.TAGS_CACHE_DIR
+ path = Path(self.map_cache_dir) / self.TAGS_CACHE_DIR
# Try to recreate the cache
try:
@@ -234,7 +276,7 @@ def tags_cache_error(self, original_error=None):
self.TAGS_CACHE = dict()
def load_tags_cache(self):
- path = Path(self.root) / self.TAGS_CACHE_DIR
+ path = Path(self.map_cache_dir) / self.TAGS_CACHE_DIR
try:
self.TAGS_CACHE = Cache(path)
except SQLITE_ERRORS as e:
@@ -266,10 +308,10 @@ def get_tags(self, fname, rel_fname):
try:
# Get the cached data
data = self.TAGS_CACHE[cache_key]["data"]
-
+
# Let our Tag class handle compatibility with old cache formats
# No need for special handling as TagBase.__new__ will supply default specific_kind
-
+
return data
except SQLITE_ERRORS as e:
self.tags_cache_error(e)
@@ -294,6 +336,7 @@ def get_tags(self, fname, rel_fname):
self.TAGS_CACHE[cache_key] = {"mtime": file_mtime, "data": data}
return data
+
def get_symbol_definition_location(self, file_path, symbol_name):
"""
Finds the unique definition location (start/end line) for a symbol in a file.
@@ -308,8 +351,8 @@ def get_symbol_definition_location(self, file_path, symbol_name):
Raises:
ToolError: If the symbol is not found, not unique, or not a definition.
"""
- abs_path = self.io.root_abs_path(file_path) # Assuming io has this helper or similar
- rel_path = self.get_rel_fname(abs_path) # Ensure we use consistent relative path
+ abs_path = self.io.root_abs_path(file_path) # Assuming io has this helper or similar
+ rel_path = self.get_rel_fname(abs_path) # Ensure we use consistent relative path
tags = self.get_tags(abs_path, rel_path)
if not tags:
@@ -319,7 +362,7 @@ def get_symbol_definition_location(self, file_path, symbol_name):
for tag in tags:
# Check if it's a definition and the name matches
if tag.kind == "def" and tag.name == symbol_name:
- # Ensure we have valid location info
+ # Ensure we have valid location info
if tag.start_line is not None and tag.end_line is not None and tag.start_line >= 0:
definitions.append(tag)
@@ -327,14 +370,20 @@ def get_symbol_definition_location(self, file_path, symbol_name):
# Check if it exists as a non-definition tag
non_defs = [tag for tag in tags if tag.name == symbol_name and tag.kind != "def"]
if non_defs:
- raise ToolError(f"Symbol '{symbol_name}' found in '{file_path}', but not as a unique definition (found as {non_defs[0].kind}).")
+ raise ToolError(
+ f"Symbol '{symbol_name}' found in '{file_path}', but not as a unique definition"
+ f" (found as {non_defs[0].kind})."
+ )
else:
raise ToolError(f"Symbol '{symbol_name}' definition not found in '{file_path}'.")
if len(definitions) > 1:
# Provide more context about ambiguity if possible
- lines = sorted([d.start_line + 1 for d in definitions]) # 1-based for user message
- raise ToolError(f"Symbol '{symbol_name}' is ambiguous in '{file_path}'. Found definitions on lines: {', '.join(map(str, lines))}.")
+ lines = sorted([d.start_line + 1 for d in definitions]) # 1-based for user message
+ raise ToolError(
+ f"Symbol '{symbol_name}' is ambiguous in '{file_path}'. Found definitions on lines:"
+ f" {', '.join(map(str, lines))}."
+ )
# Unique definition found
definition_tag = definitions[0]
@@ -386,7 +435,7 @@ def get_tags_raw(self, fname, rel_fname):
saw.add(kind)
# Extract specific kind from the tag, e.g., 'function' from 'name.definition.function'
- specific_kind = tag.split('.')[-1] if '.' in tag else None
+ specific_kind = tag.split(".")[-1] if "." in tag else None
result = Tag(
rel_fname=rel_fname,
@@ -427,8 +476,8 @@ def get_tags_raw(self, fname, rel_fname):
fname=fname,
name=token,
kind="ref",
- specific_kind="name", # Default for pygments fallback
- line=-1, # Pygments doesn't give precise locations easily
+ specific_kind="name", # Default for pygments fallback
+ line=-1, # Pygments doesn't give precise locations easily
start_line=-1,
end_line=-1,
start_byte=-1,
@@ -936,4 +985,4 @@ def get_supported_languages_md():
repo_map = rm.get_ranked_tags_map(chat_fnames, other_fnames)
dump(len(repo_map))
- print(repo_map)
\ No newline at end of file
+ print(repo_map)
diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml
index 2dcf2ce979a..7bcfe080654 100644
--- a/aider/resources/model-settings.yml
+++ b/aider/resources/model-settings.yml
@@ -1825,190 +1825,222 @@
weak_model_name: gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: gpt-5-2025-08-07
edit_format: diff
weak_model_name: gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: gpt-5-mini
edit_format: diff
weak_model_name: gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: gpt-5-mini-2025-08-07
edit_format: diff
weak_model_name: gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: gpt-5-nano
edit_format: diff
weak_model_name: gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: gpt-5-nano-2025-08-07
edit_format: diff
weak_model_name: gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: gpt-5-chat
edit_format: diff
weak_model_name: gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: gpt-5-chat-latest
edit_format: diff
weak_model_name: gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: azure/gpt-5
edit_format: diff
weak_model_name: azure/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: azure/gpt-5-2025-08-07
edit_format: diff
weak_model_name: azure/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: azure/gpt-5-mini
edit_format: diff
weak_model_name: azure/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: azure/gpt-5-mini-2025-08-07
edit_format: diff
weak_model_name: azure/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: azure/gpt-5-nano
edit_format: diff
weak_model_name: azure/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: azure/gpt-5-nano-2025-08-07
edit_format: diff
weak_model_name: azure/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: azure/gpt-5-chat
edit_format: diff
weak_model_name: azure/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: azure/gpt-5-chat-latest
edit_format: diff
weak_model_name: azure/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openai/gpt-5
edit_format: diff
weak_model_name: openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openai/gpt-5-2025-08-07
edit_format: diff
weak_model_name: openai/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openai/gpt-5-mini
edit_format: diff
weak_model_name: openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openai/gpt-5-mini-2025-08-07
edit_format: diff
weak_model_name: openai/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openai/gpt-5-nano
edit_format: diff
weak_model_name: openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openai/gpt-5-nano-2025-08-07
edit_format: diff
weak_model_name: openai/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openai/gpt-5-chat
edit_format: diff
weak_model_name: openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openai/gpt-5-chat-latest
edit_format: diff
weak_model_name: openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openrouter/openai/gpt-5
edit_format: diff
weak_model_name: openrouter/openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openrouter/openai/gpt-5-2025-08-07
edit_format: diff
weak_model_name: openrouter/openai/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openrouter/openai/gpt-5-mini
edit_format: diff
weak_model_name: openrouter/openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openrouter/openai/gpt-5-mini-2025-08-07
edit_format: diff
weak_model_name: openrouter/openai/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openrouter/openai/gpt-5-nano
edit_format: diff
weak_model_name: openrouter/openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openrouter/openai/gpt-5-nano-2025-08-07
edit_format: diff
weak_model_name: openrouter/openai/gpt-5-nano-2025-08-07
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openrouter/openai/gpt-5-chat
edit_format: diff
weak_model_name: openrouter/openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
- name: openrouter/openai/gpt-5-chat-latest
edit_format: diff
weak_model_name: openrouter/openai/gpt-5-nano
use_repo_map: true
use_temperature: false
+ accepts_settings: ["reasoning_effort"]
diff --git a/aider/scrape.py b/aider/scrape.py
index 3d5cfa86f35..7ef78285ac4 100755
--- a/aider/scrape.py
+++ b/aider/scrape.py
@@ -169,7 +169,7 @@ def scrape_with_playwright(self, url):
try:
response = page.goto(url, wait_until="networkidle", timeout=5000)
except PlaywrightTimeoutError:
- print(f"Page didn't quiesce, scraping content anyway: {url}")
+ self.print_error(f"Page didn't quiesce, scraping content anyway: {url}")
response = None
except PlaywrightError as e:
self.print_error(f"Error navigating to {url}: {str(e)}")
diff --git a/aider/tools/__init__.py b/aider/tools/__init__.py
index f173e68cc3e..a1b22d3e8fa 100644
--- a/aider/tools/__init__.py
+++ b/aider/tools/__init__.py
@@ -1,35 +1,26 @@
# flake8: noqa: F401
# Import tool functions into the aider.tools namespace
-# Discovery
+from .command import _execute_command
+from .command_interactive import _execute_command_interactive
+from .delete_block import _execute_delete_block
+from .delete_line import _execute_delete_line
+from .delete_lines import _execute_delete_lines
+from .extract_lines import _execute_extract_lines
+from .indent_lines import _execute_indent_lines
+from .insert_block import _execute_insert_block
+from .list_changes import _execute_list_changes
from .ls import execute_ls
-from .view_files_at_glob import execute_view_files_at_glob
-from .view_files_matching import execute_view_files_matching
-from .view_files_with_symbol import _execute_view_files_with_symbol
-
-# Context Management
-from .view import execute_view
-from .remove import _execute_remove
from .make_editable import _execute_make_editable
from .make_readonly import _execute_make_readonly
-from .show_numbered_context import execute_show_numbered_context
-
-# Granular Editing
-from .replace_text import _execute_replace_text
+from .remove import _execute_remove
from .replace_all import _execute_replace_all
-from .insert_block import _execute_insert_block
-from .delete_block import _execute_delete_block
from .replace_line import _execute_replace_line
from .replace_lines import _execute_replace_lines
-from .indent_lines import _execute_indent_lines
-from .extract_lines import _execute_extract_lines
-from .delete_line import _execute_delete_line
-from .delete_lines import _execute_delete_lines
-
-# Change Tracking
+from .replace_text import _execute_replace_text
+from .show_numbered_context import execute_show_numbered_context
from .undo_change import _execute_undo_change
-from .list_changes import _execute_list_changes
-
-# Other
-from .command import _execute_command
-from .command_interactive import _execute_command_interactive
+from .view import execute_view
+from .view_files_at_glob import execute_view_files_at_glob
+from .view_files_matching import execute_view_files_matching
+from .view_files_with_symbol import _execute_view_files_with_symbol
diff --git a/aider/tools/command.py b/aider/tools/command.py
index b9692b205c5..0435f39dcd2 100644
--- a/aider/tools/command.py
+++ b/aider/tools/command.py
@@ -1,6 +1,7 @@
# Import necessary functions
from aider.run_cmd import run_cmd_subprocess
+
def _execute_command(coder, command_string):
"""
Execute a non-interactive shell command after user confirmation.
@@ -12,8 +13,8 @@ def _execute_command(coder, command_string):
confirmed = coder.io.confirm_ask(
"Allow execution of this command?",
subject=command_string,
- explicit_yes_required=True, # Require explicit 'yes' or 'always'
- allow_never=True # Enable the 'Always' option
+ explicit_yes_required=True, # Require explicit 'yes' or 'always'
+ allow_never=True, # Enable the 'Always' option
)
if not confirmed:
@@ -27,9 +28,7 @@ def _execute_command(coder, command_string):
# Use run_cmd_subprocess for non-interactive execution
exit_status, combined_output = run_cmd_subprocess(
- command_string,
- verbose=coder.verbose,
- cwd=coder.root # Execute in the project root
+ command_string, verbose=coder.verbose, cwd=coder.root # Execute in the project root
)
# Format the output for the result message, include more content
@@ -38,7 +37,11 @@ def _execute_command(coder, command_string):
output_limit = coder.large_file_token_threshold
if len(output_content) > output_limit:
# Truncate and add a clear message using the constant value
- output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)"
+ output_content = (
+ output_content[:output_limit]
+ + f"\n... (output truncated at {output_limit} characters, based on"
+ " large_file_token_threshold)"
+ )
if exit_status == 0:
return f"Shell command executed successfully (exit code 0). Output:\n{output_content}"
@@ -46,7 +49,9 @@ def _execute_command(coder, command_string):
return f"Shell command failed with exit code {exit_status}. Output:\n{output_content}"
except Exception as e:
- coder.io.tool_error(f"Error executing non-interactive shell command '{command_string}': {str(e)}")
+ coder.io.tool_error(
+ f"Error executing non-interactive shell command '{command_string}': {str(e)}"
+ )
# Optionally include traceback for debugging if verbose
# if coder.verbose:
# coder.io.tool_error(traceback.format_exc())
diff --git a/aider/tools/command_interactive.py b/aider/tools/command_interactive.py
index e71e3f88e08..a25c001c77c 100644
--- a/aider/tools/command_interactive.py
+++ b/aider/tools/command_interactive.py
@@ -1,6 +1,7 @@
# Import necessary functions
from aider.run_cmd import run_cmd
+
def _execute_command_interactive(coder, command_string):
"""
Execute an interactive shell command using run_cmd (which uses pexpect/PTY).
@@ -12,9 +13,9 @@ def _execute_command_interactive(coder, command_string):
# Use run_cmd which handles PTY logic
exit_status, combined_output = run_cmd(
command_string,
- verbose=coder.verbose, # Pass verbose flag
- error_print=coder.io.tool_error, # Use io for error printing
- cwd=coder.root # Execute in the project root
+ verbose=coder.verbose, # Pass verbose flag
+ error_print=coder.io.tool_error, # Use io for error printing
+ cwd=coder.root, # Execute in the project root
)
coder.io.tool_output(">>> Interactive command finished <<<")
@@ -25,15 +26,27 @@ def _execute_command_interactive(coder, command_string):
output_limit = coder.large_file_token_threshold
if len(output_content) > output_limit:
# Truncate and add a clear message using the constant value
- output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)"
+ output_content = (
+ output_content[:output_limit]
+ + f"\n... (output truncated at {output_limit} characters, based on"
+ " large_file_token_threshold)"
+ )
if exit_status == 0:
- return f"Interactive command finished successfully (exit code 0). Output:\n{output_content}"
+ return (
+ "Interactive command finished successfully (exit code 0)."
+ f" Output:\n{output_content}"
+ )
else:
- return f"Interactive command finished with exit code {exit_status}. Output:\n{output_content}"
+ return (
+ f"Interactive command finished with exit code {exit_status}."
+ f" Output:\n{output_content}"
+ )
except Exception as e:
- coder.io.tool_error(f"Error executing interactive shell command '{command_string}': {str(e)}")
+ coder.io.tool_error(
+ f"Error executing interactive shell command '{command_string}': {str(e)}"
+ )
# Optionally include traceback for debugging if verbose
# if coder.verbose:
# coder.io.tool_error(traceback.format_exc())
diff --git a/aider/tools/delete_block.py b/aider/tools/delete_block.py
index 5ab2d6d37af..cbaeedffbc7 100644
--- a/aider/tools/delete_block.py
+++ b/aider/tools/delete_block.py
@@ -1,17 +1,27 @@
-import traceback
from .tool_utils import (
ToolError,
- validate_file_for_edit,
- find_pattern_indices,
- select_occurrence_index,
- determine_line_range,
apply_change,
- handle_tool_error,
+ determine_line_range,
+ find_pattern_indices,
format_tool_result,
generate_unified_diff_snippet,
+ handle_tool_error,
+ select_occurrence_index,
+ validate_file_for_edit,
)
-def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, change_id=None, dry_run=False):
+
+def _execute_delete_block(
+ coder,
+ file_path,
+ start_pattern,
+ end_pattern=None,
+ line_count=None,
+ near_context=None,
+ occurrence=1,
+ change_id=None,
+ dry_run=False,
+):
"""
Delete a block of text between start_pattern and end_pattern (inclusive).
Uses utility functions for validation, finding lines, and applying changes.
@@ -37,18 +47,18 @@ def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, lin
start_pattern_line_index=start_line_idx,
end_pattern=end_pattern,
line_count=line_count,
- target_symbol=None, # DeleteBlock uses patterns, not symbols
- pattern_desc=pattern_desc
+ target_symbol=None, # DeleteBlock uses patterns, not symbols
+ pattern_desc=pattern_desc,
)
# 4. Prepare the deletion
- deleted_lines = lines[start_line:end_line+1]
- new_lines = lines[:start_line] + lines[end_line+1:]
- new_content = '\n'.join(new_lines)
+ deleted_lines = lines[start_line : end_line + 1]
+ new_lines = lines[:start_line] + lines[end_line + 1 :]
+ new_content = "\n".join(new_lines)
if original_content == new_content:
- coder.io.tool_warning(f"No changes made: deletion would not change file")
- return f"Warning: No changes made (deletion would not change file)"
+ coder.io.tool_warning("No changes made: deletion would not change file")
+ return "Warning: No changes made (deletion would not change file)"
# 5. Generate diff for feedback
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
@@ -58,26 +68,46 @@ def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, lin
# 6. Handle dry run
if dry_run:
- dry_run_message = f"Dry run: Would delete {num_deleted} lines ({start_line+1}-{end_line+1}) based on {occurrence_str}start pattern '{start_pattern}' in {file_path}."
- return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
+ dry_run_message = (
+ f"Dry run: Would delete {num_deleted} lines ({start_line + 1}-{end_line + 1}) based"
+ f" on {occurrence_str}start pattern '{start_pattern}' in {file_path}."
+ )
+ return format_tool_result(
+ coder,
+ tool_name,
+ "",
+ dry_run=True,
+ dry_run_message=dry_run_message,
+ diff_snippet=diff_snippet,
+ )
# 7. Apply Change (Not dry run)
metadata = {
- 'start_line': start_line + 1,
- 'end_line': end_line + 1,
- 'start_pattern': start_pattern,
- 'end_pattern': end_pattern,
- 'line_count': line_count,
- 'near_context': near_context,
- 'occurrence': occurrence,
- 'deleted_content': '\n'.join(deleted_lines)
+ "start_line": start_line + 1,
+ "end_line": end_line + 1,
+ "start_pattern": start_pattern,
+ "end_pattern": end_pattern,
+ "line_count": line_count,
+ "near_context": near_context,
+ "occurrence": occurrence,
+ "deleted_content": "\n".join(deleted_lines),
}
final_change_id = apply_change(
- coder, abs_path, rel_path, original_content, new_content, 'deleteblock', metadata, change_id
+ coder,
+ abs_path,
+ rel_path,
+ original_content,
+ new_content,
+ "deleteblock",
+ metadata,
+ change_id,
)
# 8. Format and return result, adding line range to success message
- success_message = f"Deleted {num_deleted} lines ({start_line+1}-{end_line+1}) (from {occurrence_str}start pattern) in {file_path}"
+ success_message = (
+ f"Deleted {num_deleted} lines ({start_line + 1}-{end_line + 1}) (from"
+ f" {occurrence_str}start pattern) in {file_path}"
+ )
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
@@ -87,4 +117,4 @@ def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, lin
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
- return handle_tool_error(coder, tool_name, e)
\ No newline at end of file
+ return handle_tool_error(coder, tool_name, e)
diff --git a/aider/tools/delete_line.py b/aider/tools/delete_line.py
index 66c3f319987..c1e8ed6b299 100644
--- a/aider/tools/delete_line.py
+++ b/aider/tools/delete_line.py
@@ -1,6 +1,13 @@
import os
-import traceback
-from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change
+
+from .tool_utils import (
+ ToolError,
+ apply_change,
+ format_tool_result,
+ generate_unified_diff_snippet,
+ handle_tool_error,
+)
+
def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run=False):
"""
@@ -46,17 +53,19 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run=
line_num_int = int(line_number)
if line_num_int < 1 or line_num_int > len(lines):
raise ToolError(f"Line number {line_num_int} is out of range (1-{len(lines)})")
- line_idx = line_num_int - 1 # Convert to 0-based index
+ line_idx = line_num_int - 1 # Convert to 0-based index
except ValueError:
raise ToolError(f"Invalid line_number value: '{line_number}'. Must be an integer.")
# Prepare the deletion
deleted_line = lines[line_idx]
- new_lines = lines[:line_idx] + lines[line_idx+1:]
- new_content = '\n'.join(new_lines)
+ new_lines = lines[:line_idx] + lines[line_idx + 1 :]
+ new_content = "\n".join(new_lines)
if original_content == new_content:
- coder.io.tool_warning(f"No changes made: deleting line {line_num_int} would not change file")
+ coder.io.tool_warning(
+ f"No changes made: deleting line {line_num_int} would not change file"
+ )
return f"Warning: No changes made (deleting line {line_num_int} would not change file)"
# Generate diff snippet
@@ -65,15 +74,26 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run=
# Handle dry run
if dry_run:
dry_run_message = f"Dry run: Would delete line {line_num_int} in {file_path}"
- return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
+ return format_tool_result(
+ coder,
+ tool_name,
+ "",
+ dry_run=True,
+ dry_run_message=dry_run_message,
+ diff_snippet=diff_snippet,
+ )
# --- Apply Change (Not dry run) ---
- metadata = {
- 'line_number': line_num_int,
- 'deleted_content': deleted_line
- }
+ metadata = {"line_number": line_num_int, "deleted_content": deleted_line}
final_change_id = apply_change(
- coder, abs_path, rel_path, original_content, new_content, 'deleteline', metadata, change_id
+ coder,
+ abs_path,
+ rel_path,
+ original_content,
+ new_content,
+ "deleteline",
+ metadata,
+ change_id,
)
coder.aider_edited_files.add(rel_path)
@@ -89,4 +109,4 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run=
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
- return handle_tool_error(coder, tool_name, e)
\ No newline at end of file
+ return handle_tool_error(coder, tool_name, e)
diff --git a/aider/tools/delete_lines.py b/aider/tools/delete_lines.py
index 876897ef4b1..0aa33ba8833 100644
--- a/aider/tools/delete_lines.py
+++ b/aider/tools/delete_lines.py
@@ -1,6 +1,13 @@
import os
-import traceback
-from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change
+
+from .tool_utils import (
+ ToolError,
+ apply_change,
+ format_tool_result,
+ generate_unified_diff_snippet,
+ handle_tool_error,
+)
+
def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None, dry_run=False):
"""
@@ -51,45 +58,73 @@ def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None
if end_line_int < 1 or end_line_int > len(lines):
raise ToolError(f"End line {end_line_int} is out of range (1-{len(lines)})")
if start_line_int > end_line_int:
- raise ToolError(f"Start line {start_line_int} cannot be after end line {end_line_int}")
+ raise ToolError(
+ f"Start line {start_line_int} cannot be after end line {end_line_int}"
+ )
- start_idx = start_line_int - 1 # Convert to 0-based index
- end_idx = end_line_int - 1 # Convert to 0-based index
+ start_idx = start_line_int - 1 # Convert to 0-based index
+ end_idx = end_line_int - 1 # Convert to 0-based index
except ValueError:
- raise ToolError(f"Invalid line numbers: '{start_line}', '{end_line}'. Must be integers.")
+ raise ToolError(
+ f"Invalid line numbers: '{start_line}', '{end_line}'. Must be integers."
+ )
# Prepare the deletion
- deleted_lines = lines[start_idx:end_idx+1]
- new_lines = lines[:start_idx] + lines[end_idx+1:]
- new_content = '\n'.join(new_lines)
+ deleted_lines = lines[start_idx : end_idx + 1]
+ new_lines = lines[:start_idx] + lines[end_idx + 1 :]
+ new_content = "\n".join(new_lines)
if original_content == new_content:
- coder.io.tool_warning(f"No changes made: deleting lines {start_line_int}-{end_line_int} would not change file")
- return f"Warning: No changes made (deleting lines {start_line_int}-{end_line_int} would not change file)"
+ coder.io.tool_warning(
+ f"No changes made: deleting lines {start_line_int}-{end_line_int} would not change"
+ " file"
+ )
+ return (
+ f"Warning: No changes made (deleting lines {start_line_int}-{end_line_int} would"
+ " not change file)"
+ )
# Generate diff snippet
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
# Handle dry run
if dry_run:
- dry_run_message = f"Dry run: Would delete lines {start_line_int}-{end_line_int} in {file_path}"
- return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
+ dry_run_message = (
+ f"Dry run: Would delete lines {start_line_int}-{end_line_int} in {file_path}"
+ )
+ return format_tool_result(
+ coder,
+ tool_name,
+ "",
+ dry_run=True,
+ dry_run_message=dry_run_message,
+ diff_snippet=diff_snippet,
+ )
# --- Apply Change (Not dry run) ---
metadata = {
- 'start_line': start_line_int,
- 'end_line': end_line_int,
- 'deleted_content': '\n'.join(deleted_lines)
+ "start_line": start_line_int,
+ "end_line": end_line_int,
+ "deleted_content": "\n".join(deleted_lines),
}
-
+
final_change_id = apply_change(
- coder, abs_path, rel_path, original_content, new_content, 'deletelines', metadata, change_id
+ coder,
+ abs_path,
+ rel_path,
+ original_content,
+ new_content,
+ "deletelines",
+ metadata,
+ change_id,
)
coder.aider_edited_files.add(rel_path)
num_deleted = end_idx - start_idx + 1
# Format and return result
- success_message = f"Deleted {num_deleted} lines ({start_line_int}-{end_line_int}) in {file_path}"
+ success_message = (
+ f"Deleted {num_deleted} lines ({start_line_int}-{end_line_int}) in {file_path}"
+ )
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
@@ -99,4 +134,4 @@ def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
- return handle_tool_error(coder, tool_name, e)
\ No newline at end of file
+ return handle_tool_error(coder, tool_name, e)
diff --git a/aider/tools/extract_lines.py b/aider/tools/extract_lines.py
index a9a318e27aa..c18a231ebbc 100644
--- a/aider/tools/extract_lines.py
+++ b/aider/tools/extract_lines.py
@@ -1,8 +1,20 @@
import os
import traceback
+
from .tool_utils import generate_unified_diff_snippet
-def _execute_extract_lines(coder, source_file_path, target_file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, dry_run=False):
+
+def _execute_extract_lines(
+ coder,
+ source_file_path,
+ target_file_path,
+ start_pattern,
+ end_pattern=None,
+ line_count=None,
+ near_context=None,
+ occurrence=1,
+ dry_run=False,
+):
"""
Extract a range of lines from a source file and move them to a target file.
@@ -26,15 +38,17 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt
if not os.path.isfile(abs_source_path):
coder.io.tool_error(f"Source file '{source_file_path}' not found")
- return f"Error: Source file not found"
+ return "Error: Source file not found"
if abs_source_path not in coder.abs_fnames:
if abs_source_path in coder.abs_read_only_fnames:
- coder.io.tool_error(f"Source file '{source_file_path}' is read-only. Use MakeEditable first.")
- return f"Error: Source file is read-only. Use MakeEditable first."
+ coder.io.tool_error(
+ f"Source file '{source_file_path}' is read-only. Use MakeEditable first."
+ )
+ return "Error: Source file is read-only. Use MakeEditable first."
else:
coder.io.tool_error(f"Source file '{source_file_path}' not in context")
- return f"Error: Source file not in context"
+ return "Error: Source file not in context"
# --- Validate Target File ---
abs_target_path = coder.abs_root_path(target_file_path)
@@ -45,17 +59,24 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt
if target_exists and not target_is_editable:
if target_is_readonly:
- coder.io.tool_error(f"Target file '{target_file_path}' exists but is read-only. Use MakeEditable first.")
- return f"Error: Target file exists but is read-only. Use MakeEditable first."
+ coder.io.tool_error(
+ f"Target file '{target_file_path}' exists but is read-only. Use MakeEditable"
+ " first."
+ )
+ return "Error: Target file exists but is read-only. Use MakeEditable first."
else:
# This case shouldn't happen if file exists, but handle defensively
- coder.io.tool_error(f"Target file '{target_file_path}' exists but is not in context. Add it first.")
- return f"Error: Target file exists but is not in context."
+ coder.io.tool_error(
+ f"Target file '{target_file_path}' exists but is not in context. Add it first."
+ )
+ return "Error: Target file exists but is not in context."
# --- Read Source Content ---
source_content = coder.io.read_text(abs_source_path)
if source_content is None:
- coder.io.tool_error(f"Could not read source file '{source_file_path}' before ExtractLines operation.")
+ coder.io.tool_error(
+ f"Could not read source file '{source_file_path}' before ExtractLines operation."
+ )
return f"Error: Could not read source file '{source_file_path}'"
# --- Find Extraction Range ---
@@ -80,7 +101,8 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt
if not start_pattern_line_indices:
err_msg = f"Start pattern '{start_pattern}' not found"
- if near_context: err_msg += f" near context '{near_context}'"
+ if near_context:
+ err_msg += f" near context '{near_context}'"
err_msg += f" in source file '{source_file_path}'."
coder.io.tool_error(err_msg)
return f"Error: {err_msg}"
@@ -93,8 +115,12 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt
elif occurrence > 0 and occurrence <= num_occurrences:
target_idx = occurrence - 1
else:
- err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences"
- if near_context: err_msg += f" near '{near_context}'"
+ err_msg = (
+ f"Occurrence number {occurrence} is out of range for start pattern"
+ f" '{start_pattern}'. Found {num_occurrences} occurrences"
+ )
+ if near_context:
+ err_msg += f" near '{near_context}'"
err_msg += f" in '{source_file_path}'."
coder.io.tool_error(err_msg)
return f"Error: {err_msg}"
@@ -112,24 +138,30 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt
end_line = i
break
if end_line == -1:
- err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{source_file_path}'."
+ err_msg = (
+ f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern"
+ f" '{start_pattern}' (line {start_line + 1}) in '{source_file_path}'."
+ )
coder.io.tool_error(err_msg)
return f"Error: {err_msg}"
elif line_count:
try:
line_count = int(line_count)
- if line_count <= 0: raise ValueError("Line count must be positive")
+ if line_count <= 0:
+ raise ValueError("Line count must be positive")
end_line = min(start_line + line_count - 1, len(source_lines) - 1)
except ValueError:
- coder.io.tool_error(f"Invalid line_count value: '{line_count}'. Must be a positive integer.")
+ coder.io.tool_error(
+ f"Invalid line_count value: '{line_count}'. Must be a positive integer."
+ )
return f"Error: Invalid line_count value '{line_count}'"
else:
- end_line = start_line # Extract just the start line if no end specified
+ end_line = start_line # Extract just the start line if no end specified
# --- Prepare Content Changes ---
- extracted_lines = source_lines[start_line:end_line+1]
- new_source_lines = source_lines[:start_line] + source_lines[end_line+1:]
- new_source_content = '\n'.join(new_source_lines)
+ extracted_lines = source_lines[start_line : end_line + 1]
+ new_source_lines = source_lines[:start_line] + source_lines[end_line + 1 :]
+ new_source_content = "\n".join(new_source_lines)
target_content = ""
if target_exists:
@@ -137,29 +169,37 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt
if target_content is None:
coder.io.tool_error(f"Could not read existing target file '{target_file_path}'.")
return f"Error: Could not read target file '{target_file_path}'"
- original_target_content = target_content # For tracking
+ original_target_content = target_content # For tracking
# Append extracted lines to target content, ensuring a newline if target wasn't empty
- extracted_block = '\n'.join(extracted_lines)
- if target_content and not target_content.endswith('\n'):
- target_content += '\n' # Add newline before appending if needed
+ extracted_block = "\n".join(extracted_lines)
+ if target_content and not target_content.endswith("\n"):
+ target_content += "\n" # Add newline before appending if needed
new_target_content = target_content + extracted_block
# --- Generate Diffs ---
- source_diff_snippet = generate_unified_diff_snippet(original_source_content, new_source_content, rel_source_path)
+ source_diff_snippet = generate_unified_diff_snippet(
+ original_source_content, new_source_content, rel_source_path
+ )
target_insertion_line = len(target_content.splitlines()) if target_content else 0
- target_diff_snippet = generate_unified_diff_snippet(original_target_content, new_target_content, rel_target_path)
+ target_diff_snippet = generate_unified_diff_snippet(
+ original_target_content, new_target_content, rel_target_path
+ )
# --- Handle Dry Run ---
if dry_run:
num_extracted = end_line - start_line + 1
target_action = "append to" if target_exists else "create"
- coder.io.tool_output(f"Dry run: Would extract {num_extracted} lines (from {occurrence_str}start pattern '{start_pattern}') in {source_file_path} and {target_action} {target_file_path}")
+ coder.io.tool_output(
+ f"Dry run: Would extract {num_extracted} lines (from {occurrence_str}start pattern"
+ f" '{start_pattern}') in {source_file_path} and {target_action} {target_file_path}"
+ )
# Provide more informative dry run response with diffs
return (
- f"Dry run: Would extract {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n"
- f"Source Diff (Deletion):\n{source_diff_snippet}\n"
- f"Target Diff (Insertion):\n{target_diff_snippet}"
+ f"Dry run: Would extract {num_extracted} lines from {rel_source_path} and"
+ f" {target_action} {rel_target_path}.\nSource Diff"
+ f" (Deletion):\n{source_diff_snippet}\nTarget Diff"
+ f" (Insertion):\n{target_diff_snippet}"
)
# --- Apply Changes (Not Dry Run) ---
@@ -171,28 +211,38 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt
target_change_id = "TRACKING_FAILED"
try:
source_metadata = {
- 'start_line': start_line + 1, 'end_line': end_line + 1,
- 'start_pattern': start_pattern, 'end_pattern': end_pattern, 'line_count': line_count,
- 'near_context': near_context, 'occurrence': occurrence,
- 'extracted_content': extracted_block, 'target_file': rel_target_path
+ "start_line": start_line + 1,
+ "end_line": end_line + 1,
+ "start_pattern": start_pattern,
+ "end_pattern": end_pattern,
+ "line_count": line_count,
+ "near_context": near_context,
+ "occurrence": occurrence,
+ "extracted_content": extracted_block,
+ "target_file": rel_target_path,
}
source_change_id = coder.change_tracker.track_change(
- file_path=rel_source_path, change_type='extractlines_source',
- original_content=original_source_content, new_content=new_source_content,
- metadata=source_metadata
+ file_path=rel_source_path,
+ change_type="extractlines_source",
+ original_content=original_source_content,
+ new_content=new_source_content,
+ metadata=source_metadata,
)
except Exception as track_e:
coder.io.tool_error(f"Error tracking source change for ExtractLines: {track_e}")
try:
target_metadata = {
- 'insertion_line': target_insertion_line + 1,
- 'inserted_content': extracted_block, 'source_file': rel_source_path
+ "insertion_line": target_insertion_line + 1,
+ "inserted_content": extracted_block,
+ "source_file": rel_source_path,
}
target_change_id = coder.change_tracker.track_change(
- file_path=rel_target_path, change_type='extractlines_target',
- original_content=original_target_content, new_content=new_target_content,
- metadata=target_metadata
+ file_path=rel_target_path,
+ change_type="extractlines_target",
+ original_content=original_target_content,
+ new_content=new_target_content,
+ metadata=target_metadata,
)
except Exception as track_e:
coder.io.tool_error(f"Error tracking target change for ExtractLines: {track_e}")
@@ -208,14 +258,19 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt
# --- Return Result ---
num_extracted = end_line - start_line + 1
target_action = "appended to" if target_exists else "created"
- coder.io.tool_output(f"✅ Extracted {num_extracted} lines from {rel_source_path} (change_id: {source_change_id}) and {target_action} {rel_target_path} (change_id: {target_change_id})")
+ coder.io.tool_output(
+ f"✅ Extracted {num_extracted} lines from {rel_source_path} (change_id:"
+ f" {source_change_id}) and {target_action} {rel_target_path} (change_id:"
+ f" {target_change_id})"
+ )
# Provide more informative success response with change IDs and diffs
return (
- f"Successfully extracted {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n"
- f"Source Change ID: {source_change_id}\nSource Diff (Deletion):\n{source_diff_snippet}\n"
- f"Target Change ID: {target_change_id}\nTarget Diff (Insertion):\n{target_diff_snippet}"
+ f"Successfully extracted {num_extracted} lines from {rel_source_path} and"
+ f" {target_action} {rel_target_path}.\nSource Change ID: {source_change_id}\nSource"
+ f" Diff (Deletion):\n{source_diff_snippet}\nTarget Change ID:"
+ f" {target_change_id}\nTarget Diff (Insertion):\n{target_diff_snippet}"
)
except Exception as e:
coder.io.tool_error(f"Error in ExtractLines: {str(e)}\n{traceback.format_exc()}")
- return f"Error: {str(e)}"
\ No newline at end of file
+ return f"Error: {str(e)}"
diff --git a/aider/tools/grep.py b/aider/tools/grep.py
index 26f9581e668..e28936ef14e 100644
--- a/aider/tools/grep.py
+++ b/aider/tools/grep.py
@@ -1,20 +1,32 @@
import shlex
import shutil
from pathlib import Path
+
from aider.run_cmd import run_cmd_subprocess
+
def _find_search_tool():
"""Find the best available command-line search tool (rg, ag, grep)."""
- if shutil.which('rg'):
- return 'rg', shutil.which('rg')
- elif shutil.which('ag'):
- return 'ag', shutil.which('ag')
- elif shutil.which('grep'):
- return 'grep', shutil.which('grep')
+ if shutil.which("rg"):
+ return "rg", shutil.which("rg")
+ elif shutil.which("ag"):
+ return "ag", shutil.which("ag")
+ elif shutil.which("grep"):
+ return "grep", shutil.which("grep")
else:
return None, None
-def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=False, case_insensitive=False, context_before=5, context_after=5):
+
+def _execute_grep(
+ coder,
+ pattern,
+ file_pattern="*",
+ directory=".",
+ use_regex=False,
+ case_insensitive=False,
+ context_before=5,
+ context_after=5,
+):
"""
Search for lines matching a pattern in files within the project repository.
Uses rg (ripgrep), ag (the silver searcher), or grep, whichever is available.
@@ -52,7 +64,7 @@ def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=Fal
cmd_args = [tool_path]
# Common options or tool-specific equivalents
- if tool_name in ['rg', 'grep']:
+ if tool_name in ["rg", "grep"]:
cmd_args.append("-n") # Line numbers for rg and grep
# ag includes line numbers by default
@@ -66,37 +78,39 @@ def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=Fal
# Case sensitivity
if case_insensitive:
- cmd_args.append("-i") # Add case-insensitivity flag for all tools
+ cmd_args.append("-i") # Add case-insensitivity flag for all tools
# Pattern type (regex vs fixed string)
if use_regex:
- if tool_name == 'grep':
- cmd_args.append("-E") # Use extended regex for grep
+ if tool_name == "grep":
+ cmd_args.append("-E") # Use extended regex for grep
# rg and ag use regex by default, no flag needed for basic ERE
else:
- if tool_name == 'rg':
- cmd_args.append("-F") # Fixed strings for rg
- elif tool_name == 'ag':
- cmd_args.append("-Q") # Literal/fixed strings for ag
- elif tool_name == 'grep':
- cmd_args.append("-F") # Fixed strings for grep
+ if tool_name == "rg":
+ cmd_args.append("-F") # Fixed strings for rg
+ elif tool_name == "ag":
+ cmd_args.append("-Q") # Literal/fixed strings for ag
+ elif tool_name == "grep":
+ cmd_args.append("-F") # Fixed strings for grep
# File filtering
- if file_pattern != "*": # Avoid adding glob if it's the default '*' which might behave differently
- if tool_name == 'rg':
+ if (
+ file_pattern != "*"
+ ): # Avoid adding glob if it's the default '*' which might behave differently
+ if tool_name == "rg":
cmd_args.extend(["-g", file_pattern])
- elif tool_name == 'ag':
+ elif tool_name == "ag":
cmd_args.extend(["-G", file_pattern])
- elif tool_name == 'grep':
+ elif tool_name == "grep":
# grep needs recursive flag when filtering
cmd_args.append("-r")
cmd_args.append(f"--include={file_pattern}")
- elif tool_name == 'grep':
- # grep needs recursive flag even without include filter
- cmd_args.append("-r")
+ elif tool_name == "grep":
+ # grep needs recursive flag even without include filter
+ cmd_args.append("-r")
# Directory exclusion (rg and ag respect .gitignore/.git by default)
- if tool_name == 'grep':
+ if tool_name == "grep":
cmd_args.append("--exclude-dir=.git")
# Add pattern and directory path
@@ -110,9 +124,7 @@ def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=Fal
# Use run_cmd_subprocess for execution
# Note: rg, ag, and grep return 1 if no matches are found, which is not an error for this tool.
exit_status, combined_output = run_cmd_subprocess(
- command_string,
- verbose=coder.verbose,
- cwd=coder.root # Execute in the project root
+ command_string, verbose=coder.verbose, cwd=coder.root # Execute in the project root
)
# Format the output for the result message
@@ -121,15 +133,18 @@ def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=Fal
# Handle exit codes (consistent across rg, ag, grep)
if exit_status == 0:
# Limit output size if necessary
- max_output_lines = 50 # Consider making this configurable
+ max_output_lines = 50 # Consider making this configurable
output_lines = output_content.splitlines()
if len(output_lines) > max_output_lines:
truncated_output = "\n".join(output_lines[:max_output_lines])
- result_message = f"Found matches (truncated):\n```text\n{truncated_output}\n... ({len(output_lines) - max_output_lines} more lines)\n```"
+ result_message = (
+ f"Found matches (truncated):\n```text\n{truncated_output}\n..."
+ f" ({len(output_lines) - max_output_lines} more lines)\n```"
+ )
elif not output_content:
- # Should not happen if return code is 0, but handle defensively
- coder.io.tool_warning(f"{tool_name} returned 0 but produced no output.")
- result_message = "No matches found (unexpected)."
+ # Should not happen if return code is 0, but handle defensively
+ coder.io.tool_warning(f"{tool_name} returned 0 but produced no output.")
+ result_message = "No matches found (unexpected)."
else:
result_message = f"Found matches:\n```text\n{output_content}\n```"
return result_message
@@ -142,7 +157,7 @@ def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=Fal
error_message = f"{tool_name.capitalize()} command failed with exit code {exit_status}."
if output_content:
# Truncate error output as well if it's too long
- error_limit = 1000 # Example limit for error output
+ error_limit = 1000 # Example limit for error output
if len(output_content) > error_limit:
output_content = output_content[:error_limit] + "\n... (error output truncated)"
error_message += f" Output:\n{output_content}"
@@ -151,6 +166,6 @@ def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=Fal
except Exception as e:
# Add command_string to the error message if it's defined
- cmd_str_info = f"'{command_string}' " if 'command_string' in locals() else ""
+ cmd_str_info = f"'{command_string}' " if "command_string" in locals() else ""
coder.io.tool_error(f"Error executing {tool_name} command {cmd_str_info}: {str(e)}")
return f"Error executing {tool_name}: {str(e)}"
diff --git a/aider/tools/indent_lines.py b/aider/tools/indent_lines.py
index 928c08a5918..acb1e0bb17c 100644
--- a/aider/tools/indent_lines.py
+++ b/aider/tools/indent_lines.py
@@ -1,18 +1,28 @@
-import os
-import traceback
from .tool_utils import (
ToolError,
- validate_file_for_edit,
- find_pattern_indices,
- select_occurrence_index,
- determine_line_range,
apply_change,
- handle_tool_error,
+ determine_line_range,
+ find_pattern_indices,
format_tool_result,
generate_unified_diff_snippet,
+ handle_tool_error,
+ select_occurrence_index,
+ validate_file_for_edit,
)
-def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, near_context=None, occurrence=1, change_id=None, dry_run=False):
+
+def _execute_indent_lines(
+ coder,
+ file_path,
+ start_pattern,
+ end_pattern=None,
+ line_count=None,
+ indent_levels=1,
+ near_context=None,
+ occurrence=1,
+ change_id=None,
+ dry_run=False,
+):
"""
Indent or unindent a block of lines in a file using utility functions.
@@ -51,8 +61,8 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin
start_pattern_line_index=start_line_idx,
end_pattern=end_pattern,
line_count=line_count,
- target_symbol=None, # IndentLines uses patterns, not symbols
- pattern_desc=pattern_desc
+ target_symbol=None, # IndentLines uses patterns, not symbols
+ pattern_desc=pattern_desc,
)
# 4. Validate and prepare indentation
@@ -61,7 +71,7 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin
except ValueError:
raise ToolError(f"Invalid indent_levels value: '{indent_levels}'. Must be an integer.")
- indent_str = ' ' * 4 # Assume 4 spaces per level
+ indent_str = " " * 4 # Assume 4 spaces per level
modified_lines = list(lines)
# Apply indentation logic (core logic remains)
@@ -70,16 +80,16 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin
modified_lines[i] = (indent_str * indent_levels) + modified_lines[i]
elif indent_levels < 0:
spaces_to_remove = abs(indent_levels) * len(indent_str)
- current_leading_spaces = len(modified_lines[i]) - len(modified_lines[i].lstrip(' '))
+ current_leading_spaces = len(modified_lines[i]) - len(modified_lines[i].lstrip(" "))
actual_remove = min(spaces_to_remove, current_leading_spaces)
if actual_remove > 0:
modified_lines[i] = modified_lines[i][actual_remove:]
- new_content = '\n'.join(modified_lines)
+ new_content = "\n".join(modified_lines)
if original_content == new_content:
- coder.io.tool_warning(f"No changes made: indentation would not change file")
- return f"Warning: No changes made (indentation would not change file)"
+ coder.io.tool_warning("No changes made: indentation would not change file")
+ return "Warning: No changes made (indentation would not change file)"
# 5. Generate diff for feedback
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
@@ -92,27 +102,48 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin
# 6. Handle dry run
if dry_run:
- dry_run_message = f"Dry run: Would {action} {num_lines} lines ({start_line+1}-{end_line+1}) by {levels} {level_text} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}."
- return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
+ dry_run_message = (
+ f"Dry run: Would {action} {num_lines} lines ({start_line + 1}-{end_line + 1}) by"
+ f" {levels} {level_text} (based on {occurrence_str}start pattern '{start_pattern}')"
+ f" in {file_path}."
+ )
+ return format_tool_result(
+ coder,
+ tool_name,
+ "",
+ dry_run=True,
+ dry_run_message=dry_run_message,
+ diff_snippet=diff_snippet,
+ )
# 7. Apply Change (Not dry run)
metadata = {
- 'start_line': start_line + 1,
- 'end_line': end_line + 1,
- 'start_pattern': start_pattern,
- 'end_pattern': end_pattern,
- 'line_count': line_count,
- 'indent_levels': indent_levels,
- 'near_context': near_context,
- 'occurrence': occurrence,
+ "start_line": start_line + 1,
+ "end_line": end_line + 1,
+ "start_pattern": start_pattern,
+ "end_pattern": end_pattern,
+ "line_count": line_count,
+ "indent_levels": indent_levels,
+ "near_context": near_context,
+ "occurrence": occurrence,
}
final_change_id = apply_change(
- coder, abs_path, rel_path, original_content, new_content, 'indentlines', metadata, change_id
+ coder,
+ abs_path,
+ rel_path,
+ original_content,
+ new_content,
+ "indentlines",
+ metadata,
+ change_id,
)
# 8. Format and return result
action_past = "Indented" if indent_levels > 0 else "Unindented"
- success_message = f"{action_past} {num_lines} lines by {levels} {level_text} (from {occurrence_str}start pattern) in {file_path}"
+ success_message = (
+ f"{action_past} {num_lines} lines by {levels} {level_text} (from {occurrence_str}start"
+ f" pattern) in {file_path}"
+ )
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
@@ -121,4 +152,4 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
- return handle_tool_error(coder, tool_name, e)
\ No newline at end of file
+ return handle_tool_error(coder, tool_name, e)
diff --git a/aider/tools/insert_block.py b/aider/tools/insert_block.py
index 85e24dd4e9e..2c694c42a5b 100644
--- a/aider/tools/insert_block.py
+++ b/aider/tools/insert_block.py
@@ -1,23 +1,34 @@
-import os
import re
import traceback
+
from .tool_utils import (
ToolError,
- validate_file_for_edit,
- find_pattern_indices,
- select_occurrence_index,
apply_change,
- handle_tool_error,
+ find_pattern_indices,
format_tool_result,
generate_unified_diff_snippet,
+ handle_tool_error,
+ select_occurrence_index,
+ validate_file_for_edit,
)
-def _execute_insert_block(coder, file_path, content, after_pattern=None, before_pattern=None,
- occurrence=1, change_id=None, dry_run=False,
- position=None, auto_indent=True, use_regex=False):
+
+def _execute_insert_block(
+ coder,
+ file_path,
+ content,
+ after_pattern=None,
+ before_pattern=None,
+ occurrence=1,
+ change_id=None,
+ dry_run=False,
+ position=None,
+ auto_indent=True,
+ use_regex=False,
+):
"""
Insert a block of text after or before a specified pattern using utility functions.
-
+
Args:
coder: The coder instance
file_path: Path to the file to modify
@@ -35,12 +46,14 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_
try:
# 1. Validate parameters
if sum(x is not None for x in [after_pattern, before_pattern, position]) != 1:
- raise ToolError("Must specify exactly one of: after_pattern, before_pattern, or position")
+ raise ToolError(
+ "Must specify exactly one of: after_pattern, before_pattern, or position"
+ )
# 2. Validate file and get content
abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path)
lines = original_content.splitlines()
-
+
# Handle empty files
if not lines:
lines = [""]
@@ -50,7 +63,7 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_
pattern_type = ""
pattern_desc = ""
occurrence_str = ""
-
+
if position:
# Handle special positions
if position == "start_of_file":
@@ -60,32 +73,36 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_
insertion_line_idx = len(lines)
pattern_type = "at end of"
else:
- raise ToolError(f"Invalid position: '{position}'. Valid values are 'start_of_file' or 'end_of_file'")
+ raise ToolError(
+ f"Invalid position: '{position}'. Valid values are 'start_of_file' or"
+ " 'end_of_file'"
+ )
else:
# Handle pattern-based insertion
pattern = after_pattern if after_pattern else before_pattern
pattern_type = "after" if after_pattern else "before"
pattern_desc = f"Pattern '{pattern}'"
-
+
# Find pattern matches
- pattern_line_indices = find_pattern_indices(lines, pattern,
- use_regex=use_regex)
-
+ pattern_line_indices = find_pattern_indices(lines, pattern, use_regex=use_regex)
+
# Select the target occurrence
- target_line_idx = select_occurrence_index(pattern_line_indices, occurrence, pattern_desc)
-
+ target_line_idx = select_occurrence_index(
+ pattern_line_indices, occurrence, pattern_desc
+ )
+
# Determine insertion point
insertion_line_idx = target_line_idx
if pattern_type == "after":
insertion_line_idx += 1 # Insert on the line *after* the matched line
-
+
# Format occurrence info for output
num_occurrences = len(pattern_line_indices)
occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else ""
# 4. Handle indentation if requested
content_lines = content.splitlines()
-
+
if auto_indent and content_lines:
# Determine base indentation level
base_indent = ""
@@ -93,14 +110,18 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_
# Use indentation from the line before insertion point
reference_line_idx = min(insertion_line_idx - 1, len(lines) - 1)
reference_line = lines[reference_line_idx]
- base_indent = re.match(r'^(\s*)', reference_line).group(1)
-
+ base_indent = re.match(r"^(\s*)", reference_line).group(1)
+
# Apply indentation to content lines, preserving relative indentation
if content_lines:
# Find minimum indentation in content to preserve relative indentation
- content_indents = [len(re.match(r'^(\s*)', line).group(1)) for line in content_lines if line.strip()]
+ content_indents = [
+ len(re.match(r"^(\s*)", line).group(1))
+ for line in content_lines
+ if line.strip()
+ ]
min_content_indent = min(content_indents) if content_indents else 0
-
+
# Apply base indentation while preserving relative indentation
indented_content_lines = []
for line in content_lines:
@@ -108,51 +129,73 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_
indented_content_lines.append("")
else:
# Remove existing indentation and add new base indentation
- stripped_line = line[min_content_indent:] if min_content_indent <= len(line) else line
+ stripped_line = (
+ line[min_content_indent:] if min_content_indent <= len(line) else line
+ )
indented_content_lines.append(base_indent + stripped_line)
-
+
content_lines = indented_content_lines
# 5. Prepare the insertion
new_lines = lines[:insertion_line_idx] + content_lines + lines[insertion_line_idx:]
- new_content = '\n'.join(new_lines)
+ new_content = "\n".join(new_lines)
if original_content == new_content:
- coder.io.tool_warning(f"No changes made: insertion would not change file")
- return f"Warning: No changes made (insertion would not change file)"
+ coder.io.tool_warning("No changes made: insertion would not change file")
+ return "Warning: No changes made (insertion would not change file)"
# 6. Generate diff for feedback
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
-
+
# 7. Handle dry run
if dry_run:
if position:
dry_run_message = f"Dry run: Would insert block {pattern_type} {file_path}."
else:
- dry_run_message = f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path} at line {insertion_line_idx + 1}."
- return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
+ dry_run_message = (
+ f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern"
+ f" '{pattern}' in {file_path} at line {insertion_line_idx + 1}."
+ )
+ return format_tool_result(
+ coder,
+ tool_name,
+ "",
+ dry_run=True,
+ dry_run_message=dry_run_message,
+ diff_snippet=diff_snippet,
+ )
# 8. Apply Change (Not dry run)
metadata = {
- 'insertion_line_idx': insertion_line_idx,
- 'after_pattern': after_pattern,
- 'before_pattern': before_pattern,
- 'position': position,
- 'occurrence': occurrence,
- 'content': content,
- 'auto_indent': auto_indent,
- 'use_regex': use_regex
+ "insertion_line_idx": insertion_line_idx,
+ "after_pattern": after_pattern,
+ "before_pattern": before_pattern,
+ "position": position,
+ "occurrence": occurrence,
+ "content": content,
+ "auto_indent": auto_indent,
+ "use_regex": use_regex,
}
final_change_id = apply_change(
- coder, abs_path, rel_path, original_content, new_content, 'insertblock', metadata, change_id
+ coder,
+ abs_path,
+ rel_path,
+ original_content,
+ new_content,
+ "insertblock",
+ metadata,
+ change_id,
)
# 9. Format and return result
if position:
success_message = f"Inserted block {pattern_type} {file_path}"
else:
- success_message = f"Inserted block {pattern_type} {occurrence_str}pattern in {file_path} at line {insertion_line_idx + 1}"
-
+ success_message = (
+ f"Inserted block {pattern_type} {occurrence_str}pattern in {file_path} at line"
+ f" {insertion_line_idx + 1}"
+ )
+
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
@@ -160,7 +203,9 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_
except ToolError as e:
# Handle errors raised by utility functions (expected errors)
return handle_tool_error(coder, tool_name, e, add_traceback=False)
-
+
except Exception as e:
- coder.io.tool_error(f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback
+ coder.io.tool_error(
+ f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}"
+ ) # Add traceback
return f"Error: {str(e)}"
diff --git a/aider/tools/list_changes.py b/aider/tools/list_changes.py
index 4dfa39721a5..1c4bcc4dd98 100644
--- a/aider/tools/list_changes.py
+++ b/aider/tools/list_changes.py
@@ -1,15 +1,16 @@
import traceback
from datetime import datetime
+
def _execute_list_changes(coder, file_path=None, limit=10):
"""
List recent changes made to files.
-
+
Parameters:
- coder: The Coder instance
- file_path: Optional path to filter changes by file
- limit: Maximum number of changes to list
-
+
Returns a formatted list of changes.
"""
try:
@@ -18,29 +19,33 @@ def _execute_list_changes(coder, file_path=None, limit=10):
if file_path:
abs_path = coder.abs_root_path(file_path)
rel_file_path = coder.get_rel_fname(abs_path)
-
+
# Get the list of changes
changes = coder.change_tracker.list_changes(rel_file_path, limit)
-
+
if not changes:
if file_path:
return f"No changes found for file '{file_path}'"
else:
return "No changes have been made yet"
-
+
# Format the changes into a readable list
result = "Recent changes:\n"
for i, change in enumerate(changes):
- change_time = datetime.fromtimestamp(change['timestamp']).strftime('%H:%M:%S')
- change_type = change['type']
- file_path = change['file_path']
- change_id = change['id']
-
- result += f"{i+1}. [{change_id}] {change_time} - {change_type.upper()} on {file_path}\n"
-
- coder.io.tool_output(result) # Also print to console for user
+ change_time = datetime.fromtimestamp(change["timestamp"]).strftime("%H:%M:%S")
+ change_type = change["type"]
+ file_path = change["file_path"]
+ change_id = change["id"]
+
+ result += (
+ f"{i + 1}. [{change_id}] {change_time} - {change_type.upper()} on {file_path}\n"
+ )
+
+ coder.io.tool_output(result) # Also print to console for user
return result
-
+
except Exception as e:
- coder.io.tool_error(f"Error in ListChanges: {str(e)}\n{traceback.format_exc()}") # Add traceback
+ coder.io.tool_error(
+ f"Error in ListChanges: {str(e)}\n{traceback.format_exc()}"
+ ) # Add traceback
return f"Error: {str(e)}"
diff --git a/aider/tools/ls.py b/aider/tools/ls.py
index 42119a0a8a4..38baa5ad331 100644
--- a/aider/tools/ls.py
+++ b/aider/tools/ls.py
@@ -1,39 +1,40 @@
import os
+
def execute_ls(coder, dir_path):
"""
List files in directory and optionally add some to context.
-
+
This provides information about the structure of the codebase,
similar to how a developer would explore directories.
"""
try:
# Make the path relative to root if it's absolute
- if dir_path.startswith('/'):
+ if dir_path.startswith("/"):
rel_dir = os.path.relpath(dir_path, coder.root)
else:
rel_dir = dir_path
-
+
# Get absolute path
abs_dir = coder.abs_root_path(rel_dir)
-
+
# Check if path exists
if not os.path.exists(abs_dir):
coder.io.tool_output(f"⚠️ Directory '{dir_path}' not found")
- return f"Directory not found"
-
+ return "Directory not found"
+
# Get directory contents
contents = []
try:
with os.scandir(abs_dir) as entries:
for entry in entries:
- if entry.is_file() and not entry.name.startswith('.'):
- rel_path = os.path.join(rel_dir, entry.name)
+ if entry.is_file() and not entry.name.startswith("."):
+ rel_path = os.path.join(rel_dir, entry.name)
contents.append(rel_path)
except NotADirectoryError:
# If it's a file, just return the file
contents = [rel_dir]
-
+
if contents:
coder.io.tool_output(f"📋 Listed {len(contents)} file(s) in '{dir_path}'")
if len(contents) > 10:
@@ -42,7 +43,7 @@ def execute_ls(coder, dir_path):
return f"Found {len(contents)} files: {', '.join(contents)}"
else:
coder.io.tool_output(f"📋 No files found in '{dir_path}'")
- return f"No files found in directory"
+ return "No files found in directory"
except Exception as e:
coder.io.tool_error(f"Error in ls: {str(e)}")
return f"Error: {str(e)}"
diff --git a/aider/tools/make_editable.py b/aider/tools/make_editable.py
index 03c7c000627..33316935b3e 100644
--- a/aider/tools/make_editable.py
+++ b/aider/tools/make_editable.py
@@ -1,27 +1,27 @@
import os
+
# Keep the underscore prefix as this function is primarily for internal coder use
def _execute_make_editable(coder, file_path):
"""
Convert a read-only file to an editable file.
-
+
This allows the LLM to upgrade a file from read-only to editable
when it determines it needs to make changes to that file.
"""
try:
# Get absolute path
abs_path = coder.abs_root_path(file_path)
- rel_path = coder.get_rel_fname(abs_path)
-
+
# Check if file is already editable
if abs_path in coder.abs_fnames:
coder.io.tool_output(f"📝 File '{file_path}' is already editable")
- return f"File is already editable"
+ return "File is already editable"
# Check if file exists on disk
if not os.path.isfile(abs_path):
coder.io.tool_output(f"⚠️ File '{file_path}' not found")
- return f"Error: File not found"
+ return "Error: File not found"
# File exists, is not editable, might be read-only or not in context yet
was_read_only = False
@@ -34,13 +34,13 @@ def _execute_make_editable(coder, file_path):
if was_read_only:
coder.io.tool_output(f"📝 Moved '{file_path}' from read-only to editable")
- return f"File is now editable (moved from read-only)"
+ return "File is now editable (moved from read-only)"
else:
# File was not previously in context at all
coder.io.tool_output(f"📝 Added '{file_path}' directly to editable context")
# Track if added during exploration? Maybe not needed for direct MakeEditable.
# coder.files_added_in_exploration.add(rel_path) # Consider if needed
- return f"File is now editable (added directly)"
+ return "File is now editable (added directly)"
except Exception as e:
coder.io.tool_error(f"Error in MakeEditable for '{file_path}': {str(e)}")
return f"Error: {str(e)}"
diff --git a/aider/tools/make_readonly.py b/aider/tools/make_readonly.py
index a4dac34dcb4..13b85e549f5 100644
--- a/aider/tools/make_readonly.py
+++ b/aider/tools/make_readonly.py
@@ -1,30 +1,29 @@
def _execute_make_readonly(coder, file_path):
"""
Convert an editable file to a read-only file.
-
+
This allows the LLM to downgrade a file from editable to read-only
when it determines it no longer needs to make changes to that file.
"""
try:
# Get absolute path
abs_path = coder.abs_root_path(file_path)
- rel_path = coder.get_rel_fname(abs_path)
-
+
# Check if file is in editable context
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
coder.io.tool_output(f"📚 File '{file_path}' is already read-only")
- return f"File is already read-only"
+ return "File is already read-only"
else:
coder.io.tool_output(f"⚠️ File '{file_path}' not in context")
- return f"File not in context"
-
+ return "File not in context"
+
# Move from editable to read-only
coder.abs_fnames.remove(abs_path)
coder.abs_read_only_fnames.add(abs_path)
-
+
coder.io.tool_output(f"📚 Made '{file_path}' read-only")
- return f"File is now read-only"
+ return "File is now read-only"
except Exception as e:
coder.io.tool_error(f"Error making file read-only: {str(e)}")
return f"Error: {str(e)}"
diff --git a/aider/tools/remove.py b/aider/tools/remove.py
index 40530fe13de..fc94a2b9a19 100644
--- a/aider/tools/remove.py
+++ b/aider/tools/remove.py
@@ -1,9 +1,10 @@
import time
+
def _execute_remove(coder, file_path):
"""
Explicitly remove a file from context.
-
+
This allows the LLM to clean up its context when files are no
longer needed, keeping the context focused and efficient.
"""
@@ -17,29 +18,31 @@ def _execute_remove(coder, file_path):
if abs_path in coder.abs_fnames:
# Don't remove if it's the last editable file and there are no read-only files
if len(coder.abs_fnames) <= 1 and not coder.abs_read_only_fnames:
- coder.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context")
- return f"Cannot remove - last file in context"
+ coder.io.tool_output(
+ f"⚠️ Cannot remove '{file_path}' - it's the only file in context"
+ )
+ return "Cannot remove - last file in context"
coder.abs_fnames.remove(abs_path)
removed = True
elif abs_path in coder.abs_read_only_fnames:
# Don't remove if it's the last read-only file and there are no editable files
if len(coder.abs_read_only_fnames) <= 1 and not coder.abs_fnames:
- coder.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context")
- return f"Cannot remove - last file in context"
+ coder.io.tool_output(
+ f"⚠️ Cannot remove '{file_path}' - it's the only file in context"
+ )
+ return "Cannot remove - last file in context"
coder.abs_read_only_fnames.remove(abs_path)
removed = True
if not removed:
coder.io.tool_output(f"⚠️ File '{file_path}' not in context")
- return f"File not in context"
+ return "File not in context"
# Track in recently removed
- coder.recently_removed[rel_path] = {
- 'removed_at': time.time()
- }
-
+ coder.recently_removed[rel_path] = {"removed_at": time.time()}
+
coder.io.tool_output(f"🗑️ Explicitly removed '{file_path}' from context")
- return f"Removed file from context"
+ return "Removed file from context"
except Exception as e:
coder.io.tool_error(f"Error removing file: {str(e)}")
return f"Error: {str(e)}"
diff --git a/aider/tools/replace_all.py b/aider/tools/replace_all.py
index 279854578ff..ce1095cfa34 100644
--- a/aider/tools/replace_all.py
+++ b/aider/tools/replace_all.py
@@ -1,13 +1,13 @@
-import traceback
from .tool_utils import (
ToolError,
- validate_file_for_edit,
apply_change,
- handle_tool_error,
- generate_unified_diff_snippet,
format_tool_result,
+ generate_unified_diff_snippet,
+ handle_tool_error,
+ validate_file_for_edit,
)
+
def _execute_replace_all(coder, file_path, find_text, replace_text, change_id=None, dry_run=False):
"""
Replace all occurrences of text in a file using utility functions.
@@ -24,31 +24,43 @@ def _execute_replace_all(coder, file_path, find_text, replace_text, change_id=No
count = original_content.count(find_text)
if count == 0:
coder.io.tool_warning(f"Text '{find_text}' not found in file '{file_path}'")
- return f"Warning: Text not found in file"
+ return "Warning: Text not found in file"
# 3. Perform the replacement
new_content = original_content.replace(find_text, replace_text)
if original_content == new_content:
- coder.io.tool_warning(f"No changes made: replacement text is identical to original")
- return f"Warning: No changes made (replacement identical to original)"
+ coder.io.tool_warning("No changes made: replacement text is identical to original")
+ return "Warning: No changes made (replacement identical to original)"
# 4. Generate diff for feedback
diff_examples = generate_unified_diff_snippet(original_content, new_content, rel_path)
# 5. Handle dry run
if dry_run:
- dry_run_message = f"Dry run: Would replace {count} occurrences of '{find_text}' in {file_path}."
- return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_examples)
+ dry_run_message = (
+ f"Dry run: Would replace {count} occurrences of '{find_text}' in {file_path}."
+ )
+ return format_tool_result(
+ coder,
+ tool_name,
+ "",
+ dry_run=True,
+ dry_run_message=dry_run_message,
+ diff_snippet=diff_examples,
+ )
# 6. Apply Change (Not dry run)
- metadata = {
- 'find_text': find_text,
- 'replace_text': replace_text,
- 'occurrences': count
- }
+ metadata = {"find_text": find_text, "replace_text": replace_text, "occurrences": count}
final_change_id = apply_change(
- coder, abs_path, rel_path, original_content, new_content, 'replaceall', metadata, change_id
+ coder,
+ abs_path,
+ rel_path,
+ original_content,
+ new_content,
+ "replaceall",
+ metadata,
+ change_id,
)
# 7. Format and return result
@@ -62,4 +74,4 @@ def _execute_replace_all(coder, file_path, find_text, replace_text, change_id=No
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
- return handle_tool_error(coder, tool_name, e)
\ No newline at end of file
+ return handle_tool_error(coder, tool_name, e)
diff --git a/aider/tools/replace_line.py b/aider/tools/replace_line.py
index 1c3098f252c..f30d8fcd282 100644
--- a/aider/tools/replace_line.py
+++ b/aider/tools/replace_line.py
@@ -1,11 +1,14 @@
import os
import traceback
-def _execute_replace_line(coder, file_path, line_number, new_content, change_id=None, dry_run=False):
+
+def _execute_replace_line(
+ coder, file_path, line_number, new_content, change_id=None, dry_run=False
+):
"""
Replace a specific line identified by line number.
Useful for fixing errors identified by error messages or linters.
-
+
Parameters:
- coder: The Coder instance
- file_path: Path to the file to modify
@@ -13,67 +16,72 @@ def _execute_replace_line(coder, file_path, line_number, new_content, change_id=
- new_content: New content for the line
- change_id: Optional ID for tracking the change
- dry_run: If True, simulate the change without modifying the file
-
+
Returns a result message.
"""
try:
# Get absolute file path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
-
+
# Check if file exists
if not os.path.isfile(abs_path):
coder.io.tool_error(f"File '{file_path}' not found")
- return f"Error: File not found"
-
+ return "Error: File not found"
+
# Check if file is in editable context
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.")
- return f"Error: File is read-only. Use MakeEditable first."
+ return "Error: File is read-only. Use MakeEditable first."
else:
coder.io.tool_error(f"File '{file_path}' not in context")
- return f"Error: File not in context"
-
+ return "Error: File not in context"
+
# Reread file content immediately before modification
file_content = coder.io.read_text(abs_path)
if file_content is None:
coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceLine operation.")
return f"Error: Could not read file '{file_path}'"
-
+
# Split into lines
lines = file_content.splitlines()
-
+
# Validate line number
if not isinstance(line_number, int):
try:
line_number = int(line_number)
except ValueError:
coder.io.tool_error(f"Line number must be an integer, got '{line_number}'")
- coder.io.tool_error(f"Invalid line_number value: '{line_number}'. Must be an integer.")
+ coder.io.tool_error(
+ f"Invalid line_number value: '{line_number}'. Must be an integer."
+ )
return f"Error: Invalid line_number value '{line_number}'"
-
+
# Convert 1-based line number to 0-based index
idx = line_number - 1
-
+
if idx < 0 or idx >= len(lines):
- coder.io.tool_error(f"Line number {line_number} is out of range for file '{file_path}' (has {len(lines)} lines).")
+ coder.io.tool_error(
+ f"Line number {line_number} is out of range for file '{file_path}' (has"
+ f" {len(lines)} lines)."
+ )
return f"Error: Line number {line_number} out of range"
-
+
# Store original content for change tracking
original_content = file_content
original_line = lines[idx]
-
+
# Replace the line
lines[idx] = new_content
-
+
# Join lines back into a string
- new_content_full = '\n'.join(lines)
-
+ new_content_full = "\n".join(lines)
+
if original_content == new_content_full:
coder.io.tool_warning("No changes made: new line content is identical to original")
- return f"Warning: No changes made (new content identical to original)"
-
+ return "Warning: No changes made (new content identical to original)"
+
# Create a readable diff for the line replacement
diff = f"Line {line_number}:\n- {original_line}\n+ {new_content}"
@@ -84,32 +92,34 @@ def _execute_replace_line(coder, file_path, line_number, new_content, change_id=
# --- Apply Change (Not dry run) ---
coder.io.write_text(abs_path, new_content_full)
-
+
# Track the change
try:
metadata = {
- 'line_number': line_number,
- 'original_line': original_line,
- 'new_line': new_content
+ "line_number": line_number,
+ "original_line": original_line,
+ "new_line": new_content,
}
change_id = coder.change_tracker.track_change(
file_path=rel_path,
- change_type='replaceline',
+ change_type="replaceline",
original_content=original_content,
new_content=new_content_full,
metadata=metadata,
- change_id=change_id
+ change_id=change_id,
)
except Exception as track_e:
coder.io.tool_error(f"Error tracking change for ReplaceLine: {track_e}")
change_id = "TRACKING_FAILED"
coder.aider_edited_files.add(rel_path)
-
+
# Improve feedback
- coder.io.tool_output(f"✅ Replaced line {line_number} in {file_path} (change_id: {change_id})")
+ coder.io.tool_output(
+ f"✅ Replaced line {line_number} in {file_path} (change_id: {change_id})"
+ )
return f"Successfully replaced line {line_number} (change_id: {change_id}). Diff:\n{diff}"
-
+
except Exception as e:
coder.io.tool_error(f"Error in ReplaceLine: {str(e)}\n{traceback.format_exc()}")
return f"Error: {str(e)}"
diff --git a/aider/tools/replace_lines.py b/aider/tools/replace_lines.py
index 346ac6eb9f6..2ba65eef7cc 100644
--- a/aider/tools/replace_lines.py
+++ b/aider/tools/replace_lines.py
@@ -1,12 +1,21 @@
import os
-import traceback
-from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change
-def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, change_id=None, dry_run=False):
+from .tool_utils import (
+ ToolError,
+ apply_change,
+ format_tool_result,
+ generate_unified_diff_snippet,
+ handle_tool_error,
+)
+
+
+def _execute_replace_lines(
+ coder, file_path, start_line, end_line, new_content, change_id=None, dry_run=False
+):
"""
Replace a range of lines identified by line numbers.
Useful for fixing errors identified by error messages or linters.
-
+
Parameters:
- file_path: Path to the file to modify
- start_line: The first line number to replace (1-based)
@@ -14,7 +23,7 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content,
- new_content: New content for the lines (can be multi-line)
- change_id: Optional ID for tracking the change
- dry_run: If True, simulate the change without modifying the file
-
+
Returns a result message.
"""
tool_name = "ReplaceLines"
@@ -22,66 +31,72 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content,
# Get absolute file path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
-
+
# Check if file exists
if not os.path.isfile(abs_path):
raise ToolError(f"File '{file_path}' not found")
-
+
# Check if file is in editable context
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.")
else:
raise ToolError(f"File '{file_path}' not in context")
-
+
# Reread file content immediately before modification
file_content = coder.io.read_text(abs_path)
if file_content is None:
raise ToolError(f"Could not read file '{file_path}'")
-
+
# Convert line numbers to integers if needed
try:
start_line = int(start_line)
except ValueError:
raise ToolError(f"Invalid start_line value: '{start_line}'. Must be an integer.")
-
+
try:
end_line = int(end_line)
except ValueError:
raise ToolError(f"Invalid end_line value: '{end_line}'. Must be an integer.")
-
+
# Split into lines
lines = file_content.splitlines()
-
+
# Convert 1-based line numbers to 0-based indices
start_idx = start_line - 1
end_idx = end_line - 1
-
+
# Validate line numbers
if start_idx < 0 or start_idx >= len(lines):
- raise ToolError(f"Start line {start_line} is out of range for file '{file_path}' (has {len(lines)} lines).")
-
+ raise ToolError(
+ f"Start line {start_line} is out of range for file '{file_path}' (has"
+ f" {len(lines)} lines)."
+ )
+
if end_idx < start_idx or end_idx >= len(lines):
- raise ToolError(f"End line {end_line} is out of range for file '{file_path}' (must be >= start line {start_line} and <= {len(lines)}).")
-
+ raise ToolError(
+ f"End line {end_line} is out of range for file '{file_path}' (must be >= start line"
+ f" {start_line} and <= {len(lines)})."
+ )
+
# Store original content for change tracking
original_content = file_content
- replaced_lines = lines[start_idx:end_idx+1]
-
+ replaced_lines = lines[start_idx : end_idx + 1]
+
# Split the new content into lines
new_lines = new_content.splitlines()
-
+
# Perform the replacement
- new_full_lines = lines[:start_idx] + new_lines + lines[end_idx+1:]
- new_content_full = '\n'.join(new_full_lines)
-
+ new_full_lines = lines[:start_idx] + new_lines + lines[end_idx + 1 :]
+ new_content_full = "\n".join(new_full_lines)
+
if original_content == new_content_full:
coder.io.tool_warning("No changes made: new content is identical to original")
- return f"Warning: No changes made (new content identical to original)"
-
+ return "Warning: No changes made (new content identical to original)"
+
# Generate diff snippet
diff_snippet = generate_unified_diff_snippet(original_content, new_content_full, rel_path)
-
+
# Create a readable diff for the lines replacement
diff = f"Lines {start_line}-{end_line}:\n"
# Add removed lines with - prefix
@@ -96,33 +111,50 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content,
# Handle dry run
if dry_run:
dry_run_message = f"Dry run: Would replace lines {start_line}-{end_line} in {file_path}"
- return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
+ return format_tool_result(
+ coder,
+ tool_name,
+ "",
+ dry_run=True,
+ dry_run_message=dry_run_message,
+ diff_snippet=diff_snippet,
+ )
# --- Apply Change (Not dry run) ---
metadata = {
- 'start_line': start_line,
- 'end_line': end_line,
- 'replaced_lines': replaced_lines,
- 'new_lines': new_lines
+ "start_line": start_line,
+ "end_line": end_line,
+ "replaced_lines": replaced_lines,
+ "new_lines": new_lines,
}
-
+
final_change_id = apply_change(
- coder, abs_path, rel_path, original_content, new_content_full, 'replacelines', metadata, change_id
+ coder,
+ abs_path,
+ rel_path,
+ original_content,
+ new_content_full,
+ "replacelines",
+ metadata,
+ change_id,
)
coder.aider_edited_files.add(rel_path)
replaced_count = end_line - start_line + 1
new_count = len(new_lines)
-
+
# Format and return result
- success_message = f"Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path}"
+ success_message = (
+ f"Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new"
+ f" lines in {file_path}"
+ )
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
-
+
except ToolError as e:
# Handle errors raised by utility functions (expected errors)
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
- return handle_tool_error(coder, tool_name, e)
\ No newline at end of file
+ return handle_tool_error(coder, tool_name, e)
diff --git a/aider/tools/replace_text.py b/aider/tools/replace_text.py
index b6409d40a87..c0cc7cb6b8b 100644
--- a/aider/tools/replace_text.py
+++ b/aider/tools/replace_text.py
@@ -1,14 +1,23 @@
-import traceback
from .tool_utils import (
ToolError,
- validate_file_for_edit,
apply_change,
- handle_tool_error,
format_tool_result,
generate_unified_diff_snippet,
+ handle_tool_error,
+ validate_file_for_edit,
)
-def _execute_replace_text(coder, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None, dry_run=False):
+
+def _execute_replace_text(
+ coder,
+ file_path,
+ find_text,
+ replace_text,
+ near_context=None,
+ occurrence=1,
+ change_id=None,
+ dry_run=False,
+):
"""
Replace specific text with new text, optionally using nearby context for disambiguation.
Uses utility functions for validation, finding occurrences, and applying changes.
@@ -38,10 +47,14 @@ def _execute_replace_text(coder, file_path, find_text, replace_text, near_contex
raise ToolError(f"Text '{find_text}' not found, cannot select last occurrence.")
target_idx = num_occurrences - 1
elif 1 <= occurrence <= num_occurrences:
- target_idx = occurrence - 1 # Convert 1-based to 0-based
+ target_idx = occurrence - 1 # Convert 1-based to 0-based
else:
- err_msg = f"Occurrence number {occurrence} is out of range. Found {num_occurrences} occurrences of '{find_text}'"
- if near_context: err_msg += f" near '{near_context}'"
+ err_msg = (
+ f"Occurrence number {occurrence} is out of range. Found"
+ f" {num_occurrences} occurrences of '{find_text}'"
+ )
+ if near_context:
+ err_msg += f" near '{near_context}'"
err_msg += f" in '{file_path}'."
raise ToolError(err_msg)
except ValueError:
@@ -50,11 +63,15 @@ def _execute_replace_text(coder, file_path, find_text, replace_text, near_contex
start_index = occurrences[target_idx]
# 4. Perform the replacement
- new_content = original_content[:start_index] + replace_text + original_content[start_index + len(find_text):]
+ new_content = (
+ original_content[:start_index]
+ + replace_text
+ + original_content[start_index + len(find_text) :]
+ )
if original_content == new_content:
- coder.io.tool_warning(f"No changes made: replacement text is identical to original")
- return f"Warning: No changes made (replacement identical to original)"
+ coder.io.tool_warning("No changes made: replacement text is identical to original")
+ return "Warning: No changes made (replacement identical to original)"
# 5. Generate diff for feedback
# Note: _generate_diff_snippet is currently on the Coder class
@@ -63,19 +80,35 @@ def _execute_replace_text(coder, file_path, find_text, replace_text, near_contex
# 6. Handle dry run
if dry_run:
- dry_run_message = f"Dry run: Would replace {occurrence_str} of '{find_text}' in {file_path}."
- return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
+ dry_run_message = (
+ f"Dry run: Would replace {occurrence_str} of '{find_text}' in {file_path}."
+ )
+ return format_tool_result(
+ coder,
+ tool_name,
+ "",
+ dry_run=True,
+ dry_run_message=dry_run_message,
+ diff_snippet=diff_snippet,
+ )
# 7. Apply Change (Not dry run)
metadata = {
- 'start_index': start_index,
- 'find_text': find_text,
- 'replace_text': replace_text,
- 'near_context': near_context,
- 'occurrence': occurrence
+ "start_index": start_index,
+ "find_text": find_text,
+ "replace_text": replace_text,
+ "near_context": near_context,
+ "occurrence": occurrence,
}
final_change_id = apply_change(
- coder, abs_path, rel_path, original_content, new_content, 'replacetext', metadata, change_id
+ coder,
+ abs_path,
+ rel_path,
+ original_content,
+ new_content,
+ "replacetext",
+ metadata,
+ change_id,
)
# 8. Format and return result
@@ -89,4 +122,4 @@ def _execute_replace_text(coder, file_path, find_text, replace_text, near_contex
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
- return handle_tool_error(coder, tool_name, e)
\ No newline at end of file
+ return handle_tool_error(coder, tool_name, e)
diff --git a/aider/tools/show_numbered_context.py b/aider/tools/show_numbered_context.py
index 6df4386e4dd..4cecf96bb2c 100644
--- a/aider/tools/show_numbered_context.py
+++ b/aider/tools/show_numbered_context.py
@@ -1,7 +1,11 @@
import os
-from .tool_utils import ToolError, resolve_paths, handle_tool_error
-def execute_show_numbered_context(coder, file_path, pattern=None, line_number=None, context_lines=3):
+from .tool_utils import ToolError, handle_tool_error, resolve_paths
+
+
+def execute_show_numbered_context(
+ coder, file_path, pattern=None, line_number=None, context_lines=3
+):
"""
Displays numbered lines from file_path centered around a target location
(pattern or line_number), without adding the file to context.
@@ -34,10 +38,13 @@ def execute_show_numbered_context(coder, file_path, pattern=None, line_number=No
try:
line_number_int = int(line_number)
if 1 <= line_number_int <= num_lines:
- center_line_idx = line_number_int - 1 # Convert to 0-based index
+ center_line_idx = line_number_int - 1 # Convert to 0-based index
found_by = f"line {line_number_int}"
else:
- raise ToolError(f"Line number {line_number_int} is out of range (1-{num_lines}) for {file_path}.")
+ raise ToolError(
+ f"Line number {line_number_int} is out of range (1-{num_lines}) for"
+ f" {file_path}."
+ )
except ValueError:
raise ToolError(f"Invalid line number '{line_number}'. Must be an integer.")
@@ -48,7 +55,7 @@ def execute_show_numbered_context(coder, file_path, pattern=None, line_number=No
if pattern in line:
first_match_line_idx = i
break
-
+
if first_match_line_idx != -1:
center_line_idx = first_match_line_idx
found_by = f"pattern '{pattern}' on line {center_line_idx + 1}"
@@ -56,25 +63,27 @@ def execute_show_numbered_context(coder, file_path, pattern=None, line_number=No
raise ToolError(f"Pattern '{pattern}' not found in {file_path}.")
if center_line_idx == -1:
- # Should not happen if logic above is correct, but as a safeguard
- raise ToolError("Internal error: Could not determine center line.")
+ # Should not happen if logic above is correct, but as a safeguard
+ raise ToolError("Internal error: Could not determine center line.")
# 5. Calculate context window
try:
context_lines_int = int(context_lines)
if context_lines_int < 0:
- raise ValueError("Context lines must be non-negative")
+ raise ValueError("Context lines must be non-negative")
except ValueError:
- coder.io.tool_warning(f"Invalid context_lines value '{context_lines}', using default 3.")
+ coder.io.tool_warning(
+ f"Invalid context_lines value '{context_lines}', using default 3."
+ )
context_lines_int = 3
-
+
start_line_idx = max(0, center_line_idx - context_lines_int)
end_line_idx = min(num_lines - 1, center_line_idx + context_lines_int)
# 6. Format output
# Use rel_path for user-facing messages
output_lines = [f"Displaying context around {found_by} in {rel_path}:"]
- max_line_num_width = len(str(end_line_idx + 1)) # Width for padding
+ max_line_num_width = len(str(end_line_idx + 1)) # Width for padding
for i in range(start_line_idx, end_line_idx + 1):
line_num_str = str(i + 1).rjust(max_line_num_width)
diff --git a/aider/tools/tool_utils.py b/aider/tools/tool_utils.py
index 8c43ca5bb4d..63e068129a3 100644
--- a/aider/tools/tool_utils.py
+++ b/aider/tools/tool_utils.py
@@ -3,10 +3,13 @@
import re
import traceback
+
class ToolError(Exception):
"""Custom exception for tool-specific errors that should be reported to the LLM."""
+
pass
+
def resolve_paths(coder, file_path):
"""Resolves absolute and relative paths for a given file path."""
try:
@@ -17,6 +20,7 @@ def resolve_paths(coder, file_path):
# Wrap unexpected errors during path resolution
raise ToolError(f"Error resolving path '{file_path}': {e}")
+
def validate_file_for_edit(coder, file_path):
"""
Validates if a file exists, is in context, and is editable.
@@ -42,11 +46,14 @@ def validate_file_for_edit(coder, file_path):
content = coder.io.read_text(abs_path)
if content is None:
# This indicates an issue reading a file we know exists and is in context
- coder.io.tool_error(f"Internal error: Could not read file '{file_path}' which should be accessible.")
+ coder.io.tool_error(
+ f"Internal error: Could not read file '{file_path}' which should be accessible."
+ )
raise ToolError(f"Could not read file '{file_path}'")
return abs_path, rel_path, content
+
def find_pattern_indices(lines, pattern, use_regex=False):
"""Finds all line indices matching a pattern."""
indices = []
@@ -55,6 +62,7 @@ def find_pattern_indices(lines, pattern, use_regex=False):
indices.append(i)
return indices
+
def select_occurrence_index(indices, occurrence, pattern_desc="Pattern"):
"""
Selects the target 0-based index from a list of indices based on the 1-based occurrence parameter.
@@ -65,25 +73,29 @@ def select_occurrence_index(indices, occurrence, pattern_desc="Pattern"):
raise ToolError(f"{pattern_desc} not found")
try:
- occurrence = int(occurrence) # Ensure occurrence is an integer
- if occurrence == -1: # Last occurrence
+ occurrence = int(occurrence) # Ensure occurrence is an integer
+ if occurrence == -1: # Last occurrence
if num_occurrences == 0:
- raise ToolError(f"{pattern_desc} not found, cannot select last occurrence.")
+ raise ToolError(f"{pattern_desc} not found, cannot select last occurrence.")
target_idx = num_occurrences - 1
elif 1 <= occurrence <= num_occurrences:
- target_idx = occurrence - 1 # Convert 1-based to 0-based
+ target_idx = occurrence - 1 # Convert 1-based to 0-based
else:
- raise ToolError(f"Occurrence number {occurrence} is out of range for {pattern_desc}. Found {num_occurrences} occurrences.")
+ raise ToolError(
+ f"Occurrence number {occurrence} is out of range for {pattern_desc}. Found"
+ f" {num_occurrences} occurrences."
+ )
except ValueError:
raise ToolError(f"Invalid occurrence value: '{occurrence}'. Must be an integer.")
return indices[target_idx]
+
def determine_line_range(
coder,
file_path,
lines,
- start_pattern_line_index=None, # Made optional
+ start_pattern_line_index=None, # Made optional
end_pattern=None,
line_count=None,
target_symbol=None,
@@ -102,36 +114,42 @@ def determine_line_range(
if sum(targeting_methods) > 1:
raise ToolError("Cannot specify target_symbol along with start_pattern.")
if sum(targeting_methods) == 0:
- raise ToolError("Must specify either target_symbol or start_pattern.") # Or line numbers for line-based tools, handled elsewhere
+ raise ToolError(
+ "Must specify either target_symbol or start_pattern."
+ ) # Or line numbers for line-based tools, handled elsewhere
if target_symbol:
if end_pattern or line_count:
- raise ToolError("Cannot specify end_pattern or line_count when using target_symbol.")
+ raise ToolError("Cannot specify end_pattern or line_count when using target_symbol.")
try:
# Use repo_map to find the symbol's definition range
- start_line, end_line = coder.repo_map.get_symbol_definition_location(file_path, target_symbol)
+ start_line, end_line = coder.repo_map.get_symbol_definition_location(
+ file_path, target_symbol
+ )
return start_line, end_line
- except AttributeError: # Use specific exception
- # Check if repo_map exists and is initialized before accessing methods
- if not hasattr(coder, 'repo_map') or coder.repo_map is None:
- raise ToolError("RepoMap is not available or not initialized.")
- # If repo_map exists, the error might be from get_symbol_definition_location itself
- # Re-raise ToolErrors directly
- raise
+ except AttributeError: # Use specific exception
+ # Check if repo_map exists and is initialized before accessing methods
+ if not hasattr(coder, "repo_map") or coder.repo_map is None:
+ raise ToolError("RepoMap is not available or not initialized.")
+ # If repo_map exists, the error might be from get_symbol_definition_location itself
+ # Re-raise ToolErrors directly
+ raise
except ToolError as e:
- # Propagate specific ToolErrors from repo_map (not found, ambiguous, etc.)
- raise e
+ # Propagate specific ToolErrors from repo_map (not found, ambiguous, etc.)
+ raise e
except Exception as e:
- # Catch other unexpected errors during symbol lookup
- raise ToolError(f"Unexpected error looking up symbol '{target_symbol}': {e}")
+ # Catch other unexpected errors during symbol lookup
+ raise ToolError(f"Unexpected error looking up symbol '{target_symbol}': {e}")
# --- Existing logic for pattern/line_count based targeting ---
# Ensure start_pattern_line_index is provided if not using target_symbol
if start_pattern_line_index is None:
- raise ToolError("Internal error: start_pattern_line_index is required when not using target_symbol.")
+ raise ToolError(
+ "Internal error: start_pattern_line_index is required when not using target_symbol."
+ )
# Assign start_line here for the pattern-based logic path
- start_line = start_pattern_line_index # Start of existing logic
+ start_line = start_pattern_line_index # Start of existing logic
start_line = start_pattern_line_index
end_line = -1
@@ -147,7 +165,10 @@ def determine_line_range(
found_end = True
break
if not found_end:
- raise ToolError(f"End pattern '{end_pattern}' not found after start pattern on line {start_line + 1}")
+ raise ToolError(
+ f"End pattern '{end_pattern}' not found after start pattern on line"
+ f" {start_line + 1}"
+ )
elif line_count:
try:
line_count = int(line_count)
@@ -156,7 +177,9 @@ def determine_line_range(
# Calculate end line index, ensuring it doesn't exceed file bounds
end_line = min(start_line + line_count - 1, len(lines) - 1)
except ValueError:
- raise ToolError(f"Invalid line_count value: '{line_count}'. Must be a positive integer.")
+ raise ToolError(
+ f"Invalid line_count value: '{line_count}'. Must be a positive integer."
+ )
else:
# If neither end_pattern nor line_count is given, the range is just the start line
end_line = start_line
@@ -188,7 +211,7 @@ def generate_unified_diff_snippet(original_content, new_content, file_path, cont
new_lines,
fromfile=f"a/{file_path}",
tofile=f"b/{file_path}",
- n=context_lines, # Number of context lines
+ n=context_lines, # Number of context lines
)
# Join the diff lines, potentially skipping the header if desired,
@@ -196,11 +219,15 @@ def generate_unified_diff_snippet(original_content, new_content, file_path, cont
diff_snippet = "".join(diff)
# Ensure snippet ends with a newline for cleaner formatting in results
- if diff_snippet and not diff_snippet.endswith('\n'):
- diff_snippet += '\n'
+ if diff_snippet and not diff_snippet.endswith("\n"):
+ diff_snippet += "\n"
return diff_snippet
-def apply_change(coder, abs_path, rel_path, original_content, new_content, change_type, metadata, change_id=None):
+
+
+def apply_change(
+ coder, abs_path, rel_path, original_content, new_content, change_type, metadata, change_id=None
+):
"""
Writes the new content, tracks the change, and updates coder state.
Returns the final change ID. Raises ToolError on tracking failure.
@@ -213,7 +240,7 @@ def apply_change(coder, abs_path, rel_path, original_content, new_content, chang
original_content=original_content,
new_content=new_content,
metadata=metadata,
- change_id=change_id
+ change_id=change_id,
)
except Exception as track_e:
# Log the error but also raise ToolError to inform the LLM
@@ -233,28 +260,38 @@ def handle_tool_error(coder, tool_name, e, add_traceback=True):
# Return only the core error message to the LLM for brevity
return f"Error: {str(e)}"
-def format_tool_result(coder, tool_name, success_message, change_id=None, diff_snippet=None, dry_run=False, dry_run_message=None):
+
+def format_tool_result(
+ coder,
+ tool_name,
+ success_message,
+ change_id=None,
+ diff_snippet=None,
+ dry_run=False,
+ dry_run_message=None,
+):
"""Formats the result message for tool execution."""
if dry_run:
full_message = dry_run_message or f"Dry run: Would execute {tool_name}."
if diff_snippet:
full_message += f" Diff snippet:\n{diff_snippet}"
- coder.io.tool_output(full_message) # Log the dry run action
+ coder.io.tool_output(full_message) # Log the dry run action
return full_message
else:
# Use the provided success message, potentially adding change_id and diff
full_message = f"✅ {success_message}"
if change_id:
full_message += f" (change_id: {change_id})"
- coder.io.tool_output(full_message) # Log the success action
+ coder.io.tool_output(full_message) # Log the success action
result_for_llm = f"Successfully executed {tool_name}."
if change_id:
- result_for_llm += f" Change ID: {change_id}."
+ result_for_llm += f" Change ID: {change_id}."
if diff_snippet:
result_for_llm += f" Diff snippet:\n{diff_snippet}"
return result_for_llm
+
# Example usage within a hypothetical tool:
# try:
# abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path)
@@ -263,7 +300,13 @@ def format_tool_result(coder, tool_name, success_message, change_id=None, diff_s
# return format_tool_result(coder, "MyTool", "", dry_run=True, diff_snippet=diff)
#
# change_id = apply_change(coder, abs_path, rel_path, original_content, new_content, 'mytool', metadata)
-# return format_tool_result(coder, "MyTool", f"Applied change to {file_path}", change_id=change_id, diff_snippet=diff)
+# return format_tool_result(
+# coder,
+# "MyTool",
+# f"Applied change to {file_path}",
+# change_id=change_id,
+# diff_snippet=diff
+# )
# except ToolError as e:
# return handle_tool_error(coder, "MyTool", e, add_traceback=False) # Don't need traceback for ToolErrors
# except Exception as e:
diff --git a/aider/tools/undo_change.py b/aider/tools/undo_change.py
index 9138628d1f2..fc3484a3038 100644
--- a/aider/tools/undo_change.py
+++ b/aider/tools/undo_change.py
@@ -1,14 +1,15 @@
import traceback
-def _execute_undo_change(coder, change_id=None, file_path=None):
+
+def _execute_undo_change(coder, change_id=None, file_path=None):
"""
Undo a specific change by ID, or the last change to a file.
-
+
Parameters:
- coder: The Coder instance
- change_id: ID of the change to undo
- file_path: Path to file where the last change should be undone
-
+
Returns a result message.
"""
# Note: Undo does not have a dry_run parameter as it's inherently about reverting a previous action.
@@ -17,40 +18,43 @@ def _execute_undo_change(coder, change_id=None, file_path=None):
if change_id is None and file_path is None:
coder.io.tool_error("Must specify either change_id or file_path for UndoChange")
return "Error: Must specify either change_id or file_path"
-
+
# If file_path is specified, get the most recent change for that file
- if file_path:
+ if file_path:
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
-
+
change_id = coder.change_tracker.get_last_change(rel_path)
if not change_id:
coder.io.tool_error(f"No tracked changes found for file '{file_path}' to undo.")
return f"Error: No changes found for file '{file_path}'"
-
+
# Attempt to get undo information from the tracker
success, message, change_info = coder.change_tracker.undo_change(change_id)
-
+
if not success:
coder.io.tool_error(f"Failed to undo change '{change_id}': {message}")
return f"Error: {message}"
-
+
# Apply the undo by restoring the original content
if change_info:
- file_path = change_info['file_path']
+ file_path = change_info["file_path"]
abs_path = coder.abs_root_path(file_path)
# Write the original content back to the file
- coder.io.write_text(abs_path, change_info['original'])
- coder.aider_edited_files.add(file_path) # Track that the file was modified by the undo
-
- change_type = change_info['type']
+ coder.io.write_text(abs_path, change_info["original"])
+ coder.aider_edited_files.add(file_path) # Track that the file was modified by the undo
+
+ change_type = change_info["type"]
coder.io.tool_output(f"✅ Undid {change_type} change '{change_id}' in {file_path}")
return f"Successfully undid {change_type} change '{change_id}'."
else:
# This case should ideally not be reached if tracker returns success
- coder.io.tool_error(f"Failed to undo change '{change_id}': Change info missing after successful tracker update.")
+ coder.io.tool_error(
+ f"Failed to undo change '{change_id}': Change info missing after successful tracker"
+ " update."
+ )
return f"Error: Failed to undo change '{change_id}' (missing change info)"
-
+
except Exception as e:
coder.io.tool_error(f"Error in UndoChange: {str(e)}\n{traceback.format_exc()}")
return f"Error: {str(e)}"
diff --git a/aider/tools/view_files_at_glob.py b/aider/tools/view_files_at_glob.py
index 9dda05d02c6..34af0f74d54 100644
--- a/aider/tools/view_files_at_glob.py
+++ b/aider/tools/view_files_at_glob.py
@@ -1,5 +1,6 @@
-import os
import fnmatch
+import os
+
def execute_view_files_at_glob(coder, pattern):
"""
@@ -11,19 +12,19 @@ def execute_view_files_at_glob(coder, pattern):
try:
# Find files matching the pattern
matching_files = []
-
+
# Make the pattern relative to root if it's absolute
- if pattern.startswith('/'):
+ if pattern.startswith("/"):
pattern = os.path.relpath(pattern, coder.root)
-
+
# Get all files in the repo
all_files = coder.get_all_relative_files()
-
+
# Find matches with pattern matching
for file in all_files:
if fnmatch.fnmatch(file, pattern):
matching_files.append(file)
-
+
# Limit the number of files added if there are too many matches
if len(matching_files) > coder.max_files_per_glob:
coder.io.tool_output(
@@ -31,22 +32,31 @@ def execute_view_files_at_glob(coder, pattern):
f"limiting to {coder.max_files_per_glob} most relevant files."
)
# Sort by modification time (most recent first)
- matching_files.sort(key=lambda f: os.path.getmtime(coder.abs_root_path(f)), reverse=True)
- matching_files = matching_files[:coder.max_files_per_glob]
-
+ matching_files.sort(
+ key=lambda f: os.path.getmtime(coder.abs_root_path(f)), reverse=True
+ )
+ matching_files = matching_files[: coder.max_files_per_glob]
+
# Add files to context
for file in matching_files:
# Use the coder's internal method to add files
coder._add_file_to_context(file)
-
+
# Return a user-friendly result
if matching_files:
if len(matching_files) > 10:
- brief = ', '.join(matching_files[:5]) + f', and {len(matching_files)-5} more'
- coder.io.tool_output(f"📂 Added {len(matching_files)} files matching '{pattern}': {brief}")
+ brief = ", ".join(matching_files[:5]) + f", and {len(matching_files) - 5} more"
+ coder.io.tool_output(
+ f"📂 Added {len(matching_files)} files matching '{pattern}': {brief}"
+ )
else:
- coder.io.tool_output(f"📂 Added files matching '{pattern}': {', '.join(matching_files)}")
- return f"Added {len(matching_files)} files: {', '.join(matching_files[:5])}{' and more' if len(matching_files) > 5 else ''}"
+ coder.io.tool_output(
+ f"📂 Added files matching '{pattern}': {', '.join(matching_files)}"
+ )
+ return (
+ f"Added {len(matching_files)} files:"
+ f" {', '.join(matching_files[:5])}{' and more' if len(matching_files) > 5 else ''}"
+ )
else:
coder.io.tool_output(f"⚠️ No files found matching '{pattern}'")
return f"No files found matching '{pattern}'"
diff --git a/aider/tools/view_files_matching.py b/aider/tools/view_files_matching.py
index 7d395721a27..f87d4682c6d 100644
--- a/aider/tools/view_files_matching.py
+++ b/aider/tools/view_files_matching.py
@@ -1,6 +1,6 @@
-import re
-import os
import fnmatch
+import re
+
def execute_view_files_matching(coder, search_pattern, file_pattern=None, regex=False):
"""
@@ -8,9 +8,12 @@ def execute_view_files_matching(coder, search_pattern, file_pattern=None, regex=
Args:
coder: The Coder instance.
- search_pattern (str): The pattern to search for. Treated as a literal string by default.
- file_pattern (str, optional): Glob pattern to filter which files are searched. Defaults to None (search all files).
- regex (bool, optional): If True, treat search_pattern as a regular expression. Defaults to False.
+ search_pattern (str): The pattern to search for.
+ Treated as a literal string by default.
+ file_pattern (str, optional): Glob pattern to filter which files are searched.
+ Defaults to None (search all files).
+ regex (bool, optional): If True, treat search_pattern as a regular expression.
+ Defaults to False.
This tool lets the LLM search for content within files, mimicking
how a developer would use grep or regex search to find relevant code.
@@ -24,19 +27,21 @@ def execute_view_files_matching(coder, search_pattern, file_pattern=None, regex=
for file in all_files:
if fnmatch.fnmatch(file, file_pattern):
files_to_search.append(file)
-
+
if not files_to_search:
- return f"No files matching '{file_pattern}' to search for pattern '{search_pattern}'"
+ return (
+ f"No files matching '{file_pattern}' to search for pattern '{search_pattern}'"
+ )
else:
# Search all files if no pattern provided
files_to_search = coder.get_all_relative_files()
-
+
# Search for pattern in files
matches = {}
for file in files_to_search:
abs_path = coder.abs_root_path(file)
try:
- with open(abs_path, 'r', encoding='utf-8') as f:
+ with open(abs_path, "r", encoding="utf-8") as f:
content = f.read()
match_count = 0
if regex:
@@ -47,17 +52,17 @@ def execute_view_files_matching(coder, search_pattern, file_pattern=None, regex=
# Handle invalid regex patterns gracefully
coder.io.tool_error(f"Invalid regex pattern '{search_pattern}': {e}")
# Skip this file for this search if regex is invalid
- continue
+ continue
else:
# Exact string matching
match_count = content.count(search_pattern)
-
+
if match_count > 0:
matches[file] = match_count
except Exception:
# Skip files that can't be read (binary, etc.)
pass
-
+
# Limit the number of files added if there are too many matches
if len(matches) > coder.max_files_per_glob:
coder.io.tool_output(
@@ -66,27 +71,33 @@ def execute_view_files_matching(coder, search_pattern, file_pattern=None, regex=
)
# Sort by number of matches (most matches first)
sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True)
- matches = dict(sorted_matches[:coder.max_files_per_glob])
-
+ matches = dict(sorted_matches[: coder.max_files_per_glob])
+
# Add matching files to context
for file in matches:
coder._add_file_to_context(file)
-
+
# Return a user-friendly result
if matches:
# Sort by number of matches (most matches first)
sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True)
match_list = [f"{file} ({count} matches)" for file, count in sorted_matches[:5]]
-
+
if len(sorted_matches) > 5:
- coder.io.tool_output(f"🔍 Found '{search_pattern}' in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more")
- return f"Found in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more"
+ coder.io.tool_output(
+ f"🔍 Found '{search_pattern}' in {len(matches)} files:"
+ f" {', '.join(match_list)} and {len(matches) - 5} more"
+ )
+ return (
+ f"Found in {len(matches)} files: {', '.join(match_list)} and"
+ f" {len(matches) - 5} more"
+ )
else:
coder.io.tool_output(f"🔍 Found '{search_pattern}' in: {', '.join(match_list)}")
return f"Found in {len(matches)} files: {', '.join(match_list)}"
else:
coder.io.tool_output(f"⚠️ Pattern '{search_pattern}' not found in any files")
- return f"Pattern not found in any files"
+ return "Pattern not found in any files"
except Exception as e:
coder.io.tool_error(f"Error in ViewFilesMatching: {str(e)}")
- return f"Error: {str(e)}"
\ No newline at end of file
+ return f"Error: {str(e)}"
diff --git a/aider/tools/view_files_with_symbol.py b/aider/tools/view_files_with_symbol.py
index ebf849cd249..dc5962cf26f 100644
--- a/aider/tools/view_files_with_symbol.py
+++ b/aider/tools/view_files_with_symbol.py
@@ -1,5 +1,6 @@
import os
+
def _execute_view_files_with_symbol(coder, symbol):
"""
Find files containing a symbol using RepoMap and add them to context.
@@ -24,20 +25,29 @@ def _execute_view_files_with_symbol(coder, symbol):
for tag in tags:
if tag.name == symbol:
found_in_context.append(rel_fname)
- break # Found in this file, move to next
+ break # Found in this file, move to next
except Exception as e:
- coder.io.tool_warning(f"Could not get symbols for {rel_fname} while checking context: {e}")
+ coder.io.tool_warning(
+ f"Could not get symbols for {rel_fname} while checking context: {e}"
+ )
if found_in_context:
# Symbol found in already loaded files. Report this and stop.
file_list = ", ".join(sorted(list(set(found_in_context))))
- coder.io.tool_output(f"Symbol '{symbol}' found in already loaded file(s): {file_list}. No external search performed.")
- return f"Symbol '{symbol}' found in already loaded file(s): {file_list}. No external search performed."
+ coder.io.tool_output(
+ f"Symbol '{symbol}' found in already loaded file(s): {file_list}. No external search"
+ " performed."
+ )
+ return (
+ f"Symbol '{symbol}' found in already loaded file(s): {file_list}. No external search"
+ " performed."
+ )
# --- End Modification ---
-
# 2. If not found in context, search the repository using RepoMap
- coder.io.tool_output(f"🔎 Searching for symbol '{symbol}' in repository (excluding current context)...")
+ coder.io.tool_output(
+ f"🔎 Searching for symbol '{symbol}' in repository (excluding current context)..."
+ )
try:
found_files = set()
current_context_files = coder.abs_fnames | coder.abs_read_only_fnames
@@ -60,18 +70,20 @@ def _execute_view_files_with_symbol(coder, symbol):
if tag.name == symbol:
# Use absolute path directly if available, otherwise resolve from relative path
abs_fname = rel_fname_to_abs.get(tag.rel_fname) or coder.abs_root_path(tag.fname)
- if abs_fname in files_to_search: # Ensure we only add files we intended to search
+ if abs_fname in files_to_search: # Ensure we only add files we intended to search
found_files.add(abs_fname)
# Limit the number of files added
if len(found_files) > coder.max_files_per_glob:
- coder.io.tool_output(
+ coder.io.tool_output(
f"⚠️ Found symbol '{symbol}' in {len(found_files)} files, "
f"limiting to {coder.max_files_per_glob} most relevant files."
)
- # Sort by modification time (most recent first) - approximate relevance
- sorted_found_files = sorted(list(found_files), key=lambda f: os.path.getmtime(f), reverse=True)
- found_files = set(sorted_found_files[:coder.max_files_per_glob])
+ # Sort by modification time (most recent first) - approximate relevance
+ sorted_found_files = sorted(
+ list(found_files), key=lambda f: os.path.getmtime(f), reverse=True
+ )
+ found_files = set(sorted_found_files[: coder.max_files_per_glob])
# Add files to context (as read-only)
added_count = 0
@@ -79,24 +91,31 @@ def _execute_view_files_with_symbol(coder, symbol):
for abs_file_path in found_files:
rel_path = coder.get_rel_fname(abs_file_path)
# Double check it's not already added somehow
- if abs_file_path not in coder.abs_fnames and abs_file_path not in coder.abs_read_only_fnames:
+ if (
+ abs_file_path not in coder.abs_fnames
+ and abs_file_path not in coder.abs_read_only_fnames
+ ):
# Use explicit=True for clear output, even though it's an external search result
add_result = coder._add_file_to_context(rel_path, explicit=True)
- if "Added" in add_result or "Viewed" in add_result: # Count successful adds/views
+ if "Added" in add_result or "Viewed" in add_result: # Count successful adds/views
added_count += 1
added_files_rel.append(rel_path)
if added_count > 0:
if added_count > 5:
- brief = ', '.join(added_files_rel[:5]) + f', and {added_count-5} more'
+ brief = ", ".join(added_files_rel[:5]) + f", and {added_count - 5} more"
coder.io.tool_output(f"🔎 Found '{symbol}' and added {added_count} files: {brief}")
else:
- coder.io.tool_output(f"🔎 Found '{symbol}' and added files: {', '.join(added_files_rel)}")
+ coder.io.tool_output(
+ f"🔎 Found '{symbol}' and added files: {', '.join(added_files_rel)}"
+ )
return f"Found symbol '{symbol}' and added {added_count} files as read-only."
else:
- coder.io.tool_output(f"⚠️ Symbol '{symbol}' not found in searchable files (outside current context).")
+ coder.io.tool_output(
+ f"⚠️ Symbol '{symbol}' not found in searchable files (outside current context)."
+ )
return f"Symbol '{symbol}' not found in searchable files (outside current context)."
except Exception as e:
coder.io.tool_error(f"Error in ViewFilesWithSymbol: {str(e)}")
- return f"Error: {str(e)}"
\ No newline at end of file
+ return f"Error: {str(e)}"
diff --git a/aider/utils.py b/aider/utils.py
index 938030c58a5..0a7a06ded11 100644
--- a/aider/utils.py
+++ b/aider/utils.py
@@ -166,7 +166,9 @@ def format_messages(messages, title=None):
if fence_count > 5:
# Show truncated content with file count for large files to improve performance
first_line = content.split("\n", 1)[0]
- output.append(f"{role} {first_line} [content with ~{fence_count} files truncated]")
+ output.append(
+ f"{role} {first_line} [content with ~{fence_count} files truncated]"
+ )
else:
output.append(format_content(role, content))
else:
diff --git a/aider/website/_data/polyglot_leaderboard.yml b/aider/website/_data/polyglot_leaderboard.yml
index 758687615e2..5faa62dd908 100644
--- a/aider/website/_data/polyglot_leaderboard.yml
+++ b/aider/website/_data/polyglot_leaderboard.yml
@@ -1683,3 +1683,31 @@
versions: 0.85.2.dev
seconds_per_case: 67.6
total_cost: 1.2357
+
+- dirname: 2025-08-06-04-54-48--gpt-oss-120b-high-polyglot
+ test_cases: 225
+ model: gpt-oss-120b (high)
+ edit_format: diff
+ commit_hash: 1af0e59
+ pass_rate_1: 13.8
+ pass_rate_2: 41.8
+ pass_num_1: 31
+ pass_num_2: 94
+ percent_cases_well_formed: 79.1
+ error_outputs: 95
+ num_malformed_responses: 77
+ num_with_malformed_responses: 47
+ user_asks: 142
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 3123768
+ completion_tokens: 856495
+ test_timeouts: 4
+ total_tests: 225
+ command: aider --model openrouter/openai/gpt-oss-120b --reasoning-effort high
+ date: 2025-08-06
+ versions: 0.85.3.dev
+ seconds_per_case: 35.5
+ total_cost: 0.7406
diff --git a/aider/website/_includes/footer_custom.html b/aider/website/_includes/footer_custom.html
new file mode 100644
index 00000000000..348aa3117b5
--- /dev/null
+++ b/aider/website/_includes/footer_custom.html
@@ -0,0 +1,3 @@
+
diff --git a/aider/website/_includes/head_custom.html b/aider/website/_includes/head_custom.html
index 20f888728d7..c60846d974a 100644
--- a/aider/website/_includes/head_custom.html
+++ b/aider/website/_includes/head_custom.html
@@ -32,12 +32,19 @@
.side-bar {
background: linear-gradient(135deg, #ffffff 0%, rgba(20, 176, 20, 0.01) 25%, rgba(20, 176, 20, 0.04) 40%, rgba(220, 230, 255, 0.4) 60%, rgba(205, 218, 255, 0.4) 80%, #F5F6FA 100%);
}
+
+ @media (max-width: 50em) {
+ .ea-ad--sidebar { display: none; }
+ .ea-ad--mobile { display: block; }
+ }
+
+