-
Notifications
You must be signed in to change notification settings - Fork 1.4k
Add VercelAIAdapter.dump_messages to convert Pydantic AI messages to Vercel AI messages
#3392
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
DouweM
merged 20 commits into
pydantic:main
from
dsfaccini:vercelai-adapter-dump-messages
Dec 4, 2025
+1,115
−3
Merged
Changes from 1 commit
Commits
Show all changes
20 commits
Select commit
Hold shift + click to select a range
4fdb27d
add dump_messages method to vercel ai adapter
dsfaccini 1cb60bf
fix broken loop and add tests for coverage
dsfaccini 261bc3a
add missing tests for coverage
dsfaccini 3f70b83
wip: remove id generator and BuiltinToolReturnPart - fix tests using …
dsfaccini 97feec2
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini dfcb30c
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini f99bc0c
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini 4870dd4
refactor: simplify dump_messages method and remove unused id generator
dsfaccini b1272b7
test: add unit test for dumping and loading ThinkingPart with metadata
dsfaccini 6cdec4d
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini e300c15
coverage
dsfaccini 42596b9
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini 57157d6
syntax improvement
dsfaccini 58a71a2
address review points
dsfaccini 2f3b2a2
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini a061421
fix coverage
dsfaccini 73fb21d
refactor (for comfort)
dsfaccini e49f656
Refactor dump_messages per review: merge tool dicts, handle RetryProm…
dsfaccini 54f77a7
test builtin tool call without return in dump_messages
dsfaccini dbdca1d
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Some comments aren't visible on the classic Files Changed page.
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -14,7 +14,6 @@ | |
|
|
||
| from ...messages import ( | ||
| AudioUrl, | ||
| BaseToolCallPart, | ||
| BinaryContent, | ||
| BuiltinToolCallPart, | ||
| BuiltinToolReturnPart, | ||
|
|
@@ -246,10 +245,12 @@ def _dump_request_message(msg: ModelRequest) -> tuple[list[UIMessagePart], list[ | |
| # Tool returns are merged into the tool call in the assistant message | ||
| pass | ||
| elif isinstance(part, RetryPromptPart): | ||
| # RetryPromptPart always has a tool_call_id (generated if not provided). | ||
| # These are handled when processing ToolCallPart in ModelResponse, | ||
| # where they become DynamicToolOutputErrorPart via the tool_errors dict. | ||
| pass | ||
| if part.tool_name: | ||
| # Tool-related retries are handled when processing ToolCallPart in ModelResponse | ||
| pass | ||
| else: | ||
| # Non-tool retries (e.g., output validation errors) become user text | ||
| user_ui_parts.append(TextUIPart(text=part.model_response(), state='done')) | ||
| else: | ||
| assert_never(part) | ||
|
|
||
|
|
@@ -258,8 +259,7 @@ def _dump_request_message(msg: ModelRequest) -> tuple[list[UIMessagePart], list[ | |
| @staticmethod | ||
| def _dump_response_message( # noqa: C901 | ||
| msg: ModelResponse, | ||
| tool_returns: dict[str, ToolReturnPart], | ||
| tool_errors: dict[str, RetryPromptPart], | ||
| tool_results: dict[str, ToolReturnPart | RetryPromptPart], | ||
| ) -> list[UIMessagePart]: | ||
| """Convert a ModelResponse into a UIMessage.""" | ||
| ui_parts: list[UIMessagePart] = [] | ||
|
|
@@ -298,71 +298,69 @@ def _dump_response_message( # noqa: C901 | |
| media_type=part.content.media_type, | ||
| ) | ||
| ) | ||
| elif isinstance(part, BaseToolCallPart): | ||
| if isinstance(part, BuiltinToolCallPart): | ||
| call_provider_metadata = ( | ||
| {'pydantic_ai': {'provider_name': part.provider_name}} if part.provider_name else None | ||
| ) | ||
| elif isinstance(part, BuiltinToolCallPart): | ||
| call_provider_metadata = ( | ||
| {'pydantic_ai': {'provider_name': part.provider_name}} if part.provider_name else None | ||
| ) | ||
|
|
||
| if builtin_return := local_builtin_returns.get(part.tool_call_id): | ||
| content = builtin_return.model_response_str() | ||
| ui_parts.append( | ||
| ToolOutputAvailablePart( | ||
| type=f'tool-{part.tool_name}', | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| output=content, | ||
| state='output-available', | ||
| provider_executed=True, | ||
| call_provider_metadata=call_provider_metadata, | ||
| ) | ||
| if builtin_return := local_builtin_returns.get(part.tool_call_id): | ||
| content = builtin_return.model_response_str() | ||
| ui_parts.append( | ||
| ToolOutputAvailablePart( | ||
| type=f'tool-{part.tool_name}', | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| output=content, | ||
| state='output-available', | ||
| provider_executed=True, | ||
| call_provider_metadata=call_provider_metadata, | ||
| ) | ||
| else: # pragma: no cover | ||
| ui_parts.append( | ||
| ToolInputAvailablePart( | ||
| type=f'tool-{part.tool_name}', | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| state='input-available', | ||
| provider_executed=True, | ||
| call_provider_metadata=call_provider_metadata, | ||
| ) | ||
| ) | ||
| else: # pragma: no cover | ||
|
||
| ui_parts.append( | ||
| ToolInputAvailablePart( | ||
| type=f'tool-{part.tool_name}', | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| state='input-available', | ||
| provider_executed=True, | ||
| call_provider_metadata=call_provider_metadata, | ||
| ) | ||
| else: | ||
| tool_return = tool_returns.get(part.tool_call_id) | ||
| tool_error = tool_errors.get(part.tool_call_id) | ||
|
|
||
| if isinstance(tool_return, ToolReturnPart): | ||
| content = tool_return.model_response_str() | ||
| ui_parts.append( | ||
| DynamicToolOutputAvailablePart( | ||
| tool_name=part.tool_name, | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| output=content, | ||
| state='output-available', | ||
| ) | ||
| ) | ||
| elif isinstance(part, ToolCallPart): | ||
| tool_result = tool_results.get(part.tool_call_id) | ||
|
|
||
| if isinstance(tool_result, ToolReturnPart): | ||
| content = tool_result.model_response_str() | ||
| ui_parts.append( | ||
| DynamicToolOutputAvailablePart( | ||
| tool_name=part.tool_name, | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| output=content, | ||
| state='output-available', | ||
| ) | ||
| elif tool_error: | ||
| error_text = tool_error.model_response() | ||
| ui_parts.append( | ||
| DynamicToolOutputErrorPart( | ||
| tool_name=part.tool_name, | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| error_text=error_text, | ||
| state='output-error', | ||
| ) | ||
| ) | ||
| elif isinstance(tool_result, RetryPromptPart): | ||
| error_text = tool_result.model_response() | ||
| ui_parts.append( | ||
| DynamicToolOutputErrorPart( | ||
| tool_name=part.tool_name, | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| error_text=error_text, | ||
| state='output-error', | ||
| ) | ||
| else: | ||
| ui_parts.append( | ||
| DynamicToolInputAvailablePart( | ||
| tool_name=part.tool_name, | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| state='input-available', | ||
| ) | ||
| ) | ||
| else: | ||
| ui_parts.append( | ||
| DynamicToolInputAvailablePart( | ||
| tool_name=part.tool_name, | ||
| tool_call_id=part.tool_call_id, | ||
| input=part.args_as_json_str(), | ||
| state='input-available', | ||
| ) | ||
| ) | ||
| else: | ||
| assert_never(part) | ||
|
|
||
|
|
@@ -381,16 +379,15 @@ def dump_messages( | |
| Returns: | ||
| A list of UIMessage objects in Vercel AI format | ||
| """ | ||
| tool_returns: dict[str, ToolReturnPart] = {} | ||
| tool_errors: dict[str, RetryPromptPart] = {} | ||
| tool_results: dict[str, ToolReturnPart | RetryPromptPart] = {} | ||
|
|
||
| for msg in messages: | ||
| if isinstance(msg, ModelRequest): | ||
| for part in msg.parts: | ||
| if isinstance(part, ToolReturnPart): | ||
| tool_returns[part.tool_call_id] = part | ||
| elif isinstance(part, RetryPromptPart) and part.tool_call_id: | ||
| tool_errors[part.tool_call_id] = part | ||
| tool_results[part.tool_call_id] = part | ||
| elif isinstance(part, RetryPromptPart) and part.tool_name: | ||
| tool_results[part.tool_call_id] = part | ||
|
|
||
| result: list[UIMessage] = [] | ||
|
|
||
|
|
@@ -406,9 +403,7 @@ def dump_messages( | |
| elif isinstance( # pragma: no branch | ||
| msg, ModelResponse | ||
| ): | ||
| ui_parts: list[UIMessagePart] = cls._dump_response_message( | ||
| msg, tool_returns=tool_returns, tool_errors=tool_errors | ||
| ) | ||
| ui_parts: list[UIMessagePart] = cls._dump_response_message(msg, tool_results) | ||
| if ui_parts: # pragma: no branch | ||
| result.append(UIMessage(id=str(uuid.uuid4()), role='assistant', parts=ui_parts)) | ||
| else: | ||
|
|
||
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.