Skip to content
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
4fdb27d
add dump_messages method to vercel ai adapter
dsfaccini Nov 11, 2025
1cb60bf
fix broken loop and add tests for coverage
dsfaccini Nov 11, 2025
261bc3a
add missing tests for coverage
dsfaccini Nov 11, 2025
3f70b83
wip: remove id generator and BuiltinToolReturnPart - fix tests using …
dsfaccini Nov 17, 2025
97feec2
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini Nov 20, 2025
dfcb30c
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini Nov 22, 2025
f99bc0c
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini Nov 26, 2025
4870dd4
refactor: simplify dump_messages method and remove unused id generator
dsfaccini Nov 23, 2025
b1272b7
test: add unit test for dumping and loading ThinkingPart with metadata
dsfaccini Nov 27, 2025
6cdec4d
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini Nov 27, 2025
e300c15
coverage
dsfaccini Nov 27, 2025
42596b9
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini Nov 27, 2025
57157d6
syntax improvement
dsfaccini Nov 28, 2025
58a71a2
address review points
dsfaccini Nov 28, 2025
2f3b2a2
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini Nov 29, 2025
a061421
fix coverage
dsfaccini Nov 29, 2025
73fb21d
refactor (for comfort)
dsfaccini Nov 30, 2025
e49f656
Refactor dump_messages per review: merge tool dicts, handle RetryProm…
dsfaccini Dec 1, 2025
54f77a7
test builtin tool call without return in dump_messages
dsfaccini Dec 3, 2025
dbdca1d
Merge branch 'main' into vercelai-adapter-dump-messages
dsfaccini Dec 3, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions pydantic_ai_slim/pydantic_ai/ui/_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,12 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]:
"""Transform protocol-specific messages into Pydantic AI messages."""
raise NotImplementedError

@classmethod
@abstractmethod
def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[MessageT]:
    """Transform Pydantic AI messages into protocol-specific messages.

    Inverse of `load_messages`. Concrete adapters must override this;
    the base implementation always raises `NotImplementedError`.
    """
    raise NotImplementedError

@abstractmethod
def build_event_stream(self) -> UIEventStream[RunInputT, EventT, AgentDepsT, OutputDataT]:
"""Build a protocol-specific event stream transformer."""
Expand Down
5 changes: 5 additions & 0 deletions pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,11 @@ def state(self) -> dict[str, Any] | None:

return cast('dict[str, Any]', state)

@classmethod
def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[Message]:
    """Transform Pydantic AI messages into AG-UI messages.

    Not implemented yet for the AG-UI protocol; calling this always raises.
    """
    raise NotImplementedError('TODO: implement dump_messages method')

@classmethod
def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]:
"""Transform AG-UI messages into Pydantic AI messages."""
Expand Down
241 changes: 238 additions & 3 deletions pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,29 @@

from __future__ import annotations

import json
import uuid
from collections.abc import Sequence
from dataclasses import dataclass
from functools import cached_property
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Any, cast

from pydantic import TypeAdapter
from typing_extensions import assert_never

from ...messages import (
AudioUrl,
BaseToolCallPart,
BinaryContent,
BuiltinToolCallPart,
BuiltinToolReturnPart,
CachePoint,
DocumentUrl,
FilePart,
ImageUrl,
ModelMessage,
ModelRequest,
ModelResponse,
RetryPromptPart,
SystemPromptPart,
TextPart,
Expand All @@ -35,6 +41,9 @@
from ._event_stream import VercelAIEventStream
from .request_types import (
DataUIPart,
DynamicToolInputAvailablePart,
DynamicToolOutputAvailablePart,
DynamicToolOutputErrorPart,
DynamicToolUIPart,
FileUIPart,
ReasoningUIPart,
Expand All @@ -43,10 +52,12 @@
SourceUrlUIPart,
StepStartUIPart,
TextUIPart,
ToolInputAvailablePart,
ToolOutputAvailablePart,
ToolOutputErrorPart,
ToolUIPart,
UIMessage,
UIMessagePart,
)
from .response_types import BaseChunk

Expand All @@ -57,6 +68,7 @@
__all__ = ['VercelAIAdapter']

request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData)
BUILTIN_TOOL_CALL_ID_PREFIX = 'pyd_ai_builtin'


@dataclass
Expand Down Expand Up @@ -122,7 +134,16 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: #
if isinstance(part, TextUIPart):
builder.add(TextPart(content=part.text))
elif isinstance(part, ReasoningUIPart):
builder.add(ThinkingPart(content=part.text))
pydantic_ai_meta = (part.provider_metadata or {}).get('pydantic_ai', {})
builder.add(
ThinkingPart(
content=part.text,
id=pydantic_ai_meta.get('id'),
signature=pydantic_ai_meta.get('signature'),
provider_name=pydantic_ai_meta.get('provider_name'),
provider_details=pydantic_ai_meta.get('provider_details'),
)
)
elif isinstance(part, FileUIPart):
try:
file = BinaryContent.from_data_uri(part.url)
Expand All @@ -141,7 +162,20 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: #
builtin_tool = part.provider_executed

tool_call_id = part.tool_call_id
args = part.input

args: str | dict[str, Any] | None = part.input

if isinstance(args, str):
try:
parsed = json.loads(args)
if isinstance(parsed, dict):
args = cast(dict[str, Any], parsed)
except json.JSONDecodeError:
pass
elif isinstance(args, dict) or args is None:
pass
else:
assert_never(args)

if builtin_tool:
call_part = BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args)
Expand Down Expand Up @@ -197,3 +231,204 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: #
assert_never(msg.role)

return builder.messages

@classmethod
def dump_messages(  # noqa: C901
    cls,
    messages: Sequence[ModelMessage],
) -> list[UIMessage]:
    """Transform Pydantic AI messages into Vercel AI messages.

    Args:
        messages: A sequence of `ModelMessage` objects to convert.

    Returns:
        A list of `UIMessage` objects in Vercel AI format.
    """

    def _message_id_generator() -> str:
        """Generate a fresh message ID."""
        return uuid.uuid4().hex

    # First pass: index tool results/errors from requests, keyed by tool call
    # ID, so they can be merged into the assistant message that made the call.
    tool_returns: dict[str, ToolReturnPart] = {}
    tool_errors: dict[str, RetryPromptPart] = {}

    for msg in messages:
        if isinstance(msg, ModelRequest):
            for part in msg.parts:
                if isinstance(part, ToolReturnPart):
                    tool_returns[part.tool_call_id] = part
                elif isinstance(part, RetryPromptPart) and part.tool_call_id:
                    tool_errors[part.tool_call_id] = part

    result: list[UIMessage] = []

    for msg in messages:
        if isinstance(msg, ModelRequest):
            system_ui_parts: list[UIMessagePart] = []
            user_ui_parts: list[UIMessagePart] = []

            for part in msg.parts:
                if isinstance(part, SystemPromptPart):
                    system_ui_parts.append(TextUIPart(text=part.content, state='done'))
                elif isinstance(part, UserPromptPart):
                    user_ui_parts.extend(_convert_user_prompt_part(part))
                elif isinstance(part, ToolReturnPart | RetryPromptPart):
                    # Tool returns/errors don't create separate UI parts:
                    # they're merged into the matching tool call in the
                    # assistant message via the indexes built above.
                    # NOTE(review): a RetryPromptPart WITHOUT a tool_call_id is
                    # never indexed and is silently dropped here — confirm it
                    # shouldn't surface as a plain text part instead.
                    pass
                else:
                    assert_never(part)

            if system_ui_parts:
                result.append(UIMessage(id=_message_id_generator(), role='system', parts=system_ui_parts))

            if user_ui_parts:
                result.append(UIMessage(id=_message_id_generator(), role='user', parts=user_ui_parts))

        elif isinstance(  # pragma: no branch
            msg, ModelResponse
        ):
            ui_parts: list[UIMessagePart] = []

            # For builtin tools, returns can be in the same ModelResponse as
            # the calls, so index them locally for this message.
            local_builtin_returns: dict[str, BuiltinToolReturnPart] = {
                part.tool_call_id: part for part in msg.parts if isinstance(part, BuiltinToolReturnPart)
            }

            for part in msg.parts:
                if isinstance(part, BuiltinToolReturnPart):
                    continue  # already indexed into local_builtin_returns
                elif isinstance(part, TextPart):
                    # Merge consecutive text parts into a single UI text part.
                    if ui_parts and isinstance(ui_parts[-1], TextUIPart):
                        last_text = ui_parts[-1]
                        ui_parts[-1] = last_text.model_copy(update={'text': last_text.text + part.content})
                    else:
                        ui_parts.append(TextUIPart(text=part.content, state='done'))
                elif isinstance(part, ThinkingPart):
                    # Round-trip thinking metadata through provider_metadata
                    # under the 'pydantic_ai' key so load_messages can restore
                    # id/signature/provider_name/provider_details.
                    thinking_metadata: dict[str, Any] = {}
                    if part.id is not None:
                        thinking_metadata['id'] = part.id
                    if part.signature is not None:
                        thinking_metadata['signature'] = part.signature
                    if part.provider_name is not None:
                        thinking_metadata['provider_name'] = part.provider_name
                    if part.provider_details is not None:
                        thinking_metadata['provider_details'] = part.provider_details

                    provider_metadata = {'pydantic_ai': thinking_metadata} if thinking_metadata else None
                    ui_parts.append(
                        ReasoningUIPart(text=part.content, state='done', provider_metadata=provider_metadata)
                    )
                elif isinstance(part, FilePart):
                    ui_parts.append(
                        FileUIPart(
                            url=part.content.data_uri,
                            media_type=part.content.media_type,
                        )
                    )
                elif isinstance(part, BaseToolCallPart):
                    if isinstance(part, BuiltinToolCallPart):
                        # Prefix the call ID so load_messages can recognize
                        # builtin tool calls and recover the provider name.
                        prefixed_id = (
                            f'{BUILTIN_TOOL_CALL_ID_PREFIX}|{part.provider_name or ""}|{part.tool_call_id}'
                        )
                        call_provider_metadata = (
                            {'pydantic_ai': {'provider_name': part.provider_name}}
                            if part.provider_name
                            else None
                        )

                        if builtin_return := local_builtin_returns.get(part.tool_call_id):
                            content = builtin_return.model_response_str()
                            ui_parts.append(
                                ToolOutputAvailablePart(
                                    type=f'tool-{part.tool_name}',
                                    tool_call_id=prefixed_id,
                                    input=part.args_as_json_str(),
                                    output=content,
                                    state='output-available',
                                    provider_executed=True,
                                    call_provider_metadata=call_provider_metadata,
                                )
                            )
                        else:  # pragma: no cover
                            ui_parts.append(
                                ToolInputAvailablePart(
                                    type=f'tool-{part.tool_name}',
                                    tool_call_id=prefixed_id,
                                    input=part.args_as_json_str(),
                                    state='input-available',
                                    provider_executed=True,
                                    # Fix: preserve provider metadata even when
                                    # the builtin call has no recorded return.
                                    call_provider_metadata=call_provider_metadata,
                                )
                            )
                    else:
                        tool_return = tool_returns.get(part.tool_call_id)
                        tool_error = tool_errors.get(part.tool_call_id)

                        if tool_return is not None:
                            ui_parts.append(
                                DynamicToolOutputAvailablePart(
                                    tool_name=part.tool_name,
                                    tool_call_id=part.tool_call_id,
                                    input=part.args_as_json_str(),
                                    output=tool_return.model_response_str(),
                                    state='output-available',
                                )
                            )
                        elif tool_error is not None:
                            ui_parts.append(
                                DynamicToolOutputErrorPart(
                                    tool_name=part.tool_name,
                                    tool_call_id=part.tool_call_id,
                                    input=part.args_as_json_str(),
                                    error_text=tool_error.model_response(),
                                    state='output-error',
                                )
                            )
                        else:
                            ui_parts.append(
                                DynamicToolInputAvailablePart(
                                    tool_name=part.tool_name,
                                    tool_call_id=part.tool_call_id,
                                    input=part.args_as_json_str(),
                                    state='input-available',
                                )
                            )
                else:
                    assert_never(part)

            if ui_parts:  # pragma: no branch
                result.append(UIMessage(id=_message_id_generator(), role='assistant', parts=ui_parts))
        else:
            assert_never(msg)

    return result


def _convert_user_prompt_part(part: UserPromptPart) -> list[UIMessagePart]:
    """Convert a UserPromptPart to a list of UI message parts."""
    # A plain-string prompt maps directly to a single text part.
    if isinstance(part.content, str):
        return [TextUIPart(text=part.content, state='done')]

    converted: list[UIMessagePart] = []
    for chunk in part.content:
        if isinstance(chunk, str):
            converted.append(TextUIPart(text=chunk, state='done'))
        elif isinstance(chunk, BinaryContent):
            converted.append(FileUIPart(url=chunk.data_uri, media_type=chunk.media_type))
        elif isinstance(chunk, ImageUrl | AudioUrl | VideoUrl | DocumentUrl):
            converted.append(FileUIPart(url=chunk.url, media_type=chunk.media_type))
        elif isinstance(chunk, CachePoint):
            # CachePoint only carries prompt-caching metadata; nothing to render.
            continue
        else:
            assert_never(chunk)
    return converted
13 changes: 13 additions & 0 deletions tests/test_ui.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,10 @@ class DummyUIAdapter(UIAdapter[DummyUIRunInput, ModelMessage, str, AgentDepsT, O
def build_run_input(cls, body: bytes) -> DummyUIRunInput:
return DummyUIRunInput.model_validate_json(body)

@classmethod
def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]:
    """Identity conversion: the dummy adapter's wire format is ModelMessage itself."""
    return [*messages]

@classmethod
def load_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]:
    """Identity conversion: dummy protocol messages are already ModelMessages."""
    return [*messages]
Expand Down Expand Up @@ -676,3 +680,12 @@ async def send(data: MutableMapping[str, Any]) -> None:
{'type': 'http.response.body', 'body': b'', 'more_body': False},
]
)


def test_dummy_adapter_dump_messages():
    """DummyUIAdapter.dump_messages should return the messages unchanged."""
    from pydantic_ai.messages import UserPromptPart

    history = [ModelRequest(parts=[UserPromptPart(content='Hello')])]
    assert DummyUIAdapter.dump_messages(history) == history
Loading