diff --git a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py
index a1ca12cd6e..8cad4aeffb 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py
@@ -143,6 +143,11 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]:
         """Transform protocol-specific messages into Pydantic AI messages."""
         raise NotImplementedError
 
+    @classmethod
+    def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[MessageT]:
+        """Transform Pydantic AI messages into protocol-specific messages."""
+        raise NotImplementedError
+
     @abstractmethod
     def build_event_stream(self) -> UIEventStream[RunInputT, EventT, AgentDepsT, OutputDataT]:
         """Build a protocol-specific event stream transformer."""
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
index 7eee52c419..fa82b9255b 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -2,10 +2,12 @@
 
 from __future__ import annotations
 
+import json
+import uuid
 from collections.abc import Sequence
 from dataclasses import dataclass
 from functools import cached_property
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, cast
 
 from pydantic import TypeAdapter
 from typing_extensions import assert_never
@@ -15,10 +17,13 @@
     BinaryContent,
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
+    CachePoint,
     DocumentUrl,
     FilePart,
     ImageUrl,
     ModelMessage,
+    ModelRequest,
+    ModelResponse,
     RetryPromptPart,
     SystemPromptPart,
     TextPart,
@@ -35,6 +40,9 @@
 from ._event_stream import VercelAIEventStream
 from .request_types import (
     DataUIPart,
+    DynamicToolInputAvailablePart,
+    DynamicToolOutputAvailablePart,
+    DynamicToolOutputErrorPart,
     DynamicToolUIPart,
     FileUIPart,
     ReasoningUIPart,
@@ -43,10 +51,12 @@
     SourceUrlUIPart,
     StepStartUIPart,
     TextUIPart,
+    ToolInputAvailablePart,
     ToolOutputAvailablePart,
     ToolOutputErrorPart,
     ToolUIPart,
     UIMessage,
+    UIMessagePart,
 )
 
 from .response_types import BaseChunk
@@ -122,7 +132,16 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: #
                     if isinstance(part, TextUIPart):
                         builder.add(TextPart(content=part.text))
                     elif isinstance(part, ReasoningUIPart):
-                        builder.add(ThinkingPart(content=part.text))
+                        pydantic_ai_meta = (part.provider_metadata or {}).get('pydantic_ai', {})
+                        builder.add(
+                            ThinkingPart(
+                                content=part.text,
+                                id=pydantic_ai_meta.get('id'),
+                                signature=pydantic_ai_meta.get('signature'),
+                                provider_name=pydantic_ai_meta.get('provider_name'),
+                                provider_details=pydantic_ai_meta.get('provider_details'),
+                            )
+                        )
                     elif isinstance(part, FileUIPart):
                         try:
                             file = BinaryContent.from_data_uri(part.url)
@@ -141,7 +160,20 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: #
                         builtin_tool = part.provider_executed
                         tool_call_id = part.tool_call_id
-                        args = part.input
+
+                        args: str | dict[str, Any] | None = part.input
+
+                        if isinstance(args, str):
+                            try:
+                                parsed = json.loads(args)
+                                if isinstance(parsed, dict):
+                                    args = cast(dict[str, Any], parsed)
+                            except json.JSONDecodeError:
+                                pass
+                        elif isinstance(args, dict) or args is None:
+                            pass
+                        else:
+                            assert_never(args)
 
                         if builtin_tool:
                             call_part = BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args)
@@ -197,3 +229,207 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: #
                 assert_never(msg.role)
 
         return builder.messages
+
+    @staticmethod
+    def _dump_request_message(msg: ModelRequest) -> tuple[list[UIMessagePart], list[UIMessagePart]]:
+        """Convert a ModelRequest into system and user UI message parts."""
+        system_ui_parts: list[UIMessagePart] = []
+        user_ui_parts: list[UIMessagePart] = []
+
+        for part in msg.parts:
+            if isinstance(part, SystemPromptPart):
+                system_ui_parts.append(TextUIPart(text=part.content, state='done'))
+            elif isinstance(part, UserPromptPart):
+                user_ui_parts.extend(_convert_user_prompt_part(part))
+            elif isinstance(part, ToolReturnPart):
+                # Tool returns are merged into the tool call in the assistant message
+                pass
+            elif isinstance(part, RetryPromptPart):
+                if part.tool_name:
+                    # Tool-related retries are handled when processing ToolCallPart in ModelResponse
+                    pass
+                else:
+                    # Non-tool retries (e.g., output validation errors) become user text
+                    user_ui_parts.append(TextUIPart(text=part.model_response(), state='done'))
+            else:
+                assert_never(part)
+
+        return system_ui_parts, user_ui_parts
+
+    @staticmethod
+    def _dump_response_message(  # noqa: C901
+        msg: ModelResponse,
+        tool_results: dict[str, ToolReturnPart | RetryPromptPart],
+    ) -> list[UIMessagePart]:
+        """Convert a ModelResponse into assistant UI message parts."""
+        ui_parts: list[UIMessagePart] = []
+
+        # For builtin tools, returns can be in the same ModelResponse as calls
+        local_builtin_returns: dict[str, BuiltinToolReturnPart] = {
+            part.tool_call_id: part for part in msg.parts if isinstance(part, BuiltinToolReturnPart)
+        }
+
+        for part in msg.parts:
+            if isinstance(part, BuiltinToolReturnPart):
+                continue
+            elif isinstance(part, TextPart):
+                # Combine consecutive text parts
+                if ui_parts and isinstance(ui_parts[-1], TextUIPart):
+                    ui_parts[-1].text += part.content
+                else:
+                    ui_parts.append(TextUIPart(text=part.content, state='done'))
+            elif isinstance(part, ThinkingPart):
+                thinking_metadata: dict[str, Any] = {}
+                if part.id is not None:
+                    thinking_metadata['id'] = part.id
+                if part.signature is not None:
+                    thinking_metadata['signature'] = part.signature
+                if part.provider_name is not None:
+                    thinking_metadata['provider_name'] = part.provider_name
+                if part.provider_details is not None:
+                    thinking_metadata['provider_details'] = part.provider_details
+
+                provider_metadata = {'pydantic_ai': thinking_metadata} if thinking_metadata else None
+                ui_parts.append(ReasoningUIPart(text=part.content, state='done', provider_metadata=provider_metadata))
+            elif isinstance(part, FilePart):
+                ui_parts.append(
+                    FileUIPart(
+                        url=part.content.data_uri,
+                        media_type=part.content.media_type,
+                    )
+                )
+            elif isinstance(part, BuiltinToolCallPart):
+                call_provider_metadata = (
+                    {'pydantic_ai': {'provider_name': part.provider_name}} if part.provider_name else None
+                )
+
+                if builtin_return := local_builtin_returns.get(part.tool_call_id):
+                    content = builtin_return.model_response_str()
+                    ui_parts.append(
+                        ToolOutputAvailablePart(
+                            type=f'tool-{part.tool_name}',
+                            tool_call_id=part.tool_call_id,
+                            input=part.args_as_json_str(),
+                            output=content,
+                            state='output-available',
+                            provider_executed=True,
+                            call_provider_metadata=call_provider_metadata,
+                        )
+                    )
+                else:
+                    ui_parts.append(
+                        ToolInputAvailablePart(
+                            type=f'tool-{part.tool_name}',
+                            tool_call_id=part.tool_call_id,
+                            input=part.args_as_json_str(),
+                            state='input-available',
+                            provider_executed=True,
+                            call_provider_metadata=call_provider_metadata,
+                        )
+                    )
+            elif isinstance(part, ToolCallPart):
+                tool_result = tool_results.get(part.tool_call_id)
+
+                if isinstance(tool_result, ToolReturnPart):
+                    content = tool_result.model_response_str()
+                    ui_parts.append(
+                        DynamicToolOutputAvailablePart(
+                            tool_name=part.tool_name,
+                            tool_call_id=part.tool_call_id,
+                            input=part.args_as_json_str(),
+                            output=content,
+                            state='output-available',
+                        )
+                    )
+                elif isinstance(tool_result, RetryPromptPart):
+                    error_text = tool_result.model_response()
+                    ui_parts.append(
+                        DynamicToolOutputErrorPart(
+                            tool_name=part.tool_name,
+                            tool_call_id=part.tool_call_id,
+                            input=part.args_as_json_str(),
+                            error_text=error_text,
+                            state='output-error',
+                        )
+                    )
+                else:
+                    ui_parts.append(
+                        DynamicToolInputAvailablePart(
+                            tool_name=part.tool_name,
+                            tool_call_id=part.tool_call_id,
+                            input=part.args_as_json_str(),
+                            state='input-available',
+                        )
+                    )
+            else:
+                assert_never(part)
+
+        return ui_parts
+
+    @classmethod
+    def dump_messages(
+        cls,
+        messages: Sequence[ModelMessage],
+    ) -> list[UIMessage]:
+        """Transform Pydantic AI messages into Vercel AI messages.
+
+        Args:
+            messages: A sequence of ModelMessage objects to convert
+
+        Returns:
+            A list of UIMessage objects in Vercel AI format
+        """
+        tool_results: dict[str, ToolReturnPart | RetryPromptPart] = {}
+
+        for msg in messages:
+            if isinstance(msg, ModelRequest):
+                for part in msg.parts:
+                    if isinstance(part, ToolReturnPart):
+                        tool_results[part.tool_call_id] = part
+                    elif isinstance(part, RetryPromptPart) and part.tool_name:
+                        tool_results[part.tool_call_id] = part
+
+        result: list[UIMessage] = []
+
+        for msg in messages:
+            if isinstance(msg, ModelRequest):
+                system_ui_parts, user_ui_parts = cls._dump_request_message(msg)
+                if system_ui_parts:
+                    result.append(UIMessage(id=str(uuid.uuid4()), role='system', parts=system_ui_parts))
+
+                if user_ui_parts:
+                    result.append(UIMessage(id=str(uuid.uuid4()), role='user', parts=user_ui_parts))
+
+            elif isinstance(  # pragma: no branch
+                msg, ModelResponse
+            ):
+                ui_parts: list[UIMessagePart] = cls._dump_response_message(msg, tool_results)
+                if ui_parts:  # pragma: no branch
+                    result.append(UIMessage(id=str(uuid.uuid4()), role='assistant', parts=ui_parts))
+            else:
+                assert_never(msg)
+
+        return result
+
+
+def _convert_user_prompt_part(part: UserPromptPart) -> list[UIMessagePart]:
+    """Convert a UserPromptPart to a list of UI message parts."""
+    ui_parts: list[UIMessagePart] = []
+
+    if isinstance(part.content, str):
+        ui_parts.append(TextUIPart(text=part.content, state='done'))
+    else:
+        for item in part.content:
+            if isinstance(item, str):
+                ui_parts.append(TextUIPart(text=item, state='done'))
+            elif isinstance(item, BinaryContent):
+                ui_parts.append(FileUIPart(url=item.data_uri, media_type=item.media_type))
+            elif isinstance(item, ImageUrl | AudioUrl | VideoUrl | DocumentUrl):
+                ui_parts.append(FileUIPart(url=item.url, media_type=item.media_type))
+            elif isinstance(item, CachePoint):
+                # CachePoint is metadata for prompt caching, skip for UI conversion
+                pass
+            else:
+                assert_never(item)
+
+    return ui_parts
diff --git a/tests/test_ui.py b/tests/test_ui.py
index a497d09389..37d22c6ecb 100644
--- a/tests/test_ui.py
+++ b/tests/test_ui.py
@@ -87,6 +87,10 @@ class DummyUIAdapter(UIAdapter[DummyUIRunInput, ModelMessage, str, AgentDepsT, O
     def build_run_input(cls, body: bytes) -> DummyUIRunInput:
         return DummyUIRunInput.model_validate_json(body)
 
+    @classmethod
+    def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]:
+        return list(messages)
+
     @classmethod
     def load_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]:
        return list(messages)
@@ -676,3 +680,12 @@ async def send(data: MutableMapping[str, Any]) -> None:
            {'type': 
'http.response.body', 'body': b'', 'more_body': False}, ] ) + + +def test_dummy_adapter_dump_messages(): + """Test that DummyUIAdapter.dump_messages returns messages as-is.""" + from pydantic_ai.messages import UserPromptPart + + messages = [ModelRequest(parts=[UserPromptPart(content='Hello')])] + result = DummyUIAdapter.dump_messages(messages) + assert result == messages diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 9ac6137e8c..917bb51912 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -1956,3 +1956,861 @@ async def test_adapter_load_messages(): ), ] ) + + +async def test_adapter_dump_messages(): + """Test dumping Pydantic AI messages to Vercel AI format.""" + messages = [ + ModelRequest( + parts=[ + SystemPromptPart(content='You are a helpful assistant.'), + UserPromptPart(content='Hello, world!'), + ] + ), + ModelResponse( + parts=[ + TextPart(content='Hi there!'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + + # we need to dump the BaseModels to dicts for `IsStr` to work properly in snapshot + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'system', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'You are a helpful assistant.', 'state': 'done', 'provider_metadata': None} + ], + }, + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Hello, world!', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Hi there!', 'state': 'done', 'provider_metadata': None}], + }, + ] + ) + + +async def test_adapter_dump_messages_with_tools(): + """Test dumping messages with tool calls and returns.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Search for something')]), + ModelResponse( + parts=[ + TextPart(content='Let me search for that.'), + ToolCallPart( + tool_name='web_search', + args={'query': 'test query'}, + tool_call_id='tool_123', + ), + ] + ), + ModelRequest( + parts=[ + ToolReturnPart( + tool_name='web_search', + content={'results': ['result1', 'result2']}, + tool_call_id='tool_123', + ) + ] + ), + ModelResponse(parts=[TextPart(content='Here are the results.')]), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Search for something', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Let me search for that.', 'state': 'done', 'provider_metadata': None}, + { + 'type': 'dynamic-tool', + 'tool_name': 'web_search', + 'tool_call_id': 'tool_123', + 'state': 'output-available', + 'input': '{"query":"test query"}', + 'output': '{"results":["result1","result2"]}', + 'call_provider_metadata': None, + 'preliminary': None, + }, + ], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Here are the results.', 'state': 'done', 'provider_metadata': None} + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_builtin_tools(): + """Test dumping messages with builtin tool calls.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Search for something')]), + ModelResponse( + parts=[ + 
BuiltinToolCallPart( + tool_name='web_search', + args={'query': 'test'}, + tool_call_id='tool_456', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='web_search', + content={'status': 'completed'}, + tool_call_id='tool_456', + provider_name='openai', + ), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Search for something', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'tool-web_search', + 'tool_call_id': 'tool_456', + 'state': 'output-available', + 'input': '{"query":"test"}', + 'output': '{"status":"completed"}', + 'provider_executed': True, + 'call_provider_metadata': {'pydantic_ai': {'provider_name': 'openai'}}, + 'preliminary': None, + } + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_builtin_tool_without_return(): + """Test dumping messages with a builtin tool call that has no return in the same message.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Search for something')]), + ModelResponse( + parts=[ + BuiltinToolCallPart( + tool_name='web_search', + args={'query': 'orphan query'}, + tool_call_id='orphan_tool_id', + provider_name='openai', + ), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Search for something', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'tool-web_search', + 'tool_call_id': 'orphan_tool_id', + 'state': 'input-available', + 'input': '{"query":"orphan query"}', + 'provider_executed': True, + 'call_provider_metadata': {'pydantic_ai': {'provider_name': 'openai'}}, + } + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_thinking(): + """Test dumping messages with thinking parts.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Tell me something')]), + ModelResponse( + parts=[ + ThinkingPart(content='Let me think about this...'), + TextPart(content='Here is my answer.'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Tell me something', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'reasoning', + 'text': 'Let me think about this...', + 'state': 'done', + 'provider_metadata': None, + }, + {'type': 'text', 'text': 'Here is my answer.', 'state': 'done', 'provider_metadata': None}, + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_files(): + """Test dumping messages with file parts.""" + messages = [ + ModelRequest( + parts=[ + UserPromptPart( + content=[ + 'Here is an image:', + BinaryImage(data=b'fake_image', media_type='image/png'), + ImageUrl(url='https://example.com/image.png', media_type='image/png'), + ] + ) + ] + ), + ModelResponse( + parts=[ + TextPart(content='Nice image!'), + 
FilePart(content=BinaryContent(data=b'response_file', media_type='application/pdf')), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Here is an image:', 'state': 'done', 'provider_metadata': None}, + { + 'type': 'file', + 'media_type': 'image/png', + 'filename': None, + 'url': 'data:image/png;base64,ZmFrZV9pbWFnZQ==', + 'provider_metadata': None, + }, + { + 'type': 'file', + 'media_type': 'image/png', + 'filename': None, + 'url': 'https://example.com/image.png', + 'provider_metadata': None, + }, + ], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Nice image!', 'state': 'done', 'provider_metadata': None}, + { + 'type': 'file', + 'media_type': 'application/pdf', + 'filename': None, + 'url': 'data:application/pdf;base64,cmVzcG9uc2VfZmlsZQ==', + 'provider_metadata': None, + }, + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_retry(): + """Test dumping messages with retry prompts.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Do something')]), + ModelResponse( + parts=[ + ToolCallPart(tool_name='my_tool', args={'arg': 'value'}, tool_call_id='tool_789'), + ] + ), + ModelRequest( + parts=[ + RetryPromptPart( + content='Tool failed with error', + tool_name='my_tool', + tool_call_id='tool_789', + ) + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Do something', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'dynamic-tool', + 'tool_name': 'my_tool', + 'tool_call_id': 'tool_789', + 'state': 'output-error', + 'input': '{"arg":"value"}', + 'error_text': """\ +Tool failed with error + +Fix the errors and try again.\ +""", + 'call_provider_metadata': None, + } + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_retry_no_tool_name(): + """Test dumping messages with retry prompts without tool_name (e.g., output validation errors).""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Give me a number')]), + ModelResponse(parts=[TextPart(content='Not a valid number')]), + ModelRequest( + parts=[ + RetryPromptPart( + content='Output validation failed: expected integer', + # No tool_name - this is an output validation error, not a tool error + ) + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Give me a number', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Not a valid number', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [ + { + 'type': 'text', + 'text': """\ +Validation feedback: +Output validation failed: expected integer + +Fix the errors and try again.\ +""", + 'state': 'done', + 'provider_metadata': None, + } + ], + }, + ] + ) + + 
+async def test_adapter_dump_messages_consecutive_text(): + """Test that consecutive text parts are concatenated correctly.""" + messages = [ + ModelResponse( + parts=[ + TextPart(content='First '), + TextPart(content='second'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'First second', 'state': 'done', 'provider_metadata': None}], + } + ] + ) + + +async def test_adapter_dump_messages_text_with_interruption(): + """Test text concatenation with interruption.""" + messages = [ + ModelResponse( + parts=[ + TextPart(content='Before tool'), + BuiltinToolCallPart( + tool_name='test', + args={}, + tool_call_id='t1', + provider_name='test', + ), + BuiltinToolReturnPart( + tool_name='test', + content='result', + tool_call_id='t1', + provider_name='test', + ), + TextPart(content='After tool'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Before tool', 'state': 'done', 'provider_metadata': None}, + { + 'type': 'tool-test', + 'tool_call_id': 't1', + 'state': 'output-available', + 'input': '{}', + 'output': 'result', + 'provider_executed': True, + 'call_provider_metadata': {'pydantic_ai': {'provider_name': 'test'}}, + 'preliminary': None, + }, + { + 'type': 'text', + 'text': 'After tool', + 'state': 'done', + 'provider_metadata': None, + }, + ], + } + ] + ) + + +async def test_adapter_dump_load_roundtrip(): + """Test that dump_messages and load_messages are approximately inverse operations.""" + original_messages = [ + ModelRequest( + parts=[ + SystemPromptPart(content='System message'), + UserPromptPart(content='User message'), + ] + ), + ModelResponse( + parts=[ + TextPart(content='Response text'), + ToolCallPart(tool_name='tool1', args={'key': 'value'}, tool_call_id='tc1'), + ] + ), + ModelRequest(parts=[ToolReturnPart(tool_name='tool1', content='tool result', tool_call_id='tc1')]), + ModelResponse( + parts=[ + TextPart(content='Final response'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(original_messages) + + def sync_timestamps(original: list[ModelRequest | ModelResponse], new: list[ModelRequest | ModelResponse]) -> None: + for orig_msg, new_msg in zip(original, new): + for orig_part, new_part in zip(orig_msg.parts, new_msg.parts): + if hasattr(orig_part, 'timestamp') and hasattr(new_part, 'timestamp'): + new_part.timestamp = orig_part.timestamp # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] + if hasattr(orig_msg, 'timestamp') and hasattr(new_msg, 'timestamp'): + new_msg.timestamp = orig_msg.timestamp # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] + + # Load back to Pydantic AI format + reloaded_messages = VercelAIAdapter.load_messages(ui_messages) + sync_timestamps(original_messages, reloaded_messages) + + assert reloaded_messages == original_messages + + +async def test_adapter_dump_messages_text_before_thinking(): + """Test dumping messages where text precedes a thinking part.""" + messages = [ + ModelResponse( + parts=[ + TextPart(content='Let me check.'), + ThinkingPart(content='Okay, I am checking now.'), + ] + ), + ] + + ui_messages = 
VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Let me check.', 'state': 'done', 'provider_metadata': None}, + { + 'type': 'reasoning', + 'text': 'Okay, I am checking now.', + 'state': 'done', + 'provider_metadata': None, + }, + ], + } + ] + ) + + +async def test_adapter_dump_messages_tool_call_without_return(): + """Test dumping messages with a tool call that has no corresponding result.""" + messages = [ + ModelResponse( + parts=[ + ToolCallPart( + tool_name='get_weather', + args={'city': 'New York'}, + tool_call_id='tool_abc', + ), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'dynamic-tool', + 'tool_name': 'get_weather', + 'tool_call_id': 'tool_abc', + 'state': 'input-available', + 'input': '{"city":"New York"}', + 'call_provider_metadata': None, + } + ], + } + ] + ) + + +async def test_adapter_dump_messages_assistant_starts_with_tool(): + """Test an assistant message that starts with a tool call instead of text.""" + messages = [ + ModelResponse( + parts=[ + ToolCallPart(tool_name='t', args={}, tool_call_id='tc1'), + TextPart(content='Some text'), + ] + ) + ] + ui_messages = VercelAIAdapter.dump_messages(messages) + + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'dynamic-tool', + 'tool_name': 't', + 'tool_call_id': 'tc1', + 'state': 'input-available', + 'input': '{}', + 'call_provider_metadata': None, + }, + { + 'type': 'text', + 'text': 'Some text', + 'state': 'done', + 'provider_metadata': None, + }, + ], + } + ] + ) + + +async def test_convert_user_prompt_part_without_urls(): + """Test converting a user prompt with only text and binary content.""" + from pydantic_ai.ui.vercel_ai._adapter import _convert_user_prompt_part # pyright: ignore[reportPrivateUsage] + + part = UserPromptPart(content=['text part', BinaryContent(data=b'data', media_type='application/pdf')]) + ui_parts = _convert_user_prompt_part(part) + assert ui_parts == snapshot( + [ + TextUIPart(text='text part', state='done'), + FileUIPart(media_type='application/pdf', url='data:application/pdf;base64,ZGF0YQ=='), + ] + ) + + +async def test_adapter_dump_messages_file_without_text(): + """Test a file part appearing without any preceding text.""" + messages = [ + ModelResponse( + parts=[ + FilePart(content=BinaryContent(data=b'file_data', media_type='image/png')), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'file', + 'media_type': 'image/png', + 'filename': None, + 'url': 'data:image/png;base64,ZmlsZV9kYXRh', + 'provider_metadata': None, + } + ], + } + ] + ) + + +async def test_convert_user_prompt_part_only_urls(): + """Test converting a user prompt with only URL content (no binary).""" + from pydantic_ai.ui.vercel_ai._adapter import _convert_user_prompt_part # pyright: ignore[reportPrivateUsage] + + part = UserPromptPart( + content=[ 
+ ImageUrl(url='https://example.com/img.png', media_type='image/png'), + VideoUrl(url='https://example.com/vid.mp4', media_type='video/mp4'), + ] + ) + ui_parts = _convert_user_prompt_part(part) + assert ui_parts == snapshot( + [ + FileUIPart(media_type='image/png', url='https://example.com/img.png'), + FileUIPart(media_type='video/mp4', url='https://example.com/vid.mp4'), + ] + ) + + +async def test_adapter_dump_messages_thinking_with_metadata(): + """Test dumping and loading messages with ThinkingPart metadata preservation.""" + original_messages = [ + ModelResponse( + parts=[ + ThinkingPart( + content='Let me think about this...', + id='thinking_123', + signature='sig_abc', + provider_name='anthropic', + provider_details={'model': 'claude-3'}, + ), + TextPart(content='Here is my answer.'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(original_messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'reasoning', + 'text': 'Let me think about this...', + 'state': 'done', + 'provider_metadata': { + 'pydantic_ai': { + 'id': 'thinking_123', + 'signature': 'sig_abc', + 'provider_name': 'anthropic', + 'provider_details': {'model': 'claude-3'}, + } + }, + }, + {'type': 'text', 'text': 'Here is my answer.', 'state': 'done', 'provider_metadata': None}, + ], + } + ] + ) + + # Test roundtrip - verify metadata is preserved when loading back + reloaded_messages = VercelAIAdapter.load_messages(ui_messages) + + # Sync timestamps for comparison (ModelResponse always has timestamp) + for orig_msg, new_msg in zip(original_messages, reloaded_messages): + new_msg.timestamp = orig_msg.timestamp # pyright: ignore[reportAttributeAccessIssue] + + assert reloaded_messages == original_messages + + +async def test_adapter_load_messages_json_list_args(): + """Test that JSON list args are kept as strings (not parsed).""" + ui_messages = [ + UIMessage( + id='msg1', + role='assistant', + parts=[ + DynamicToolOutputAvailablePart( + tool_name='my_tool', + tool_call_id='tc1', + input='[1, 2, 3]', # JSON list - should stay as string + output='result', + state='output-available', + ) + ], + ) + ] + + messages = VercelAIAdapter.load_messages(ui_messages) + + assert len(messages) == 2 # ToolCall in response + ToolReturn in request + response = messages[0] + assert isinstance(response, ModelResponse) + assert len(response.parts) == 1 + tool_call = response.parts[0] + assert isinstance(tool_call, ToolCallPart) + # Args should remain as string since it parses to a list, not a dict + assert tool_call.args == '[1, 2, 3]' + + +async def test_adapter_dump_messages_with_cache_point(): + """Test that CachePoint in user content is skipped during conversion.""" + from pydantic_ai.messages import CachePoint + + messages = [ + ModelRequest( + parts=[ + UserPromptPart( + content=[ + 'Hello', + CachePoint(), # Should be skipped + 'World', + ] + ) + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + # CachePoint should be omitted, only text parts remain + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Hello', 'state': 'done', 'provider_metadata': None}, + {'type': 'text', 'text': 'World', 'state': 'done', 'provider_metadata': None}, + ], + } + ] + )
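
For reference, a minimal usage sketch of the new `dump_messages` / `load_messages` pair. This is not part of the patch; it assumes `VercelAIAdapter` is importable from `pydantic_ai.ui.vercel_ai` and that an OpenAI model is configured:

```python
from pydantic_ai import Agent
from pydantic_ai.ui.vercel_ai import VercelAIAdapter  # assumed re-export location

agent = Agent('openai:gpt-4o')
result = agent.run_sync('Hello, world!')

# Convert the Pydantic AI message history into Vercel AI UIMessage objects,
# e.g. to persist them for a useChat-based frontend.
ui_messages = VercelAIAdapter.dump_messages(result.all_messages())

# Later, rebuild the ModelMessage history from the stored UIMessages
# and continue the conversation from where it left off.
history = VercelAIAdapter.load_messages(ui_messages)
result2 = agent.run_sync('Summarize that.', message_history=history)
```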