diff --git a/app/src/ai/agent_conversations_model_tests.rs b/app/src/ai/agent_conversations_model_tests.rs index e2bc0d050..467e26e7d 100644 --- a/app/src/ai/agent_conversations_model_tests.rs +++ b/app/src/ai/agent_conversations_model_tests.rs @@ -207,6 +207,7 @@ fn test_display_status_uses_active_execution_over_previous_conversation_status() parent_conversation_id: None, run_id: Some(task_id.clone()), autoexecute_override: None, + last_event_sequence: None, }, ); diff --git a/app/src/ai/agent_sdk/ambient.rs b/app/src/ai/agent_sdk/ambient.rs index 0e3cb86e1..95a3dad1b 100644 --- a/app/src/ai/agent_sdk/ambient.rs +++ b/app/src/ai/agent_sdk/ambient.rs @@ -489,7 +489,7 @@ impl AmbientAgentRunner { parent_run_id: None, runtime_skills: vec![], referenced_attachments: vec![], - fork_from_conversation_id: None, + conversation_id: None, handoff_prep_token: None, }; diff --git a/app/src/ai/agent_sdk/mcp_config_tests.rs b/app/src/ai/agent_sdk/mcp_config_tests.rs index 0a1fe1e88..0bd9f3384 100644 --- a/app/src/ai/agent_sdk/mcp_config_tests.rs +++ b/app/src/ai/agent_sdk/mcp_config_tests.rs @@ -284,7 +284,7 @@ fn serializes_mcp_servers_as_object_not_string() { parent_run_id: None, runtime_skills: vec![], referenced_attachments: vec![], - fork_from_conversation_id: None, + conversation_id: None, handoff_prep_token: None, }; diff --git a/app/src/ai/ambient_agents/spawn_tests.rs b/app/src/ai/ambient_agents/spawn_tests.rs index 729974312..2f5f14c68 100644 --- a/app/src/ai/ambient_agents/spawn_tests.rs +++ b/app/src/ai/ambient_agents/spawn_tests.rs @@ -337,7 +337,7 @@ async fn poll_stops_on_terminal_failure_like_state() { parent_run_id: None, runtime_skills: vec![], referenced_attachments: vec![], - fork_from_conversation_id: None, + conversation_id: None, handoff_prep_token: None, }; @@ -481,7 +481,7 @@ async fn poll_for_session_join_info_waits_until_link_is_available() { parent_run_id: None, runtime_skills: vec![], referenced_attachments: vec![], - fork_from_conversation_id: 
None, + conversation_id: None, handoff_prep_token: None, }; diff --git a/app/src/ai/blocklist/agent_view/agent_input_footer/mod.rs b/app/src/ai/blocklist/agent_view/agent_input_footer/mod.rs index 4916fdc69..03dbb00e5 100644 --- a/app/src/ai/blocklist/agent_view/agent_input_footer/mod.rs +++ b/app/src/ai/blocklist/agent_view/agent_input_footer/mod.rs @@ -231,7 +231,7 @@ pub struct AgentInputFooter { // "Hand off to cloud" chip. Visibility is gated only on the // `OzHandoff && LocalToCloudHandoff` feature flags. Per-conversation // eligibility is enforced by `Workspace::start_local_to_cloud_handoff`, - // which falls through to splitting a fresh cloud-mode pane when the + // which surfaces an error toast and does not open a pane when the // active conversation isn't handoff-able. handoff_to_cloud_button: ViewHandle, @@ -359,8 +359,8 @@ impl AgentInputFooter { // "Hand off to cloud" chip. On click dispatches the workspace action that // splits a new cloud-mode pane next to the local pane; that pane handles // the rest of the handoff flow. The chip is always visible when the feature - // flags are on; per-conversation eligibility falls through to splitting a - // fresh cloud-mode pane in `Workspace::start_local_to_cloud_handoff`. + // flags are on; per-conversation eligibility surfaces an error toast and + // does not open a pane in `Workspace::start_local_to_cloud_handoff`. let handoff_to_cloud_button = ctx.add_typed_action_view(|_ctx| { ActionButton::new("", AgentInputButtonTheme) .with_icon(Icon::UploadCloud) @@ -1984,7 +1984,7 @@ impl AgentInputFooter { // Always render the chip when the feature flags are on. // Per-conversation eligibility (synced server token, non-empty // history) is enforced by `Workspace::start_local_to_cloud_handoff`, - // which falls through to splitting a fresh cloud-mode pane when + // which surfaces an error toast and does not open a pane when // the active conversation isn't handoff-able. 
Some(ChildView::new(&self.handoff_to_cloud_button).finish()) } diff --git a/app/src/ai/blocklist/agent_view/agent_input_footer/toolbar_item.rs b/app/src/ai/blocklist/agent_view/agent_input_footer/toolbar_item.rs index c20cda7cf..2ba2999ef 100644 --- a/app/src/ai/blocklist/agent_view/agent_input_footer/toolbar_item.rs +++ b/app/src/ai/blocklist/agent_view/agent_input_footer/toolbar_item.rs @@ -73,8 +73,8 @@ pub enum AgentToolbarItemKind { /// that splits a fresh cloud-mode pane next to the active local pane. /// Visibility is gated only on the `OzHandoff && LocalToCloudHandoff` feature /// flags so the chip is always available; the click handler in - /// `Workspace::start_local_to_cloud_handoff` falls through to opening a - /// fresh cloud-mode pane when the active conversation isn't handoff-able + /// `Workspace::start_local_to_cloud_handoff` surfaces an error toast and + /// does not open a pane when the active conversation isn't handoff-able /// (no synced server token, empty, or no active conversation at all). HandoffToCloud, } diff --git a/app/src/ai/blocklist/block.rs b/app/src/ai/blocklist/block.rs index f3a1fdb2f..816ab0645 100644 --- a/app/src/ai/blocklist/block.rs +++ b/app/src/ai/blocklist/block.rs @@ -1205,6 +1205,14 @@ impl AIBlock { ctx.subscribe_to_model(&agent_view_controller, |_, _, _, ctx| ctx.notify()); } + // Re-render when the cloud agent transitions through setup phases so the response + // footer (thumbs up/down, fork, credits) toggles correctly with `is_cloud_agent_pre_first_exchange`. + // Without this, the prior exchange's footer remains visible during a follow-up's + // "Step n/3" loading until something else triggers a redraw. + if let Some(ambient_agent_view_model) = ambient_agent_view_model.as_ref() { + ctx.subscribe_to_model(ambient_agent_view_model, |_, _, _, ctx| ctx.notify()); + } + ctx.subscribe_to_model(&context_model, |_, _, event, ctx| { if let BlocklistAIContextEvent::UpdatedPendingContext { .. 
} = event { ctx.notify(); diff --git a/app/src/ai/blocklist/block/status_bar.rs b/app/src/ai/blocklist/block/status_bar.rs index 8a57558c2..c8aa00df1 100644 --- a/app/src/ai/blocklist/block/status_bar.rs +++ b/app/src/ai/blocklist/block/status_bar.rs @@ -1157,6 +1157,7 @@ impl View for BlocklistAIStatusBar { is_cloud_agent_pre_first_exchange( Some(ambient_agent_view_model), &self.agent_view_controller, + &self.terminal_model, app, ) }) diff --git a/app/src/ai/blocklist/block/view_impl.rs b/app/src/ai/blocklist/block/view_impl.rs index 658265b05..cd3fb6718 100644 --- a/app/src/ai/blocklist/block/view_impl.rs +++ b/app/src/ai/blocklist/block/view_impl.rs @@ -159,22 +159,20 @@ fn add_slash_command_highlight( /// query blocks during live startup/streaming. /// /// To avoid duplicate UI, we suppress the AI block header/query only while the viewer is live -/// (not replaying historical conversation events). -/// -/// The prompts are rendered in the ambient-agent query block UI, so this helper only gates -/// duplicate rendering in the AI block path when that optimistic block was actually inserted. +/// (not replaying historical conversation events) AND the AI block's display query matches an +/// optimistically rendered user query. The per-query check is important for forked +/// conversations (e.g. local-to-cloud handoff) where the conversation's first exchange comes +/// from the source conversation and must remain visible — only the dispatched prompt has a +/// matching optimistic block to defer to. 
fn should_hide_ai_block_query_and_header( - has_inserted_cloud_mode_user_query_block: bool, has_optimistic_user_query: bool, is_shared_ambient_agent_session: bool, - is_first_exchange: bool, is_receiving_agent_conversation_replay: bool, ) -> bool { FeatureFlag::CloudModeSetupV2.is_enabled() && is_shared_ambient_agent_session && !is_receiving_agent_conversation_replay - && ((has_inserted_cloud_mode_user_query_block && is_first_exchange) - || has_optimistic_user_query) + && has_optimistic_user_query } #[cfg(test)] @@ -182,39 +180,31 @@ mod tests { use super::*; #[test] - fn test_should_hide_ai_block_query_and_header_for_initial_cloud_prompt() { + fn test_should_hide_ai_block_query_and_header_for_optimistic_prompt() { let _flag = FeatureFlag::CloudModeSetupV2.override_enabled(true); - assert!(should_hide_ai_block_query_and_header( - true, false, true, true, false - )); + assert!(should_hide_ai_block_query_and_header(true, true, false)); } #[test] - fn test_should_hide_ai_block_query_and_header_for_optimistic_followup_prompt() { + fn test_should_not_hide_ai_block_query_and_header_during_replay() { let _flag = FeatureFlag::CloudModeSetupV2.override_enabled(true); - assert!(should_hide_ai_block_query_and_header( - false, true, true, false, false - )); + assert!(!should_hide_ai_block_query_and_header(true, true, true)); } #[test] - fn test_should_not_hide_ai_block_query_and_header_during_replay() { + fn test_should_not_hide_ai_block_query_and_header_for_untracked_prompt() { let _flag = FeatureFlag::CloudModeSetupV2.override_enabled(true); - assert!(!should_hide_ai_block_query_and_header( - true, true, true, true, true - )); + assert!(!should_hide_ai_block_query_and_header(false, true, false)); } #[test] - fn test_should_not_hide_ai_block_query_and_header_for_untracked_prompt() { + fn test_should_not_hide_ai_block_query_and_header_outside_shared_session() { let _flag = FeatureFlag::CloudModeSetupV2.override_enabled(true); - assert!(!should_hide_ai_block_query_and_header( - 
false, false, true, false, false - )); + assert!(!should_hide_ai_block_query_and_header(true, false, false)); } } @@ -895,10 +885,6 @@ impl View for AIBlock { terminal_model.is_receiving_agent_conversation_replay(), ) }; - let is_first_exchange = conversation - .first_exchange() - .is_some_and(|exchange| exchange.id == self.client_ids.client_exchange_id); - let input_props = input::Props { comments: &self.comment_states, addressed_comment_ids: &addressed_comment_ids, @@ -929,22 +915,15 @@ impl View for AIBlock { query_and_index .as_ref() .is_some_and(|(query_for_display, ..)| { - let (has_inserted_cloud_mode_user_query_block, has_optimistic_user_query) = - self.ambient_agent_view_model - .as_ref() - .map(|model| { - let model = model.as_ref(app); - ( - model.has_inserted_cloud_mode_user_query_block(), - model.has_optimistic_user_query(query_for_display), - ) - }) - .unwrap_or((false, false)); + let has_optimistic_user_query = self + .ambient_agent_view_model + .as_ref() + .is_some_and(|model| { + model.as_ref(app).has_optimistic_user_query(query_for_display) + }); should_hide_ai_block_query_and_header( - has_inserted_cloud_mode_user_query_block, has_optimistic_user_query, is_shared_ambient_agent_session, - is_first_exchange, is_receiving_agent_conversation_replay, ) }); @@ -1093,6 +1072,14 @@ impl View for AIBlock { let is_conversation_transcript_viewer = terminal_model.is_conversation_transcript_viewer(); drop(terminal_model); + let is_cloud_agent_pre_first_exchange = + crate::terminal::view::ambient_agent::is_cloud_agent_pre_first_exchange( + self.ambient_agent_view_model.as_ref(), + &self.agent_view_controller, + &self.terminal_model, + app, + ); + contents.add_child(output::render( output::Props { model: self.model.as_ref(), @@ -1159,6 +1146,7 @@ impl View for AIBlock { .is_latest_non_passive_exchange_in_root_task(app) && self.has_imported_comments_in_current_thread(app), ask_user_question_view: self.ask_user_question_view.as_ref(), + 
is_cloud_agent_pre_first_exchange, }, app, )); diff --git a/app/src/ai/blocklist/block/view_impl/output.rs b/app/src/ai/blocklist/block/view_impl/output.rs index 390d74a60..69889a7cc 100644 --- a/app/src/ai/blocklist/block/view_impl/output.rs +++ b/app/src/ai/blocklist/block/view_impl/output.rs @@ -201,6 +201,12 @@ pub(crate) struct Props<'a> { pub(super) thinking_display_mode: crate::settings::ThinkingDisplayMode, pub(super) conversation_has_imported_comments: bool, pub(super) ask_user_question_view: Option<&'a ViewHandle>, + /// `true` when this block belongs to a cloud agent pane that is still in its setup + /// phase (running environment startup commands before the first agent turn). Used to + /// hide the response footer (thumbs up/down, credit usage, fork) until the agent has + /// produced real output — otherwise the footer renders awkwardly above the still- + /// pending optimistic user prompt. + pub(super) is_cloud_agent_pre_first_exchange: bool, } pub(super) fn render(props: Props, app: &AppContext) -> Box { @@ -245,6 +251,7 @@ pub(super) fn render(props: Props, app: &AppContext) -> Box { && !is_output_for_static_prompt_suggestions && !is_conversation_in_progress && request_type.is_active() + && !props.is_cloud_agent_pre_first_exchange && !status .error() .map(|e| e.is_invalid_api_key()) diff --git a/app/src/ai/blocklist/controller/shared_session.rs b/app/src/ai/blocklist/controller/shared_session.rs index 6bbbf55b8..a004cab8e 100644 --- a/app/src/ai/blocklist/controller/shared_session.rs +++ b/app/src/ai/blocklist/controller/shared_session.rs @@ -112,6 +112,20 @@ impl BlocklistAIController { let existing_conversation_id = self.find_existing_conversation_by_server_token(&init_event.conversation_id, ctx); let conversation_id = existing_conversation_id + .inspect(|conversation_id| { + // The local conversation is bound to a cloud-side session, so the cloud agent + // is the source of truth for user inputs going forward. 
Mark it as a shared- + // session view so `apply_client_actions` reconstructs UserQuery / ActionResult + // inputs from the cloud agent's response messages — without this, the local + // exchange's inputs stay empty and the AI block has no user query to render. + // Idempotent for conversations that already have the flag set (e.g. regular + // cloud mode, where `start_new_conversation` set it at creation time); + // important for REMOTE-1519 local-to-cloud handoff, where the local fork + // started as a non-shared-session conversation. + history.update(ctx, |history, _| { + history.set_viewing_shared_session_for_conversation(*conversation_id, true); + }); + }) .or_else(|| { let selected_conversation_id = self .context_model @@ -150,9 +164,11 @@ impl BlocklistAIController { h.start_new_conversation(terminal_view_id, false, true, ctx) }) }); - if self - .should_skip_replayed_response_for_existing_conversation(existing_conversation_id, ctx) - { + if self.should_skip_replayed_response_for_existing_conversation( + existing_conversation_id, + &init_event.request_id, + ctx, + ) { self.shared_session_state.current_response_id = Some(stream_id); self.shared_session_state .should_skip_current_replayed_response = true; @@ -220,22 +236,45 @@ impl BlocklistAIController { fn should_skip_replayed_response_for_existing_conversation( &self, existing_conversation_id: Option, + init_request_id: &str, ctx: &mut ModelContext, ) -> bool { let Some(conversation_id) = existing_conversation_id else { return false; }; let model = self.terminal_model.lock(); - if !model.is_receiving_agent_conversation_replay() - || !model.should_suppress_existing_agent_conversation_replay() - { + let is_receiving_replay = model.is_receiving_agent_conversation_replay(); + let should_suppress = model.should_suppress_existing_agent_conversation_replay(); + if !is_receiving_replay || !should_suppress { return false; } drop(model); - BlocklistAIHistoryModel::as_ref(ctx) + // Only skip the replayed response stream 
when we already have a local + // exchange whose `server_output_id` matches its `request_id`. New + // exchanges that the cloud agent appended after the local fork (e.g. + // the user's first submitted prompt for a REMOTE-1519 local-to-cloud + // handoff pane) carry request_ids we have never seen and must flow + // through normally so the viewer's blocklist picks them up. + let history = BlocklistAIHistoryModel::as_ref(ctx); + let known_server_output_ids: Vec = history .conversation(&conversation_id) - .is_some_and(|conversation| conversation.exchange_count() > 0) + .map(|conversation| { + conversation + .all_exchanges() + .into_iter() + .filter_map(|exchange| { + exchange + .output_status + .server_output_id() + .map(|sid| sid.to_string()) + }) + .collect() + }) + .unwrap_or_default(); + known_server_output_ids + .iter() + .any(|sid| sid == init_request_id) } fn on_shared_client_actions( diff --git a/app/src/ai/blocklist/handoff/mod.rs b/app/src/ai/blocklist/handoff/mod.rs index c7d057d28..d8666f834 100644 --- a/app/src/ai/blocklist/handoff/mod.rs +++ b/app/src/ai/blocklist/handoff/mod.rs @@ -4,10 +4,12 @@ //! filesystem path the local agent has touched, groups those paths into git //! roots and orphan files, and exposes the env-overlap pick used by the //! handoff pane bootstrap. -//! - `orchestrator`: drives the prep + upload phases of the handoff off the main -//! thread. The actual cloud-agent spawn happens inside the handoff pane's -//! `AmbientAgentViewModel::submit_handoff` so the regular streaming spawn flow -//! (loading screen, shared-session join) is reused unchanged. +//! +//! The chip-click open path lives in `Workspace::start_local_to_cloud_handoff` +//! and drives the prep-fork RPC + the async snapshot upload directly via +//! `AIClient::prepare_handoff_fork` and `agent_sdk::driver::upload_snapshot_for_handoff`. +//! The actual cloud-agent spawn happens inside the handoff pane's +//! 
`AmbientAgentViewModel::submit_handoff`, which reads the cached +//! `forked_conversation_id` and `snapshot_prep_token` off `PendingHandoff`. -pub(crate) mod orchestrator; pub(crate) mod touched_repos; diff --git a/app/src/ai/blocklist/handoff/orchestrator.rs b/app/src/ai/blocklist/handoff/orchestrator.rs deleted file mode 100644 index d37777dc6..000000000 --- a/app/src/ai/blocklist/handoff/orchestrator.rs +++ /dev/null @@ -1,70 +0,0 @@ -//! Drives the local-to-cloud handoff lifecycle. -//! -//! Runs the prep + upload phases off the main thread by handing a `TouchedWorkspace` -//! to `agent_sdk::driver::upload_snapshot_for_handoff`, which mints a `prep_token`, -//! gathers patches and file contents, and uploads everything (plus a -//! `snapshot_state.json` manifest) to GCS. -//! -//! The actual cloud-agent spawn happens inside the handoff pane's -//! `AmbientAgentViewModel::submit_handoff` so the streaming `TaskSpawned` → -//! `SessionStarted` events drive the loading screen + shared-session join the same -//! way a normal cloud agent does. Doing the spawn here would leave us with only a -//! task id, no streaming hook, and a blank pane. - -use std::sync::Arc; - -use anyhow::Result; -use http_client::Client as HttpClient; - -use crate::ai::agent::api::ServerConversationToken; -use crate::ai::agent_sdk::driver::upload_snapshot_for_handoff; -use crate::ai::blocklist::handoff::touched_repos::TouchedWorkspace; -use crate::server::server_api::ai::AIClient; - -/// Outcome of a successful prep + upload. `submit_handoff` builds a -/// `SpawnAgentRequest` from this and dispatches it through the same -/// `spawn_agent_with_request` path that regular cloud-mode runs use. 
-/// -/// The agent config (env, model, worker_host, computer_use_enabled, harness) is -/// intentionally not carried here — by the time `submit_handoff` consumes this, the -/// pane's env selector chip has already updated the model's `environment_id` and -/// `build_default_spawn_config` reads the rest from the model + global preferences. -pub(crate) struct HandoffPrepared { - /// `handoff_prep_token` returned by `prepare_handoff_snapshot`. `None` when the - /// touched workspace had no declarations — the cloud-side spawn skips snapshot - /// rehydration in that case. - pub prep_token: Option, - /// `fork_from_conversation_id` to set on the spawn request — always the source - /// conversation's server token. - pub fork_from_conversation_id: String, - /// User prompt typed into the handoff pane. - pub prompt: String, -} - -/// Drive the prep + upload phases of a handoff. Runs entirely off the main thread; -/// callers should `ctx.spawn` this future so the local pane stays interactive -/// throughout. The actual `spawn_agent` call is intentionally NOT performed here -/// — see the module docs for why. 
-pub(crate) async fn run_handoff( - source_conversation_id: ServerConversationToken, - workspace: TouchedWorkspace, - prompt: String, - client: Arc, - http: Arc, -) -> Result { - let repo_paths = workspace.repos.into_iter().map(|r| r.git_root).collect(); - let prep_token = upload_snapshot_for_handoff( - repo_paths, - workspace.orphan_files, - client, - http.as_ref(), - &source_conversation_id, - ) - .await?; - - Ok(HandoffPrepared { - prep_token, - fork_from_conversation_id: source_conversation_id.as_str().to_string(), - prompt, - }) -} diff --git a/app/src/ai/blocklist/history_model.rs b/app/src/ai/blocklist/history_model.rs index eb0910aad..e31809a19 100644 --- a/app/src/ai/blocklist/history_model.rs +++ b/app/src/ai/blocklist/history_model.rs @@ -1030,10 +1030,17 @@ impl BlocklistAIHistoryModel { /// /// The `prefix` parameter specifies the prefix added to the root task description /// (e.g., `FORK_PREFIX` for forks, `PRE_REWIND_PREFIX` for pre-rewind backups). + /// + /// When `preserve_task_ids` is true, the forked conversation reuses the source's task ids + /// instead of minting new ones. Used by the REMOTE-1519 local-to-cloud handoff path so the + /// local fork's task store matches the cloud-side fork (which is a byte-for-byte copy of the + /// source's GCS data and therefore preserves task ids). The cloud agent's `ClientAction`s + /// reference those task ids; if we minted new ones locally they would fail to resolve. 
pub fn fork_conversation( &mut self, source_conversation: &AIConversation, prefix: &str, + preserve_task_ids: bool, app: &AppContext, ) -> Result { let tasks: Vec = source_conversation @@ -1041,7 +1048,8 @@ impl BlocklistAIHistoryModel { .filter_map(|t| t.source().cloned()) .collect(); - let updated_tasks_with_new_ids = update_forked_task_properties(tasks, prefix); + let updated_tasks_with_new_ids = + update_forked_task_properties(tasks, prefix, preserve_task_ids); let Some(sqlite_sender) = GlobalResourceHandlesProvider::as_ref(app) .get() .model_event_sender @@ -1193,7 +1201,8 @@ impl BlocklistAIHistoryModel { )); } - let updated_tasks_with_new_ids = update_forked_task_properties(truncated_tasks, prefix); + let updated_tasks_with_new_ids = + update_forked_task_properties(truncated_tasks, prefix, false); let Some(sqlite_sender) = GlobalResourceHandlesProvider::as_ref(app) .get() @@ -2395,12 +2404,34 @@ impl From<&AIAgentOutputStatus> for AIQueryHistoryOutputStatus { /// Updates the given tasks, which are presumed to be clones of tasks from a source conversation to be /// used to back a fork or copy of the source conversation. /// -/// Reassigns new task IDs to each forked task to ensure task IDs remain globally unique and updates -/// description of the root task, prepending it with the given prefix. +/// When `preserve_task_ids` is false, reassigns new task IDs to each forked task to ensure task IDs +/// remain globally unique. When true, leaves task IDs as-is so the local fork's task store matches +/// an externally-known set of task ids (e.g. for REMOTE-1519 local-to-cloud handoff, where the cloud +/// agent's ClientActions reference the source's task ids and must resolve in the local fork). +/// +/// Always prepends the given prefix to the root task's description. 
fn update_forked_task_properties( tasks: Vec, prefix: &str, + preserve_task_ids: bool, ) -> Vec { + if preserve_task_ids { + return tasks + .into_iter() + .map(|mut t| { + let is_root = t + .dependencies + .as_ref() + .map(|deps| deps.parent_task_id.is_empty()) + .unwrap_or(true); + if is_root { + t.description = format!("{}{}", prefix, t.description); + } + t + }) + .collect(); + } + let mut old_to_new_task_ids = HashMap::new(); fn get_new_task_id(new_ids: &mut HashMap, old_task_id: &str) -> String { new_ids diff --git a/app/src/ai/blocklist/history_model_test.rs b/app/src/ai/blocklist/history_model_test.rs index 6dc2a4f48..de99ed911 100644 --- a/app/src/ai/blocklist/history_model_test.rs +++ b/app/src/ai/blocklist/history_model_test.rs @@ -1273,3 +1273,101 @@ fn test_set_server_conversation_token_rebinds_reverse_index() { }); }); } + +/// REMOTE-1519 fork-on-chip-click flow. +/// Forking the local conversation must: +/// 1. carry the source's server token forward as `forked_from_*` (so the +/// cloud agent's response stream can be reconciled to the right local +/// conversation during replay), and +/// 2. accept a binding to the cloud T_C via +/// `set_server_conversation_token_for_conversation` such that the reverse +/// index resolves the cloud token to the forked conversation. +#[test] +fn test_fork_then_bind_handoff_token_resolves_to_forked_conversation() { + use crate::ai::agent::conversation::AIConversation; + use crate::persistence::model::AgentConversationData; + use crate::test_util::ai_agent_tasks::{create_api_task, create_message}; + + App::test((), |mut app| async move { + initialize_settings_for_tests(&mut app); + + // `fork_conversation` writes the new conversation through the + // sqlite sender, so a mock sender must be wired up. 
+ let (sender, _receiver) = std::sync::mpsc::sync_channel(2); + let mut global_resource_handles = GlobalResourceHandles::mock(&mut app); + global_resource_handles.model_event_sender = Some(sender); + app.add_singleton_model(|_| GlobalResourceHandlesProvider::new(global_resource_handles)); + + let history_model = app.add_singleton_model(|_| BlocklistAIHistoryModel::new(vec![], &[])); + let terminal_view_id = EntityId::new(); + + // Build a source conversation with a real root task (so `fork_conversation` + // has a `Task::source()` to copy forward) and the local-side server token T_L. + let source_id = AIConversationId::new(); + let root_task = create_api_task( + "root-task", + vec![create_message("root-task-message", "root-task")], + ); + let source = AIConversation::new_restored( + source_id, + vec![root_task], + Some(AgentConversationData { + server_conversation_token: Some("src-token".to_string()), + conversation_usage_metadata: None, + reverted_action_ids: None, + forked_from_server_conversation_token: None, + artifacts_json: None, + parent_agent_id: None, + agent_name: None, + parent_conversation_id: None, + run_id: None, + autoexecute_override: None, + last_event_sequence: None, + }), + ) + .expect("restored source conversation should build"); + history_model.update(&mut app, |model, ctx| { + model.restore_conversations(terminal_view_id, vec![source], ctx); + }); + + // Fork the local conversation (REMOTE-1519: fork-on-chip-click). 
+ let forked_id = history_model.update(&mut app, |model, ctx| { + let source = model + .conversation(&source_id) + .expect("source conversation must be in memory after restore") + .clone(); + let forked = model + .fork_conversation(&source, "[Fork] ", false, ctx) + .expect("fork must succeed when sqlite sender is wired up"); + assert_eq!( + forked + .forked_from_server_conversation_token() + .map(|t| t.as_str()), + Some("src-token"), + "forked conversation must carry its source token for replay reconciliation", + ); + assert!( + forked.server_conversation_token().is_none(), + "freshly forked conversation must not yet have a server token of its own", + ); + forked.id() + }); + + // Bind the cloud T_C returned by `prepare-fork` to the forked conversation. + history_model.update(&mut app, |model, _| { + model.set_server_conversation_token_for_conversation( + forked_id, + "cloud-T".to_string(), + ); + }); + + let cloud_token = ServerConversationToken::new("cloud-T".to_string()); + history_model.read(&app, |model, _| { + assert_eq!( + model.find_conversation_id_by_server_token(&cloud_token), + Some(forked_id), + "after binding, cloud T_C must resolve to the forked conversation", + ); + }); + }); +} diff --git a/app/src/pane_group/pane/terminal_pane.rs b/app/src/pane_group/pane/terminal_pane.rs index c28f5aa55..ccf96726d 100644 --- a/app/src/pane_group/pane/terminal_pane.rs +++ b/app/src/pane_group/pane/terminal_pane.rs @@ -1389,7 +1389,7 @@ fn handle_terminal_view_event( parent_run_id: Some(parent_run_id), runtime_skills, referenced_attachments: vec![], - fork_from_conversation_id: None, + conversation_id: None, handoff_prep_token: None, }; diff --git a/app/src/server/server_api/ai.rs b/app/src/server/server_api/ai.rs index a4f124679..bd7f545a3 100644 --- a/app/src/server/server_api/ai.rs +++ b/app/src/server/server_api/ai.rs @@ -226,12 +226,11 @@ pub struct SpawnAgentRequest { /// Base64-encoded `warp.multi_agent.v1.Attachment` payloads to restore as referenced 
attachments. #[serde(skip_serializing_if = "Vec::is_empty")] pub referenced_attachments: Vec, - /// When set, instructs the server to fork the named conversation and use the resulting - /// fork id as `task.AgentConversationID`. Mutually exclusive with the existing - /// `conversation_id` field (resume semantics) on the server. Used by the local-to-cloud - /// handoff flow (REMOTE-1486). + /// Server-side conversation id to resume against (sets `task.AgentConversationID`). + /// For local-to-cloud handoff (REMOTE-1519) this is the forked conversation id + /// returned by `POST /agent/handoff/prepare-fork` at chip-click time. #[serde(skip_serializing_if = "Option::is_none")] - pub fork_from_conversation_id: Option, + pub conversation_id: Option, /// References a batch of files previously uploaded to handoff_prep/{prep_token}/ via /// `POST /agent/handoff/prepare-snapshot`. The server moves them into /// snapshots/{task_id}/{execution_id}/ post-task-creation. @@ -269,6 +268,22 @@ pub struct HandoffSnapshotUploadInfo { pub upload_url: String, } +/// Request body for `POST /agent/handoff/prepare-fork`. Used by the local-to-cloud +/// handoff flow (REMOTE-1519) to materialize a server-side fork of the source +/// conversation at chip-click time so the client can pre-populate the new pane. +#[derive(Debug, Clone, serde::Serialize)] +pub struct PrepareHandoffForkRequest { + pub source_conversation_id: String, +} + +/// Response body for `POST /agent/handoff/prepare-fork`. The returned id is sent on +/// the subsequent `POST /agent/runs` request under `conversation_id` (resume +/// semantics) so the new task picks up the fork directly. 
+#[derive(Debug, Clone, serde::Deserialize)] +pub struct PrepareHandoffForkResponse { + pub forked_conversation_id: String, +} + #[derive(Debug, Clone, serde::Serialize)] pub struct RunFollowupRequest { pub message: String, @@ -879,6 +894,14 @@ pub trait AIClient: 'static + Send + Sync { request: PrepareHandoffSnapshotRequest, ) -> anyhow::Result; + /// Materialize a server-side fork of the source conversation for a local-to-cloud handoff. + /// Called at chip-click time (REMOTE-1519) so the client can pre-populate the new pane + /// with the forked conversation before any task exists. + async fn prepare_handoff_fork( + &self, + request: PrepareHandoffForkRequest, + ) -> anyhow::Result; + async fn list_ambient_agent_tasks( &self, limit: i32, @@ -1508,6 +1531,16 @@ impl AIClient for ServerApi { Ok(response) } + async fn prepare_handoff_fork( + &self, + request: PrepareHandoffForkRequest, + ) -> anyhow::Result { + let response: PrepareHandoffForkResponse = self + .post_public_api("agent/handoff/prepare-fork", &request) + .await?; + Ok(response) + } + async fn list_ambient_agent_tasks( &self, limit: i32, diff --git a/app/src/server/server_api/ai_test.rs b/app/src/server/server_api/ai_test.rs index 1a859dbf7..f9c86c1fe 100644 --- a/app/src/server/server_api/ai_test.rs +++ b/app/src/server/server_api/ai_test.rs @@ -4,8 +4,8 @@ use chrono::Utc; use super::{ build_list_agent_runs_url, build_run_followup_url, AgentMessageHeader, AgentRunEvent, AgentSource, AmbientAgentTaskState, Artifact, ArtifactDownloadResponse, ArtifactType, - ExecutionLocation, ListRunsResponse, ReadAgentMessageResponse, RunFollowupRequest, RunSortBy, - RunSortOrder, TaskListFilter, + ExecutionLocation, ListRunsResponse, PrepareHandoffForkRequest, PrepareHandoffForkResponse, + ReadAgentMessageResponse, RunFollowupRequest, RunSortBy, RunSortOrder, TaskListFilter, }; use crate::notebooks::NotebookId; @@ -998,3 +998,31 @@ fn serialize_run_followup_request() { }) ); } + +#[test] +fn 
serialize_prepare_handoff_fork_request() { + let request = PrepareHandoffForkRequest { + source_conversation_id: "550e8400-e29b-41d4-a716-446655440000".to_string(), + }; + + let json = serde_json::to_value(request).unwrap(); + + assert_eq!( + json, + serde_json::json!({ + "source_conversation_id": "550e8400-e29b-41d4-a716-446655440000", + }) + ); +} + +#[test] +fn deserialize_prepare_handoff_fork_response() { + let response: PrepareHandoffForkResponse = serde_json::from_value(serde_json::json!({ + "forked_conversation_id": "abcdef01-2345-6789-abcd-ef0123456789", + })) + .unwrap(); + assert_eq!( + response.forked_conversation_id, + "abcdef01-2345-6789-abcd-ef0123456789" + ); +} diff --git a/app/src/terminal/input.rs b/app/src/terminal/input.rs index 05d724052..aebca7bc8 100644 --- a/app/src/terminal/input.rs +++ b/app/src/terminal/input.rs @@ -2142,18 +2142,14 @@ impl Input { }); } }); - // REMOTE-1486: prep+upload failures arrive here so we can - // repopulate the editor with the user's original prompt (the - // submit path cleared it before the orchestrator started) and - // surface the error as a toast. Without this branch the user is - // left staring at a blank composing pane after a silent log - // line. - if let AmbientAgentViewModelEvent::HandoffSubmissionFailed { - prompt, - error_message, - } = event + // REMOTE-1519: chip-click handoff prep+upload failures arrive + // here so we can surface the error as a toast. The editor + // buffer is intentionally left alone — the user's prompt was + // never cleared (chip-click happens before submit), so there + // is nothing to restore. 
+ if let AmbientAgentViewModelEvent::HandoffSubmissionFailed { error_message } = + event { - me.replace_buffer_content(prompt, ctx); let window_id = ctx.window_id(); let toast_message = format!("Failed to prepare cloud handoff: {error_message}"); diff --git a/app/src/terminal/shared_session/viewer/terminal_manager.rs b/app/src/terminal/shared_session/viewer/terminal_manager.rs index 5160e9575..2fa097e33 100644 --- a/app/src/terminal/shared_session/viewer/terminal_manager.rs +++ b/app/src/terminal/shared_session/viewer/terminal_manager.rs @@ -319,14 +319,27 @@ impl TerminalManager { /// Connects a deferred terminal manager to a shared session. /// This can only be called on a TerminalManager created with `new_deferred`. /// Returns `true` if the connection was initiated, `false` if already connected. - pub fn connect_to_session(&mut self, session_id: SessionId, ctx: &mut AppContext) -> bool { + /// + /// `append_followup_scrollback` controls whether the initial join uses + /// `AppendFollowupScrollback` mode instead of `ReplaceFromSessionScrollback`. + /// REMOTE-1519's local-to-cloud handoff pane sets this to `true` so the + /// pre-populated forked conversation isn't blown away by the cloud session's + /// replay scrollback, and so the existing `should_suppress_existing_agent_conversation_replay` + /// machinery skips response streams whose conversation we already have. 
+ pub fn connect_to_session( + &mut self, + session_id: SessionId, + append_followup_scrollback: bool, + ctx: &mut AppContext, + ) -> bool { + let load_mode = if append_followup_scrollback { + SharedSessionInitialLoadMode::AppendFollowupScrollback + } else { + SharedSessionInitialLoadMode::ReplaceFromSessionScrollback + }; match self.network_state { NetworkState::Idle => { - self.connect_session( - session_id, - SharedSessionInitialLoadMode::ReplaceFromSessionScrollback, - ctx, - ); + self.connect_session(session_id, load_mode, ctx); true } NetworkState::Connecting => { @@ -986,6 +999,7 @@ impl TerminalManager { && is_cloud_agent_pre_first_exchange( view.ambient_agent_view_model(), view.agent_view_controller(), + &view.model, ctx, ) { @@ -1296,12 +1310,15 @@ impl TerminalManager { }; // During cloud startup (pre-first-exchange), keep local input mode stable // and ignore remote shell/ai mode toggles from session-sharing context sync. - let is_pre_first_exchange = FeatureFlag::CloudModeSetupV2.is_enabled() - && is_cloud_agent_pre_first_exchange( - view.as_ref(ctx).ambient_agent_view_model(), - view.as_ref(ctx).agent_view_controller(), + let is_pre_first_exchange = FeatureFlag::CloudModeSetupV2.is_enabled() && { + let view_ref = view.as_ref(ctx); + is_cloud_agent_pre_first_exchange( + view_ref.ambient_agent_view_model(), + view_ref.agent_view_controller(), + &view_ref.model, ctx, - ); + ) + }; let suppress_input_mode_update = view.as_ref(ctx).is_shared_ambient_agent_session() || is_pre_first_exchange; if suppress_input_mode_update { diff --git a/app/src/terminal/view.rs b/app/src/terminal/view.rs index a19aa27c5..8c5db9961 100644 --- a/app/src/terminal/view.rs +++ b/app/src/terminal/view.rs @@ -5135,19 +5135,6 @@ impl TerminalView { .set_is_executing_oz_environment_startup_commands(false); } - // REMOTE-1486: clear the queued-prompt block on the cloud agent's first - // exchange for an Oz local-to-cloud handoff. 
Mirrors the third-party-harness - // path's `HarnessCommandStarted` cleanup, but for the Oz harness the first - // `AppendedExchange` is the analogous transition. Idempotent when no block - // is currently inserted. - if self - .ambient_agent_view_model - .as_ref() - .is_some_and(|model| model.as_ref(ctx).is_local_to_cloud_handoff()) - { - self.remove_pending_user_query_block(ctx); - } - let should_add_ai_block = history_model .as_ref(ctx) .conversation(conversation_id) @@ -6896,6 +6883,7 @@ impl TerminalView { && is_cloud_agent_pre_first_exchange( self.ambient_agent_view_model.as_ref(), &self.agent_view_controller, + &self.model, app, ) { @@ -23193,7 +23181,12 @@ impl TerminalView { // Save a backup of the conversation before truncating, so users can restore it later. BlocklistAIHistoryModel::handle(ctx).update(ctx, |history_model, ctx| { if let Some(conversation) = history_model.conversation(&conversation_id).cloned() { - if let Err(e) = history_model.fork_conversation(&conversation, PRE_REWIND_PREFIX, ctx) { + if let Err(e) = history_model.fork_conversation( + &conversation, + PRE_REWIND_PREFIX, + false, /* preserve_task_ids */ + ctx, + ) { log::warn!("Failed to save pre-rewind backup of conversation {conversation_id}: {e}"); } } else { @@ -25739,6 +25732,7 @@ impl View for TerminalView { && is_cloud_agent_pre_first_exchange( self.ambient_agent_view_model.as_ref(), &self.agent_view_controller, + &self.model, app, ) { diff --git a/app/src/terminal/view/ambient_agent/block/setup_command_text.rs b/app/src/terminal/view/ambient_agent/block/setup_command_text.rs index a0cdd947d..1ec2c9462 100644 --- a/app/src/terminal/view/ambient_agent/block/setup_command_text.rs +++ b/app/src/terminal/view/ambient_agent/block/setup_command_text.rs @@ -1,3 +1,5 @@ +use parking_lot::FairMutex; +use std::sync::Arc; use warp_core::ui::{appearance::Appearance, Icon}; use warpui::{ elements::ParentElement, @@ -14,8 +16,11 @@ use crate::{ inline_action::inline_action_icons, 
BlocklistAIHistoryEvent, BlocklistAIHistoryModel, }, - terminal::view::ambient_agent::{ - is_cloud_agent_pre_first_exchange, AmbientAgentViewModel, AmbientAgentViewModelEvent, + terminal::{ + view::ambient_agent::{ + is_cloud_agent_pre_first_exchange, AmbientAgentViewModel, AmbientAgentViewModelEvent, + }, + TerminalModel, }, }; @@ -55,6 +60,7 @@ impl SetupCommandState { pub struct CloudModeSetupTextBlock { ambient_agent_view_model: ModelHandle, agent_view_controller: ModelHandle, + terminal_model: Arc>, mouse_state: MouseStateHandle, } @@ -62,6 +68,7 @@ impl CloudModeSetupTextBlock { pub fn new( ambient_agent_view_model: ModelHandle, agent_view_controller: ModelHandle, + terminal_model: Arc>, ctx: &mut ViewContext, ) -> Self { if let Some(conversation_id) = agent_view_controller @@ -104,6 +111,7 @@ impl CloudModeSetupTextBlock { Self { ambient_agent_view_model, agent_view_controller, + terminal_model, mouse_state: Default::default(), } } @@ -148,6 +156,7 @@ impl View for CloudModeSetupTextBlock { if is_cloud_agent_pre_first_exchange( Some(&self.ambient_agent_view_model), &self.agent_view_controller, + &self.terminal_model, app, ) { "Running setup commands..." 
diff --git a/app/src/terminal/view/ambient_agent/mod.rs b/app/src/terminal/view/ambient_agent/mod.rs index 9df6c1bde..c3ba5dcb1 100644 --- a/app/src/terminal/view/ambient_agent/mod.rs +++ b/app/src/terminal/view/ambient_agent/mod.rs @@ -28,16 +28,18 @@ pub use model_selector::{ModelSelector, ModelSelectorAction, ModelSelectorEvent} pub use progress::{render_progress, ProgressProps, ProgressStep, ProgressStepState}; pub use progress_ui_state::AmbientAgentProgressUIState; pub use tips::{get_cloud_mode_tips, CloudModeTip}; +use parking_lot::FairMutex; +use std::sync::Arc; use warp_core::features::FeatureFlag; use crate::ai::blocklist::agent_view::{AgentViewController, AgentViewState}; -use crate::ai::blocklist::BlocklistAIHistoryModel; use crate::pane_group::TerminalViewResources; use crate::terminal::shared_session; use crate::terminal::TerminalManager; +use crate::terminal::TerminalModel; use crate::terminal::TerminalView; use warpui::geometry::vector::Vector2F; -use warpui::{AppContext, ModelHandle, SingletonEntity, ViewHandle, WindowId}; +use warpui::{AppContext, ModelHandle, ViewHandle, WindowId}; /// Creates a cloud mode terminal view and manager for ambient agent sessions. /// @@ -76,6 +78,7 @@ pub fn create_cloud_mode_view( log::warn!("Cloud mode view was created without an ambient agent view model"); return (terminal_view, terminal_manager); }; + let view_model_for_subscription = view_model.clone(); terminal_manager.update(ctx, |_, ctx| { ctx.subscribe_to_model(&view_model, move |manager, event, ctx| { let Some(manager) = manager @@ -86,7 +89,14 @@ pub fn create_cloud_mode_view( }; match event { AmbientAgentViewModelEvent::SessionReady { session_id } => { - manager.connect_to_session(*session_id, ctx); + // Local-to-cloud handoff panes pre-populate the forked + // conversation on chip click (REMOTE-1519). Use append-mode + // scrollback + replay suppression so the cloud agent's + // replay doesn't duplicate the blocks we already have. 
+ let append_followup_scrollback = view_model_for_subscription + .as_ref(ctx) + .is_local_to_cloud_handoff(); + manager.connect_to_session(*session_id, append_followup_scrollback, ctx); } AmbientAgentViewModelEvent::FollowupSessionReady { session_id } => { manager.attach_followup_session(*session_id, ctx); @@ -114,12 +124,24 @@ pub fn create_cloud_mode_view( (terminal_view, terminal_manager) } -/// Returns `true` when a cloud agent shared session is ready but no agent exchange has been -/// received yet. In this state, we hide the interactive input and render a loading footer -/// instead. +/// Returns `true` when a cloud agent shared session is in any pre-first-exchange phase — +/// either still spawning (loading: "Connecting to Host" / "Creating Environment" / +/// "Starting Environment") or running setup commands before the first agent turn. In this +/// state, we hide the interactive input and render a loading footer instead. +/// +/// During the loading phase the view-model status is `WaitingForSession`; once the cloud +/// session is ready and setup commands are running it transitions to `AgentRunning` and we +/// rely on `is_executing_oz_environment_startup_commands` (initialized true on cloud-agent +/// pane creation, flipped false on the first `AppendedExchange`) to decide whether the +/// agent has produced its first real turn yet. The flag is correct for both fresh cloud +/// panes and REMOTE-1519 local-to-cloud handoff panes (whose forked conversation already +/// has exchanges from the local source, but whose cloud agent has not yet produced its +/// first new turn) — the `AppendedExchange` handler in `view.rs` ensures the flag only +/// flips to false on a NEW cloud turn, not on replay-driven events. 
pub fn is_cloud_agent_pre_first_exchange( ambient_agent_view_model: Option<&ModelHandle>, agent_view_controller: &ModelHandle, + terminal_model: &Arc>, app: &AppContext, ) -> bool { if !(FeatureFlag::CloudMode.is_enabled() && FeatureFlag::AgentView.is_enabled()) { @@ -130,38 +152,44 @@ pub fn is_cloud_agent_pre_first_exchange( return false; }; - if !matches!( - ambient_agent_view_model.as_ref(app).status(), - Status::AgentRunning - ) { + let view_model = ambient_agent_view_model.as_ref(app); + + let is_in_pre_first_exchange_status = matches!( + view_model.status(), + Status::WaitingForSession { .. } | Status::AgentRunning + ); + if !is_in_pre_first_exchange_status { return false; } let agent_view_state = agent_view_controller.as_ref(app).agent_view_state().clone(); - let AgentViewState::Active { - conversation_id, - origin, - .. - } = agent_view_state - else { + let AgentViewState::Active { origin, .. } = agent_view_state else { return false; }; - if !origin.is_cloud_agent() { + // REMOTE-1519 handoff panes enter agent view with `RestoreExistingConversation` (because + // they restore the forked conversation), not `CloudAgent`. The `is_local_to_cloud_handoff` + // flag on the view model is the authoritative "this is a cloud agent pane" signal for that + // path, so accept either. + if !origin.is_cloud_agent() && !view_model.is_local_to_cloud_handoff() { return false; } // For non-oz harness runs, there is no Oz `AppendedExchange` to key off of, so we also // exit the pre-first-exchange phase when the harness CLI (e.g. `claude`, `gemini`) has // been detected. See `mark_harness_command_started`. 
- if ambient_agent_view_model - .as_ref(app) - .harness_command_started() - { + if view_model.harness_command_started() { return false; } - BlocklistAIHistoryModel::as_ref(app) - .conversation(&conversation_id) - .is_some_and(|conversation| conversation.exchange_count() == 0) + // Loading phase (`WaitingForSession`): no setup commands have started yet, but we're + // still pre-first-exchange. Skip the block-list flag check. + if matches!(view_model.status(), Status::WaitingForSession { .. }) { + return true; + } + + terminal_model + .lock() + .block_list() + .is_executing_oz_environment_startup_commands() } diff --git a/app/src/terminal/view/ambient_agent/model.rs b/app/src/terminal/view/ambient_agent/model.rs index dd025ea80..f16d88880 100644 --- a/app/src/terminal/view/ambient_agent/model.rs +++ b/app/src/terminal/view/ambient_agent/model.rs @@ -9,7 +9,6 @@ use warpui::r#async::{SpawnedFutureHandle, Timer}; use warpui::{AppContext, Entity, EntityId, ModelContext, SingletonEntity}; use crate::ai::active_agent_views_model::ActiveAgentViewsModel; -use crate::ai::agent::api::ServerConversationToken; use crate::ai::agent::{conversation::AIConversationId, extract_user_query_mode}; use crate::ai::ambient_agents::spawn::{spawn_task, submit_run_followup, AmbientAgentEvent}; use crate::ai::ambient_agents::task::HarnessConfig; @@ -18,7 +17,6 @@ use crate::ai::ambient_agents::AmbientAgentTaskId; use crate::ai::ambient_agents::{ OUT_OF_CREDITS_TASK_FAILURE_MESSAGE, SERVER_OVERLOADED_TASK_FAILURE_MESSAGE, }; -use crate::ai::blocklist::handoff::orchestrator::run_handoff; use crate::ai::blocklist::handoff::touched_repos::TouchedWorkspace; use crate::ai::blocklist::BlocklistAIHistoryModel; use crate::ai::cloud_environments::CloudAmbientAgentEnvironment; @@ -86,11 +84,18 @@ pub enum HandoffSubmissionState { /// `is_local_to_cloud_handoff()`. 
#[derive(Debug, Clone)] pub(crate) struct PendingHandoff { - /// Source conversation id (the local conversation's `server_conversation_token`). - pub(crate) source_conversation_id: ServerConversationToken, - /// `None` until `derive_touched_workspace` completes. + /// Forked conversation id minted by `POST /agent/handoff/prepare-fork` at + /// chip-click time. Sent under `conversation_id` (resume semantics) on the + /// subsequent `POST /agent/runs` request so the new task picks up the fork. + pub(crate) forked_conversation_id: String, + /// `None` until `derive_touched_workspace` completes (REMOTE-1486). pub(crate) touched_workspace: Option, - /// Gates submit — prevents double-submitting while the orchestrator is in flight. + /// Snapshot upload outcome: `None` while the upload is in flight or never + /// started; `Some(Some(token))` once minted (the standard case); + /// `Some(None)` when the workspace was empty so no upload happened. + /// `submit_handoff` requires this to be `Some(_)` before spawning. + pub(crate) snapshot_prep_token: Option>, + /// Gates submit — prevents double-submitting while the spawn is in flight. pub(crate) submission_state: HandoffSubmissionState, } @@ -155,8 +160,6 @@ pub struct AmbientAgentViewModel { /// Selected execution harness for the cloud agent run. /// Defaults to `Harness::Oz`. Used to populate `AgentConfigSnapshot.harness` on spawn. harness: Harness, - /// Whether the optimistic InitialUserQuery block has been inserted for the current run. - has_inserted_cloud_mode_user_query_block: bool, /// Whether the harness CLI (e.g. `claude`, `gemini`) has started running for a non-oz run. /// Used to transition the cloud-mode setup UI out of the pre-first-exchange phase when /// there is no oz `AppendedExchange` to key off of. 
@@ -199,7 +202,6 @@ impl AmbientAgentViewModel { task_id: None, conversation_id: None, harness: Harness::default(), - has_inserted_cloud_mode_user_query_block: false, harness_command_started: false, optimistically_rendered_user_queries: vec![], active_execution_session_id: None, @@ -375,6 +377,39 @@ impl AmbientAgentViewModel { ctx.emit(AmbientAgentViewModelEvent::PendingHandoffChanged); } + /// Records a chip-click handoff prep+upload failure on the pending handoff. + /// Flips the submission state to `Failed` (so the status footer / banner + /// reflects the error) and emits `HandoffSubmissionFailed` so the input + /// layer can surface a user-visible toast. + pub(crate) fn record_handoff_prep_failed( + &mut self, + error_message: String, + ctx: &mut ModelContext, + ) { + self.set_pending_handoff_submission_state( + HandoffSubmissionState::Failed(error_message.clone()), + ctx, + ); + ctx.emit(AmbientAgentViewModelEvent::HandoffSubmissionFailed { error_message }); + } + + /// Records the outcome of the chip-click async snapshot upload on the pending + /// handoff so `submit_handoff` can read the prep token without re-running + /// the upload. `Some(token)` is the standard success case; `None` means the + /// touched workspace was empty (no upload happened, no rehydration needed). + /// No-op when no handoff context is set. + pub(crate) fn set_pending_handoff_snapshot_prep_token( + &mut self, + prep_token: Option, + ctx: &mut ModelContext, + ) { + let Some(handoff) = self.pending_handoff.as_mut() else { + return; + }; + handoff.snapshot_prep_token = Some(prep_token); + ctx.emit(AmbientAgentViewModelEvent::PendingHandoffChanged); + } + /// Whether the harness CLI has started running. Only meaningful for non-oz runs. 
pub(super) fn harness_command_started(&self) -> bool { self.harness_command_started @@ -421,14 +456,6 @@ impl AmbientAgentViewModel { self.task_id } - pub fn has_inserted_cloud_mode_user_query_block(&self) -> bool { - self.has_inserted_cloud_mode_user_query_block - } - - pub fn set_has_inserted_cloud_mode_user_query_block(&mut self, has_inserted: bool) { - self.has_inserted_cloud_mode_user_query_block = has_inserted; - } - pub fn record_optimistic_user_query(&mut self, prompt: String) { self.optimistically_rendered_user_queries.push(prompt); } @@ -649,7 +676,6 @@ impl AmbientAgentViewModel { self.environment_id = None; self.task_id = None; self.conversation_id = None; - self.has_inserted_cloud_mode_user_query_block = false; self.harness_command_started = false; self.optimistically_rendered_user_queries.clear(); self.active_execution_session_id = None; @@ -721,7 +747,7 @@ impl AmbientAgentViewModel { parent_run_id: None, runtime_skills: vec![], referenced_attachments: vec![], - fork_from_conversation_id: None, + conversation_id: None, handoff_prep_token: None, }; @@ -1119,11 +1145,10 @@ impl AmbientAgentViewModel { /// Drive the local-to-cloud handoff submission for this pane. /// /// Called by the cloud-mode submit dispatch when the pane has `pending_handoff` - /// set. Runs the orchestrator off the main thread; on success, builds a - /// `SpawnAgentRequest` with `fork_from_conversation_id` + `handoff_prep_token` - /// set and routes it through the same `spawn_agent_with_request` path that - /// regular cloud-mode runs use — so `WaitingForSession` → `SessionStarted` - /// streaming reaches the same pane unchanged. + /// set. The fork (REMOTE-1519) and snapshot upload (REMOTE-1486) both happen + /// at chip-click time — this method just reads the cached `forked_conversation_id` + /// and `snapshot_prep_token` off the pending handoff and routes through the + /// same `spawn_agent_with_request` path that regular cloud-mode runs use. 
pub(crate) fn submit_handoff( &mut self, prompt: String, @@ -1135,73 +1160,45 @@ impl AmbientAgentViewModel { return; }; if matches!(handoff.submission_state, HandoffSubmissionState::Starting) { - // Double-submit guard: orchestrator already in flight. + // Double-submit guard: spawn already in flight. return; } - let Some(workspace) = handoff.touched_workspace.clone() else { + if handoff.touched_workspace.is_none() { log::warn!("submit_handoff called before touched-workspace derivation completed"); return; + } + let Some(prep_token) = handoff.snapshot_prep_token.clone() else { + log::warn!("submit_handoff called before snapshot upload completed"); + return; }; - let source_conversation_id = handoff.source_conversation_id.clone(); + let forked_conversation_id = handoff.forked_conversation_id.clone(); handoff.submission_state = HandoffSubmissionState::Starting; ctx.emit(AmbientAgentViewModelEvent::PendingHandoffChanged); - let server_api_provider = ServerApiProvider::as_ref(ctx); - let ai_client = server_api_provider.get_ai_client(); - let http = server_api_provider.get_http_client(); - - // Clone the prompt so the failure path can hand it back to the input - // layer for restoration. The orchestrator future consumes the original. - let prompt_for_retry = prompt.clone(); - - ctx.spawn( - async move { - run_handoff(source_conversation_id, workspace, prompt, ai_client, http).await - }, - move |me, result, ctx| match result { - Ok(prepared) => { - // Build the spawn config from the model so the env selector chip's - // pick (and `WARP_CLOUD_MODE_DEFAULT_HOST` / model / harness defaults) - // propagate into the spawn request. - let config = Some(me.build_default_spawn_config(ctx)); - // Strip any `/plan` / `/orchestrate` prefix from the prompt and surface - // it as the request's `mode` so the cloud agent honors the same modes - // the local-mode spawn path does. 
- let (prompt, mode) = extract_user_query_mode(prepared.prompt); - let request = SpawnAgentRequest { - prompt, - mode, - config, - title: None, - team: None, - skill: None, - attachments, - interactive: None, - parent_run_id: None, - runtime_skills: vec![], - referenced_attachments: vec![], - fork_from_conversation_id: Some(prepared.fork_from_conversation_id), - handoff_prep_token: prepared.prep_token, - }; - me.spawn_agent_with_request(request, ctx); - } - Err(err) => { - let error_message = format!("{err}"); - log::warn!("Handoff prep+upload failed: {err:#}"); - me.set_pending_handoff_submission_state( - HandoffSubmissionState::Failed(error_message.clone()), - ctx, - ); - // Emit the prompt back so the input layer can repopulate the - // editor and surface the error — otherwise the user is left - // staring at a blank composing pane with no retry path. - ctx.emit(AmbientAgentViewModelEvent::HandoffSubmissionFailed { - prompt: prompt_for_retry, - error_message, - }); - } - }, - ); + // Build the spawn config from the model so the env selector chip's + // pick (and `WARP_CLOUD_MODE_DEFAULT_HOST` / model / harness defaults) + // propagate into the spawn request. + let config = Some(self.build_default_spawn_config(ctx)); + // Strip any `/plan` / `/orchestrate` prefix from the prompt and surface + // it as the request's `mode` so the cloud agent honors the same modes + // the local-mode spawn path does. + let (prompt, mode) = extract_user_query_mode(prompt); + let request = SpawnAgentRequest { + prompt, + mode, + config, + title: None, + team: None, + skill: None, + attachments, + interactive: None, + parent_run_id: None, + runtime_skills: vec![], + referenced_attachments: vec![], + conversation_id: Some(forked_conversation_id), + handoff_prep_token: prep_token, + }; + self.spawn_agent_with_request(request, ctx); } /// Cancels the ambient agent task if one is currently running. 
@@ -1280,11 +1277,12 @@ pub enum AmbientAgentViewModelEvent { /// The pane's `pending_handoff` was updated — derivation completed, submission /// state transitioned, etc. PendingHandoffChanged, - /// The handoff prep + upload phase failed before the cloud agent was spawned. - /// Carries the user's original prompt so the input layer can repopulate the - /// editor for retry, plus the error message to surface as a toast. + /// The handoff prep + upload phase failed at chip-click time. The input + /// layer subscribes to surface the error as a toast; the editor buffer is + /// untouched because the user's prompt was never cleared (submit is gated + /// behind the cached prep token, so a failed upload prevents submit + /// entirely instead of consuming the prompt). HandoffSubmissionFailed { - prompt: String, error_message: String, }, diff --git a/app/src/terminal/view/ambient_agent/view_impl.rs b/app/src/terminal/view/ambient_agent/view_impl.rs index 5e4fef931..64f7b421d 100644 --- a/app/src/terminal/view/ambient_agent/view_impl.rs +++ b/app/src/terminal/view/ambient_agent/view_impl.rs @@ -141,14 +141,11 @@ impl TerminalView { } if FeatureFlag::CloudModeSetupV2.is_enabled() { let view_model = ambient_agent_view_model.as_ref(ctx); - let use_queued_prompt = view_model.is_third_party_harness() - || view_model.is_local_to_cloud_handoff(); + let use_queued_prompt = view_model.is_third_party_harness(); if use_queued_prompt { - // Non-oz runs and local-to-cloud handoff (REMOTE-1486) runs: - // render the submitted prompt via the queued-prompt UI on top of - // the conversation-history scaffold. The block is removed later - // by `HarnessCommandStarted` (non-oz) / first `AppendedExchange` - // (oz handoff) / failure / cancel / auth handlers. + // Non-oz runs render the submitted prompt via the queued-prompt UI on + // top of the conversation-history scaffold. The block is removed later + // by `HarnessCommandStarted` / failure / cancel / auth handlers. 
// // `request.prompt` is stored stripped of any `/plan` / `/orchestrate` // prefix; rebuild the display form from `request.mode` so the user sees @@ -176,7 +173,6 @@ impl TerminalView { ctx, ); ambient_agent_view_model.update(ctx, |model, _| { - model.set_has_inserted_cloud_mode_user_query_block(true); if let Some(prompt) = model.request().map(|request| request.prompt.clone()) { @@ -362,9 +358,9 @@ impl TerminalView { ctx.notify(); } AmbientAgentViewModelEvent::HandoffSubmissionFailed { .. } => { - // Restoration of the editor buffer + the user-visible toast are - // handled by `Input`'s subscription to the same event; nothing - // for the terminal view to do here beyond the implicit re-render. + // The user-visible toast is handled by `Input`'s subscription + // to the same event; nothing for the terminal view to do here + // beyond the implicit re-render. ctx.notify(); } AmbientAgentViewModelEvent::UpdatedSetupCommandVisibility => (), @@ -387,6 +383,7 @@ impl TerminalView { if !is_cloud_agent_pre_first_exchange( self.ambient_agent_view_model.as_ref(), &self.agent_view_controller, + &self.model, ctx, ) { return; @@ -423,10 +420,11 @@ impl TerminalView { .set_did_execute_a_setup_command(true); }); - let setup_command_text = ctx.add_typed_action_view(|ctx| { + let setup_command_text = ctx.add_typed_action_view(|ctx| { super::CloudModeSetupTextBlock::new( ambient_agent_view_model.clone(), self.agent_view_controller.clone(), + self.model.clone(), ctx, ) }); diff --git a/app/src/terminal/view/shared_session/view_impl_test.rs b/app/src/terminal/view/shared_session/view_impl_test.rs index 96b6cc8dd..811e718de 100644 --- a/app/src/terminal/view/shared_session/view_impl_test.rs +++ b/app/src/terminal/view/shared_session/view_impl_test.rs @@ -452,6 +452,8 @@ fn create_cloud_mode_task_for_user(creator_uid: &str) -> AmbientAgentTask { is_sandbox_running: false, agent_config_snapshot: None, artifacts: vec![], + last_event_sequence: None, + children: Vec::new(), } } diff --git 
a/app/src/workspace/view.rs b/app/src/workspace/view.rs index 97c7708dd..1b78bc4ec 100644 --- a/app/src/workspace/view.rs +++ b/app/src/workspace/view.rs @@ -110,6 +110,8 @@ use crate::util::openable_file_type::FileTarget; #[cfg(feature = "local_fs")] use crate::util::openable_file_type::{resolve_file_target_with_editor_choice, EditorLayout}; +use crate::ai::agent::conversation::AIConversation; +use crate::ai::agent_sdk::driver::upload_snapshot_for_handoff; use crate::ai::blocklist::agent_view::agent_input_footer::sort_environments_by_recency; use crate::ai::blocklist::handoff::touched_repos::{ derive_touched_workspace, extract_paths_from_conversation, pick_handoff_overlap_env, @@ -117,6 +119,7 @@ use crate::ai::blocklist::handoff::touched_repos::{ use crate::ai::blocklist::history_model::CloudConversationData; use crate::ai::blocklist::FORK_PREFIX; use crate::ai::cloud_environments::CloudAmbientAgentEnvironment; +use crate::server::server_api::ai::PrepareHandoffForkRequest; #[cfg(not(target_family = "wasm"))] use crate::terminal::cli_agent_sessions::plugin_manager::{plugin_manager_for, PluginModalKind}; use crate::terminal::cli_agent_sessions::{CLIAgentSessionsModel, CLIAgentSessionsModelEvent}; @@ -11600,7 +11603,12 @@ impl Workspace { ctx, ) } else { - history_model.fork_conversation(&source_conversation, FORK_PREFIX, ctx) + history_model.fork_conversation( + &source_conversation, + FORK_PREFIX, + false, /* preserve_task_ids */ + ctx, + ) } }); @@ -12980,15 +12988,18 @@ impl Workspace { /// Open a local-to-cloud handoff pane next to the active local pane. Triggered /// by the `/oz-cloud-handoff` slash command and the "Hand off to cloud" footer - /// chip. + /// chip (REMOTE-1486 / REMOTE-1519). /// - /// Resolves the active conversation up front. 
If there's an eligible source - /// conversation (active, non-empty, has a `server_conversation_token`), splits a - /// fresh cloud-mode pane to the right and seeds it with handoff context so the - /// submit path routes through the orchestrator. Otherwise, still splits a fresh - /// cloud-mode pane (no handoff context) so the chip is always-clickable per the - /// existing posture — there's nothing meaningful to hand off in that state, but - /// the user clearly wanted a cloud-mode pane. + /// When the active conversation is non-empty and has a server token, mints a + /// server-side fork via `POST /agent/handoff/prepare-fork`, then splits a fresh + /// cloud-mode pane next to the local pane and pre-populates it with the forked + /// conversation. + /// + /// All failure modes — ineligibility (no active conversation, empty, or no + /// synced server token), prepare-fork RPC failure, and local fork + /// materialization failure — surface an error toast in the local window and + /// **do not open** any pane. The local conversation is unaffected and the + /// user can retry by re-clicking the chip. fn start_local_to_cloud_handoff( &mut self, initial_prompt: Option<String>, @@ -13019,10 +13030,94 @@ }) }); - // Split a fresh cloud-mode pane to the right of the active pane. Mirrors - // `Workspace::open_network_log_pane`'s pattern but uses `add_ambient_agent_pane` - // so the new pane is wired up as a cloud-mode terminal (with the right pre- - // session shared-session viewer manager). + let Some((source_conversation, source_token)) = source else { + // Not eligible: surface an error toast and bail out. We deliberately + // do not open a fresh cloud-mode pane here — the chip is a + // hand-off-this-conversation action, and silently opening an + // unrelated fresh pane hides the failure from the user. + self.show_handoff_error_toast(ctx); + return; + }; + + // Eligible: kick off the prepare-fork RPC. 
The pane is **not** opened + // until the fork resolves, so a failed fork doesn't leave a stranded + // empty pane on screen. + let ai_client = ServerApiProvider::as_ref(ctx).get_ai_client(); + let request = PrepareHandoffForkRequest { + source_conversation_id: source_token.as_str().to_string(), + }; + ctx.spawn( + async move { ai_client.prepare_handoff_fork(request).await }, + move |me, result, ctx| match result { + Ok(response) => { + me.complete_local_to_cloud_handoff_open( + source_conversation, + source_token, + response.forked_conversation_id, + initial_prompt, + ctx, + ); + } + Err(err) => { + log::warn!("prepare_handoff_fork failed: {err:#}"); + me.show_handoff_error_toast(ctx); + } + }, + ); + } + + /// Surface the shared "Failed to prepare handoff" toast in the local + /// window. Used by every failure path in `start_local_to_cloud_handoff` + /// (ineligibility, prepare-fork RPC failure, local fork materialization + /// failure) so the user sees a single consistent error treatment. + fn show_handoff_error_toast(&self, ctx: &mut ViewContext) { + let window_id = ctx.window_id(); + WorkspaceToastStack::handle(ctx).update(ctx, |toast_stack, ctx| { + let toast = DismissibleToast::error( + "Failed to prepare handoff. Please try again.".to_owned(), + ); + toast_stack.add_ephemeral_toast(toast, window_id, ctx); + }); + } + + /// Finishes the local-to-cloud handoff open after the prepare-fork RPC + /// returns. Materializes a local fork bound to the server's forked + /// conversation id, splits a fresh cloud-mode pane next to the active + /// pane, restores the forked conversation into it, seeds `PendingHandoff`, + /// and kicks off async derivation + snapshot upload (REMOTE-1486). 
+ fn complete_local_to_cloud_handoff_open( + &mut self, + source_conversation: AIConversation, + source_token: ServerConversationToken, + forked_conversation_id: String, + initial_prompt: Option<String>, + ctx: &mut ViewContext, + ) { + // Materialize the local fork up-front so the new pane has something to + // restore. `fork_conversation` already handles SQLite persistence and + // copies tasks / messages over from the source. + let history_model = BlocklistAIHistoryModel::handle(ctx); + let local_fork = match history_model.update(ctx, |history_model, ctx| { + // Preserve source task ids so the local fork's task store matches the cloud-side + // fork (which is a byte copy of the source's GCS data). The cloud agent's + // ClientActions reference these task ids and must resolve locally. + history_model.fork_conversation( + &source_conversation, + FORK_PREFIX, + true, /* preserve_task_ids */ + ctx, + ) + }) { + Ok(forked) => forked, + Err(err) => { + log::warn!("Failed to materialize local fork for handoff: {err:#}"); + self.show_handoff_error_toast(ctx); + return; + } + }; + let local_fork_id = local_fork.id(); + + // Split the new cloud-mode pane next to the active pane. self.active_tab_pane_group().update(ctx, |pane_group, ctx| { pane_group.add_ambient_agent_pane(ctx); }); @@ -13036,7 +13131,6 @@ ); return; }; - let Some(model_handle) = new_pane_view .as_ref(ctx) .ambient_agent_view_model() @@ -13046,9 +13140,7 @@ return; }; - // `add_ambient_agent_pane` already entered cloud agent view via - // `enter_ambient_agent_setup` (which transitions the model into `Composing` / - // `Setup`). Pre-fill the prompt input from the slash command argument, if any. + // Pre-fill the prompt input if the slash command supplied one. 
if let Some(prompt) = initial_prompt.as_deref().filter(|p| !p.is_empty()) { new_pane_view.update(ctx, |terminal_view, view_ctx| { terminal_view.input().update(view_ctx, |input, input_ctx| { @@ -13057,41 +13149,77 @@ impl Workspace { }); } - // Fall through to a fresh cloud-mode pane (no handoff context) when there's - // nothing meaningful to hand off. The pane was already opened above. - let Some((conversation, source_token)) = source else { - return; - }; + // Restore the forked conversation into the new pane so its AI exchanges + // are visible immediately. Mirrors the `/fork` in-current-pane flow at + // `Self::fork_ai_conversation`. + let local_fork_for_restore = local_fork.clone(); + new_pane_view.update(ctx, |terminal_view, view_ctx| { + terminal_view.restore_conversation_after_view_creation( + RestoredAIConversation::new(local_fork_for_restore), + /* use_live_appearance */ true, + view_ctx, + ); + }); + + // Bind the local fork's `server_conversation_token` to the forked + // conversation id minted by the server. Must run AFTER + // `restore_conversation_after_view_creation`, since `restore_conversations` + // overwrites the entry in `conversations_by_id` with the (token-less) + // clone we hand it. Binding here ensures that when the cloud agent's + // shared session connects with `StreamInit { conversation_id: T_C }`, + // `find_existing_conversation_by_server_token` finds the live fork and + // `should_skip_replayed_response_for_existing_conversation` correctly + // suppresses the replayed response stream — otherwise the replay would + // re-enter as new exchanges, flipping `is_executing_oz_environment_startup_commands` + // false and breaking setup-command block UI for the handoff pane. 
+ history_model.update(ctx, |history_model, _| { + history_model.set_server_conversation_token_for_conversation( + local_fork_id, + forked_conversation_id.clone(), + ); + }); - // Seed the handoff context onto the new pane's `AmbientAgentViewModel` so - // `is_local_to_cloud_handoff()` is true from this point on (the V2 input - // is suppressed and the submit path routes through the orchestrator). + // Seed `PendingHandoff` so `is_local_to_cloud_handoff()` is true from + // here on. `submit_handoff` reads the cached `forked_conversation_id` + // and `snapshot_prep_token` directly from this struct — the orchestrator + // path that REMOTE-1486 used has been inlined into the async block below. let pending = PendingHandoff { - source_conversation_id: source_token, + forked_conversation_id: forked_conversation_id.clone(), touched_workspace: None, + snapshot_prep_token: None, submission_state: HandoffSubmissionState::Idle, }; model_handle.update(ctx, |model, model_ctx| { model.set_pending_handoff(Some(pending), model_ctx); }); - // Kick off touched-repo derivation off the main thread. The conversation - // walk lives inside the spawned future too so we don't pay it on chip click - // (long conversations have hundreds of action results to traverse). When - // derivation completes, apply the repo-aware overlap pick on top of - // whatever `ensure_default_selection` already picked, but only if the pane - // is still in handoff mode — the pane could have been closed in the - // interim. On a real overlap match we override unconditionally so the - // user's last-selected (potentially empty) env doesn't shadow a matching - // env; on no-overlap we leave the existing selection alone, since the env - // selector's recency-based default is the best fallback. + // Kick off async background prep: derive the touched workspace, then + // upload the snapshot. The pane is fully interactive throughout — the + // user can scroll, type, and pick an env while this runs. 
The send + // button gate inside `submit_handoff` waits for both the workspace and + // the prep token to be cached before allowing a spawn. let async_model_handle = model_handle.clone(); + let server_api_provider = ServerApiProvider::as_ref(ctx); + let ai_client = server_api_provider.get_ai_client(); + let http = server_api_provider.get_http_client(); + let log_token = source_token.clone(); ctx.spawn( async move { - let paths = extract_paths_from_conversation(&conversation); - derive_touched_workspace(paths).await + let paths = extract_paths_from_conversation(&source_conversation); + let workspace = derive_touched_workspace(paths).await; + let repo_paths: Vec<_> = + workspace.repos.iter().map(|r| r.git_root.clone()).collect(); + let upload_result = upload_snapshot_for_handoff( + repo_paths, + workspace.orphan_files.clone(), + ai_client, + http.as_ref(), + &log_token, + ) + .await; + (workspace, upload_result) }, - move |_workspace, derived_workspace, ctx| { + move |_workspace, (derived_workspace, upload_result), ctx| { async_model_handle.update(ctx, |model, model_ctx| { if !model.is_local_to_cloud_handoff() { return; @@ -13102,6 +13230,15 @@ impl Workspace { model.set_environment_id(Some(overlap_env), model_ctx); } model.set_pending_handoff_workspace(derived_workspace, model_ctx); + match upload_result { + Ok(prep_token) => { + model.set_pending_handoff_snapshot_prep_token(prep_token, model_ctx); + } + Err(err) => { + log::warn!("Handoff snapshot upload failed: {err:#}"); + model.record_handoff_prep_failed(format!("{err}"), model_ctx); + } + } }); }, ); diff --git a/specs/REMOTE-1519/PRODUCT.md b/specs/REMOTE-1519/PRODUCT.md new file mode 100644 index 000000000..f749e77a9 --- /dev/null +++ b/specs/REMOTE-1519/PRODUCT.md @@ -0,0 +1,43 @@ +# Local-to-Cloud Handoff: UI Polish — Product Spec +Linear: [REMOTE-1519](https://linear.app/warpdotdev/issue/REMOTE-1519/make-ui-better-for-local-cloud-handoff) +## Summary +Polish the local-to-cloud handoff (REMOTE-1486) so that 
the cloud-mode pane that opens next to the local pane already shows the source conversation, and looks identical to a regular fresh cloud-mode run while the cloud agent is starting up. Today the user clicks the chip and is dropped into a blank pane that only fills in once the cloud agent's first turn streams in. +## Problem +Two related rough edges in the V0 handoff flow: +1. The new cloud-mode pane is empty between chip click and the cloud agent's first response. The user has lost their context — they have to remember what they handed off, or look at the local pane next to it. +2. The cloud-mode setup-v2 affordances (the "Running setup commands…" collapsible row that wraps the environment startup PTY output, the cloud-mode loading screen / queued-prompt indicator) work for fresh cloud-mode runs but render incorrectly during handoff. The handoff pane shows raw startup output instead of the polished setup-v2 surface. +## Goals +- The handoff pane is hydrated with the source conversation's AI exchanges immediately on chip click. The user sees the same conversation history they were just looking at, in the new pane, before they finish typing the follow-up. +- The cloud agent's shared-session replay (which rebroadcasts every exchange in the forked conversation) does not double-render content already on screen. Only genuinely new exchanges from the cloud agent appear after replay. +- The handoff pane uses the cloud-mode setup-v2 affordances during the loading phase, the same way a fresh cloud-mode run does: queued-prompt indicator, "Setting up environment" loading screen, "Running setup commands…" collapsible block wrapping the startup PTY output. +## Non-goals +- Bidirectional sync after handoff. The forked conversation diverges at chip-click; later edits in the local pane do not propagate to the cloud, and vice versa. Same posture as REMOTE-1486 V0. +- Restoring shell command blocks from the local pane into the new cloud pane. 
Only the conversation's AI exchanges are hydrated; terminal output that lived on the local terminal (e.g. unrelated commands run between agent turns) stays on the local pane. +- Cloud→cloud setup-v2 fixes. The cloud-cloud follow-up path (REMOTE-1290) may have similar gaps but is out of scope here; we'll only address local→cloud. +- A local "this conversation was handed off to " breadcrumb on the source pane. +## Behavior +### Fork timing and hydration on chip click +1. Clicking the "Hand off to cloud" chip (or invoking `/oz-cloud-handoff`) immediately mints a server-side fork of the source conversation. The new conversation token is returned synchronously to the client. +2. The new cloud-mode pane opens next to the local pane and is pre-populated with the source conversation's AI exchanges, rendered with live (non-restored) appearance — visually indistinguishable from staying in the local pane. +3. The forked conversation appears in the user's history under their account, owned by them. +4. Subsequent edits in the local pane after chip click do **not** appear in the handoff pane. The cloud agent will work against the conversation as it was at chip-click time. Users who want a more recent snapshot must close the handoff pane and click the chip again. +### Eligibility and fallback +5. Per-conversation eligibility requires an active, non-empty conversation with a synced server token. When the active conversation isn't eligible, the chip surfaces an error toast in the local window and **does not open** any pane. The local conversation is unaffected and the user can retry once the source has synced. +6. If the server fork call fails for any reason (network, auth, source not synced to GCS), the new pane is **not** opened. The failure surfaces as the same error toast in the local window. The local conversation is unaffected and the user can retry by clicking the chip again. +### Cloud session replay and dedup +7. 
When the cloud agent's shared session connects to the handoff pane, the agent's conversation replay rebroadcasts every exchange in the forked conversation. Because we already pre-populated the same exchanges, the replay events are suppressed at the response-stream level, identical to how cloud→cloud follow-up sessions handle stale replay (REMOTE-1290). +8. After the replay completes, genuinely new exchanges (the cloud agent's first response to the user's follow-up prompt) are appended normally. The user sees a smooth transition from "frozen pre-handoff state" to "cloud agent answering my follow-up prompt". +### Setup-v2 affordances during loading +9. After the user submits, the handoff pane shows the same cloud-mode setup-v2 affordances a fresh cloud-mode run shows: + - The submitted prompt as a queued user-query indicator (REMOTE-1454 visual treatment, no Send-now / dismiss buttons). + - The "Setting up environment" loading screen during the pre-session phase. + - The "Running setup commands…" collapsible row that wraps environment startup PTY output once the shared session connects. +10. When the cloud agent's first turn arrives, the queued-prompt indicator and the setup-v2 affordances tear down on the same transitions a fresh cloud-mode run uses (`AppendedExchange` for Oz, `HarnessCommandStarted` for non-Oz). +### Edge cases +11. If the user closes the handoff pane between chip click and submit, the server-side fork is orphaned (visible in the user's conversation history but never run against). V0 does not clean these up. +12. If the user clicks the chip twice on the same source conversation, two independent forks are minted — same as today's REMOTE-1486 chip behavior; nothing changes here. +13. The local pane is unaffected throughout: its conversation is not duplicated, archived, or annotated. The user can keep typing in the local pane. 
+## Success criteria +- Clicking the chip on a long conversation produces a fully populated handoff pane within ~300ms (network-dependent on the fork RPC), without flicker. +- The user never sees duplicate exchange blocks during the cloud agent's session connect / replay phase. +- The handoff pane's loading-phase UI is byte-for-byte identical to a fresh cloud-mode run's (modulo the pre-populated exchanges above the queued-prompt indicator). diff --git a/specs/REMOTE-1519/TECH.md b/specs/REMOTE-1519/TECH.md new file mode 100644 index 000000000..ba643c8df --- /dev/null +++ b/specs/REMOTE-1519/TECH.md @@ -0,0 +1,170 @@ +# Local-to-Cloud Handoff: UI Polish — Tech Spec +Product spec: `specs/REMOTE-1519/PRODUCT.md` +Linear: [REMOTE-1519](https://linear.app/warpdotdev/issue/REMOTE-1519/make-ui-better-for-local-cloud-handoff) +## Context +REMOTE-1486 shipped the V0 local-to-cloud handoff: a chip in the agent input footer (or `/oz-cloud-handoff`) opens a fresh cloud-mode pane next to the local pane, the user types a follow-up prompt, and on submit the client snapshots the workspace and spawns a cloud agent that's forked from the local conversation. +That V0 has two rough edges this spec addresses: +1. **No hydration of the source conversation in the new pane.** The fork is materialized server-side at submit time only (`enqueueAgentRun` in `../warp-server-2/router/handlers/public_api/agent_webhooks.go:376-386` calls `ForkConversationForHandoff` and points `task.AgentConversationID` at the fork). Until the cloud agent's shared session connects and replays the conversation transcript, the new pane is blank. The cloud session's replay then re-broadcasts every exchange the user already saw in the local pane. +2. 
**Setup-v2 affordances are not consistent with fresh cloud-mode runs.** A fresh cloud-mode pane uses `BlockList::set_is_executing_oz_environment_startup_commands(true)` (set in `app/src/terminal/model/terminal_model.rs:1238-1241`), which hides the active block, marks it as a setup command, and renders a "Running setup commands…" collapsible row above it (`CloudModeSetupTextBlock` in `app/src/terminal/view/ambient_agent/block/setup_command_text.rs`). The flag is reset on the first `AppendedExchange` (`app/src/terminal/view.rs:5113-5124`). For handoff panes the pre-populated conversation's exchanges trip that reset path early (when we restore them via `restore_conversations_on_view_creation`), unhiding the active block before the cloud session has even connected — so when the cloud agent's environment startup PTY output arrives it renders raw rather than wrapped in the setup-v2 surface. +The pieces this spec builds on: +- **Cloud-cloud handoff replay suppression.** When `attach_followup_session` joins a fresh shared session for a follow-up cloud execution, it uses `SharedSessionInitialLoadMode::AppendFollowupScrollback` (`app/src/terminal/shared_session/viewer/terminal_manager.rs:340-370`), which (a) deduplicates blocks by ID via `BlockList::append_followup_shared_session_scrollback` (`app/src/terminal/model/blocks.rs:725`) and (b) sets `should_suppress_existing_agent_conversation_replay = true` (`app/src/terminal/shared_session/viewer/event_loop.rs:132-134`). When the cloud agent's replay arrives, `BlocklistAIController::should_skip_replayed_response_for_existing_conversation` (`app/src/ai/blocklist/controller/shared_session.rs:220-239`) skips response streams whose conversation already has exchanges in our local history. We will reuse this exact mechanism for the local→cloud first-session connect. 
+- **Fork-into-new-pane restoration.** `BlocklistAIHistoryModel::fork_conversation` (`app/src/ai/blocklist/history_model.rs:1033`) materializes a forked `AIConversation` locally from a source conversation. `ConversationRestorationInNewPaneType::Forked { conversation }` (`app/src/terminal/view/load_ai_conversation.rs:104-106`) feeds it into a freshly-created pane via `restore_conversations_on_view_creation`, which restores AI blocks for every exchange with live (non-restored) appearance. +- **Server-side fork and conversation-token binding.** `ForkConversationForHandoff` in `../warp-server-2/logic/ai_conversation_fork.go` already implements the server fork end-to-end (auth on source, GCS data copy, metadata insert, `has_gcs_data = TRUE`); it's currently called only from `enqueueAgentRun`. The viewer-side `BlocklistAIController::find_existing_conversation_by_server_token` (`app/src/ai/blocklist/controller/shared_session.rs:418-433`) maps a `StreamInit.conversation_id` to a local `AIConversation` by token; if we set the local fork's `server_conversation_token` to the server fork's id at chip-click time, this lookup wires them up automatically when the cloud session arrives. +- **REMOTE-1486 client surface area.** `Workspace::start_local_to_cloud_handoff` (`app/src/workspace/view.rs:12952-13079`) is the entry point invoked by the chip and slash command. It splits a fresh cloud-mode pane via `pane_group.add_ambient_agent_pane(ctx)`, seeds `PendingHandoff` onto the new pane's `AmbientAgentViewModel`, and kicks off async touched-repo derivation. `AmbientAgentViewModel::submit_handoff` (`app/src/terminal/view/ambient_agent/model.rs:1108-1177`) runs the snapshot prep + upload orchestrator and then calls `spawn_agent_with_request` with `fork_from_conversation_id` set on the `SpawnAgentRequest`. 
+The Linear ticket description ("we should fork the conversation into the cloud pane and re-use the cloud mode loading v2 for the setup commands") covers both pieces; this spec wires them together because the fork-timing change is what enables the setup-v2 fix. +## Diagram +```mermaid +sequenceDiagram + participant U as User + participant C as Local Warp Client + participant LP as Local Pane + participant HP as Handoff Pane (new) + participant API as warp-server (public API) + participant Sand as Cloud Sandbox + U->>C: Click "Hand off to cloud" chip on local pane + C->>API: POST /agent/handoff/prepare-fork {source_conversation_id} + API->>API: ForkConversationForHandoff (auth, copy GCS, insert metadata) + API-->>C: {forked_conversation_id: T_C} + Note over C: On error here: error toast, no pane opens + C->>C: BlocklistAIHistoryModel::fork_conversation (local fork L', bind T_C) + C->>HP: split fresh cloud-mode pane next to LP + C->>HP: restore_conversations_on_view_creation(Forked { L' }) + Note over HP: Pre-populated with source's AI exchanges + par Background prep (kicked off after pane opens) + C->>C: derive_touched_workspace (walks conversation, git remotes) + C->>API: POST /agent/handoff/prepare-snapshot + API-->>C: {prep_token, upload_urls} + C->>API: PUT snapshot files (parallel) + end + U->>HP: Type follow-up prompt, submit + Note over HP: Send button disabled until prep_token cached on PendingHandoff + C->>API: POST /agent/runs {conversation_id: T_C, handoff_prep_token, prompt, config} + API-->>C: {task_id, run_id} + Note over HP: Setup-v2 affordances render: queued prompt, loading screen + Sand->>Sand: bootstrap, run setup commands (PTY → active block, hidden) + Sand-->>HP: shared session ready + HP->>HP: connect_to_session with AppendFollowupScrollback + Note over HP: should_suppress_existing_agent_conversation_replay = true + Sand-->>HP: replay forked conversation transcript + Note over HP: Replay events skipped (existing conversation has exchanges) + 
Sand-->>HP: cloud agent's first turn (rehydration prompt + user follow-up + response) + HP->>HP: AppendedExchange clears setup-v2 flag, queued-prompt block + Note over LP: Local pane unchanged throughout +``` +## Proposed changes +### 1. Server-side: split fork from spawn (`../warp-server-2`) +**Why split fork from spawn?** This whole spec hinges on pre-populating the new cloud pane with the source conversation at chip click. That requires a stable, materialized fork at chip-click time, not at submit time, for two reasons: +1. **Stable target.** Once the cloud pane is hydrated we don't want to keep re-syncing it as the user continues typing in the local pane — that would be O(local-conversation-edits) GCS writes for nothing, and would have to merge against whatever the cloud agent is doing in parallel. Forking on click freezes the cloud's view at the moment the user opted into the handoff and lets the two conversations evolve independently. +2. **Semantic match.** Handoff is fork→cloud per the product model: clicking the chip is the user saying "this conversation, as it stands right now, is what I'm sending to the cloud." Forking at submit-time is an implementation accident inherited from REMOTE-1486 V0 (which had no hydration so it didn't matter when the fork happened); forking at click-time mirrors the user's mental model exactly. +The fork currently happens inside `enqueueAgentRun` when `ForkFromConversationID` is set on the `RunAgentRequest` (`router/handlers/public_api/agent_webhooks.go:376-386`). This spec moves the fork to a new dedicated endpoint so the client can mint the fork at chip-click time and pre-populate the pane. 
+**New endpoint** `POST /api/v1/agent/handoff/prepare-fork`: +```go path=null start=null +type PrepareLocalHandoffForkRequest struct { + SourceConversationID string `json:"source_conversation_id" binding:"required"` +} +type PrepareLocalHandoffForkResponse struct { + ForkedConversationID string `json:"forked_conversation_id"` +} +``` +Add the handler alongside `PrepareLocalHandoffSnapshotHandler` in `router/handlers/public_api/agent_handoff.go`. It is a thin wrapper that: +1. Gates on `features.LocalToCloudHandoffEnabled()`. +2. Resolves `principal` via `middleware.GetRequiredPrincipalFromContext`. +3. Calls the existing `logic.ForkConversationForHandoff(ctx, db, datastores, req.SourceConversationID, principal)` and returns `{forked_conversation_id}`. +Wire the route under the same `aiCheckedGroup` as the existing snapshot prep endpoint at `router/handlers/public_api/agent_webhooks.go:205-207`. +**Remove `ForkFromConversationID` from `RunAgentRequest`.** Per user direction, no backwards compatibility is needed — the field is only used by the under-flag REMOTE-1486 branch which isn't merged. Delete the field declaration (`agent_webhooks.go:235-240`), the validation block (`agent_webhooks.go:337-344`), and the inline fork call (`agent_webhooks.go:376-386`). The existing `ConversationID *string` field at `agent_webhooks.go:222` continues to drive `task.AgentConversationID` (resume semantics) and is what the client now uses to point the new task at the pre-minted fork. +**`HandoffPrepToken` stays.** Snapshot prep + upload still flow through the existing `prepare-snapshot` endpoint and the same `attachHandoffSnapshotToTask` post-task-creation step; the only thing that moves is when the client triggers them (now async on chip click instead of submit time — see §3). The server handler block at `agent_webhooks.go:476-484` is unchanged. +### 2. 
Client-side API surface (`app/src/server/server_api/ai.rs`) +- Add `prepare_handoff_fork` to the `AIClient` trait: +```rust path=null start=null +async fn prepare_handoff_fork( + &self, + request: PrepareHandoffForkRequest, +) -> Result<PrepareHandoffForkResponse>; +``` +implemented in `ServerApi` as `POST agent/handoff/prepare-fork`. Mirror the request/response shape pattern of `PrepareHandoffSnapshotRequest` (currently around line 221-249). +- On `SpawnAgentRequest`, **remove** the `fork_from_conversation_id: Option<String>` field (currently line 213) and **add** `conversation_id: Option<String>` for resume semantics. The client now always pre-mints the fork via the new endpoint and sends the resulting id under `conversation_id`. +- Update the snapshot pipeline call site that takes a `&ServerConversationToken` only for log labelling (`upload_snapshot_for_handoff` in `app/src/ai/agent_sdk/driver/snapshot.rs`) — no signature change needed; the source conversation token is still available on the `PendingHandoff`. +### 3. Client-side fork-on-chip-click (`app/src/workspace/view.rs`) +Extend `Workspace::start_local_to_cloud_handoff` (currently at `app/src/workspace/view.rs:12952-13079`) into a strict-ordering open path: +1. **Resolve eligibility synchronously.** Read the active session view's conversation via `BlocklistAIHistoryModel::active_conversation`. If the conversation is missing, empty, or has no `server_conversation_token`, surface the same error toast as the prepare-fork RPC failure path (step 2 below) and return without opening any pane. There is no "fresh cloud-mode pane" fall-through — the chip is a hand-off-this-conversation action, and silently opening an unrelated fresh pane would hide the failure from the user. +2. **Await the fork before opening the pane.** When the source resolves, `ctx.spawn` a future that calls `AIClient::prepare_handoff_fork({source_conversation_id: T_L})`. The new pane is **not** split until this returns. 
`start_local_to_cloud_handoff` itself returns to the caller immediately so the click handler doesn't block, but the pane-open work is gated on the RPC. + - **On error** (network, auth, `SourceConversationNotPersisted`, etc.), surface a `WorkspaceToastStack` error toast (mirroring the pattern used by `Self::show_fork_toast` at `app/src/workspace/view.rs:11586-11588` for failed local forks). Log the underlying error. Do **not** open a pane. + - **On success**, on the main thread, run the rest of the open path described below. +3. **Open and pre-populate the pane.** With `T_C` in hand: + - Call `pane_group.add_ambient_agent_pane(ctx)` to split the new pane next to the active pane (today's call site). + - Call `BlocklistAIHistoryModel::fork_conversation(&source_conversation, FORK_PREFIX, app)` to materialize a local fork `L'`. `fork_conversation` already handles SQLite persistence, the `forked_from_server_conversation_token` field, and reverted-action-id preservation. + - Set `L'.server_conversation_token = T_C` via `BlocklistAIHistoryModel::set_server_conversation_token_for_conversation` (existing helper used by the `link_forked_conversation_token` path). This makes `find_existing_conversation_by_server_token(T_C)` immediately return `L'` once the cloud session connects. + - On the new pane's terminal view, call `terminal_view.restore_conversation_after_view_creation(RestoredAIConversation::new(L'.clone()), /* use_live_appearance */ true, ctx)` (existing helper at `app/src/terminal/view/load_ai_conversation.rs:542-603`). This is the same restoration helper used by the in-current-pane fork path at `app/src/workspace/view.rs:11597-11607`. + - Set the new pane's `BlocklistAIContextModel` pending-query state for the forked conversation so the agent view's selected conversation matches `L'` (mirrors `restore_conversations_from_block_params` at `app/src/terminal/view/load_ai_conversation.rs:482-491`). 
+  - Seed `PendingHandoff` on the new pane's `AmbientAgentViewModel` with `source_conversation_id: T_L`, `forked_conversation_id: T_C`, `touched_workspace: None`, `snapshot_prep_token: None`, `submission_state: Idle`.
+  - Apply the slash-command-supplied prompt pre-fill if any.
+4. **Kick off async background prep.** After the pane is open, `ctx.spawn` a single chained future on the new pane's `AmbientAgentViewModel` that runs `derive_touched_workspace` → `upload_snapshot_for_handoff` (existing helpers in `app/src/ai/blocklist/handoff/touched_repos.rs` and `app/src/ai/agent_sdk/driver/snapshot.rs`). When derivation completes, call `set_pending_handoff_workspace` so the env-overlap pick can apply (existing behavior). When the upload completes, store the resulting prep token via a new `set_pending_handoff_snapshot_prep_token(Option<String>, ctx)` setter on the model. The pane is fully interactive throughout — the user can type, scroll, and pick an env while this runs.
+The send button's existing gate (`pending_handoff.touched_workspace.is_some()` plus prompt non-empty) is extended to also require `snapshot_prep_token.is_some_or_skipped()` — i.e. the upload is either complete or the touched workspace was empty (the existing `upload_snapshot_for_handoff` returns `Ok(None)` for empty workspaces and that's a valid skip).
+### 4. Submit path uses resume semantics (`app/src/terminal/view/ambient_agent/model.rs`)
+With the fork and the snapshot upload both completed during the chip-click open path, `AmbientAgentViewModel::submit_handoff` becomes a thin shim over `spawn_agent_with_request`.
It reads the cached `forked_conversation_id` and `snapshot_prep_token` directly off `pending_handoff` — no orchestrator runtime needed: +```rust path=null start=null +let handoff = self.pending_handoff.as_ref()?; +let request = SpawnAgentRequest { + prompt, + config: Some(self.build_default_spawn_config(ctx)), + title: None, + team: None, + skill: None, + attachments, + interactive: None, + parent_run_id: None, + runtime_skills: vec![], + referenced_attachments: vec![], + conversation_id: Some(handoff.forked_conversation_id.clone()), + handoff_prep_token: handoff.snapshot_prep_token.clone(), +}; +self.spawn_agent_with_request(request, ctx); +``` +Delete the existing `app/src/ai/blocklist/handoff/orchestrator.rs` (`run_handoff` + `HandoffPrepared`) — the prep-and-upload phase moves to the chip-click path described in §3, and the orchestrator's only remaining role would be a redundant wrapper around `upload_snapshot_for_handoff`. Inline the call directly there. `submit_handoff` retains its existing double-submit guard via `submission_state`. +### 5. Replay-suppressing initial connect (`app/src/terminal/shared_session/viewer/terminal_manager.rs`) +`TerminalManager::connect_to_session` (`app/src/terminal/shared_session/viewer/terminal_manager.rs:322-338`) currently always uses `SharedSessionInitialLoadMode::ReplaceFromSessionScrollback`. Change it so handoff panes use `AppendFollowupScrollback` instead: +- Plumb a `should_append_followup: bool` flag into `connect_to_session` (or a new `connect_to_session_with_load_mode(session_id, load_mode, ctx)` variant — caller's choice). +- The cloud-mode subscription in `app/src/terminal/view/ambient_agent/mod.rs:88-90` calls `manager.connect_to_session(*session_id, ctx)` on `SessionReady`. Update it to also pass `view_model.is_local_to_cloud_handoff()` (read from the model on the same line). When true, use append mode. 
+The append mode then handles both pieces of dedup automatically: `BlockList::append_followup_shared_session_scrollback` skips block IDs we already have, and `EventLoop::should_suppress_existing_agent_conversation_replay = true` (`event_loop.rs:132-134`) drives `BlocklistAIController::should_skip_replayed_response_for_existing_conversation` to skip the historical response streams. No changes to the suppression machinery itself. +### 6. Setup-v2 active-block guard during conversation restore (`app/src/terminal/view.rs`) +The flag-reset block at `app/src/terminal/view.rs:5113-5124` flips `is_executing_oz_environment_startup_commands` to `false` whenever an `AppendedExchange` arrives in an ambient agent session. During `restore_conversations_on_view_creation`, every restored exchange emits `AppendedExchange` (via `update_conversation_for_new_request_input` → `BlocklistAIHistoryEvent::AppendedExchange`), which trips this reset before the cloud agent has even started its setup commands. +Gate the reset on the model not being in handoff-pre-spawn state: +```rust path=null start=null +if self.is_ambient_agent_session(ctx) + && self.model.lock().block_list().is_executing_oz_environment_startup_commands() + && !self.is_in_handoff_replay_phase(ctx) +{ + // existing reset... +} +``` +where `is_in_handoff_replay_phase` returns true when `ambient_agent_view_model.is_local_to_cloud_handoff() && (model.is_in_setup() || model.is_configuring_ambient_agent() || model.is_waiting_for_session())` — i.e. the cloud session has not yet connected and the active block should still be treated as a setup-command surface. After `SessionReady` (and thus once `Status::AgentRunning` is set), the predicate becomes false; the cloud agent's actual `AppendedExchange` (its first response post-rehydration) trips the existing reset path normally. +This is the single behavior fix needed for the setup-v2 affordances to render correctly during handoff. 
The "Running setup commands…" collapsible row, queued-prompt indicator, and loading screen are all already wired up via existing `CloudModeSetupV2`-gated paths and Just Work once the active block stays hidden through the pre-session window. +### 7. Drop the V2-input opt-out for handoff panes (`app/src/terminal/input/agent.rs`) +REMOTE-1486 added a guard at `app/src/terminal/input/agent.rs:65` so handoff panes don't opt into `CloudModeInputV2`. With the setup-v2 affordances now intentionally enabled for handoff panes (per §6 + the product spec's #9), remove the `&& !ambient_agent_view_model.is_local_to_cloud_handoff()` clause from `Input::is_cloud_mode_input_v2_composing`. Handoff panes go through the same V2 input path as fresh cloud-mode runs. +### 8. Feature-flag posture +No new feature flags. All of the changes are gated on the existing `FeatureFlag::OzHandoff && FeatureFlag::LocalToCloudHandoff` (client) and `features.LocalToCloudHandoffEnabled()` (server) used by REMOTE-1486. The client and server flags continue to roll out together. +## Risks and mitigations +- **Chip-click latency is now gated on the prepare-fork RPC.** Previously the pane opened instantly; now the user sees nothing until the fork resolves. *Mitigation:* the fork is a synchronous metadata + GCS-copy round-trip already used at submit time today; expected latency is similar to other authenticated public-API RPCs (<300ms p50). On error we surface a toast immediately so the user knows what happened. +- **Source conversation not synced to GCS.** `ForkConversationForHandoff` returns `InvalidRequestError.New("source conversation %s has not been fully synced to cloud storage; try again in a moment")` when `BatchDoesConversationDataExist` is false. *Mitigation:* the client surfaces this as the toast described above; the user can wait a moment and click again. 
+- **Replay suppression skips a genuinely new exchange.** `should_skip_replayed_response_for_existing_conversation` skips response streams during replay if the local conversation already has exchanges. If the cloud agent's first response stream arrives during the replay phase (before `AgentConversationReplayEnded`) it could be suppressed too. *Mitigation:* this is the same posture cloud→cloud uses today (`AppendFollowupScrollback`); the runtime emits `AgentConversationReplayEnded` before the new turn streams in, so the new turn lands in the post-replay window. +- **Snapshot upload still in flight at submit time.** The user types a follow-up faster than `derive_touched_workspace` + `upload_snapshot_for_handoff` complete. *Mitigation:* the send button gate already requires `pending_handoff.touched_workspace.is_some()` (existing); we extend it to also require the snapshot upload to be settled (either succeeded with `Some(prep_token)`, deliberately skipped with `Ok(None)` for empty workspaces, or failed with the existing `report_error!` posture so submit can proceed best-effort). +- **Snapshot upload failure.** Per-blob failures already retry with bounded backoff via `upload_snapshot_for_handoff`. If every blob fails, the existing `report_error!` fires and the prep token is still minted (cloud agent starts with no rehydration content). *Mitigation:* unchanged — same best-effort posture as cloud→cloud handoff today, just kicked off earlier. +## Testing and validation +### Unit tests +- `app/src/server/server_api/ai_test.rs`: serialization test for `PrepareHandoffForkRequest`, path test for `build_prepare_handoff_fork_url`, mirroring the pattern of the existing `serialize_run_followup_request` test. +- `app/src/ai/blocklist/history_model_test.rs`: test that `set_server_conversation_token_for_conversation` after `fork_conversation` updates the token-to-conversation reverse index so `find_conversation_id_by_server_token(T_C)` finds the fork. 
+- `app/src/terminal/view/view_test.rs`: a minimal regression covering the setup-v2 reset gate — restoring exchanges into a handoff pane while the model is in `Setup`/`Composing`/`WaitingForSession` does NOT flip `is_executing_oz_environment_startup_commands` to false.
+- `app/src/terminal/shared_session/viewer/event_loop_test.rs`: extend the existing append-mode tests to cover the local→cloud connect path (i.e. `AppendFollowupScrollback` mode is what `connect_to_session` uses when the model reports `is_local_to_cloud_handoff`).
+### Server tests (`../warp-server-2`)
+- `router/handlers/public_api/agent_handoff_test.go`: extend the existing test file with a `TestPrepareLocalHandoffForkHandler_*` suite covering: feature-flag-off returns the standard error; missing `source_conversation_id` returns `invalid request payload`; happy path returns a valid UUID; auth failure on the source returns the wrapped `NotAuthorizedError`.
+- Update the existing `agent_webhooks_test.go::TestHandoff_*` cases that exercise `ForkFromConversationID`. With the field removed those tests should switch to driving the new `prepare-fork` endpoint and then sending `ConversationID` on the run request, asserting the same end-state (`task.AgentConversationID = <forked conversation id>`, `snapshots/{task_id}/0/` populated).
+### Integration / manual
+- Click the chip on a long Oz conversation; verify the new pane is visibly populated with the AI exchanges before the cloud session connects, with no flicker or duplicate blocks during the connect/replay window.
+- Submit a follow-up; verify the queued-prompt indicator + "Setting up environment" loading screen + "Running setup commands…" collapsible block all render the same way they do for a fresh cloud-mode run.
+- After the cloud agent's first turn arrives, verify the pre-populated blocks remain in place, the queued-prompt indicator clears, and the new exchange appends below them.
+- Click the chip on a non-eligible conversation (no synced server token); verify **no pane opens** and an error toast surfaces in the local window. The local conversation should be unaffected. +- Manually break a network connection during chip click so the prepare-fork RPC fails; verify **no pane opens** and an error toast surfaces in the local window. The local conversation should be unaffected and the chip should be re-clickable. +## Parallelization +The two-side change (server endpoint + client wiring) is small enough that one engineer/agent can implement it sequentially in two PRs — a server PR for the prepare-fork endpoint + `ForkFromConversationID` removal, then a client PR for the hydration + load mode + setup-v2 reset gate. The user has indicated they will handle the server-side changes themselves in `../warp-server-2`, so the client agent does not need to coordinate with a parallel server agent. No sub-agents needed for this scope. +## Follow-ups +- Cloud→cloud setup-v2 fixes. Cloud-cloud follow-ups (REMOTE-1290) likely have the same setup-v2 active-block reset issue when the follow-up's environment runs setup commands. Out of scope here, but the gate added in §6 can be generalized to also check for follow-up startups.