From b3d4ec5d98f946e3468bad4430c6c30e41e6196d Mon Sep 17 00:00:00 2001
From: Spencer Bryngelson
Date: Thu, 12 Feb 2026 19:17:08 -0500
Subject: [PATCH 1/2] Update m_data_output.fpp

---
 src/post_process/m_data_output.fpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/post_process/m_data_output.fpp b/src/post_process/m_data_output.fpp
index def195a61d..7824860436 100644
--- a/src/post_process/m_data_output.fpp
+++ b/src/post_process/m_data_output.fpp
@@ -11,7 +11,7 @@ module m_data_output
 
     use m_derived_types     ! Definitions of the derived types
 
-    use m_global_parameters ! Global parameters for the code
+    use m_global_parameters ! Global parameters
 
     use m_derived_variables !< Procedures used to compute quantities derived
 

From 0a79011129bfa66f16f22d7caed5f8b7d9926e2a Mon Sep 17 00:00:00 2001
From: Spencer Bryngelson
Date: Thu, 12 Feb 2026 23:13:52 -0500
Subject: [PATCH 2/2] Fix 1D multi-rank MPI_GATHERV undefined behavior in
 post-process

The 1D paths in s_mpi_gather_spatial_extents and s_mpi_gather_data_extents
reused recvcounts/displs arrays sized for grid defragmentation (m+1 per
rank), but each rank only sends 1 scalar value. This sendcount/recvcounts
mismatch is undefined behavior per the MPI standard and caused
nondeterministic crashes with Intel MPI, preventing silo files from being
written.

Replace MPI_GATHERV with MPI_GATHER + temp buffer for the 1D case.
Multi-D paths and 1D defragmentation functions are unchanged.

Co-Authored-By: Claude Opus 4.6
---
 src/post_process/m_mpi_proxy.fpp | 59 ++++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 18 deletions(-)

diff --git a/src/post_process/m_mpi_proxy.fpp b/src/post_process/m_mpi_proxy.fpp
index bb9bea6b4e..41b35d90be 100644
--- a/src/post_process/m_mpi_proxy.fpp
+++ b/src/post_process/m_mpi_proxy.fpp
@@ -168,6 +168,7 @@ contains
 #ifdef MFC_MPI
 
         integer :: ierr !< Generic flag used to identify and report MPI errors
+        real(wp) :: ext_temp(0:num_procs - 1)
 
         ! Simulation is 3D
         if (p > 0) then
@@ -273,17 +274,20 @@ contains
         ! Simulation is 1D
         else
+            ! For 1D, recvcounts/displs are sized for grid defragmentation
+            ! (m+1 per rank), not for scalar gathers. Use MPI_GATHER instead.
+
             ! Minimum spatial extent in the x-direction
-            call MPI_GATHERV(minval(x_cb), 1, mpi_p, &
-                             spatial_extents(1, 0), recvcounts, 4*displs, &
-                             mpi_p, 0, MPI_COMM_WORLD, &
-                             ierr)
+            call MPI_GATHER(minval(x_cb), 1, mpi_p, &
+                            ext_temp, 1, mpi_p, 0, &
+                            MPI_COMM_WORLD, ierr)
+            if (proc_rank == 0) spatial_extents(1, :) = ext_temp
 
             ! Maximum spatial extent in the x-direction
-            call MPI_GATHERV(maxval(x_cb), 1, mpi_p, &
-                             spatial_extents(2, 0), recvcounts, 4*displs, &
-                             mpi_p, 0, MPI_COMM_WORLD, &
-                             ierr)
+            call MPI_GATHER(maxval(x_cb), 1, mpi_p, &
+                            ext_temp, 1, mpi_p, 0, &
+                            MPI_COMM_WORLD, ierr)
+            if (proc_rank == 0) spatial_extents(2, :) = ext_temp
 
         end if
 
 #endif
@@ -339,16 +343,35 @@ contains
 #ifdef MFC_MPI
 
         integer :: ierr !< Generic flag used to identify and report MPI errors
-
-        ! Minimum flow variable extent
-        call MPI_GATHERV(minval(q_sf), 1, mpi_p, &
-                         data_extents(1, 0), recvcounts, 2*displs, &
-                         mpi_p, 0, MPI_COMM_WORLD, ierr)
-
-        ! Maximum flow variable extent
-        call MPI_GATHERV(maxval(q_sf), 1, mpi_p, &
-                         data_extents(2, 0), recvcounts, 2*displs, &
-                         mpi_p, 0, MPI_COMM_WORLD, ierr)
+        real(wp) :: ext_temp(0:num_procs - 1)
+
+        if (n > 0) then
+            ! Multi-D: recvcounts = 1, so strided MPI_GATHERV works correctly
+            ! Minimum flow variable extent
+            call MPI_GATHERV(minval(q_sf), 1, mpi_p, &
+                             data_extents(1, 0), recvcounts, 2*displs, &
+                             mpi_p, 0, MPI_COMM_WORLD, ierr)
+
+            ! Maximum flow variable extent
+            call MPI_GATHERV(maxval(q_sf), 1, mpi_p, &
+                             data_extents(2, 0), recvcounts, 2*displs, &
+                             mpi_p, 0, MPI_COMM_WORLD, ierr)
+        else
+            ! 1D: recvcounts/displs are sized for grid defragmentation
+            ! (m+1 per rank), not for scalar gathers. Use MPI_GATHER instead.
+
+            ! Minimum flow variable extent
+            call MPI_GATHER(minval(q_sf), 1, mpi_p, &
+                            ext_temp, 1, mpi_p, 0, &
+                            MPI_COMM_WORLD, ierr)
+            if (proc_rank == 0) data_extents(1, :) = ext_temp
+
+            ! Maximum flow variable extent
+            call MPI_GATHER(maxval(q_sf), 1, mpi_p, &
+                            ext_temp, 1, mpi_p, 0, &
+                            MPI_COMM_WORLD, ierr)
+            if (proc_rank == 0) data_extents(2, :) = ext_temp
+        end if
 
 #endif
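
For reference, the core of the 1D fix is the pattern "gather exactly one scalar per rank into a contiguous temporary, then let the root perform the strided copy." The standalone program below is a minimal sketch of that pattern, not MFC code: the program name, variable names, and the use of real(8) with MPI_DOUBLE_PRECISION (standing in for MFC's real(wp) and mpi_p) are illustrative assumptions.

! Hypothetical sketch, not part of the patch series: gather one scalar per
! rank with MPI_GATHER, then copy into a strided destination on the root.
program gather_scalar_sketch

    use mpi

    implicit none

    integer :: ierr, rank, nprocs
    real(8) :: local_min
    real(8), allocatable :: ext_temp(:)
    real(8), allocatable :: extents(:, :)

    call MPI_INIT(ierr)
    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
    call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierr)

    ! Stand-in for minval(x_cb): a single scalar computed on each rank
    local_min = real(rank, 8)*10.0d0

    allocate (ext_temp(0:nprocs - 1))
    if (rank == 0) allocate (extents(1:2, 0:nprocs - 1))

    ! Each rank sends one value and the root receives one value per rank,
    ! so send and receive counts match and the call is well-defined.
    call MPI_GATHER(local_min, 1, MPI_DOUBLE_PRECISION, &
                    ext_temp, 1, MPI_DOUBLE_PRECISION, &
                    0, MPI_COMM_WORLD, ierr)

    ! The strided placement into row 1 of extents is a plain array copy on
    ! the root, so no recvcounts/displs bookkeeping is needed.
    if (rank == 0) then
        extents(1, :) = ext_temp
        print *, 'gathered minima: ', extents(1, :)
    end if

    call MPI_FINALIZE(ierr)

end program gather_scalar_sketch

Under the same assumptions, this sketch can be built and run with something like mpif90 gather_scalar_sketch.f90 followed by mpirun -np 4 ./a.out; each rank contributes one value and only rank 0 prints the assembled row.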