Communicates Lagrangian objects across MPI ranks. This subroutine relies on a rank locator procedure (GetOwnerRankOpt) to determine the rank that should own each Lagrangian object. The default rank locator is the one provided by the block object associated with this lagrangian_set. Each rank then sends the objects it no longer owns and receives the objects that now belong to it from the other ranks. Note that this subroutine allocates an array (buf_send) of size (maximum number of objects to send) x (number of MPI ranks). For massively parallel simulations, this may cause out-of-memory issues. In those cases, care must be exercised to reduce the number of objects sent at any given time, e.g. by performing the communications in small batches.
| Type | Intent | Optional | Attributes | Name | Description |
|---|---|---|---|---|---|
| class(lagrangian_set) | intent(inout) | | | this | A set of Lagrangian objects |
| procedure(locator) | | optional | | GetOwnerRankOpt | MPI Rank locator for communications |
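For illustration, here is a hedged sketch of how a caller might supply its own rank locator through GetOwnerRankOpt instead of the default block-based locator. The exact `locator` abstract interface is not reproduced on this page; from the call `GetOwnerRank(this,this%p(i))` in the source below, it is assumed to be a function of the set and one object returning the 1-based owning rank. The module name `lagrangian_set_mod`, the round-robin policy, and the assumption that `obj%id` holds a positive integer identifier are illustrative, not part of the library.

```fortran
! Hedged sketch only: the module name, the dummy-argument declarations, and the
! use of obj%id are assumptions that may need adjusting to the actual locator
! abstract interface of the library.
module roundrobin_locator_mod
   use lagrangian_set_mod, only: lagrangian_set, lagrangian_obj   ! hypothetical module name
   implicit none
contains
   !> Assign each object to an MPI rank in round-robin fashion from its id,
   !! assuming ids are positive integers (this%parallel%nproc and obj%id are
   !! the only components of the library types used here).
   integer function GetOwnerRankRoundRobin(this,obj) result(owner)
      class(lagrangian_set), intent(in) :: this
      class(lagrangian_obj), intent(in) :: obj
      owner = 1 + mod(int(obj%id)-1, this%parallel%nproc)
   end function GetOwnerRankRoundRobin
end module roundrobin_locator_mod
```

A caller would then redistribute with something like `call particles%Communicate(GetOwnerRankOpt=GetOwnerRankRoundRobin)`, where `particles` is a lagrangian_set that has already been associated with its parallel environment.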
```fortran
impure subroutine lagrangian_set_Communicate(this,GetOwnerRankOpt)
   !> Communicates Lagrangian objects across MPI ranks.
   ! This subroutine relies on a rank locator procedure (GetOwnerRankOpt)
   ! to determine the rank that should own each Lagrangian object. The default
   ! rank locator is the one provided by the block object associated with this
   ! lagrangian_set. From there, each rank will send objects that it no longer
   ! owns and receive objects from other ranks that belong to it.
   ! Note that this subroutine allocates an array (buf_send) of size
   ! (maximum number of objects to send) x (number of MPI ranks).
   ! For massively parallel simulations, this may cause out-of-memory issues.
   ! In those cases, care must be exercised to reduce the number of objects
   ! to be sent at any given time (e.g., by doing communications in small batches).
   implicit none
   class(lagrangian_set), intent(inout) :: this          !! A set of Lagrangian objects
   procedure(locator), optional :: GetOwnerRankOpt       !! MPI Rank locator for communications
   ! work variables
   procedure(locator), &
      pointer :: GetOwnerRank => null()
   class(lagrangian_obj), &
      allocatable :: buf_send(:,:)
   integer :: counter(this%parallel%nproc)
   integer :: who_send(this%parallel%nproc)
   integer :: who_recv(this%parallel%nproc)
   integer :: nb_recv
   integer :: nb_send
   integer :: nobjects_old
   integer :: rank_recv
   integer :: rank_send
   integer :: object_rank
   integer :: ierr
   type(MPI_status) :: status
   integer :: i

   ! Select the appropriate rank locator
   if (present(GetOwnerRankOpt)) then
      ! User-defined locator
      GetOwnerRank => GetOwnerRankOpt
   else
      ! Default locator
      ! Returns the rank of the grid block that contains
      ! the position of the lagrangian_object
      GetOwnerRank => lagrangian_set_GetOwnerRankByBlock
   end if

   associate(mpi=>this%parallel)
      ! Recycle
      call this%Recycle()

      ! Prepare information about who sends what to whom
      who_send=0
      ! Loop over objects owned by rank
      do i=1,this%count_
         ! Find MPI rank that owns the object
         object_rank=GetOwnerRank(this,this%p(i))
         ! Tell rank that this object will go to object_rank
         who_send(object_rank)=who_send(object_rank)+1
      end do
      ! Remove the diagonal (no send-receive from one MPI rank to itself)
      who_send(mpi%rank%mine)=0

      do rank_recv=1,mpi%nproc
         ! Gather the content of every who_send(rank_recv) to MPI rank rank_recv
         ! in the array who_recv
         call MPI_GATHER(who_send(rank_recv),1,mpi%INTEGER, &
                         who_recv,1,mpi%INTEGER,rank_recv-1,mpi%comm%g,ierr)
      end do

      ! Prepare the buffers
      nb_send=maxval(who_send) ! maximum number of objects to be sent to any rank
      nb_recv=sum(who_recv)    ! number of objects to be received from other ranks

      ! Allocate buffers to send objects
      allocate(buf_send(nb_send,mpi%nproc),source=this%sample)

      ! Find and pack the objects to be sent
      ! Prepare the counter
      counter(:)=0
      do i=1,this%count_
         ! Get the cpu
         object_rank=GetOwnerRank(this,this%p(i))
         if (object_rank .NE. mpi%rank%mine) then
            ! Prepare for sending
            counter(object_rank)=counter(object_rank)+1
            buf_send(counter(object_rank),object_rank)=this%p(i)
            ! Need to remove the object from this rank
            this%p(i)%id=-1
         end if
      end do

      ! Everybody resizes
      nobjects_old = this%count_
      call this%Resize(nobjects_old+nb_recv) ! Larger array to accommodate old and new objects
      this%count_=nobjects_old

      ! We just loop through the CPUs, pack objects in buf_send, send, unpack
      do rank_send=1,mpi%nproc
         ! Start transaction between rank_send and rank
         if (mpi%rank%mine.eq.rank_send) then
            ! I'm the sender
            ! Send out all my objects
            do rank_recv=1,mpi%nproc
               if (who_send(rank_recv) .GT. 0) then
                  call MPI_SEND(buf_send(1,rank_recv)%id,who_send(rank_recv), &
                                this%MPI_TYPE,rank_recv-1,0,mpi%comm%g,ierr)
                  if (ierr.ne.0) error stop "Problem with MPI_SEND"
               end if
            end do
         else
            ! I'm not the sender, I receive
            if (who_recv(rank_send) .GT. 0) then
               ! rank receives new objects from rank_send
               call MPI_RECV(this%p(this%count_+1)%id,who_recv(rank_send), &
                             this%MPI_TYPE, rank_send-1,0,mpi%comm%g,status,ierr)
               ! Update actual number of objects owned
               this%count_ = this%count_+who_recv(rank_send)
               if (ierr.ne.0) error stop "Problem with MPI_RECV"
            end if
         end if
      end do

      deallocate(buf_send)

      ! Remove inactive variables
      call this%Recycle

      ! Nullify pointer
      GetOwnerRank => null()
   end associate

   return
end subroutine lagrangian_set_Communicate
```
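The routine follows a two-phase pattern: each destination rank first gathers (via MPI_GATHER) how many objects every other rank will send it, and matched MPI_SEND/MPI_RECV calls then move the packed objects, ordered by sending rank. The standalone sketch below reproduces that pattern with plain integers in place of packed Lagrangian objects; it is not part of the library, compiles against the standard mpi_f08 module, and every name in it is local to the example.

```fortran
program count_exchange_sketch
   ! Standalone illustration (not part of the library) of the count-then-payload
   ! exchange used by lagrangian_set_Communicate, with integers standing in for
   ! packed Lagrangian objects.
   use mpi_f08
   implicit none
   integer :: nproc, me, ierr, rank_recv, rank_send
   integer, allocatable :: who_send(:), who_recv(:), sendbuf(:), recvbuf(:)
   type(MPI_Status) :: status

   call MPI_Init(ierr)
   call MPI_Comm_size(MPI_COMM_WORLD,nproc,ierr)
   call MPI_Comm_rank(MPI_COMM_WORLD,me,ierr)
   allocate(who_send(nproc),who_recv(nproc))

   ! Phase 0: each rank decides how many items it sends to every other rank
   ! (here: one item to the next rank, none elsewhere).
   who_send = 0
   if (nproc > 1) who_send(1+mod(me+1,nproc)) = 1
   who_send(me+1) = 0   ! no self-communication, as in the subroutine above

   ! Phase 1: every destination rank gathers the counts addressed to it.
   do rank_recv = 1,nproc
      call MPI_Gather(who_send(rank_recv),1,MPI_INTEGER, &
                      who_recv,1,MPI_INTEGER,rank_recv-1,MPI_COMM_WORLD,ierr)
   end do

   ! Phase 2: matched sends and receives, ordered by sending rank.
   do rank_send = 1,nproc
      if (me == rank_send-1) then
         do rank_recv = 1,nproc
            if (who_send(rank_recv) > 0) then
               allocate(sendbuf(who_send(rank_recv)))
               sendbuf = me   ! payload: the sender's rank
               call MPI_Send(sendbuf,size(sendbuf),MPI_INTEGER, &
                             rank_recv-1,0,MPI_COMM_WORLD,ierr)
               deallocate(sendbuf)
            end if
         end do
      else if (who_recv(rank_send) > 0) then
         allocate(recvbuf(who_recv(rank_send)))
         call MPI_Recv(recvbuf,size(recvbuf),MPI_INTEGER, &
                       rank_send-1,0,MPI_COMM_WORLD,status,ierr)
         print '(a,i0,a,i0,a)', 'rank ',me,' received ',size(recvbuf),' item(s)'
         deallocate(recvbuf)
      end if
   end do

   call MPI_Finalize(ierr)
end program count_exchange_sketch
```

Exchanging the counts first is what lets each receiver size its storage (here recvbuf, in the subroutine the resized object array) before any payload arrives.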