lagrangian_set_Communicate Subroutine

private impure subroutine lagrangian_set_Communicate(this, GetOwnerRankOpt)

Communicates Lagrangian objects across MPI ranks. This subroutine relies on a rank locator procedure (GetOwnerRankOpt) to determine the rank that should own each Lagrangian object. The default rank locator is the one provided by the block object associated with this lagrangian_set. From there, each rank sends the objects it no longer owns and receives the objects from other ranks that belong to it. Note that this subroutine allocates an array (buf_send) of size (MAX NUMBER OF OBJECTS TO SEND) x (NUMBER OF MPI RANKS). For massively parallel simulations, this may cause out-of-memory issues. In those cases, care must be exercised to reduce the number of objects sent at any given time (e.g., by doing communications in small batches).
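For illustration, a user-defined rank locator might look like the sketch below. The abstract interface locator is not reproduced on this page, so the argument list (the set plus one of its objects, returning a 1-based MPI rank, as suggested by the rank_recv-1 conversions in the source code) and the round-robin ownership rule are assumptions made for this example:

    ! Minimal sketch of a custom rank locator (hypothetical name and body).
    ! It must return a rank in [1, this%parallel%nproc]; the round-robin
    ! rule on the object id is purely illustrative.
    function my_owner_rank(this, obj) result(object_rank)
      class(lagrangian_set), intent(in) :: this
      class(lagrangian_obj), intent(in) :: obj
      integer                           :: object_rank
      object_rank = 1 + mod(obj%id, this%parallel%nproc)
    end function my_owner_rank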

Type Bound

lagrangian_set

Arguments

class(lagrangian_set), intent(inout) :: this
    A set of Lagrangian objects

procedure(locator), optional :: GetOwnerRankOpt
    MPI rank locator for communications
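
A call might then look as follows; parts is a hypothetical variable of a type extending lagrangian_set, and my_owner_rank is the sketch from the description above:

    ! Default locator: ownership follows the grid block containing each object
    call parts%Communicate()

    ! User-defined locator
    call parts%Communicate(GetOwnerRankOpt=my_owner_rank)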


Calls

lagrangian_set%lagrangian_set_Communicate calls mpi_gather, mpi_recv, mpi_send, lagrangian_set%lagrangian_set_Recycle, and lagrangian_set%lagrangian_set_Resize (Recycle itself calls Resize).

Called by

lagrangian_set%lagrangian_set_Communicate is called directly by cdifs_obj_AdvanceSolutionRP, grans_obj_AdvanceSolution, marker_set%marker_set_ReadH5HUT, marker_set%marker_set_ReadHDF5, ResPart_set%ResPart_set_ReadH5HUT, ResPart_set%ResPart_set_ReadHDF5, ResPart_set%ResPart_set_Regroup, and solid_set%solid_set_Communicate. Indirect callers include cdifs_obj_AdvanceSolution, grans_obj%grans_obj_AdvanceSolution, ResPart_set%ResPart_set_GetHydroForces, and solid_set%solid_set_ReadHDF5.

Source Code

    impure subroutine lagrangian_set_Communicate(this,GetOwnerRankOpt)
      !> Communicates Lagrangian objects across MPI ranks.
      ! This subroutine relies on a rank locator procedure (GetOwnerRankOpt)
      ! to determine the rank that should own each Lagrangian object. The default
      ! rank locator is the one provided by the block object associated with this
      ! lagrangian_set. From there, each rank sends the objects it no longer
      ! owns and receives the objects from other ranks that belong to it.
      ! Note that this subroutine allocates an array (buf_send) of size
      !     (MAX NUMBER OF OBJECTS TO SEND) x (NUMBER OF MPI RANKS).
      ! For massively parallel simulations, this may cause out-of-memory issues.
      ! In those cases, care must be exercised to reduce the number of objects
      ! sent at any given time (e.g., by doing communications in small batches).
      implicit none
      class(lagrangian_set), intent(inout) :: this                             !! A set of Lagrangian objects
      procedure(locator),    optional      :: GetOwnerRankOpt
                                                                               !! MPI Rank locator for communications
      ! work variables
      procedure(locator),    &
          pointer      :: GetOwnerRank => null()
      class(lagrangian_obj), &
          allocatable  :: buf_send(:,:)
      integer          :: counter(this%parallel%nproc)
      integer          :: who_send(this%parallel%nproc)
      integer          :: who_recv(this%parallel%nproc)
      integer          :: nb_recv
      integer          :: nb_send
      integer          :: nobjects_old
      integer          :: rank_recv
      integer          :: rank_send
      integer          :: object_rank
      integer          :: ierr
      type(MPI_status) :: status
      integer          :: i

      ! Select the appropriate rank locator
      if (present(GetOwnerRankOpt)) then
        ! User-defined locator
        GetOwnerRank => GetOwnerRankOpt
      else
        ! Default locator
        ! Returns the rank of the grid block that contains
        ! the position of the lagrangian_object
        GetOwnerRank => lagrangian_set_GetOwnerRankByBlock
      end if

      associate(mpi=>this%parallel)
        ! Recycle
        call this%Recycle()

        ! Prepare information about who sends what to whom
        who_send=0

        ! Loop over objects owned by rank
        do i=1,this%count_
          ! Find MPI rank that owns the object
          object_rank=GetOwnerRank(this,this%p(i))

          ! Tell rank that this object will go to object_rank
          who_send(object_rank)=who_send(object_rank)+1
        end do

        ! Remove the diagonal (no send-receive from an MPI rank to itself)
        who_send(mpi%rank%mine)=0

        do rank_recv=1,mpi%nproc
          ! Gather the content of every who_send(rank_recv) to MPI rank rank_recv
          ! in the array who_recv
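          ! (Taken together, these per-root gathers amount to an all-to-all
          ! exchange of the send counts: after the loop, who_recv(j) holds
          ! the number of objects that rank j will send to this rank.)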
          call MPI_GATHER(who_send(rank_recv),1,mpi%INTEGER,          &
                          who_recv,1,mpi%INTEGER,rank_recv-1,mpi%comm%g,ierr)
        end do

        ! Prepare the buffers
        nb_send=maxval(who_send) ! maximum number of objects to be sent to any rank
        nb_recv=sum(who_recv)   ! number of objects to be received from other ranks

        ! Allocate buffers to send objects
        allocate(buf_send(nb_send,mpi%nproc),source=this%sample)

        ! Find and pack the objects to be sent
        ! Prepare the counter
        counter(:)=0
        do i=1,this%count_
          ! Get the cpu
          object_rank=GetOwnerRank(this,this%p(i))

          if (object_rank .NE. mpi%rank%mine) then
            ! Prepare for sending
            counter(object_rank)=counter(object_rank)+1
            buf_send(counter(object_rank),object_rank)=this%p(i)

            ! Need to remove the object from this rank
            this%p(i)%id=-1
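            ! (A negative id is assumed to flag the object as inactive,
            !  so that the subsequent Recycle call discards it.)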
          end if
        end do

        ! Everybody resizes
        nobjects_old = this%count_
        call this%Resize(nobjects_old+nb_recv) ! Larger array to accommodate old and new objects
        this%count_=nobjects_old

        ! Loop through the ranks: each rank in turn sends its packed buffers while the others receive
        do rank_send=1,mpi%nproc
          ! Start transaction between rank_send and rank
          if (mpi%rank%mine.eq.rank_send) then
            ! I'm the sender
            ! Send out all my objects
            do rank_recv=1,mpi%nproc
              if(who_send(rank_recv) .GT. 0) then
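                ! Passing buf_send(1,rank_recv)%id gives the address of the
                ! first buffered object's first component; this%MPI_TYPE is
                ! assumed to describe the memory layout of one full object.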
                call MPI_SEND(buf_send(1,rank_recv)%id,who_send(rank_recv), &
                              this%MPI_TYPE,rank_recv-1,0,mpi%comm%g,ierr)
                if (ierr.ne.0) error stop "Problem with MPI_SEND"
              end if
            end do
          else
            ! I'm not the sender, I receive
            if (who_recv(rank_send) .GT. 0) then
              ! rank receives new objects from rank_send
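              ! (Receives land directly in the already-resized object array,
              !  starting at the first free slot.)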
              call MPI_RECV(this%p(this%count_+1)%id,who_recv(rank_send), &
                                   this%MPI_TYPE, rank_send-1,0,mpi%comm%g,status,ierr)

              ! Update actual number of objects owned
              this%count_ = this%count_+who_recv(rank_send)
              if (ierr.ne.0) error stop "Problem with MPI_RECV"
            end if
          end if
        end do

        deallocate(buf_send)

        ! Remove inactive variables
        call this%Recycle

        ! Nullify pointer
        GetOwnerRank => null()

      end associate

      return
    end subroutine lagrangian_set_Communicate