block_obj_UpdateGridGhostCells Subroutine

private impure subroutine block_obj_UpdateGridGhostCells(this)

Updates the ghost-cell values of the local grid owned by the current MPI rank. Each MPI rank stores only its portion of the grid, so its ghost cells must be filled with valid coordinate values from the neighboring ranks, or by extrapolation at domain boundaries.

Type Bound

block_obj

Arguments

Type               Intent           Optional   Attributes   Name
class(block_obj)   intent(inout)                            this

A block object
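
A minimal usage sketch, assuming the type-bound name UpdateGridGhostCells is publicly accessible (inferred from the UpdateExtents call pattern in the source) and that the block type lives in a module hypothetically named block_mod; both names are assumptions. In practice this routine is invoked internally by block_obj%Partition, block_obj%Read, and block_obj%SetupUniformGrid.

    subroutine refresh_block_ghost_cells(block)
      ! Sketch only: the module name (block_mod) and binding name
      ! (UpdateGridGhostCells) are assumptions, not taken from the source.
      ! Re-synchronizes the ghost-cell grid coordinates after the local
      ! grid or the domain decomposition has changed.
      use block_mod, only: block_obj
      implicit none
      type(block_obj), intent(inout) :: block

      call block%UpdateGridGhostCells()
    end subroutine refresh_block_ghost_cells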


Calls

block_obj%block_obj_UpdateExtents
mpi_irecv
mpi_isend
mpi_wait
mpi_waitall

Called by

block_obj%block_obj_Partition
block_obj%block_obj_Read
block_obj%block_obj_SetupUniformGrid
collision_obj%collision_obj_SetupCollisionBlock2

(Reached indirectly from cdifs_obj%cdifs_obj_PrepareSolver and grans_obj%grans_obj_PrepareSolver through the block setup and partitioning routines.)

Source Code

    impure subroutine block_obj_UpdateGridGhostCells(this)
      !> Updates the ghost-cell values of the local grid owned by
      ! the current MPI rank. Each MPI rank stores only its portion
      ! of the grid, so its ghost cells must be filled with valid
      ! coordinate values.
      implicit none
      class(block_obj), intent(inout) :: this                                  !! A block object
      ! Work variables
      integer           :: sendR, sendL
      integer           :: recvR, recvL
      integer           :: nsendR, nsendL
      integer           :: nrecvR, nrecvL
      real(wp)          :: L
      type(MPI_STATUS)  :: statuses(4)
      type(MPI_REQUEST) :: requests(4)
      integer           :: i,dir
      integer           :: ierr
      real(wp)          :: spacing

      ! Update block dimensions
      call this%UpdateExtents()

      associate (lo => this%lo, hi => this%hi, ngc=> this%ngc, &
         axis => this%axis, parallel => this%parallel)

        do dir=1,3

            if (.not.this%is_partitioned) then

            ! Treatment for unpartitioned, 2D/1D,
            ! and pseudo-2D/1D blocks:
            ! simply extend grid into ghostcells
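            ! Periodic directions wrap the coordinates from the other
            ! end of the domain (shifted by the domain length), while
            ! non-periodic directions extrapolate with the boundary
            ! cell spacing.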
            if (this%periods(dir).eqv..true.) then
              L = this%pmax(dir)-this%pmin(dir)
              do i=1,ngc
                axis(dir)%x(lo(dir)-i  ) = axis(dir)%x(hi(dir)+1-i) - L
                axis(dir)%x(hi(dir)+1+i) = axis(dir)%x(lo(dir)+i)   + L
              end do
            else
              do i=1,ngc
                axis(dir)%x(lo(dir)-i  ) = axis(dir)%x(lo(dir))   - real(i,wp)*(axis(dir)%x(lo(dir)+1)-axis(dir)%x(lo(dir)))
                axis(dir)%x(hi(dir)+1+i) = axis(dir)%x(hi(dir)+1) + real(i,wp)*(axis(dir)%x(hi(dir)+1)-axis(dir)%x(hi(dir)))
              end do
            end if

          else
            ! Treatment for partitioned blocks

            ! Figure out size of message to send
            if (parallel%rank%R(dir)-1.ne.MPI_PROC_NULL) then
              nsendR = min(hi(dir)-lo(dir)+1,ngc)
            else
              nsendR = 0
            end if

            if (parallel%rank%L(dir)-1.ne.MPI_PROC_NULL) then
              nsendL = min(hi(dir)-lo(dir)+1,ngc)
            else
              nsendL = 0
            end if

            nrecvR = 0
            nrecvL = 0
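            ! The exchange proceeds in two phases: ghost-cell counts
            ! are exchanged first so each rank knows how many
            ! coordinate values to expect, then the coordinates follow.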
            ! Post receives from left and right ranks
            call MPI_IRECV(nrecvL, 1, parallel%INTEGER, &
                   parallel%rank%L(dir)-1, 0, parallel%comm%g, requests(1), ierr)
            call MPI_IRECV(nrecvR, 1, parallel%INTEGER, &
                   parallel%rank%R(dir)-1, 0, parallel%comm%g, requests(2), ierr)
            ! Send sizes to left and right ranks
            call MPI_ISEND(nsendR, 1, parallel%INTEGER, &
                   parallel%rank%R(dir)-1, 0, parallel%comm%g, requests(3), ierr)
            call MPI_ISEND(nsendL, 1, parallel%INTEGER, &
                   parallel%rank%L(dir)-1, 0, parallel%comm%g, requests(4), ierr)
            ! Synchronize
            call MPI_WAITALL( 4, requests, statuses, ierr )

            ! Indices of the first elements to send
            sendL = lo(dir) + 1; sendR  = hi(dir) + 1 - nsendR

            ! Indices of the first elements of the receive regions
            recvL= lo(dir)-nrecvL;  recvR=hi(dir)+2

            ! Post receives for the grid coordinates from left and right ranks
            if (nrecvL.gt.0) then
              call MPI_IRECV( axis(dir)%x(recvL  ), nrecvL, parallel%REAL_WP, &
                     parallel%rank%L(dir)-1, 0, parallel%comm%g, requests(1), ierr)
            end if
            if (nrecvR.gt.0) then
              call MPI_IRECV( axis(dir)%x(recvR  ), nrecvR, parallel%REAL_WP, &
                     parallel%rank%R(dir)-1, 0, parallel%comm%g, requests(2), ierr)
            end if
            ! Send buffers to left and right ranks
            if (nsendR.gt.0) then
              call MPI_ISEND( axis(dir)%x(sendR  ), nsendR, parallel%REAL_WP, &
                     parallel%rank%R(dir)-1, 0, parallel%comm%g, requests(3), ierr)
            end if
            if (nsendL.gt.0) then
              call MPI_ISEND( axis(dir)%x(sendL  ), nsendL, parallel%REAL_WP, &
                     parallel%rank%L(dir)-1, 0, parallel%comm%g, requests(4), ierr)
            end if
            ! Synchronize
            if (nrecvL.gt.0) call MPI_WAIT(requests(1),statuses(1),ierr)
            if (nrecvR.gt.0) call MPI_WAIT(requests(2),statuses(2),ierr)
            if (nsendR.gt.0) call MPI_WAIT(requests(3),statuses(3),ierr)
            if (nsendL.gt.0) call MPI_WAIT(requests(4),statuses(4),ierr)

            ! Treatment of border block
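            ! Border blocks need extra care: coordinates received from
            ! a periodic neighbor come from the opposite side of the
            ! domain and are shifted by the domain length, while
            ! non-periodic boundaries are extended by extrapolation.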
            if (this%periods(dir).eqv..true.) then
              ! Adjust grid for periodicity
              L = this%pmax(dir)-this%pmin(dir)
              ! Left border block
              if (parallel%rank%dir(dir).eq.1) axis(dir)%x(lo(dir)-ngc:lo(dir)-1) = axis(dir)%x(lo(dir)-ngc:lo(dir)-1) - L
              ! Right border block
              if (parallel%rank%dir(dir).eq.parallel%np(dir)) axis(dir)%x(hi(dir)+2:hi(dir)+1+ngc) = axis(dir)%x(hi(dir)+2:hi(dir)+1+ngc) + L
            else
              ! Extend axis in non-periodic directions
              ! Left border block
              if (parallel%rank%dir(dir).eq.1) then
                spacing = (axis(dir)%x(lo(dir)+1)-axis(dir)%x(lo(dir)))
                do i=1,ngc
                  axis(dir)%x(lo(dir)-i  ) = axis(dir)%x(lo(dir))   - real(i,wp)*spacing
                end do
              end if
              ! Right border block
              if (parallel%rank%dir(dir).eq.parallel%np(dir)) then
                spacing = (axis(dir)%x(hi(dir)+1)-axis(dir)%x(hi(dir)))
                do i=1,ngc
                  axis(dir)%x(hi(dir)+1+i) = axis(dir)%x(hi(dir)+1) + real(i,wp)*spacing
                end do
              end if
            end if

            ! Treatment for small blocks
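            ! If a neighbor sent fewer than ngc coordinate values, the
            ! remaining ghost cells are filled by uniform extrapolation
            ! using the last available spacing.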
            if (nrecvR.lt.ngc) then
              spacing = axis(dir)%x(hi(dir)+1+nrecvR)-axis(dir)%x(hi(dir)+nrecvR)
              do i=nrecvR+1,ngc
                axis(dir)%x(hi(dir)+1+i) = axis(dir)%x(hi(dir)+1+nrecvR)   + real(i-nrecvR,wp)*spacing
              end do
            end if

            if (nrecvL.lt.ngc) then
              spacing = axis(dir)%x(lo(dir)+1-nrecvL)-axis(dir)%x(lo(dir)-nrecvL)
              do i=nrecvL+1,ngc
                axis(dir)%x(lo(dir)-i) = axis(dir)%x(lo(dir)-nrecvL) - real(i-nrecvL,wp)*spacing
              end do
            end if

          end if
        end do
      end associate

      return
    end subroutine block_obj_UpdateGridGhostCells