Updates the ghostcell values of the local grid owned by the current MPI rank. Each MPI rank stores only its portion of the grid, so its ghostcells must be filled with values from neighboring ranks or, at domain boundaries, by periodic wrapping or extrapolation.
| Type | Intent | Optional | Attributes | Name | Description |
|---|---|---|---|---|---|
| `class(block_obj)` | `intent(inout)` | | | `this` | A block object |
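Because ghost values on a partitioned axis come from neighboring ranks, the routine below uses a two-phase nonblocking exchange: ranks first handshake the message *sizes* (so a block with fewer than `ngc` interior cells can send less), then exchange the payload, skipping zero-size messages. The following is a self-contained sketch of that pattern only, assuming the standard `mpi_f08` module and a non-periodic 1-D decomposition; all names in it are illustrative and not taken from this code:

```fortran
program size_handshake_sketch
   use mpi_f08
   implicit none
   integer :: rank, np, left, right, i
   integer :: nsendL, nsendR, nrecvL, nrecvR
   real, asynchronous :: x(8), ghostL(3), ghostR(3)
   type(MPI_Request) :: req(4)
   type(MPI_Status)  :: st(4)

   call MPI_Init()
   call MPI_Comm_rank(MPI_COMM_WORLD, rank)
   call MPI_Comm_size(MPI_COMM_WORLD, np)

   ! Neighbors of a non-periodic 1-D decomposition; MPI_PROC_NULL at
   ! the ends turns the matching sends/receives into legal no-ops.
   left  = merge(MPI_PROC_NULL, rank-1, rank == 0)
   right = merge(MPI_PROC_NULL, rank+1, rank == np-1)

   x = [(real(10*rank + i), i = 1, size(x))]   ! stand-in for local grid nodes

   ! Phase 1: handshake sizes (a small block may offer fewer values)
   nsendL = merge(0, min(size(x), size(ghostL)), left  == MPI_PROC_NULL)
   nsendR = merge(0, min(size(x), size(ghostR)), right == MPI_PROC_NULL)
   nrecvL = 0; nrecvR = 0
   call MPI_Irecv(nrecvL, 1, MPI_INTEGER, left,  0, MPI_COMM_WORLD, req(1))
   call MPI_Irecv(nrecvR, 1, MPI_INTEGER, right, 0, MPI_COMM_WORLD, req(2))
   call MPI_Isend(nsendR, 1, MPI_INTEGER, right, 0, MPI_COMM_WORLD, req(3))
   call MPI_Isend(nsendL, 1, MPI_INTEGER, left,  0, MPI_COMM_WORLD, req(4))
   call MPI_Waitall(4, req, st)

   ! Phase 2: exchange the payload, guarding zero-size messages
   req = MPI_REQUEST_NULL
   if (nrecvL > 0) call MPI_Irecv(ghostL, nrecvL, MPI_REAL, left,  1, MPI_COMM_WORLD, req(1))
   if (nrecvR > 0) call MPI_Irecv(ghostR, nrecvR, MPI_REAL, right, 1, MPI_COMM_WORLD, req(2))
   if (nsendR > 0) call MPI_Isend(x(size(x)-nsendR+1:), nsendR, MPI_REAL, right, 1, MPI_COMM_WORLD, req(3))
   if (nsendL > 0) call MPI_Isend(x(1:nsendL),          nsendL, MPI_REAL, left,  1, MPI_COMM_WORLD, req(4))
   call MPI_Waitall(4, req, st)

   if (nrecvL > 0) print '(a,i0,a,3f6.1)', 'rank ', rank, ' left ghosts:  ', ghostL(1:nrecvL)
   if (nrecvR > 0) print '(a,i0,a,3f6.1)', 'rank ', rank, ' right ghosts: ', ghostR(1:nrecvR)
   call MPI_Finalize()
end program size_handshake_sketch
```

Run with e.g. `mpirun -np 3 ./a.out`. The documented routine follows the same shape, but exchanges `axis(dir)%x` node coordinates through its `parallel` wrapper and then post-processes the periodic, border, and undersized-block cases, as the source below shows.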
```fortran
impure subroutine block_obj_UpdateGridGhostCells(this)
   !> Updates the ghostcell values of the local grid owned by
   !  the current MPI rank. Note that each MPI rank stores
   !  only its portion of the grid, thus needs to have
   !  proper ghostcell values.
   implicit none
   class(block_obj), intent(inout) :: this   !! A block object
   ! Work variables
   integer :: sendR, sendL
   integer :: recvR, recvL
   integer :: nsendR, nsendL
   integer :: nrecvR, nrecvL
   real(wp) :: L
   type(MPI_STATUS) :: statuses(4)
   type(MPI_REQUEST) :: requests(4)
   integer :: i, dir
   integer :: ierr
   real(wp) :: spacing

   ! Update block dimensions
   call this%UpdateExtents()

   associate (lo => this%lo, hi => this%hi, ngc => this%ngc, &
      axis => this%axis, parallel => this%parallel)
      do dir = 1, 3
         if (.not. this%is_partitioned) then
            ! Treatment for unpartitioned, 2D/1D,
            ! and pseudo-2D/1D blocks:
            ! simply extend grid into ghostcells
            if (this%periods(dir)) then
               L = this%pmax(dir) - this%pmin(dir)
               do i = 1, ngc
                  axis(dir)%x(lo(dir)-i  ) = axis(dir)%x(hi(dir)+1-i) - L
                  axis(dir)%x(hi(dir)+1+i) = axis(dir)%x(lo(dir)+i)   + L
               end do
            else
               do i = 1, ngc
                  axis(dir)%x(lo(dir)-i) = axis(dir)%x(lo(dir)) &
                     - real(i,wp)*(axis(dir)%x(lo(dir)+1) - axis(dir)%x(lo(dir)))
                  axis(dir)%x(hi(dir)+1+i) = axis(dir)%x(hi(dir)+1) &
                     + real(i,wp)*(axis(dir)%x(hi(dir)+1) - axis(dir)%x(hi(dir)))
               end do
            end if
         else
            ! Treatment for partitioned blocks
            ! Figure out size of message to send
            if (parallel%rank%R(dir)-1 .ne. MPI_PROC_NULL) then
               nsendR = min(hi(dir)-lo(dir)+1, ngc)
            else
               nsendR = 0
            end if
            if (parallel%rank%L(dir)-1 .ne. MPI_PROC_NULL) then
               nsendL = min(hi(dir)-lo(dir)+1, ngc)
            else
               nsendL = 0
            end if
            nrecvR = 0
            nrecvL = 0

            ! Post receives from left and right ranks
            call MPI_IRECV(nrecvL, 1, parallel%INTEGER, &
               parallel%rank%L(dir)-1, 0, parallel%comm%g, requests(1), ierr)
            call MPI_IRECV(nrecvR, 1, parallel%INTEGER, &
               parallel%rank%R(dir)-1, 0, parallel%comm%g, requests(2), ierr)

            ! Send sizes to left and right ranks
            call MPI_ISEND(nsendR, 1, parallel%INTEGER, &
               parallel%rank%R(dir)-1, 0, parallel%comm%g, requests(3), ierr)
            call MPI_ISEND(nsendL, 1, parallel%INTEGER, &
               parallel%rank%L(dir)-1, 0, parallel%comm%g, requests(4), ierr)

            ! Synchronize
            call MPI_WAITALL(4, requests, statuses, ierr)

            ! Address of first elements to send
            sendL = lo(dir) + 1; sendR = hi(dir) + 1 - nsendR

            ! Address of first element in buffer
            recvL = lo(dir) - nrecvL; recvR = hi(dir) + 2

            ! Post receives from left and right ranks
            if (nrecvL.gt.0) then
               call MPI_IRECV(axis(dir)%x(recvL), nrecvL, parallel%REAL_WP, &
                  parallel%rank%L(dir)-1, 0, parallel%comm%g, requests(1), ierr)
            end if
            if (nrecvR.gt.0) then
               call MPI_IRECV(axis(dir)%x(recvR), nrecvR, parallel%REAL_WP, &
                  parallel%rank%R(dir)-1, 0, parallel%comm%g, requests(2), ierr)
            end if

            ! Send buffers to left and right ranks
            if (nsendR.gt.0) then
               call MPI_ISEND(axis(dir)%x(sendR), nsendR, parallel%REAL_WP, &
                  parallel%rank%R(dir)-1, 0, parallel%comm%g, requests(3), ierr)
            end if
            if (nsendL.gt.0) then
               call MPI_ISEND(axis(dir)%x(sendL), nsendL, parallel%REAL_WP, &
                  parallel%rank%L(dir)-1, 0, parallel%comm%g, requests(4), ierr)
            end if

            ! Synchronize
            if (nrecvL.gt.0) call MPI_WAIT(requests(1), statuses(1), ierr)
            if (nrecvR.gt.0) call MPI_WAIT(requests(2), statuses(2), ierr)
            if (nsendR.gt.0) call MPI_WAIT(requests(3), statuses(3), ierr)
            if (nsendL.gt.0) call MPI_WAIT(requests(4), statuses(4), ierr)

            ! Treatment of border blocks
            if (this%periods(dir)) then
               ! Adjust grid for periodicity
               L = this%pmax(dir) - this%pmin(dir)
               ! Left border block
               if (parallel%rank%dir(dir).eq.1) &
                  axis(dir)%x(lo(dir)-ngc:lo(dir)-1) = axis(dir)%x(lo(dir)-ngc:lo(dir)-1) - L
               ! Right border block
               if (parallel%rank%dir(dir).eq.parallel%np(dir)) &
                  axis(dir)%x(hi(dir)+2:hi(dir)+1+ngc) = axis(dir)%x(hi(dir)+2:hi(dir)+1+ngc) + L
            else
               ! Extend axis in non-periodic directions
               ! Left border block
               if (parallel%rank%dir(dir).eq.1) then
                  spacing = axis(dir)%x(lo(dir)+1) - axis(dir)%x(lo(dir))
                  do i = 1, ngc
                     axis(dir)%x(lo(dir)-i) = axis(dir)%x(lo(dir)) - real(i,wp)*spacing
                  end do
               end if
               ! Right border block
               if (parallel%rank%dir(dir).eq.parallel%np(dir)) then
                  spacing = axis(dir)%x(hi(dir)+1) - axis(dir)%x(hi(dir))
                  do i = 1, ngc
                     axis(dir)%x(hi(dir)+1+i) = axis(dir)%x(hi(dir)+1) + real(i,wp)*spacing
                  end do
               end if
            end if

            ! Treatment for small blocks: a neighbor may have sent fewer
            ! than ngc values, so extrapolate the remaining ghost nodes
            if (nrecvR.lt.ngc) then
               spacing = axis(dir)%x(hi(dir)+1+nrecvR) - axis(dir)%x(hi(dir)+nrecvR)
               do i = nrecvR+1, ngc
                  axis(dir)%x(hi(dir)+1+i) = axis(dir)%x(hi(dir)+1+nrecvR) + real(i-nrecvR,wp)*spacing
               end do
            end if
            if (nrecvL.lt.ngc) then
               spacing = axis(dir)%x(lo(dir)+1-nrecvL) - axis(dir)%x(lo(dir)-nrecvL)
               do i = nrecvL+1, ngc
                  axis(dir)%x(lo(dir)-i) = axis(dir)%x(lo(dir)-nrecvL) - real(i-nrecvL,wp)*spacing
               end do
            end if
         end if
      end do
   end associate

   return
end subroutine block_obj_UpdateGridGhostCells
```
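The periodic branches above fill ghost nodes by copying from the opposite end of the axis and shifting by the domain length `L = pmax(dir) - pmin(dir)`. A tiny worked example of those exact index formulas; the uniform axis on [0, 1] is assumed only to make the numbers concrete:

```fortran
program periodic_wrap_example
   implicit none
   integer, parameter :: lo = 1, hi = 4, ngc = 2
   real :: x(lo-ngc:hi+1+ngc), L
   integer :: i

   ! Interior nodes of a uniform axis on [0,1]: 0.00 0.25 0.50 0.75 1.00
   x(lo:hi+1) = [(0.25*real(i-lo), i = lo, hi+1)]
   L = x(hi+1) - x(lo)   ! domain length, here 1.0

   ! The same formulas as the unpartitioned periodic branch above
   do i = 1, ngc
      x(lo-i)     = x(hi+1-i) - L   ! left ghosts:  -0.25, then -0.50
      x(hi+1+i)   = x(lo+i)   + L   ! right ghosts:  1.25, then  1.50
   end do

   print '(a,9f7.2)', 'axis with ghosts:', x
end program periodic_wrap_example
```

The ghost nodes continue the grid smoothly across the periodic boundary, which is exactly what the border-block shift by `±L` restores in the partitioned case.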