init_jobs Subroutine

public subroutine init_jobs(ncolumns, group0, ierr)

Splits the set of processes in the current communicator into ncolumns independent jobs (groups). A 2D Cartesian process grid of shape (ncolumns, nrows), with no circular shift in either dimension, is created; each column of the grid becomes one job with its own sub-communicator of nrows = ntot_proc/ncolumns processes. On exit, group0 holds the rank of the root process of each job, and ierr is nonzero if the total number of processes is not divisible by ncolumns.

Arguments

Type      Intent        Optional   Attributes       Name
integer   intent(in)                                ncolumns
integer   intent(out)              dimension(0:)    group0
integer   intent(out)                               ierr
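
A minimal calling sketch (hypothetical driver code; it assumes the surrounding mp module has already been initialised and that the total process count is divisible by the requested number of jobs):

      ! split the available processes into 2 jobs; group0 needs one
      ! entry per job
      integer :: ierr
      integer, dimension(0:1) :: group0

      call init_jobs (2, group0, ierr)
      if (ierr /= 0) call mp_abort ('process count not divisible by number of jobs')
      ! group0 now holds the root rank of each job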

Source Code

  subroutine init_jobs (ncolumns, group0, ierr)

    implicit none
# ifdef MPI
!    integer, parameter :: reorder=1
    ! TT: I changed variable definition by assuming integer 1 corresponds to
    ! TT: logical .true. but I'm not sure if reorder is needed.
    ! TT: In any case this subroutine is only called when you use job fork.
    logical, parameter :: reorder=.true.
    integer :: ip, j, comm2d, id2d, nrows
# endif
    integer, intent(in) :: ncolumns
    integer, dimension(0:), intent (out) :: group0
    integer, intent(out) :: ierr
# ifndef MPI
    group0 = 0
    ierr = 0
    if (ncolumns /= 1) call mp_abort ("init_jobs: ncolumns must be 1 when built without MPI")
# else
    integer, parameter :: ndim=2
    integer, dimension(ndim) :: dims
    integer, dimension(0:ndim-1) :: coords1d, coords2d
    logical, dimension(0:ndim-1) :: belongs
    logical, dimension(ndim) :: period

    logical :: isroot

    if (.not. allocated(grp0)) allocate (grp0(0:size(group0)-1))

! calculate the dimensions the MPI processor grid will have, and check
! that ncolumns*nrows equals the total number of processes

! nrows is the number of processors per job (or group)
    nrows = ntot_proc/ncolumns
    dims=(/ ncolumns, nrows /)
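    ! e.g. ntot_proc = 8, ncolumns = 2 gives nrows = 4 and dims = (/ 2, 4 /):
    ! a 2x4 grid whose two columns become two jobs of four processes each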
    if(ntot_proc /= ncolumns*nrows) then
       ierr = 1
       if(aproc0) write(*,*) 'Number of processes must be divisible by number of groups'
       return
    endif
    ngroup_proc = nrows

    ! create 2d cartesian topology for processes

    period=(/ .false., .false. /)  !< no circular shift

    !    call mpi_cart_create(mpi_comm_world, ndim, dims, period, reorder, comm2d, ierr)
!$OMP MASTER
    call time_message(.false., time_mp_other, ' MPI Overheads')
!$OMP END MASTER
    call mpi_cart_create(comm_all, ndim, dims, period, reorder, comm2d, ierr)
    call mpi_comm_rank(comm2d, id2d, ierr)
    call mpi_cart_coords(comm2d, id2d, ndim, coords2d, ierr)
!$OMP MASTER
    call time_message(.false., time_mp_other, ' MPI Overheads')
!$OMP END MASTER
! each processor now knows which subgrid (job) it belongs to from the
! module variable job
    job = coords2d(0)

! create 1d subgrids from 2d processor grid, variable belongs denotes
! whether processor grid is split by column or row

    belongs(1) = .true.    ! this dimension belongs to subgrid
    belongs(0) = .false.
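    ! belongs(0) is the job (column) dimension; keeping only dimension 1
    ! gives one sub-communicator per job, each containing nrows processes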

!$OMP MASTER
    call time_message(.false., time_mp_other, ' MPI Overheads')
!$OMP END MASTER
    call mpi_cart_sub(comm2d, belongs, comm_group, ierr)
    call mpi_comm_rank(comm_group, gproc, ierr)
    call mpi_cart_coords(comm_group, gproc, 1, coords1d, ierr)
!$OMP MASTER
    call time_message(.false., time_mp_other, ' MPI Overheads')
!$OMP END MASTER
    gproc0 = (gproc == 0)

! find the root process of each 1d subgrid and place it in array group0,
! indexed from 0 to ncolumns-1
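! (e.g. with two jobs of four processes each, group0 = (/ 0, 4 /),
!  assuming mpi_cart_create has not reordered the ranks)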

! MAB> following two lines were incorrect
!    j=1
!    group0(0) = 0
! replace with
    j = 0
    if (proc0 .and. gproc0) then
       group0(0) = 0
       j = 1
    end if
! <MAB
    do ip = 1, ntot_proc-1
       if (proc0) then
          call receive (isroot, ip)
          if (isroot) then
             group0(j) = ip
             j=j+1
          end if
       else if (ip == aproc) then
          call send (gproc0, 0)
       end if
       call barrier
    end do

! let all processors have the group0 array
    call broadcast (group0)

    grp0 = group0

! TT> brought down here from init_job_name in file_utils.fpp
    call scope (subprocs)
! <TT

# endif
  end subroutine init_jobs
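
For reference, the same column/row split can be reproduced outside the mp module. The following standalone sketch (a hypothetical demo program, not part of this codebase; it assumes an MPI library providing the mpi module and a process count divisible by ncolumns) mirrors the steps above, using default 1-based array indexing:

      program cart_split_demo
        use mpi
        implicit none
        integer, parameter :: ndim = 2, ncolumns = 2
        integer :: ierr, nproc, rank, nrows, comm2d, comm_group, gproc, job
        integer, dimension(ndim) :: dims, coords
        logical, dimension(ndim) :: period, belongs

        call mpi_init (ierr)
        call mpi_comm_size (mpi_comm_world, nproc, ierr)
        if (mod(nproc, ncolumns) /= 0) call mpi_abort (mpi_comm_world, 1, ierr)

        nrows = nproc / ncolumns              ! processes per job
        dims = (/ ncolumns, nrows /)
        period = .false.                      ! no circular shift
        call mpi_cart_create (mpi_comm_world, ndim, dims, period, .true., &
             comm2d, ierr)
        call mpi_comm_rank (comm2d, rank, ierr)
        call mpi_cart_coords (comm2d, rank, ndim, coords, ierr)
        job = coords(1)                       ! first coordinate labels the job

        belongs = (/ .false., .true. /)       ! keep the within-job dimension
        call mpi_cart_sub (comm2d, belongs, comm_group, ierr)
        call mpi_comm_rank (comm_group, gproc, ierr)

        write (*,*) 'world rank', rank, 'is rank', gproc, 'of job', job
        call mpi_finalize (ierr)
      end program cart_split_demo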