ABINIT/xmpi_sum_c0dc [ Functions ]

NAME

  xmpi_sum_c0dc

FUNCTION

  Combines values from all processes and distributes the result back to all processes.
  Target: double complex scalar

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= scalar to be summed.

SOURCE

2482 subroutine xmpi_sum_c0dc(xval,comm,ier)
2483 
2484 !Arguments-------------------------
2485  complex(dpc),intent(inout) :: xval
2486  integer,intent(in) :: comm
2487  integer,intent(out)   :: ier
2488 
2489 !Local variables-------------------
2490 #if defined HAVE_MPI
2491  integer :: nproc_space_comm
2492  complex(dpc) :: arr_xsum(1)
2493 #endif
2494 
2495 ! *************************************************************************
2496 
2497  ier=0
2498 #if defined HAVE_MPI
2499  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2500    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2501    if (nproc_space_comm /= 1) then
2502 !    Accumulate xval on all proc. in comm
2503      call MPI_ALLREDUCE([xval],arr_xsum,1,MPI_DOUBLE_COMPLEX,MPI_SUM,comm,ier)
2504      xval = arr_xsum(1)
2505    end if
2506  end if
2507 #endif
2508 
2509 end subroutine xmpi_sum_c0dc
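
A minimal usage sketch (an illustration, not part of the ABINIT sources): it assumes the
routine is reached through the generic xmpi_sum interface of m_xmpi and that the dp/dpc kinds
and the xmpi_world communicator come from the usual ABINIT modules.

 ! Hypothetical example: sum one double complex scalar over all ranks.
 subroutine demo_sum_scalar_dc()
   use defs_basis, only : dp, dpc   ! assumed kind definitions
   use m_xmpi                       ! assumed to export xmpi_sum and xmpi_world
   implicit none
   integer :: ierr
   complex(dpc) :: zval
   zval = cmplx(1.0_dp, 2.0_dp, kind=dpc)  ! local contribution of this rank
   call xmpi_sum(zval, xmpi_world, ierr)   ! expected to resolve to xmpi_sum_c0dc
   if (ierr /= 0) stop 'xmpi_sum failed'
 end subroutine demo_sum_scalar_dc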

ABINIT/xmpi_sum_c0sc [ Functions ]

NAME

  xmpi_sum_c0sc

FUNCTION

  Combines values from all processes and distributes the result back to all processes.
  Target: single-precision complex scalar

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= scalar to be summed.

SOURCE

2533 subroutine xmpi_sum_c0sc(xval,comm,ier)
2534 
2535 !Arguments-------------------------
2536  complex(sp),intent(inout) :: xval
2537  integer,intent(in) :: comm
2538  integer,intent(out)   :: ier
2539 
2540 !Local variables-------------------
2541 #if defined HAVE_MPI
2542  integer :: nproc_space_comm
2543  complex(sp) :: arr_xsum(1)
2544 #endif
2545 
2546 ! *************************************************************************
2547 
2548  ier=0
2549 #if defined HAVE_MPI
2550  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2551    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2552    if (nproc_space_comm /= 1) then
2553      ! Accumulate xval on all proc. in comm
2554      call MPI_ALLREDUCE([xval],arr_xsum,1,MPI_COMPLEX,MPI_SUM,comm,ier)
2555      xval = arr_xsum(1)
2556    end if
2557  end if
2558 #endif
2559 
2560 end subroutine xmpi_sum_c0sc
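
The specific routine can also be called directly, bypassing the generic interface; a hedged
sketch follows (module and kind names are assumptions):

 ! Hypothetical example: direct call to the single-precision complex scalar variant.
 subroutine demo_sum_scalar_sc(comm)
   use defs_basis, only : sp        ! assumed kind definition
   use m_xmpi                       ! assumed to export xmpi_sum_c0sc
   implicit none
   integer,intent(in) :: comm
   integer :: ierr
   complex(sp) :: cval
   cval = (1.0_sp, 0.0_sp)              ! local contribution of this rank
   call xmpi_sum_c0sc(cval, comm, ierr) ! cval now holds the sum over comm
 end subroutine demo_sum_scalar_sc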

ABINIT/xmpi_sum_c1cplx [ Functions ]

NAME

  xmpi_sum_c1cplx

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: one-dimensional complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

3145 subroutine xmpi_sum_c1cplx(xval,comm,ier)
3146 
3147 !Arguments----------------
3148  complex(spc), DEV_CONTARRD intent(inout) :: xval(:)
3149  integer,intent(in) :: comm
3150  integer,intent(out) :: ier
3151 
3152 !Local variables--------------
3153 #if defined HAVE_MPI
3154  integer :: n1,nproc_space_comm
3155  complex(spc),allocatable :: xsum(:)
3156 #endif
3157 
3158 ! *************************************************************************
3159 
3160  ier=0
3161 #if defined HAVE_MPI
3162  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
3163    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
3164    if (nproc_space_comm /= 1) then
3165      n1 =size(xval,dim=1)
3166 
3167 !    Accumulate xval on all proc. in comm
3168 #if defined HAVE_MPI2_INPLACE
3169      if (xmpi_use_inplace_operations) then
3170        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,n1,MPI_COMPLEX,MPI_SUM,comm,ier)
3171      else
3172 #endif
3173        ABI_STAT_MALLOC(xsum,(n1), ier)
3174        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c1cplx')
3175        call MPI_ALLREDUCE(xval,xsum,n1,MPI_COMPLEX,MPI_SUM,comm,ier)
3176        xval (:) = xsum(:)
3177        ABI_FREE(xsum)
3178 #if defined HAVE_MPI2_INPLACE
3179      endif
3180 #endif
3181 
3182    end if
3183  end if
3184 #endif
3185 
3186 end subroutine xmpi_sum_c1cplx
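
A hedged usage sketch for the one-dimensional case (kind and module names assumed); the
routine overwrites the buffer with the element-wise sum over the communicator.

 ! Hypothetical example: in-place sum of a 1D single-precision complex buffer.
 subroutine demo_sum_c1cplx(buf, comm)
   use defs_basis, only : spc       ! assumed kind definition
   use m_xmpi                       ! assumed to export the generic xmpi_sum
   implicit none
   complex(spc),intent(inout) :: buf(:)
   integer,intent(in) :: comm
   integer :: ierr
   call xmpi_sum(buf, comm, ierr)   ! expected to resolve to xmpi_sum_c1cplx
   if (ierr /= 0) stop 'reduction failed'
 end subroutine demo_sum_c1cplx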

ABINIT/xmpi_sum_c1dc [ Functions ]

NAME

  xmpi_sum_c1dc

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: one-dimensional double complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

2585 subroutine xmpi_sum_c1dc(xval,comm,ier)
2586 
2587 !Arguments-------------------------
2588  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:)
2589  integer,intent(in) :: comm
2590  integer,intent(out) :: ier
2591 
2592 !Local variables-------------------
2593 #if defined HAVE_MPI
2594  integer :: n1,nproc_space_comm
2595  complex(dpc) , allocatable :: xsum(:)
2596 #endif
2597 
2598 ! *************************************************************************
2599 
2600  ier=0
2601 #if defined HAVE_MPI
2602  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2603    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2604    if (nproc_space_comm /= 1) then
2605      n1 =size(xval,dim=1)
2606 
2607 !    Accumulate xval on all proc. in comm
2608 #if defined HAVE_MPI2_INPLACE
2609      if (xmpi_use_inplace_operations) then
2610        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,n1,MPI_DOUBLE_COMPLEX,MPI_SUM,comm,ier)
2611      else
2612 #endif
2613        ABI_STAT_MALLOC(xsum,(n1), ier)
2614        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c1dc')
2615        call MPI_ALLREDUCE(xval,xsum,n1,MPI_DOUBLE_COMPLEX,MPI_SUM,comm,ier)
2616        xval (:) = xsum(:)
2617        ABI_FREE(xsum)
2618 #if defined HAVE_MPI2_INPLACE
2619      endif
2620 #endif
2621 
2622    end if
2623  end if
2624 #endif
2625 
2626 end subroutine xmpi_sum_c1dc

ABINIT/xmpi_sum_c2cplx [ Functions ]

NAME

  xmpi_sum_c2cplx

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: two-dimensional complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

3211 subroutine xmpi_sum_c2cplx(xval,comm,ier)
3212 
3213 !Arguments----------------
3214  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:)
3215  integer,intent(in) :: comm
3216  integer,intent(out) :: ier
3217 
3218 !Local variables--------------
3219 #if defined HAVE_MPI
3220  integer :: my_dt,my_op,n1,n2,nn,nproc_space_comm
3221  integer(kind=int64) :: ntot
3222  complex(spc), allocatable :: xsum(:,:)
3223 #endif
3224 
3225 ! *************************************************************************
3226 
3227  ier=0
3228 #if defined HAVE_MPI
3229  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
3230    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
3231    if (nproc_space_comm /= 1) then
3232      n1 =size(xval,dim=1)
3233      n2 =size(xval,dim=2)
3234 
3235      !This product of dimensions can be greater than a 32-bit integer
3236      !We use an INT64 to store it. If it is too large, we switch to an
3237      !alternate routine because MPI<4 doesn't handle 64-bit counts.
3238      ntot=int(n1*n2,kind=int64)
3239      if (ntot<=xmpi_maxint32_64) then
3240        nn=n1*n2 ; my_dt=MPI_COMPLEX ; my_op=MPI_SUM
3241      else
3242        nn=1 ; call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
3243      end if
3244 
3245 !    Accumulate xval on all proc. in comm
3246 #if defined HAVE_MPI2_INPLACE
3247      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
3248        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
3249        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
3250      else
3251 #endif
3252        ABI_STAT_MALLOC(xsum,(n1,n2), ier)
3253        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c2cplx')
3254        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
3255        xval (:,:) = xsum(:,:)
3256        ABI_FREE(xsum)
3257 #if defined HAVE_MPI2_INPLACE
3258      endif
3259 #endif
3260 
3261      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
3262    end if
3263  end if
3264 #endif
3265 
3266 end subroutine xmpi_sum_c2cplx

ABINIT/xmpi_sum_c2dc [ Functions ]

NAME

  xmpi_sum_c2dc

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: two-dimensional double complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

2651 subroutine xmpi_sum_c2dc(xval,comm,ier)
2652 
2653 !Arguments-------------------------
2654  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:)
2655  integer,intent(in) :: comm
2656  integer,intent(out) :: ier
2657 
2658 !Local variables-------------------
2659 #if defined HAVE_MPI
2660  integer :: my_dt,my_op,n1,n2,nn,nproc_space_comm
2661  integer(kind=int64) :: ntot
2662  complex(dpc),allocatable :: xsum(:,:)
2663 #endif
2664 
2665 ! *************************************************************************
2666 
2667  ier=0
2668 #if defined HAVE_MPI
2669  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2670    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2671    if (nproc_space_comm /= 1) then
2672      n1 =size(xval,dim=1)
2673      n2 =size(xval,dim=2)
2674 
2675      !This product of dimensions can be greater than a 32-bit integer
2676      !We use an INT64 to store it. If it is too large, we switch to an
2677      !alternate routine because MPI<4 doesn't handle 64-bit counts.
2678      ntot=int(n1*n2,kind=int64)
2679      if (ntot<=xmpi_maxint32_64) then
2680        nn=n1*n2 ; my_dt=MPI_DOUBLE_COMPLEX ; my_op=MPI_SUM
2681      else
2682        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
2683      end if
2684 
2685 !    Accumulate xval on all proc. in comm
2686 #if defined HAVE_MPI2_INPLACE
2687      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
2688        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
2689        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
2690      else
2691 #endif
2692        ABI_STAT_MALLOC(xsum,(n1,n2), ier)
2693        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c2dc')
2694        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
2695        xval (:,:) = xsum(:,:)
2696        ABI_FREE(xsum)
2697 #if defined HAVE_MPI2_INPLACE
2698      end if
2699 #endif
2700 
2701      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
2702    end if
2703  end if
2704 #endif
2705 
2706 end subroutine xmpi_sum_c2dc
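
A hedged sketch for the two-dimensional double complex case; whether MPI_IN_PLACE or the
temporary xsum copy is used, and whether a large derived datatype is needed for element
counts above the 32-bit limit, is decided inside the routine, so the caller simply passes
the whole array.

 ! Hypothetical example: in-place sum of a 2D double complex array.
 subroutine demo_sum_c2dc(mat, comm)
   use defs_basis, only : dpc       ! assumed kind definition
   use m_xmpi                       ! assumed to export the generic xmpi_sum
   implicit none
   complex(dpc),intent(inout) :: mat(:,:)
   integer,intent(in) :: comm
   integer :: ierr
   call xmpi_sum(mat, comm, ierr)   ! expected to resolve to xmpi_sum_c2dc
 end subroutine demo_sum_c2dc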

ABINIT/xmpi_sum_c3cplx [ Functions ]

NAME

  xmpi_sum_c3cplx

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: three-dimensional complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

PARENTS

CHILDREN

      mpi_allreduce,xmpi_abort

SOURCE

3296 subroutine xmpi_sum_c3cplx(xval,comm,ier)
3297 
3298 !Arguments----------------
3299  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:,:)
3300  integer,intent(in) :: comm
3301  integer,intent(out) :: ier
3302 
3303 !Local variables--------------
3304 #if defined HAVE_MPI
3305  integer :: my_dt,my_op,n1,n2,n3,nn,nproc_space_comm
3306  integer(kind=int64) :: ntot
3307  complex(spc), allocatable :: xsum(:,:,:)
3308 #endif
3309 
3310 ! *************************************************************************
3311 
3312  ier=0
3313 #if defined HAVE_MPI
3314  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
3315    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
3316    if (nproc_space_comm /= 1) then
3317      n1 =size(xval,dim=1)
3318      n2 =size(xval,dim=2)
3319      n3 =size(xval,dim=3)
3320 
3321      !This product of dimensions can be greater than a 32-bit integer
3322      !We use an INT64 to store it. If it is too large, we switch to an
3323      !alternate routine because MPI<4 doesn't handle 64-bit counts.
3324      ntot=int(n1*n2*n3,kind=int64)
3325      if (ntot<=xmpi_maxint32_64) then
3326        nn=n1*n2*n3 ; my_dt=MPI_COMPLEX ; my_op=MPI_SUM
3327      else
3328        nn=1 ; call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
3329      end if
3330 
3331 !    Accumulate xval on all proc. in comm
3332 #if defined HAVE_MPI2_INPLACE
3333      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
3334        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
3335        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
3336      else
3337 #endif
3338        ABI_STAT_MALLOC(xsum,(n1,n2,n3), ier)
3339        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c3cplx')
3340        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
3341        xval (:,:,:) = xsum(:,:,:)
3342        ABI_FREE(xsum)
3343 #if defined HAVE_MPI2_INPLACE
3344      endif
3345 #endif
3346 
3347      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
3348    end if
3349  end if
3350 #endif
3351 
3352 end subroutine xmpi_sum_c3cplx

ABINIT/xmpi_sum_c3dc [ Functions ]

NAME

  xmpi_sum_c3dc

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: three-dimensional double complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

2731 subroutine xmpi_sum_c3dc(xval,comm,ier)
2732 
2733 !Arguments-------------------------
2734  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:,:)
2735  integer,intent(in) :: comm
2736  integer,intent(out) :: ier
2737 
2738 !Local variables-------------------
2739 #if defined HAVE_MPI
2740  integer :: my_dt,my_op,n1,n2,n3,nn,nproc_space_comm
2741  integer(kind=int64) :: ntot
2742  complex(dpc),allocatable :: xsum(:,:,:)
2743 #endif
2744 
2745 ! *************************************************************************
2746 
2747  ier=0
2748 #if defined HAVE_MPI
2749  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2750    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2751    if (nproc_space_comm /= 1) then
2752      n1 =size(xval,dim=1)
2753      n2 =size(xval,dim=2)
2754      n3 =size(xval,dim=3)
2755 
2756      !This product of dimensions can be greater than a 32-bit integer
2757      !We use an INT64 to store it. If it is too large, we switch to an
2758      !alternate routine because MPI<4 doesn't handle 64-bit counts.
2759      ntot=int(n1*n2*n3,kind=int64)
2760      if (ntot<=xmpi_maxint32_64) then
2761        nn=n1*n2*n3 ; my_dt=MPI_DOUBLE_COMPLEX ; my_op=MPI_SUM
2762      else
2763        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
2764      end if
2765 
2766 !    Accumulate xval on all proc. in comm
2767 #if defined HAVE_MPI2_INPLACE
2768      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
2769        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
2770        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
2771      else
2772 #endif
2773        ABI_STAT_MALLOC(xsum,(n1,n2,n3), ier)
2774        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c3dc')
2775        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
2776        xval (:,:,:) = xsum(:,:,:)
2777        ABI_FREE(xsum)
2778 #if defined HAVE_MPI2_INPLACE
2779      end if
2780 #endif
2781 
2782      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
2783    end if
2784  end if
2785 #endif
2786 
2787 end subroutine xmpi_sum_c3dc

ABINIT/xmpi_sum_c4cplx [ Functions ]

NAME

  xmpi_sum_c4cplx

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: four-dimensional complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

PARENTS

CHILDREN

      mpi_allreduce,xmpi_abort

SOURCE

3382 subroutine xmpi_sum_c4cplx(xval,comm,ier)
3383 
3384 !Arguments----------------
3385  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
3386  integer,intent(in) :: comm
3387  integer,intent(out) :: ier
3388 
3389 !Local variables--------------
3390 #if defined HAVE_MPI
3391  integer :: my_dt,my_op,n1,n2,n3,n4,nn,nproc_space_comm
3392  integer(kind=int64) :: ntot
3393  complex(spc),allocatable :: xsum(:,:,:,:)
3394 #endif
3395 
3396 ! *************************************************************************
3397 
3398  ier=0
3399 #if defined HAVE_MPI
3400  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
3401    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
3402    if (nproc_space_comm /= 1) then
3403      n1 =size(xval,dim=1)
3404      n2 =size(xval,dim=2)
3405      n3 =size(xval,dim=3)
3406      n4 =size(xval,dim=4)
3407 
3408      !This product of dimensions can be greater than a 32-bit integer
3409      !We use an INT64 to store it. If it is too large, we switch to an
3410      !alternate routine because MPI<4 doesn't handle 64-bit counts.
3411      ntot=int(n1*n2*n3*n4,kind=int64)
3412      if (ntot<=xmpi_maxint32_64) then
3413        nn=n1*n2*n3*n4 ; my_dt=MPI_COMPLEX ; my_op=MPI_SUM
3414      else
3415        nn=1 ; call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
3416      end if
3417 
3418 !    Accumulate xval on all proc. in comm
3419 #if defined HAVE_MPI2_INPLACE
3420      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
3421        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
3422        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
3423      else
3424 #endif
3425        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
3426        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c4cplx')
3427        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
3428        xval (:,:,:,:) = xsum(:,:,:,:)
3429        ABI_FREE(xsum)
3430 #if defined HAVE_MPI2_INPLACE
3431      endif
3432 #endif
3433 
3434      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
3435    end if
3436  end if
3437 #endif
3438 
3439 end subroutine xmpi_sum_c4cplx

ABINIT/xmpi_sum_c4dc [ Functions ]

NAME

  xmpi_sum_c4dc

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: four-dimensional double complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

2812 subroutine xmpi_sum_c4dc(xval,comm,ier)
2813 
2814 !Arguments-------------------------
2815  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
2816  integer,intent(in) :: comm
2817  integer,intent(out) :: ier
2818 
2819 !Local variables-------------------
2820 #if defined HAVE_MPI
2821  integer :: my_dt,my_op,n1,n2,n3,n4,nn,nproc_space_comm
2822  integer(kind=int64) :: ntot
2823  complex(dpc),allocatable :: xsum(:,:,:,:)
2824 #endif
2825 
2826 ! *************************************************************************
2827 
2828  ier=0
2829 #if defined HAVE_MPI
2830  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2831    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2832    if (nproc_space_comm /= 1) then
2833      n1 =size(xval,dim=1)
2834      n2 =size(xval,dim=2)
2835      n3 =size(xval,dim=3)
2836      n4 =size(xval,dim=4)
2837 
2838      !This product of dimensions can be greater than a 32-bit integer
2839      !We use an INT64 to store it. If it is too large, we switch to an
2840      !alternate routine because MPI<4 doesn't handle 64-bit counts.
2841      ntot=int(n1*n2*n3*n4,kind=int64)
2842      if (ntot<=xmpi_maxint32_64) then
2843        nn=n1*n2*n3*n4 ; my_dt=MPI_DOUBLE_COMPLEX ; my_op=MPI_SUM
2844      else
2845        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
2846      end if
2847 
2848 !    Accumulate xval on all proc. in comm
2849 #if defined HAVE_MPI2_INPLACE
2850      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
2851        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
2852        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
2853      else
2854 #endif
2855        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
2856        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c4dc')
2857        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
2858        xval (:,:,:,:) = xsum(:,:,:,:)
2859        ABI_FREE(xsum)
2860 #if defined HAVE_MPI2_INPLACE
2861      endif
2862 #endif
2863 
2864      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
2865    end if
2866  end if
2867 #endif
2868 
2869 end subroutine xmpi_sum_c4dc

ABINIT/xmpi_sum_c5cplx [ Functions ]

NAME

  xmpi_sum_c5cplx

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: five-dimensional complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

3464 subroutine xmpi_sum_c5cplx(xval,comm,ier)
3465 
3466 !Arguments----------------
3467  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:)
3468  integer,intent(in) :: comm
3469  integer,intent(out) :: ier
3470 
3471 !Local variables--------------
3472 #if defined HAVE_MPI
3473  integer :: my_dt,my_op,n1,n2,n3,n4,n5,nn,nproc_space_comm
3474  integer(kind=int64) :: ntot
3475  complex(spc),allocatable :: xsum(:,:,:,:,:)
3476 #endif
3477 
3478 ! *************************************************************************
3479 
3480  ier=0
3481 #if defined HAVE_MPI
3482  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
3483    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
3484    if (nproc_space_comm /= 1) then
3485      n1 =size(xval,dim=1)
3486      n2 =size(xval,dim=2)
3487      n3 =size(xval,dim=3)
3488      n4 =size(xval,dim=4)
3489      n5 =size(xval,dim=5)
3490 
3491      !This product of dimensions can be greater than a 32-bit integer
3492      !We use an INT64 to store it. If it is too large, we switch to an
3493      !alternate routine because MPI<4 doesn't handle 64-bit counts.
3494      ntot=int(n1*n2*n3*n4*n5,kind=int64)
3495      if (ntot<=xmpi_maxint32_64) then
3496        nn=n1*n2*n3*n4*n5 ; my_dt=MPI_COMPLEX ; my_op=MPI_SUM
3497      else
3498        nn=1 ; call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
3499      end if
3500 
3501 !    Accumulate xval on all proc. in comm
3502 #if defined HAVE_MPI2_INPLACE
3503      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
3504        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
3505        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
3506      else
3507 #endif
3508        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5), ier)
3509        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c5cplx')
3510        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
3511        xval (:,:,:,:,:) = xsum(:,:,:,:,:)
3512        ABI_FREE(xsum)
3513 #if defined HAVE_MPI2_INPLACE
3514      endif
3515 #endif
3516 
3517      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
3518    end if
3519  end if
3520 #endif
3521 
3522 end subroutine xmpi_sum_c5cplx

ABINIT/xmpi_sum_c5dc [ Functions ]

NAME

  xmpi_sum_c5dc

FUNCTION

  Combines values from all processes and distributes the result back to all processes.
  Target: five-dimensional double precision complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

2893 subroutine xmpi_sum_c5dc(xval,comm,ier)
2894 
2895 !Arguments-------------------------
2896  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:)
2897  integer,intent(in) :: comm
2898  integer,intent(out) :: ier
2899 
2900 !Local variables-------------------
2901 #if defined HAVE_MPI
2902  integer :: my_dt,my_op,n1,n2,n3,n4,n5,nn,nproc_space_comm
2903  integer(kind=int64) :: ntot
2904  complex(dpc),allocatable :: xsum(:,:,:,:,:)
2905 #endif
2906 
2907 ! *************************************************************************
2908 
2909  ier=0
2910 #if defined HAVE_MPI
2911  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2912    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2913    if (nproc_space_comm /= 1) then
2914      n1 =size(xval,dim=1)
2915      n2 =size(xval,dim=2)
2916      n3 =size(xval,dim=3)
2917      n4 =size(xval,dim=4)
2918      n5 =size(xval,dim=5)
2919 
2920      !This product of dimensions can be greater than a 32-bit integer
2921      !We use an INT64 to store it. If it is too large, we switch to an
2922      !alternate routine because MPI<4 doesn't handle 64-bit counts.
2923      ntot=int(n1*n2*n3*n4*n5,kind=int64)
2924      if (ntot<=xmpi_maxint32_64) then
2925        nn=n1*n2*n3*n4*n5 ; my_dt=MPI_DOUBLE_COMPLEX ; my_op=MPI_SUM
2926      else
2927        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
2928      end if
2929 
2930 !    Accumulate xval on all proc. in comm
2931 #if defined HAVE_MPI2_INPLACE
2932      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
2933        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
2934        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
2935      else
2936 #endif
2937        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5), ier)
2938        if (ier/=0) call xmpi_abort(comm,msg='error allocating xsum in xmpi_sum_c5dc')
2939        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
2940        xval (:,:,:,:,:) = xsum(:,:,:,:,:)
2941        ABI_FREE(xsum)
2942 #if defined HAVE_MPI2_INPLACE
2943      endif
2944 #endif
2945 
2946      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
2947    end if
2948  end if
2949 #endif
2950 
2951 end subroutine xmpi_sum_c5dc

ABINIT/xmpi_sum_c6cplx [ Functions ]

NAME

  xmpi_sum_c6cplx

FUNCTION

  Combines values from all processes and distributes the result back to all processes.
  Target: six-dimensional complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

3546 subroutine xmpi_sum_c6cplx(xval,comm,ier)
3547 
3548 !Arguments----------------
3549  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:)
3550  integer,intent(in) :: comm
3551  integer,intent(out) :: ier
3552 
3553 !Local variables--------------
3554 #if defined HAVE_MPI
3555  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6,nn,nproc_space_comm
3556  integer(kind=int64) :: ntot
3557  complex(spc),allocatable :: xsum(:,:,:,:,:,:)
3558 #endif
3559 
3560 ! *************************************************************************
3561 
3562  ier=0
3563 #if defined HAVE_MPI
3564  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
3565    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
3566    if (nproc_space_comm /= 1) then
3567      n1 =size(xval,dim=1)
3568      n2 =size(xval,dim=2)
3569      n3 =size(xval,dim=3)
3570      n4 =size(xval,dim=4)
3571      n5 =size(xval,dim=5)
3572      n6 =size(xval,dim=6)
3573 
3574      !This product of dimensions can be greater than a 32-bit integer
3575      !We use an INT64 to store it. If it is too large, we switch to an
3576      !alternate routine because MPI<4 doesn't handle 64-bit counts.
3577      ntot=int(n1*n2*n3*n4*n5*n6,kind=int64)
3578      if (ntot<=xmpi_maxint32_64) then
3579        nn=n1*n2*n3*n4*n5*n6 ; my_dt=MPI_COMPLEX ; my_op=MPI_SUM
3580      else
3581        nn=1 ; call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
3582      end if
3583 
3584 !    Accumulate xval on all proc. in comm
3585 #if defined HAVE_MPI2_INPLACE
3586      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
3587        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
3588        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
3589      else
3590 #endif
3591        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6), ier)
3592        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c6cplx')
3593        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
3594        xval = xsum
3595        ABI_FREE(xsum)
3596 #if defined HAVE_MPI2_INPLACE
3597      endif
3598 #endif
3599 
3600      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
3601    end if
3602  end if
3603 #endif
3604 
3605 end subroutine xmpi_sum_c6cplx

ABINIT/xmpi_sum_c6dc [ Functions ]

NAME

  xmpi_sum_c6dc

FUNCTION

  Combines values from all processes and distributes the result back to all processes.
  Target: six-dimensional double precision complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

2975 subroutine xmpi_sum_c6dc(xval,comm,ier)
2976 
2977 !Arguments-------------------------
2978  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:)
2979  integer,intent(in) :: comm
2980  integer,intent(out) :: ier
2981 
2982 !Local variables-------------------
2983 #if defined HAVE_MPI
2984  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6,nn,nproc_space_comm
2985  integer(kind=int64) :: ntot
2986  complex(dpc),allocatable :: xsum(:,:,:,:,:,:)
2987 #endif
2988 
2989 ! *************************************************************************
2990 
2991  ier=0
2992 #if defined HAVE_MPI
2993  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2994    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2995    if (nproc_space_comm /= 1) then
2996      n1 =size(xval,dim=1)
2997      n2 =size(xval,dim=2)
2998      n3 =size(xval,dim=3)
2999      n4 =size(xval,dim=4)
3000      n5 =size(xval,dim=5)
3001      n6 =size(xval,dim=6)
3002 
3003      !This product of dimensions can be greater than a 32-bit integer
3004      !We use an INT64 to store it. If it is too large, we switch to an
3005      !alternate routine because MPI<4 doesn't handle 64-bit counts.
3006      ntot=int(n1*n2*n3*n4*n5*n6,kind=int64)
3007      if (ntot<=xmpi_maxint32_64) then
3008        nn=n1*n2*n3*n4*n5*n6 ; my_dt=MPI_DOUBLE_COMPLEX ; my_op=MPI_SUM
3009      else
3010        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
3011      end if
3012 
3013 !    Accumulate xval on all proc. in comm
3014 #if defined HAVE_MPI2_INPLACE
3015      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
3016        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
3017        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
3018      else
3019 #endif
3020        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6), ier)
3021        if (ier/=0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_c6dc')
3022        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
3023        xval = xsum
3024        ABI_FREE(xsum)
3025 #if defined HAVE_MPI2_INPLACE
3026      endif
3027 #endif
3028 
3029      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
3030    end if
3031  end if
3032 #endif
3033 
3034 end subroutine xmpi_sum_c6dc

ABINIT/xmpi_sum_c7dc [ Functions ]

NAME

  xmpi_sum_c7dc

FUNCTION

  Combines values from all processes and distributes the result back to all processes.
  Target: seven-dimensional double precision complex arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

3058 subroutine xmpi_sum_c7dc(xval,comm,ier)
3059 
3060 !Arguments-------------------------
3061  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:,:)
3062  integer,intent(in) :: comm
3063  integer,intent(out) :: ier
3064 
3065 !Local variables-------------------
3066 #if defined HAVE_MPI
3067  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6,n7,nn,nproc_space_comm
3068  integer(kind=int64) :: ntot
3069  complex(dpc),allocatable :: xsum(:,:,:,:,:,:,:)
3070 #endif
3071 
3072 ! *************************************************************************
3073 
3074  ier=0
3075 #if defined HAVE_MPI
3076  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
3077    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
3078    if (nproc_space_comm /= 1) then
3079      n1 =size(xval,dim=1)
3080      n2 =size(xval,dim=2)
3081      n3 =size(xval,dim=3)
3082      n4 =size(xval,dim=4)
3083      n5 =size(xval,dim=5)
3084      n6 =size(xval,dim=6)
3085      n7 =size(xval,dim=7)
3086 
3087      !This product of dimensions can be greater than a 32-bit integer
3088      !We use an INT64 to store it. If it is too large, we switch to an
3089      !alternate routine because MPI<4 doesn't handle 64-bit counts.
3090      ntot=int(n1*n2*n3*n4*n5*n6*n7,kind=int64)
3091      if (ntot<=xmpi_maxint32_64) then
3092        nn=n1*n2*n3*n4*n5*n6*n7 ; my_dt=MPI_DOUBLE_COMPLEX ; my_op=MPI_SUM
3093      else
3094        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
3095      end if
3096 
3097 !    Accumulate xval on all proc. in comm
3098 #if defined HAVE_MPI2_INPLACE
3099      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
3100        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
3101        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
3102      else
3103 #endif
3104        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6,n7),ier)
3105        if (ier/=0) then
3106          call xmpi_abort(comm=comm,msg='error allocating xsum in xmpi_sum_c7dc')
3107        end if
3108        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
3109        xval = xsum
3110        ABI_FREE(xsum)
3111 #if defined HAVE_MPI2_INPLACE
3112      endif
3113 #endif
3114 
3115      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
3116    end if
3117  end if
3118 #endif
3119 
3120 end subroutine xmpi_sum_c7dc

ABINIT/xmpi_sum_coeff5d1 [ Functions ]

NAME

  xmpi_sum_coeff5d1

FUNCTION

  Combines values from all processes and distributes the result back to all processes.
  Target: one-dimensional array of coeff5_type structures

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval = coeff5_type array structure (input and output)

SOURCE

3629 subroutine xmpi_sum_coeff5d1(xval,comm,ier)
3630 
3631 !Arguments ------------------------------------
3632  type(coeff5_type),intent(inout) :: xval(:)
3633  integer,intent(in) :: comm
3634  integer,intent(out)   :: ier
3635 
3636 !Local variables-------------------------------
3637 #if defined HAVE_MPI
3638  integer :: buf_size,i2,i3,i4,i5,ii,indx_buf,n1,n2,n3,n4,n5,nb,nproc_space_comm
3639  integer, allocatable :: dims(:,:)
3640  real(dp),allocatable :: buf(:),xsum(:)
3641 #endif
3642 
3643 ! *************************************************************************
3644 
3645  ier=0
3646 #if defined HAVE_MPI
3647  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
3648    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
3649    if (nproc_space_comm /= 1) then
3650      nb = size(xval,1)
3651 
3652 !    Retrieve sizes of 'value' fields
3653      ABI_MALLOC(dims,(5,nb))
3654      buf_size=0
3655      do ii=1,nb
3656        if (.not.allocated(xval(ii)%value)) &
3657 &        call xmpi_abort(msg='bug in xmpi_sum(coeff5): xval should be allocated!')
3658        dims(1,ii)=size(xval(ii)%value,dim=1)
3659        dims(2,ii)=size(xval(ii)%value,dim=2)
3660        dims(3,ii)=size(xval(ii)%value,dim=3)
3661        dims(4,ii)=size(xval(ii)%value,dim=4)
3662        dims(5,ii)=size(xval(ii)%value,dim=5)
3663        buf_size=buf_size+dims(1,ii)*dims(2,ii)*dims(3,ii)*dims(4,ii)*dims(5,ii)
3664      end do
3665 
3666 !    Fill in buffer
3667      ABI_STAT_MALLOC(buf,(buf_size) ,ier)
3668      if (ier/= 0) call xmpi_abort(msg='error allocating buf in xmpi_sum(coeff5)!')
3669      indx_buf=1
3670      do ii=1,nb
3671        n1=dims(1,ii); n2=dims(2,ii)
3672        n3=dims(3,ii); n4=dims(4,ii); n5=dims(5,ii)
3673        do i5=1,n5
3674          do i4=1,n4
3675            do i3=1,n3
3676              do i2=1,n2
3677                buf(indx_buf:indx_buf+n1-1)=xval(ii)%value(1:n1,i2,i3,i4,i5)
3678                indx_buf=indx_buf+n1
3679              end do
3680            end do
3681          end do
3682        end do
3683      end do
3684 
3685 !    Accumulate xval%value on all proc. in comm
3686 #if defined HAVE_MPI2_INPLACE
3687      if (xmpi_use_inplace_operations) then
3688        call MPI_ALLREDUCE(MPI_IN_PLACE,buf,buf_size,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
3689      else
3690 #endif
3691        ABI_STAT_MALLOC(xsum,(buf_size), ier)
3692        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum(coeff5)!')
3693        call MPI_ALLREDUCE(buf,xsum,buf_size,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
3694        buf = xsum
3695        ABI_FREE(xsum)
3696 #if defined HAVE_MPI2_INPLACE
3697      endif
3698 #endif
3699 
3700 !    Transfer buffer into output datastructure
3701      indx_buf=1
3702      do ii=1,nb
3703        n1=dims(1,ii); n2=dims(2,ii)
3704        n3=dims(3,ii); n4=dims(4,ii); n5=dims(5,ii)
3705        do i5=1,n5
3706          do i4=1,n4
3707            do i3=1,n3
3708              do i2=1,n2
3709                xval(ii)%value(1:n1,i2,i3,i4,i5)=buf(indx_buf:indx_buf+n1-1)
3710                indx_buf=indx_buf+n1
3711              end do
3712            end do
3713          end do
3714        end do
3715      end do
3716 
3717      ABI_FREE(dims)
3718      ABI_FREE(buf)
3719 
3720    end if
3721  end if
3722 #endif
3723 
3724 end subroutine xmpi_sum_coeff5d1
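
A hedged sketch of how this structured variant might be used, assuming coeff5_type is the
derived type seen above (each element carrying an allocatable five-dimensional real(dp)
component named value, already allocated with identical shapes on every rank):

 ! Hypothetical example: sum every %value field of a coeff5_type array over comm.
 subroutine demo_sum_coeff5(xval, comm)
   use m_xmpi                        ! assumed to export the generic xmpi_sum
   ! a use statement for the module defining coeff5_type is also required (assumption)
   implicit none
   type(coeff5_type),intent(inout) :: xval(:)  ! every xval(ii)%value must be allocated
   integer,intent(in) :: comm
   integer :: ierr
   call xmpi_sum(xval, comm, ierr)   ! expected to resolve to xmpi_sum_coeff5d1
 end subroutine demo_sum_coeff5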

ABINIT/xmpi_sum_dp [ Functions ]

NAME

  xmpi_sum_dp

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: one-dimensional double precision arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

704 subroutine xmpi_sum_dp(xval,comm,ier)
705 
706 !Arguments-------------------------
707  real(dp), DEV_CONTARRD intent(inout) :: xval(:)
708  integer,intent(in) :: comm
709  integer,intent(out) :: ier
710 
711 !Local variables-------------------
712 #if defined HAVE_MPI
713  integer :: n1,nproc_space_comm
714  real(dp),allocatable :: xsum(:)
715 #endif
716 
717 ! *************************************************************************
718 
719  ier=0
720 #if defined HAVE_MPI
721  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
722    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
723    if (nproc_space_comm /= 1) then
724      n1 = size(xval)
725 
726 !    Accumulate xval on all proc. in comm
727 #if defined HAVE_MPI2_INPLACE
728      if (xmpi_use_inplace_operations) then
729        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,n1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
730      else
731 #endif
732        ABI_STAT_MALLOC(xsum,(n1), ier)
733        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_dp')
734        call MPI_ALLREDUCE(xval,xsum,n1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
735        xval (:) = xsum(:)
736        ABI_FREE(xsum)
737 #if defined HAVE_MPI2_INPLACE
738      end if
739 #endif
740 
741    end if
742  end if
743 #endif
744 
745 end subroutine xmpi_sum_dp
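
The most common case in practice is the one-dimensional real(dp) buffer; a minimal hedged
sketch (kind and module names assumed):

 ! Hypothetical example: in-place sum of a real(dp) vector over all ranks in comm.
 subroutine demo_sum_dp(vec, comm)
   use defs_basis, only : dp        ! assumed kind definition
   use m_xmpi                       ! assumed to export the generic xmpi_sum
   implicit none
   real(dp),intent(inout) :: vec(:)
   integer,intent(in) :: comm
   integer :: ierr
   call xmpi_sum(vec, comm, ierr)   ! expected to resolve to xmpi_sum_dp
   if (ierr /= 0) stop 'xmpi_sum_dp failed'
 end subroutine demo_sum_dp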

ABINIT/xmpi_sum_dp2d [ Functions ]

NAME

  xmpi_sum_dp2d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision two-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1662 subroutine xmpi_sum_dp2d(xval,comm,ier)
1663 
1664 !Arguments-------------------------
1665  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:)
1666  integer ,intent(in) :: comm
1667  integer ,intent(out) :: ier
1668 
1669 !Local variables-------------------
1670 #if defined HAVE_MPI
1671  integer :: my_dt,my_op,n1,n2,nn,nproc_space_comm
1672  integer(kind=int64) :: ntot
1673  real(dp),allocatable :: xsum(:,:)
1674 #endif
1675 
1676 ! *************************************************************************
1677 
1678  ier=0
1679 #if defined HAVE_MPI
1680  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1681    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1682    if (nproc_space_comm /= 1) then
1683      n1 = size(xval,dim=1)
1684      n2 = size(xval,dim=2)
1685 
1686      !This product of dimensions can be greater than a 32-bit integer
1687      !We use an INT64 to store it. If it is too large, we switch to an
1688      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1689      ntot=int(n1*n2,kind=int64)
1690      if (ntot<=xmpi_maxint32_64) then
1691        nn=n1*n2 ; my_dt=MPI_DOUBLE_PRECISION ; my_op=MPI_SUM
1692      else
1693        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
1694      end if
1695 
1696 !    Accumulate xval on all proc. in comm
1697 #if defined HAVE_MPI2_INPLACE
1698      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1699        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1700        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1701      else
1702 #endif
1703        ABI_STAT_MALLOC(xsum,(n1,n2), ier)
1704        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_dp2d')
1705        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1706        xval (:,:) = xsum(:,:)
1707        ABI_FREE(xsum)
1708 #if defined HAVE_MPI2_INPLACE
1709      endif
1710 #endif
1711 
1712      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1713    end if
1714  end if
1715 #endif
1716 
1717 end subroutine xmpi_sum_dp2d
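
A hedged sketch for the two-dimensional real(dp) case; as the source shows, when n1*n2
exceeds a 32-bit integer the routine builds a large derived datatype internally, so the
caller does nothing special for big arrays.

 ! Hypothetical example: in-place sum of a 2D real(dp) array.
 subroutine demo_sum_dp2d(grid, comm)
   use defs_basis, only : dp        ! assumed kind definition
   use m_xmpi                       ! assumed to export the generic xmpi_sum
   implicit none
   real(dp),intent(inout) :: grid(:,:)
   integer,intent(in) :: comm
   integer :: ierr
   call xmpi_sum(grid, comm, ierr)  ! expected to resolve to xmpi_sum_dp2d
 end subroutine demo_sum_dp2d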

ABINIT/xmpi_sum_dp2d2t [ Functions ]

NAME

  xmpi_sum_dp2d2t

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision two-dimensional array without transfers.

INPUTS

  n = total send size
  xval= buffer array
  comm= MPI communicator

OUTPUT

  xsum= receive buffer
  ier= exit status, a non-zero value meaning there is an error

SOURCE

2336 subroutine xmpi_sum_dp2d2t(xval,xsum,n,comm,ier)
2337 
2338 !Arguments-------------------------
2339  real(dp), DEV_CONTARRD intent(in) :: xval(:,:)
2340  real(dp), DEV_CONTARRD intent(out) :: xsum(:,:)
2341  integer ,intent(in) :: n
2342  integer ,intent(in) :: comm
2343  integer ,intent(out) :: ier
2344 
2345 !Local variables-------------------
2346 
2347 ! *************************************************************************
2348 
2349  ier=0
2350 #if defined HAVE_MPI
2351  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2352 !  Accumulate xval on all proc. in comm
2353    call MPI_ALLREDUCE(xval,xsum,n,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
2354  else
2355 #endif
2356    xsum=xval
2357 #if defined HAVE_MPI
2358  end if
2359 #endif
2360 
2361 end subroutine xmpi_sum_dp2d2t
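
A hedged sketch of the out-of-place variant: xval is left untouched and the reduced result
lands in the separate xsum buffer, with the total element count passed explicitly (the kind
module name is an assumption).

 ! Hypothetical example: out-of-place sum of a 2D real(dp) array.
 subroutine demo_sum_dp2d2t(xval, xsum, comm)
   use defs_basis, only : dp        ! assumed kind definition
   use m_xmpi                       ! module providing xmpi_sum_dp2d2t
   implicit none
   real(dp),intent(in)  :: xval(:,:)
   real(dp),intent(out) :: xsum(:,:)  ! same shape as xval
   integer,intent(in) :: comm
   integer :: ierr
   call xmpi_sum_dp2d2t(xval, xsum, size(xval), comm, ierr)
 end subroutine demo_sum_dp2d2t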

ABINIT/xmpi_sum_dp2t [ Functions ]

NAME

  xmpi_sum_dp2t

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision one-dimensional array without transfers.

INPUTS

  n1= first dimension of the array
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array
  xsum= receive buffer

SOURCE

2289 subroutine xmpi_sum_dp2t(xval,xsum,n1,comm,ier)
2290 
2291 !Arguments-------------------------
2292  real(dp), DEV_CONTARRD intent(inout) :: xval(:),xsum(:)
2293  integer ,intent(in) :: n1
2294  integer ,intent(in) :: comm
2295  integer ,intent(out) :: ier
2296 
2297 ! *************************************************************************
2298 
2299  ier=0
2300 #if defined HAVE_MPI
2301  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2302 !  Accumulate xval on all proc. in comm
2303    call MPI_ALLREDUCE(xval,xsum,n1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
2304  else
2305 #endif
2306    xsum=xval
2307 #if defined HAVE_MPI
2308  end if
2309 #endif
2310 
2311 end subroutine xmpi_sum_dp2t
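
A hedged sketch of the one-dimensional out-of-place variant; note that on MPI_COMM_SELF or
MPI_COMM_NULL (or in a build without MPI) the routine simply copies xval into xsum, so the
same call is safe in serial runs.

 ! Hypothetical example: 1D out-of-place sum into a separate receive buffer.
 subroutine demo_sum_dp2t(xval, xsum, comm)
   use defs_basis, only : dp        ! assumed kind definition
   use m_xmpi                       ! module providing xmpi_sum_dp2t
   implicit none
   real(dp),intent(inout) :: xval(:), xsum(:)  ! matches the intent(inout) dummies
   integer,intent(in) :: comm
   integer :: ierr
   call xmpi_sum_dp2t(xval, xsum, size(xval), comm, ierr)
 end subroutine demo_sum_dp2t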

ABINIT/xmpi_sum_dp3d [ Functions ]

NAME

  xmpi_sum_dp3d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision three-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1764 subroutine xmpi_sum_dp3d(xval,comm,ier)
1765 
1766 !Arguments-------------------------
1767  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:)
1768  integer ,intent(in) :: comm
1769  integer ,intent(out)   :: ier
1770 
1771 !Local variables-------------------
1772 #if defined HAVE_MPI
1773  integer :: my_dt,my_op,n1,n2,n3,nn,nproc_space_comm
1774  integer(kind=int64) :: ntot
1775  real(dp),allocatable :: xsum(:,:,:)
1776 #endif
1777 
1778 ! *************************************************************************
1779 
1780  ier=0
1781 #if defined HAVE_MPI
1782  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1783    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1784    if (nproc_space_comm /= 1) then
1785      n1 = size(xval,dim=1)
1786      n2 = size(xval,dim=2)
1787      n3 = size(xval,dim=3)
1788 
1789      !This product of dimensions can be greater than a 32-bit integer
1790      !We use an INT64 to store it. If it is too large, we switch to an
1791      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1792      ntot=int(n1*n2*n3,kind=int64)
1793      if (ntot<=xmpi_maxint32_64) then
1794        nn=n1*n2*n3 ; my_dt=MPI_DOUBLE_PRECISION ; my_op=MPI_SUM
1795      else
1796        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
1797      end if
1798 
1799 !    Accumulate xval on all proc. in comm
1800 #if defined HAVE_MPI2_INPLACE
1801      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1802        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1803        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1804      else
1805 #endif
1806        ABI_STAT_MALLOC(xsum,(n1,n2,n3), ier)
1807        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_dp3d')
1808        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1809        xval (:,:,:) = xsum(:,:,:)
1810        ABI_FREE(xsum)
1811 #if defined HAVE_MPI2_INPLACE
1812      endif
1813 #endif
1814 
1815      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1816    end if
1817  end if
1818 #endif
1819 
1820 end subroutine xmpi_sum_dp3d

ABINIT/xmpi_sum_dp3d2t [ Functions ]

NAME

  xmpi_sum_dp3d2t

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision three-dimensional array without transfers.

INPUTS

  n1= total send size (number of elements to sum)
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array
  xsum= receive buffer

SOURCE

2388 subroutine xmpi_sum_dp3d2t(xval,xsum,n1,comm,ier)
2389 
2390 !Arguments-------------------------
2391  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:),xsum(:,:,:)
2392  integer ,intent(in) :: n1
2393  integer ,intent(in) :: comm
2394  integer ,intent(out) :: ier
2395 
2396 ! *************************************************************************
2397  ier=0
2398 #if defined HAVE_MPI
2399  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2400 !  Accumulate xval on all proc. in comm
2401    call MPI_ALLREDUCE(xval,xsum,n1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
2402  else
2403 #endif
2404    xsum=xval
2405 #if defined HAVE_MPI
2406  end if
2407 #endif
2408 
2409 end subroutine xmpi_sum_dp3d2t

ABINIT/xmpi_sum_dp4d [ Functions ]

NAME

  xmpi_sum_dp4d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision four-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1867 subroutine xmpi_sum_dp4d(xval,comm,ier)
1868 
1869 !Arguments-------------------------
1870  real(dp),DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
1871  integer ,intent(in) :: comm
1872  integer ,intent(out) :: ier
1873 
1874 !Local variables-------------------
1875 #if defined HAVE_MPI
1876  integer :: my_dt,my_op,n1,n2,n3,n4,nn,nproc_space_comm
1877  integer(kind=int64) :: ntot
1878  real(dp),allocatable :: xsum(:,:,:,:)
1879 #endif
1880 
1881 ! *************************************************************************
1882 
1883  ier=0
1884 #if defined HAVE_MPI
1885  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1886    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1887    if (nproc_space_comm /= 1) then
1888      n1 = size(xval,dim=1)
1889      n2 = size(xval,dim=2)
1890      n3 = size(xval,dim=3)
1891      n4 = size(xval,dim=4)
1892 
1893      !This product of dimensions can be greater than a 32-bit integer
1894      !We use an INT64 to store it. If it is too large, we switch to an
1895      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1896      ntot=int(n1*n2*n3*n4,kind=int64)
1897      if (ntot<=xmpi_maxint32_64) then
1898        nn=n1*n2*n3*n4 ; my_dt=MPI_DOUBLE_PRECISION ; my_op=MPI_SUM
1899      else
1900        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
1901      end if
1902 
1903 !    Accumulate xval on all proc. in comm
1904 #if defined HAVE_MPI2_INPLACE
1905      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1906        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1907        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1908      else
1909 #endif
1910        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
1911        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_dp4d')
1912        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1913        xval (:,:,:,:) = xsum(:,:,:,:)
1914        ABI_FREE(xsum)
1915 #if defined HAVE_MPI2_INPLACE
1916      endif
1917 #endif
1918 
1919      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1920    end if
1921  end if
1922 #endif
1923 
1924 end subroutine xmpi_sum_dp4d

ABINIT/xmpi_sum_dp4d2t [ Functions ]

NAME

  xmpi_sum_dp4d2t

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision four-dimensional array without transfers.

INPUTS

  n1= total send size (number of elements to sum)
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array
  xsum= receive buffer

SOURCE

2436 subroutine xmpi_sum_dp4d2t(xval,xsum,n1,comm,ier)
2437 
2438 !Arguments-------------------------
2439  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:),xsum(:,:,:,:)
2440  integer ,intent(in) :: n1
2441  integer ,intent(in) :: comm
2442  integer ,intent(out) :: ier
2443 
2444 ! *************************************************************************
2445 
2446  ier=0
2447 #if defined HAVE_MPI
2448  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2449 !  Accumulate xval on all proc. in comm
2450    call MPI_ALLREDUCE(xval,xsum,n1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
2451  else
2452 #endif
2453    xsum=xval
2454 #if defined HAVE_MPI
2455  end if
2456 #endif
2457 
2458 end subroutine xmpi_sum_dp4d2t

ABINIT/xmpi_sum_dp5d [ Functions ]

NAME

  xmpi_sum_dp5d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision five-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1971 subroutine xmpi_sum_dp5d(xval,comm,ier)
1972 
1973 !Arguments-------------------------
1974  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:)
1975  integer ,intent(in) :: comm
1976  integer ,intent(out) :: ier
1977 
1978 !Local variables-------------------
1979 #if defined HAVE_MPI
1980  integer :: my_dt,my_op,n1,n2,n3,n4,n5,nn,nproc_space_comm
1981  integer(kind=int64) :: ntot
1982  real(dp),allocatable :: xsum(:,:,:,:,:)
1983 #endif
1984 
1985 ! *************************************************************************
1986 
1987  ier=0
1988 #if defined HAVE_MPI
1989  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1990    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1991    if (nproc_space_comm /= 1) then
1992      n1 = size(xval,dim=1)
1993      n2 = size(xval,dim=2)
1994      n3 = size(xval,dim=3)
1995      n4 = size(xval,dim=4)
1996      n5 = size(xval,dim=5)
1997 
1998      !This product of dimensions can be greater than a 32-bit integer
1999      !We use an INT64 to store it. If it is too large, we switch to an
2000      !alternate routine because MPI<4 doesn't handle 64-bit counts.
2001      ntot=int(n1*n2*n3*n4*n5,kind=int64)
2002      if (ntot<=xmpi_maxint32_64) then
2003        nn=n1*n2*n3*n4*n5 ; my_dt=MPI_DOUBLE_PRECISION ; my_op=MPI_SUM
2004      else
2005        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
2006      end if
2007 
2008 !    Accumulate xval on all proc. in comm
2009 #if defined HAVE_MPI2_INPLACE
2010      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
2011        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
2012        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
2013      else
2014 #endif
2015        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5), ier)
2016        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_dp5d')
2017        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
2018        xval (:,:,:,:,:) = xsum(:,:,:,:,:)
2019        ABI_FREE(xsum)
2020 #if defined HAVE_MPI2_INPLACE
2021      endif
2022 #endif
2023 
2024      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
2025    end if
2026  end if
2027 #endif
2028 
2029 end subroutine xmpi_sum_dp5d

ABINIT/xmpi_sum_dp6d [ Functions ]

NAME

  xmpi_sum_dp6d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision six-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

2075 subroutine xmpi_sum_dp6d(xval,comm,ier)
2076 
2077 !Arguments-------------------------
2078  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:)
2079  integer ,intent(in) :: comm
2080  integer ,intent(out) :: ier
2081 
2082 !Local variables-------------------
2083 #if defined HAVE_MPI
2084  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6,nn,nproc_space_comm
2085  integer(kind=int64) :: ntot
2086  real(dp), allocatable :: xsum(:,:,:,:,:,:)
2087 #endif
2088 
2089 ! *************************************************************************
2090 
2091  ier=0
2092 #if defined HAVE_MPI
2093  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2094    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2095    if (nproc_space_comm /= 1) then
2096      n1 = size(xval,dim=1)
2097      n2 = size(xval,dim=2)
2098      n3 = size(xval,dim=3)
2099      n4 = size(xval,dim=4)
2100      n5 = size(xval,dim=5)
2101      n6 = size(xval,dim=6)
2102 
2103      !This product of dimensions can be greater than a 32-bit integer
2104      !We use an INT64 to store it. If it is too large, we switch to an
2105      !alternate routine because MPI<4 doesn't handle 64-bit counts.
2106      ntot=int(n1*n2*n3*n4*n5*n6,kind=int64)
2107      if (ntot<=xmpi_maxint32_64) then
2108        nn=n1*n2*n3*n4*n5*n6 ; my_dt=MPI_DOUBLE_PRECISION ; my_op=MPI_SUM
2109      else
2110        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
2111      end if
2112 
2113 !    Accumulate xval on all proc. in comm
2114 #if defined HAVE_MPI2_INPLACE
2115      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
2116        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
2117        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
2118      else
2119 #endif
2120        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6), ier)
2121        if (ier/=0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_dp6d')
2122        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
2123        xval (:,:,:,:,:,:) = xsum(:,:,:,:,:,:)
2124        ABI_FREE(xsum)
2125 #if defined HAVE_MPI2_INPLACE
2126      endif
2127 #endif
2128 
2129      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
2130    end if
2131  end if
2132 #endif
2133 
2134 end subroutine xmpi_sum_dp6d

ABINIT/xmpi_sum_dp7d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_dp7d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision seven-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

2181 subroutine xmpi_sum_dp7d(xval,comm,ier)
2182 
2183 !Arguments-------------------------
2184  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:,:)
2185  integer ,intent(in) :: comm
2186  integer ,intent(out) :: ier
2187 
2188 !Local variables-------------------
2189 #if defined HAVE_MPI
2190  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6,n7,nn,nproc_space_comm
2191  integer(kind=int64) :: ntot
2192  real(dp),allocatable :: xsum(:,:,:,:,:,:,:)
2193 #endif
2194 
2195 ! *************************************************************************
2196  ier=0
2197 #if defined HAVE_MPI
2198  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
2199    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
2200    if (nproc_space_comm /= 1) then
2201      n1 = size(xval,dim=1)
2202      n2 = size(xval,dim=2)
2203      n3 = size(xval,dim=3)
2204      n4 = size(xval,dim=4)
2205      n5 = size(xval,dim=5)
2206      n6 = size(xval,dim=6)
2207      n7 = size(xval,dim=7)
2208 
2209      !This product of dimensions can be greater than a 32bit integer
2210      !We use a INT64 to store it. If it is too large, we switch to an
2211      !alternate routine because MPI<4 doesnt handle 64 bit counts.
2212      ntot=int(n1*n2*n3*n4*n5*n6*n7,kind=int64)
2213      if (ntot<=xmpi_maxint32_64) then
2214        nn=n1*n2*n3*n4*n5*n6*n7 ; my_dt=MPI_DOUBLE_PRECISION ; my_op=MPI_SUM
2215      else
2216        nn=1 ; call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
2217      end if
2218 
2219 !    Accumulate xval on all proc. in comm
2220 #if defined HAVE_MPI2_INPLACE
2221      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
2222        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
2223        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
2224      else
2225 #endif
2226        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6,n7), ier)
2227        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_dp7d')
2228        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
2229        xval (:,:,:,:,:,:,:) = xsum(:,:,:,:,:,:,:)
2230        ABI_FREE(xsum)
2231 #if defined HAVE_MPI2_INPLACE
2232      endif
2233 #endif
2234 
2235      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
2236    end if
2237  end if
2238 #endif
2239 
2240 end subroutine xmpi_sum_dp7d

ABINIT/xmpi_sum_dpn [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_dpn

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: one-dimensional double precision arrays.

INPUTS

  n1= first dimension of the array
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

950 subroutine xmpi_sum_dpn(xval,n1,comm,ier)
951 
952 !Arguments-------------------------
953  real(dp), DEV_CONTARRD intent(inout) :: xval(:)
954  integer ,intent(in) :: n1
955  integer ,intent(in) :: comm
956  integer ,intent(out) :: ier
957 
958 !Local variables-------------------
959 #if defined HAVE_MPI
960  integer :: nproc_space_comm
961  real(dp) , allocatable :: xsum(:)
962 #endif
963 
964 ! *************************************************************************
965 
966  ier=0
967 #if defined HAVE_MPI
968  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
969    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
970    if (nproc_space_comm /= 1) then
971 
972 !    Accumulate xval on all proc. in comm
973 #if defined HAVE_MPI2_INPLACE
974      if (xmpi_use_inplace_operations) then
975        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,n1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
976      else
977 #endif
978        ABI_STAT_MALLOC(xsum,(n1), ier)
979        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_dpn')
980        call MPI_ALLREDUCE(xval,xsum,n1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
981        xval (:) = xsum(:)
982        ABI_FREE(xsum)
983 #if defined HAVE_MPI2_INPLACE
984      end if
985 #endif
986 
987    end if
988  end if
989 #endif
990 
991 end subroutine xmpi_sum_dpn
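
  A hypothetical usage sketch for this explicit-count specific (the routine and
  variable names below are illustrative, not ABINIT API). Note that n1 should equal
  the full buffer size: the fallback path copies the temporary back with
  xval(:) = xsum(:), which assumes matching shapes. The sketch assumes that
  defs_basis provides the dp kind and that m_xmpi exports the generic xmpi_sum
  and xmpi_abort.

 subroutine accumulate_contributions(fsum, comm)
   use defs_basis, only : dp
   use m_xmpi
   implicit none
   real(dp), intent(inout) :: fsum(:)   ! partial sums on entry, global sums on exit
   integer, intent(in) :: comm
   integer :: ierr

   ! Resolves to xmpi_sum_dpn: the element count is passed explicitly.
   call xmpi_sum(fsum, size(fsum), comm, ierr)
   if (ierr /= 0) call xmpi_abort(msg='xmpi_sum failed in accumulate_contributions')
 end subroutine accumulate_contributions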

ABINIT/xmpi_sum_dpv [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_dpv

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision scalars.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= scalar to be summed.

SOURCE

876 subroutine xmpi_sum_dpv(xval,comm,ier)
877 
878 !Arguments-------------------------
879  real(dp),intent(inout) :: xval
880  integer ,intent(in) :: comm
881  integer ,intent(out) :: ier
882 
883 !Local variables-------------------
884 #if defined HAVE_MPI
885  integer :: nproc_space_comm
886  real(dp) :: arr_xsum(1)
887 #endif
888 
889 ! *************************************************************************
890 
891  ier=0
892 #if defined HAVE_MPI
893  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
894 !  Accumulate xval on all proc. in comm
895    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
896    if (nproc_space_comm /= 1) then
897      call MPI_ALLREDUCE([xval],arr_xsum,1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
898      xval  = arr_xsum(1)
899    end if
900  end if
901 #endif
902 
903 end subroutine xmpi_sum_dpv

ABINIT/xmpi_sum_dpvt [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_dpvt

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: double precision scalars.

INPUTS

  xval= scalar to be summed
  comm= MPI communicator

OUTPUT

  xsum= receive buffer
  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  None

SOURCE

794 subroutine xmpi_sum_dpvt(xval,xsum,comm,ier)
795 
796 !Arguments-------------------------
797  real(dp),intent(in) :: xval
798  real(dp),intent(out) :: xsum
799  integer ,intent(in) :: comm
800  integer ,intent(out)   :: ier
801 
802 !Local variables-------------------
803 #if defined HAVE_MPI
804  integer :: nproc_space_comm
805  real(dp) :: arr_xsum(1)
806 #endif
807 
808 ! *************************************************************************
809 
810  ier=0
811 #if defined HAVE_MPI
812  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
813    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
814    if (nproc_space_comm /= 1) then
815 !    Accumulate xval on all proc. in comm
816      call MPI_ALLREDUCE([xval],arr_xsum,1,MPI_DOUBLE_PRECISION,MPI_SUM,comm,ier)
817      xsum=arr_xsum(1)
818    else
819      xsum=xval
820    end if
821  else
822 #endif
823    xsum=xval
824 #if defined HAVE_MPI
825  end if
826 #endif
827 
828 end subroutine xmpi_sum_dpvt
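
  A hypothetical sketch contrasting the two scalar specifics: xmpi_sum_dpv overwrites
  xval in place, while xmpi_sum_dpvt keeps xval and returns the sum in xsum. It
  assumes, as stated in the NOTES below for xmpi_sum_int, that the specifics are
  bound to the generic xmpi_sum, and that m_xmpi exports xmpi_init, xmpi_end and
  xmpi_world while defs_basis provides the dp kind.

 program demo_scalar_sum
   use defs_basis, only : dp
   use m_xmpi
   implicit none
   real(dp) :: e_local, e_global
   integer :: ierr

   call xmpi_init()
   e_local = 1.0_dp

   ! xmpi_sum_dpvt: e_local is preserved, e_global receives the sum over all ranks.
   call xmpi_sum(e_local, e_global, xmpi_world, ierr)

   ! xmpi_sum_dpv: e_local itself is replaced by the sum over all ranks.
   call xmpi_sum(e_local, xmpi_world, ierr)

   call xmpi_end()
 end program demo_scalar_sum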

ABINIT/xmpi_sum_int [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_int

FUNCTION

  This module contains functions that call MPI routines,
  provided the code is compiled with the MPI CPP flags.
  xmpi_sum is the generic interface.

COPYRIGHT

  Copyright (C) 2001-2024 ABINIT group (AR,XG,MB)
  This file is distributed under the terms of the
  GNU General Public License, see ~ABINIT/COPYING
  or http://www.gnu.org/copyleft/gpl.txt .

NOTES

  MPI2 defines the option MPI_IN_PLACE to do the SUM in-place in the case of intra-communicators.
  The additional array xsum is therefore not needed when HAVE_MPI2_INPLACE is defined.

PARENTS

CHILDREN

      mpi_allreduce,xmpi_abort

SOURCE

28 subroutine xmpi_sum_int(xval,comm,ier)
29 
30 !Arguments ------------------------------------
31  integer, DEV_CONTARRD intent(inout) :: xval(:)
32  integer,intent(in)                  :: comm
33  integer,intent(out)                 :: ier
34 
35 !Local variables-------------------------------
36 #if defined HAVE_MPI
37  integer :: n1
38  integer,allocatable :: xsum(:)
39 #endif
40 
41 ! *************************************************************************
42 
43  ier=0
44 #if defined HAVE_MPI
45  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
46    n1 = size(xval)
47 
48    !  Accumulate xval on all proc. in comm
49 #if defined HAVE_MPI2_INPLACE
50    if (xmpi_use_inplace_operations) then
51      call MPI_ALLREDUCE(MPI_IN_PLACE,xval,n1,MPI_INTEGER,MPI_SUM,comm,ier)
52    else
53 #endif
54      ABI_STAT_MALLOC(xsum,(n1), ier)
55      if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_int')
56      call MPI_ALLREDUCE(xval,xsum,n1,MPI_INTEGER,MPI_SUM,comm,ier)
57      xval (:) = xsum(:)
58      ABI_FREE(xsum)
59 #if defined HAVE_MPI2_INPLACE
60    end if
61 #endif
62 
63  end if
64 #endif
65 
66 end subroutine xmpi_sum_int
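
  Depending on HAVE_MPI2_INPLACE, the routine above compiles into one of two code
  paths. The plain-MPI program below (not ABINIT code) sketches both paths side by
  side for a small integer buffer, assuming only the standard mpi module.

 program demo_two_paths
   use mpi
   implicit none
   integer, parameter :: n = 4
   integer :: xval(n), xsum(n), ierr

   call MPI_INIT(ierr)

   ! Path 1: in-place reduction, no temporary buffer (HAVE_MPI2_INPLACE defined).
   xval = 1
   call MPI_ALLREDUCE(MPI_IN_PLACE, xval, n, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)

   ! Path 2: reduce into a temporary, then copy back (the #else branch).
   xval = 1
   call MPI_ALLREDUCE(xval, xsum, n, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)
   xval(:) = xsum(:)

   call MPI_FINALIZE(ierr)
 end program demo_two_paths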

ABINIT/xmpi_sum_int2d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_int2d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: two-dimensional integer arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

395 subroutine xmpi_sum_int2d(xval,comm,ier)
396 
397 !Arguments-------------------------
398  integer, DEV_CONTARRD intent(inout) :: xval(:,:)
399  integer,intent(in) :: comm
400  integer,intent(out) :: ier
401 
402 !Local variables-------------------
403 #if defined HAVE_MPI
404  integer :: my_dt,my_op,n1,n2,nn,nproc_space_comm
405  integer(kind=int64) :: ntot
406  integer,allocatable :: xsum(:,:)
407 #endif
408 
409 ! *************************************************************************
410 
411  ier=0
412 #if defined HAVE_MPI
413  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
414    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
415    if (nproc_space_comm /= 1) then
416      n1 =size(xval,dim=1)
417      n2 =size(xval,dim=2)
418 
419      !This product of dimensions can be greater than a 32bit integer
420      !We use a INT64 to store it. If it is too large, we switch to an
421      !alternate routine because MPI<4 doesnt handle 64 bit counts.
422      ntot=int(n1*n2,kind=int64)
423      if (ntot<=xmpi_maxint32_64) then
424        nn=n1*n2 ; my_dt=MPI_INTEGER ; my_op=MPI_SUM
425      else
426        nn=1 ; call xmpi_largetype_create(ntot,MPI_INTEGER,my_dt,my_op,MPI_SUM)
427      end if
428 
429 !    Accumulate xval on all proc. in comm
430 #if defined HAVE_MPI2_INPLACE
431      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
432        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
433        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
434      else
435 #endif
436        ABI_STAT_MALLOC(xsum,(n1,n2), ier)
437        if (ier/=0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_int2d')
438        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
439        xval (:,:) = xsum(:,:)
440        ABI_FREE(xsum)
441 #if defined HAVE_MPI2_INPLACE
442      end if
443 #endif
444 
445      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
446    end if
447  end if
448 #endif
449 
450 end subroutine xmpi_sum_int2d

ABINIT/xmpi_sum_int2t [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_int2t

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: one-dimensional integer arrays; the result is returned in the
  separate buffer xsum, so xval is not overwritten.

INPUTS

  n1= first dimension of the array
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array
  xsum= receive buffer

SOURCE

348 subroutine xmpi_sum_int2t(xval,xsum,n1,comm,ier)
349 
350 !Arguments-------------------------
351  integer, DEV_CONTARRD intent(inout) :: xval(:),xsum(:)
352  integer,intent(in) :: n1
353  integer,intent(in) :: comm
354  integer,intent(out) :: ier
355 
356 ! *************************************************************************
357 
358  ier=0
359 #if defined HAVE_MPI
360  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
361 !  Accumulate xval on all proc. in comm
362    call MPI_ALLREDUCE(xval,xsum,n1,MPI_INTEGER,MPI_SUM,comm,ier)
363  else
364 #endif
365    xsum=xval
366 #if defined HAVE_MPI
367  end if
368 #endif
369 
370 end subroutine xmpi_sum_int2t
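
  A hypothetical sketch of this "no transfer back" variant: each rank keeps its
  local counts while a second buffer receives the global totals. The routine and
  variable names are illustrative; the call assumes that xmpi_sum_int2t is
  reachable through the generic xmpi_sum exported by m_xmpi.

 subroutine gather_band_counts(nband_loc, nband_tot, comm)
   use m_xmpi
   implicit none
   integer, intent(inout) :: nband_loc(:)   ! per-rank counts, left untouched
   integer, intent(inout) :: nband_tot(:)   ! receives the sum over all ranks
   integer, intent(in)    :: comm
   integer :: ierr

   ! Resolves to xmpi_sum_int2t: nband_loc is the send buffer, nband_tot the result.
   call xmpi_sum(nband_loc, nband_tot, size(nband_loc), comm, ierr)
 end subroutine gather_band_counts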

ABINIT/xmpi_sum_int3d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_int3d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: three-dimensional integer arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

497 subroutine xmpi_sum_int3d(xval,comm,ier)
498 
499 !Arguments-------------------------
500  integer, DEV_CONTARRD intent(inout) :: xval(:,:,:)
501  integer,intent(in) :: comm
502  integer,intent(out) :: ier
503 
504 !Local variables-------------------
505 #if defined HAVE_MPI
506  integer :: my_dt,my_op,n1,n2,n3,nn,nproc_space_comm
507  integer(kind=int64) :: ntot
508  integer,allocatable :: xsum(:,:,:)
509 #endif
510 
511 ! *************************************************************************
512 
513  ier=0
514 #if defined HAVE_MPI
515  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
516    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
517    if (nproc_space_comm /= 1) then
518      n1 =size(xval,dim=1)
519      n2 =size(xval,dim=2)
520      n3 =size(xval,dim=3)
521 
522      !This product of dimensions can be greater than a 32bit integer
523      !We use a INT64 to store it. If it is too large, we switch to an
524      !alternate routine because MPI<4 doesnt handle 64 bit counts.
525      ntot=int(n1*n2*n3,kind=int64)
526      if (ntot<=xmpi_maxint32_64) then
527        nn=n1*n2*n3 ; my_dt=MPI_INTEGER ; my_op=MPI_SUM
528      else
529        nn=1 ; call xmpi_largetype_create(ntot,MPI_INTEGER,my_dt,my_op,MPI_SUM)
530      end if
531 
532 !    Accumulate xval on all proc. in comm
533 #if defined HAVE_MPI2_INPLACE
534      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
535        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
536        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
537      else
538 #endif
539        ABI_STAT_MALLOC(xsum,(n1,n2,n3), ier)
540        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_int3d')
541        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
542        xval (:,:,:) = xsum(:,:,:)
543        ABI_FREE(xsum)
544 #if defined HAVE_MPI2_INPLACE
545      end if
546 #endif
547 
548      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
549    end if
550  end if
551 #endif
552 
553 end subroutine xmpi_sum_int3d

ABINIT/xmpi_sum_int4d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_int4d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: four-dimensional integer arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

600 subroutine xmpi_sum_int4d(xval,comm,ier)
601 
602 !Arguments-------------------------
603  integer, DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
604  integer,intent(in) :: comm
605  integer,intent(out) :: ier
606 
607 !Local variables-------------------
608 #if defined HAVE_MPI
609  integer :: my_dt,my_op,n1,n2,n3,n4,nn,nproc_space_comm
610  integer(kind=int64) :: ntot
611  integer,allocatable :: xsum(:,:,:,:)
612 #endif
613 
614 ! *************************************************************************
615 
616  ier=0
617 #if defined HAVE_MPI
618  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
619    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
620    if (nproc_space_comm /= 1) then
621      n1 =size(xval,dim=1)
622      n2 =size(xval,dim=2)
623      n3 =size(xval,dim=3)
624      n4 =size(xval,dim=4)
625 
626      !This product of dimensions can be greater than a 32bit integer
627      !We use a INT64 to store it. If it is too large, we switch to an
628      !alternate routine because MPI<4 doesnt handle 64 bit counts.
629      ntot=int(n1*n2*n3*n4,kind=int64)
630      if (ntot<=xmpi_maxint32_64) then
631        nn=n1*n2*n3*n4 ; my_dt=MPI_INTEGER ; my_op=MPI_SUM
632      else
633        nn=1 ; call xmpi_largetype_create(ntot,MPI_INTEGER,my_dt,my_op,MPI_SUM)
634      end if
635 
636 !    Accumulate xval on all proc. in comm
637 #if defined HAVE_MPI2_INPLACE
638      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
639        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
640        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
641      else
642 #endif
643        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
644        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_int4d')
645        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
646        xval (:,:,:,:) = xsum(:,:,:,:)
647        ABI_FREE(xsum)
648 #if defined HAVE_MPI2_INPLACE
649      end if
650 #endif
651 
652      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
653    end if
654  end if
655 #endif
656 
657 end subroutine xmpi_sum_int4d

ABINIT/xmpi_sum_intn [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_intn

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: one-dimensional integer arrays.

INPUTS

  n1= first dimension of the array
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

257 subroutine xmpi_sum_intn(xval,n1,comm,ier)
258 
259 !Arguments-------------------------
260  integer, DEV_CONTARRD intent(inout) :: xval(:)
261  integer,intent(in) :: n1
262  integer,intent(in) :: comm
263  integer,intent(out) :: ier
264 
265 !Local variables-------------------
266 #if defined HAVE_MPI
267  integer :: nproc_space_comm
268  integer , allocatable :: xsum(:)
269 #endif
270 
271 ! *************************************************************************
272 
273  ier=0
274 #if defined HAVE_MPI
275  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
276    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
277    if (nproc_space_comm /= 1) then
278 
279 !    Accumulate xval on all proc. in comm
280 #if defined HAVE_MPI2_INPLACE
281      if (xmpi_use_inplace_operations) then
282        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,n1,MPI_INTEGER,MPI_SUM,comm,ier)
283      else
284 #endif
285        ABI_STAT_MALLOC(xsum,(n1), ier)
286        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_intn')
287        call MPI_ALLREDUCE(xval,xsum,n1,MPI_INTEGER,MPI_SUM,comm,ier)
288        xval (:) = xsum(:)
289        ABI_FREE(xsum)
290 #if defined HAVE_MPI2_INPLACE
291      endif
292 #endif
293 
294    end if
295  end if
296 #endif
297 
298 end subroutine xmpi_sum_intn

ABINIT/xmpi_sum_intv [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_intv

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: scalar integers.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= scalar to be summed.

SOURCE

113 subroutine xmpi_sum_intv(xval,comm,ier)
114 
115 !Arguments----------------------
116  integer,intent(inout) :: xval
117  integer,intent(in)    :: comm
118  integer,intent(out)   :: ier
119 
120 !Local variables----------------
121 #if defined HAVE_MPI
122  integer :: arr_xsum(1)
123 #endif
124 
125 ! *************************************************************************
126 
127  ier=0
128 #if defined HAVE_MPI
129  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
130 !  Accumulate xval on all proc. in comm
131    call MPI_ALLREDUCE([xval],arr_xsum,1,MPI_INTEGER,MPI_SUM,comm,ier)
132    xval = arr_xsum(1)
133  end if
134 #endif
135 end subroutine xmpi_sum_intv

ABINIT/xmpi_sum_intv2 [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_intv2

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: scalar integers; the result is returned in xsum, so xval is not overwritten.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= scalar to be summed
  xsum= receives the global sum

SOURCE

183 subroutine xmpi_sum_intv2(xval,xsum,comm,ier)
184 
185 !Arguments---------------------
186  integer,intent(inout) :: xval,xsum
187  integer,intent(in) :: comm
188  integer,intent(out) :: ier
189 
190 !Local variables----------------
191 #if defined HAVE_MPI
192  integer :: arr_xsum(1)
193 #endif
194 
195 ! *************************************************************************
196 
197  ier=0
198 #if defined HAVE_MPI
199  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
200    call MPI_ALLREDUCE([xval],arr_xsum,1,MPI_INTEGER,MPI_SUM,comm,ier)
201    xsum=arr_xsum(1)
202  else
203 #endif
204    xsum=xval
205 #if defined HAVE_MPI
206  end if
207 #endif
208 
209 end subroutine xmpi_sum_intv2

ABINIT/xmpi_sum_sp2d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_sp2d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: single precision two-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1039 subroutine xmpi_sum_sp2d(xval,comm,ier)
1040 
1041 !Arguments-------------------------
1042  real(sp), DEV_CONTARRD intent(inout) :: xval(:,:)
1043  integer ,intent(in) :: comm
1044  integer ,intent(out) :: ier
1045 
1046 !Local variables-------------------
1047 #if defined HAVE_MPI
1048  integer :: my_dt,my_op,n1,n2,nn,nproc_space_comm
1049  integer(kind=int64) :: ntot
1050  real(sp),allocatable :: xsum(:,:)
1051 #endif
1052 
1053 ! *************************************************************************
1054 
1055  ier=0
1056 #if defined HAVE_MPI
1057  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1058    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1059    if (nproc_space_comm /= 1) then
1060      n1 = size(xval,dim=1)
1061      n2 = size(xval,dim=2)
1062 
1063      !This product of dimensions can be greater than a 32bit integer
1064      !We use a INT64 to store it. If it is too large, we switch to an
1065      !alternate routine because MPI<4 doesnt handle 64 bit counts.
1066      ntot=int(n1*n2,kind=int64)
1067      if (ntot<=xmpi_maxint32_64) then
1068        nn=n1*n2 ; my_dt=MPI_REAL ; my_op=MPI_SUM
1069      else
1070        nn=1 ; call xmpi_largetype_create(ntot,MPI_REAL,my_dt,my_op,MPI_SUM)
1071      end if
1072 
1073 !    Accumulate xval on all proc. in comm
1074 #if defined HAVE_MPI2_INPLACE
1075      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1076        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1077        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1078      else
1079 #endif
1080        ABI_STAT_MALLOC(xsum,(n1,n2), ier)
1081        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_sp2d')
1082        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1083        xval (:,:) = xsum(:,:)
1084        ABI_FREE(xsum)
1085 #if defined HAVE_MPI2_INPLACE
1086      end if
1087 #endif
1088 
1089      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1090    end if
1091  end if
1092 #endif
1093 
1094 end subroutine xmpi_sum_sp2d
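
  From here on the single-precision specifics mirror the double precision ones, the
  only substantive difference being the MPI datatype: real(sp) buffers are reduced
  with MPI_REAL, real(dp) buffers with MPI_DOUBLE_PRECISION, since the datatype
  handed to MPI_ALLREDUCE must match the kind of the buffer. A minimal plain-MPI
  illustration (not ABINIT code):

 program demo_kind_match
   use mpi
   implicit none
   integer, parameter :: sp = kind(1.0), dp = kind(1.0d0)
   real(sp) :: a(2,2)
   real(dp) :: b(2,2)
   integer :: ierr

   call MPI_INIT(ierr)
   a = 1.0_sp ; b = 1.0_dp
   ! Single precision buffer -> MPI_REAL
   call MPI_ALLREDUCE(MPI_IN_PLACE, a, size(a), MPI_REAL, MPI_SUM, MPI_COMM_WORLD, ierr)
   ! Double precision buffer -> MPI_DOUBLE_PRECISION
   call MPI_ALLREDUCE(MPI_IN_PLACE, b, size(b), MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)
   call MPI_FINALIZE(ierr)
 end program demo_kind_match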

ABINIT/xmpi_sum_sp3d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_sp3d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: single precision three-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1141 subroutine xmpi_sum_sp3d(xval,comm,ier)
1142 
1143 !Arguments-------------------------
1144  real(sp), DEV_CONTARRD intent(inout) :: xval(:,:,:)
1145  integer ,intent(in) :: comm
1146  integer ,intent(out)   :: ier
1147 
1148 !Local variables-------------------
1149 #if defined HAVE_MPI
1150  integer :: my_dt,my_op,n1,n2,n3,nn,nproc_space_comm
1151  integer(kind=int64) :: ntot
1152  real(sp),allocatable :: xsum(:,:,:)
1153 #endif
1154 
1155 ! *************************************************************************
1156 
1157  ier=0
1158 #if defined HAVE_MPI
1159  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1160    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1161    if (nproc_space_comm /= 1) then
1162      n1 = size(xval,dim=1)
1163      n2 = size(xval,dim=2)
1164      n3 = size(xval,dim=3)
1165 
1166      !This product of dimensions can be greater than a 32bit integer
1167      !We use a INT64 to store it. If it is too large, we switch to an
1168      !alternate routine because MPI<4 doesnt handle 64 bit counts.
1169      ntot=int(n1*n2*n3,kind=int64)
1170      if (ntot<=xmpi_maxint32_64) then
1171        nn=n1*n2*n3 ; my_dt=MPI_REAL ; my_op=MPI_SUM
1172      else
1173        nn=1 ; call xmpi_largetype_create(ntot,MPI_REAL,my_dt,my_op,MPI_SUM)
1174      end if
1175 
1176 !    Accumulate xval on all proc. in comm
1177 #if defined HAVE_MPI2_INPLACE
1178      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1179        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1180        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1181      else
1182 #endif
1183        ABI_STAT_MALLOC(xsum,(n1,n2,n3), ier)
1184        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_sp3d')
1185        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1186        xval (:,:,:) = xsum(:,:,:)
1187        ABI_FREE(xsum)
1188 #if defined HAVE_MPI2_INPLACE
1189      end if
1190 #endif
1191 
1192      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1193    end if
1194  end if
1195 #endif
1196 
1197 end subroutine xmpi_sum_sp3d

ABINIT/xmpi_sum_sp4d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_sp4d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: single precision four-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1243 subroutine xmpi_sum_sp4d(xval,comm,ier)
1244 
1245 !Arguments-------------------------
1246  real(sp),DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
1247  integer ,intent(in) :: comm
1248  integer ,intent(out) :: ier
1249 
1250 !Local variables-------------------
1251 #if defined HAVE_MPI
1252  integer :: my_dt,my_op,n1,n2,n3,n4,nn,nproc_space_comm
1253  integer(kind=int64) :: ntot
1254  real(sp),allocatable :: xsum(:,:,:,:)
1255 #endif
1256 
1257 ! *************************************************************************
1258 
1259  ier=0
1260 #if defined HAVE_MPI
1261  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1262    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1263    if (nproc_space_comm /= 1) then
1264      n1 = size(xval,dim=1)
1265      n2 = size(xval,dim=2)
1266      n3 = size(xval,dim=3)
1267      n4 = size(xval,dim=4)
1268 
1269      !This product of dimensions can be greater than a 32bit integer
1270      !We use a INT64 to store it. If it is too large, we switch to an
1271      !alternate routine because MPI<4 doesnt handle 64 bit counts.
1272      ntot=int(n1*n2*n3*n4,kind=int64)
1273      if (ntot<=xmpi_maxint32_64) then
1274        nn=n1*n2*n3*n4 ; my_dt=MPI_REAL ; my_op=MPI_SUM
1275      else
1276        nn=1 ; call xmpi_largetype_create(ntot,MPI_REAL,my_dt,my_op,MPI_SUM)
1277      end if
1278 
1279 !    Accumulate xval on all proc. in comm
1280 #if defined HAVE_MPI2_INPLACE
1281      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1282        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1283        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1284      else
1285 #endif
1286        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
1287        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_sp4d')
1288        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1289        xval (:,:,:,:) = xsum(:,:,:,:)
1290        ABI_FREE(xsum)
1291 #if defined HAVE_MPI2_INPLACE
1292      endif
1293 #endif
1294 
1295      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1296    end if
1297  end if
1298 #endif
1299 
1300 end subroutine xmpi_sum_sp4d

ABINIT/xmpi_sum_sp5d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_sp5d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: single precision five-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1346 subroutine xmpi_sum_sp5d(xval,comm,ier)
1347 
1348 !Arguments-------------------------
1349  real(sp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:)
1350  integer ,intent(in) :: comm
1351  integer ,intent(out) :: ier
1352 
1353 !Local variables-------------------
1354 #if defined HAVE_MPI
1355  integer :: my_dt,my_op,n1,n2,n3,n4,n5,nn,nproc_space_comm
1356  integer(kind=int64) :: ntot
1357  real(sp),allocatable :: xsum(:,:,:,:,:)
1358 #endif
1359 
1360 ! *************************************************************************
1361 
1362  ier=0
1363 #if defined HAVE_MPI
1364  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1365    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1366    if (nproc_space_comm /= 1) then
1367      n1 = size(xval,dim=1)
1368      n2 = size(xval,dim=2)
1369      n3 = size(xval,dim=3)
1370      n4 = size(xval,dim=4)
1371      n5 = size(xval,dim=5)
1372 
1373      !This product of dimensions can be greater than a 32bit integer
1374      !We use a INT64 to store it. If it is too large, we switch to an
1375      !alternate routine because MPI<4 doesnt handle 64 bit counts.
1376      ntot=int(n1*n2*n3*n4*n5,kind=int64)
1377      if (ntot<=xmpi_maxint32_64) then
1378        nn=n1*n2*n3*n4*n5 ; my_dt=MPI_REAL ; my_op=MPI_SUM
1379      else
1380        nn=1 ; call xmpi_largetype_create(ntot,MPI_REAL,my_dt,my_op,MPI_SUM)
1381      end if
1382 
1383 !    Accumulate xval on all proc. in comm
1384 #if defined HAVE_MPI2_INPLACE
1385      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1386        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1387        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1388      else
1389 #endif
1390        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5), ier)
1391        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_sp5d')
1392        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1393        xval (:,:,:,:,:) = xsum(:,:,:,:,:)
1394        ABI_FREE(xsum)
1395 #if defined HAVE_MPI2_INPLACE
1396      endif
1397 #endif
1398 
1399      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1400    end if
1401  end if
1402 #endif
1403 
1404 end subroutine xmpi_sum_sp5d

ABINIT/xmpi_sum_sp6d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_sp6d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: single precision six-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1450 subroutine xmpi_sum_sp6d(xval,comm,ier)
1451 
1452 !Arguments-------------------------
1453  real(sp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:)
1454  integer ,intent(in) :: comm
1455  integer ,intent(out) :: ier
1456 
1457 !Local variables-------------------
1458 #if defined HAVE_MPI
1459  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6,nn,nproc_space_comm
1460  integer(kind=int64) :: ntot
1461  real(sp), allocatable :: xsum(:,:,:,:,:,:)
1462 #endif
1463 
1464 ! *************************************************************************
1465 
1466  ier=0
1467 #if defined HAVE_MPI
1468  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1469    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1470    if (nproc_space_comm /= 1) then
1471      n1 = size(xval,dim=1)
1472      n2 = size(xval,dim=2)
1473      n3 = size(xval,dim=3)
1474      n4 = size(xval,dim=4)
1475      n5 = size(xval,dim=5)
1476      n6 = size(xval,dim=6)
1477 
1478      !This product of dimensions can be greater than a 32bit integer
1479      !We use a INT64 to store it. If it is too large, we switch to an
1480      !alternate routine because MPI<4 doesnt handle 64 bit counts.
1481      ntot=int(n1*n2*n3*n4*n5*n6,kind=int64)
1482      if (ntot<=xmpi_maxint32_64) then
1483        nn=n1*n2*n3*n4*n5*n6 ; my_dt=MPI_REAL ; my_op=MPI_SUM
1484      else
1485        nn=1 ; call xmpi_largetype_create(ntot,MPI_REAL,my_dt,my_op,MPI_SUM)
1486      end if
1487 
1488 !    Accumulate xval on all proc. in comm
1489 #if defined HAVE_MPI2_INPLACE
1490      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1491        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1492        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1493      else
1494 #endif
1495        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6), ier)
1496        if (ier/=0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_sp6d')
1497        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1498        xval (:,:,:,:,:,:) = xsum(:,:,:,:,:,:)
1499        ABI_FREE(xsum)
1500 #if defined HAVE_MPI2_INPLACE
1501      end if
1502 #endif
1503 
1504      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1505    end if
1506  end if
1507 #endif
1508 
1509 end subroutine xmpi_sum_sp6d

ABINIT/xmpi_sum_sp7d [ Functions ]

[ Top ] [ Functions ]

NAME

  xmpi_sum_sp7d

FUNCTION

  Combines values from all processes and distributes
  the result back to all processes.
  Target: single precision seven-dimensional arrays.

INPUTS

  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1555 subroutine xmpi_sum_sp7d(xval,comm,ier)
1556 
1557 !Arguments-------------------------
1558  real(sp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:,:)
1559  integer ,intent(in) :: comm
1560  integer ,intent(out) :: ier
1561 
1562 !Local variables-------------------
1563 #if defined HAVE_MPI
1564  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6,n7,nn,nproc_space_comm
1565  integer(kind=int64) :: ntot
1566  real(sp),allocatable :: xsum(:,:,:,:,:,:,:)
1567 #endif
1568 
1569 ! *************************************************************************
1570  ier=0
1571 #if defined HAVE_MPI
1572  if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
1573    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1574    if (nproc_space_comm /= 1) then
1575      n1 = size(xval,dim=1)
1576      n2 = size(xval,dim=2)
1577      n3 = size(xval,dim=3)
1578      n4 = size(xval,dim=4)
1579      n5 = size(xval,dim=5)
1580      n6 = size(xval,dim=6)
1581      n7 = size(xval,dim=7)
1582 
1583      !This product of dimensions can be greater than a 32bit integer
1584      !We use a INT64 to store it. If it is too large, we switch to an
1585      !alternate routine because MPI<4 doesnt handle 64 bit counts.
1586      ntot=int(n1*n2*n3*n4*n5*n6*n7,kind=int64)
1587      if (ntot<=xmpi_maxint32_64) then
1588        nn=n1*n2*n3*n4*n5*n6*n7 ; my_dt=MPI_REAL ; my_op=MPI_SUM
1589      else
1590        nn=1 ; call xmpi_largetype_create(ntot,MPI_REAL,my_dt,my_op,MPI_SUM)
1591      end if
1592 
1593 !    Accumulate xval on all proc. in comm
1594 #if defined HAVE_MPI2_INPLACE
1595      if (xmpi_use_inplace_operations .and. my_op == MPI_SUM) then
1596        if (my_op/=MPI_SUM) call xmpi_abort(msg="Too many data for in-place reductions!")
1597        call MPI_ALLREDUCE(MPI_IN_PLACE,xval,nn,my_dt,my_op,comm,ier)
1598      else
1599 #endif
1600        ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6,n7), ier)
1601        if (ier/= 0) call xmpi_abort(msg='error allocating xsum in xmpi_sum_sp7d')
1602        call MPI_ALLREDUCE(xval,xsum,nn,my_dt,my_op,comm,ier)
1603        xval (:,:,:,:,:,:,:) = xsum(:,:,:,:,:,:,:)
1604        ABI_FREE(xsum)
1605 #if defined HAVE_MPI2_INPLACE
1606      endif
1607 #endif
1608 
1609      if (ntot>xmpi_maxint32_64) call xmpi_largetype_free(my_dt,my_op)
1610    end if
1611  end if
1612 #endif
1613 
1614 end subroutine xmpi_sum_sp7d