ABINIT/xmpi_sum_master [ Functions ]

NAME

  xmpi_sum_master

FUNCTION

  This module contains functions that call MPI routines
  when the code is compiled with the MPI CPP flags.
  xmpi_sum_master is the generic interface.

COPYRIGHT

  Copyright (C) 2001-2022 ABINIT group (AR,XG,MB)
  This file is distributed under the terms of the
  GNU General Public License, see ~ABINIT/COPYING
  or http://www.gnu.org/copyleft/gpl.txt .

NOTES

  The workspace array xsum is filled with zeros to avoid a SIGFPE in [mpiio][t28_MPI4][np=4] on tikal_gnu_4.9_mpich.
  On this bot, the code is compiled with -ffpe-trap, and the illegal operation in the MPI library
  makes tests using xmpi_sum_master abort.
  Strictly speaking, the initialization is not needed because xsum has intent(out); it is done only to work around a bug in mpich3-3.1.3 (gcc492).
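
  For illustration, a minimal usage sketch of the generic interface is given
  below. It is hypothetical (not part of this file) and assumes the usual
  ABINIT conventions: dp from defs_basis, and xmpi_sum_master and xmpi_abort
  exported by the m_xmpi module.

    subroutine demo_sum_on_master(ene, master, comm)
      use defs_basis, only : dp
      use m_xmpi,     only : xmpi_sum_master, xmpi_abort
      real(dp),intent(inout) :: ene(:)   ! per-process contributions on entry
      integer,intent(in) :: master, comm
      integer :: ier
      ! Sum ene over all ranks of comm; with MPI_REDUCE underneath,
      ! the full sum is defined on the master rank only.
      call xmpi_sum_master(ene, master, comm, ier)
      if (ier /= 0) call xmpi_abort(msg='xmpi_sum_master failed')
    end subroutine demo_sum_on_master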

SOURCE


ABINIT/xmpi_sum_master_c1cplx [ Functions ]

NAME

  xmpi_sum_master_c1cplx

FUNCTION

  Reduces values on all processes to a single value.
  Target: one-dimensional complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

802 subroutine xmpi_sum_master_c1cplx(xval,master,comm,ier)
803 
804 !Arguments-------------------------
805  complex(spc), DEV_CONTARRD intent(inout) :: xval(:)
806  integer ,intent(in) :: master
807  integer ,intent(in) :: comm
808  integer ,intent(out) :: ier
809 
810 !Local variables-------------------
811 #if defined HAVE_MPI
812  integer :: n1,nproc_space_comm
813  complex(spc),allocatable :: xsum(:)
814 #endif
815 
816 ! *************************************************************************
817 
818  ier=0
819 #if defined HAVE_MPI
820  if (comm /= MPI_COMM_NULL) then
821    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
822    if (nproc_space_comm /= 1) then
823      n1 = size(xval,dim=1)
824      ABI_STAT_MALLOC(xsum,(n1), ier)
825      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
826      xsum = (0_sp,0_sp) ! See notes
827 !    Collect xval from processors on master in comm
828      call MPI_REDUCE(xval,xsum,n1,MPI_COMPLEX,MPI_SUM,master,comm,ier)
829      xval = xsum
830      ABI_FREE(xsum)
831    end if
832  end if
833 #endif
834 
835 end subroutine xmpi_sum_master_c1cplx

ABINIT/xmpi_sum_master_c1dpc [ Functions ]

NAME

  xmpi_sum_master_c1dpc

FUNCTION

  Reduces values on all processes to a single value.
  Target: one-dimensional double complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1156 subroutine xmpi_sum_master_c1dpc(xval,master,comm,ier)
1157 
1158 !Arguments-------------------------
1159  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:)
1160  integer,intent(in) :: master
1161  integer,intent(in) :: comm
1162  integer,intent(out) :: ier
1163 
1164 !Local variables-------------------
1165 #if defined HAVE_MPI
1166  integer :: n1
1167  integer :: nproc_space_comm
1168  complex(dpc),allocatable :: xsum(:)
1169 #endif
1170 
1171 ! *************************************************************************
1172 
1173  ier=0
1174 #if defined HAVE_MPI
1175  if (comm /= MPI_COMM_NULL) then
1176    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1177    if (nproc_space_comm /= 1) then
1178      n1 = size(xval,dim=1)
1179 !    Collect xval from processors on master in comm
1180      ABI_STAT_MALLOC(xsum,(n1), ier)
1181      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
1182      xsum = (0_dp,0_dp) ! See notes
1183      call MPI_REDUCE(xval,xsum,n1,MPI_DOUBLE_COMPLEX,MPI_SUM,master,comm,ier)
1184      xval (:) = xsum(:)
1185      ABI_FREE(xsum)
1186    end if
1187  end if
1188 #endif
1189 
1190 end subroutine xmpi_sum_master_c1dpc

ABINIT/xmpi_sum_master_c2cplx [ Functions ]

NAME

  xmpi_sum_master_c2cplx

FUNCTION

  Reduces values on all processes to a single value.
  Target: two-dimensional complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

858 subroutine xmpi_sum_master_c2cplx(xval,master,comm,ier)
859 
860 !Arguments-------------------------
861  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:)
862  integer,intent(in) :: master
863  integer,intent(in) :: comm
864  integer,intent(out) :: ier
865 
866 !Local variables-------------------
867 #if defined HAVE_MPI
868  integer :: my_dt,my_op,n1,n2
869  integer(kind=int64) :: ntot
870  integer :: nproc_space_comm
871  complex(spc),allocatable :: xsum(:,:)
872 #endif
873 
874 ! *************************************************************************
875 
876  ier=0
877 #if defined HAVE_MPI
878  if (comm /= MPI_COMM_NULL) then
879    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
880    if (nproc_space_comm /= 1) then
881      n1 = size(xval,dim=1)
882      n2 = size(xval,dim=2)
883 
884      ABI_STAT_MALLOC(xsum,(n1,n2), ier)
885      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
886      xsum = (0_sp,0_sp) ! See notes
887 
888      !This product of dimensions can be greater than a 32-bit integer
889      !We use an INT64 to store it. If it is too large, we switch to an
890      !alternate routine because MPI<4 doesn't handle 64-bit counts.
891      ntot=int(n1*n2,kind=int64)
892 
893 !    Accumulate xval on all proc. in comm
894      if (ntot<=xmpi_maxint32_64) then
895        call MPI_reduce(xval,xsum,n1*n2,MPI_COMPLEX,MPI_SUM,master,comm,ier)
896      else
897        call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
898        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
899        call xmpi_largetype_free(my_dt,my_op)
900      end if
901 
902      xval (:,:) = xsum(:,:)
903      ABI_FREE(xsum)
904    end if
905  end if
906 #endif
907 
908 end subroutine xmpi_sum_master_c2cplx
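
  As an illustration of the count-limit test above: xmpi_maxint32_64 is, as its
  name suggests, the largest signed 32-bit value stored in an INT64, i.e.
  2**31 - 1 = 2147483647. A complex(spc) buffer of shape (32768, 65536) holds
  32768 * 65536 = 2**31 = 2147483648 elements, which exceeds that limit; the
  routine then builds a large derived datatype with xmpi_largetype_create and
  performs the reduction with a count of 1 of that datatype (my_dt, my_op).
  Buffers of at most 2147483647 elements take the plain MPI_REDUCE branch with
  the count n1*n2. The same test appears in all multi-dimensional variants
  below.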

ABINIT/xmpi_sum_master_c2dpc [ Functions ]

NAME

  xmpi_sum_master_c2dpc

FUNCTION

  Reduces values on all processes to a single value.
  Target: two-dimensional double complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1213 subroutine xmpi_sum_master_c2dpc(xval,master,comm,ier)
1214 
1215 !Arguments-------------------------
1216  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:)
1217  integer ,intent(in) :: master
1218  integer ,intent(in) :: comm
1219  integer ,intent(out) :: ier
1220 
1221 !Local variables-------------------
1222 #if defined HAVE_MPI
1223  integer :: my_dt,my_op,n1,n2
1224  integer(kind=int64) :: ntot
1225  complex(dpc) , allocatable :: xsum(:,:)
1226  integer :: nproc_space_comm
1227 #endif
1228 
1229 ! *************************************************************************
1230 
1231  ier=0
1232 #if defined HAVE_MPI
1233  if (comm /= MPI_COMM_NULL) then
1234    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1235    if (nproc_space_comm /= 1) then
1236      n1 = size(xval,dim=1)
1237      n2 = size(xval,dim=2)
1238 
1239      ABI_STAT_MALLOC(xsum,(n1,n2), ier)
1240      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
1241      xsum = (0_dp,0_dp) ! See notes
1242 
1243      !This product of dimensions can be greater than a 32-bit integer
1244      !We use an INT64 to store it. If it is too large, we switch to an
1245      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1246      ntot=int(n1*n2,kind=int64)
1247 
1248 !    Accumulate xval on all proc. in comm
1249      if (ntot<=xmpi_maxint32_64) then
1250        call MPI_reduce(xval,xsum,n1*n2,MPI_DOUBLE_COMPLEX,MPI_SUM,master,comm,ier)
1251      else
1252        call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
1253        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
1254        call xmpi_largetype_free(my_dt,my_op)
1255      end if
1256 
1257      xval (:,:) = xsum(:,:)
1258      ABI_FREE(xsum)
1259    end if
1260  end if
1261 #endif
1262 
1263 end subroutine xmpi_sum_master_c2dpc

ABINIT/xmpi_sum_master_c3cplx [ Functions ]

NAME

  xmpi_sum_master_c3cplx

FUNCTION

  Reduces values on all processes to a single value.
  Target: three-dimensional complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

931 subroutine xmpi_sum_master_c3cplx(xval,master,comm,ier)
932 
933 !Arguments-------------------------
934  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:,:)
935  integer,intent(in) :: master
936  integer,intent(in) :: comm
937  integer,intent(out) :: ier
938 
939 !Local variables-------------------
940 #if defined HAVE_MPI
941  integer :: my_dt,my_op,n1,n2,n3
942  integer(kind=int64) :: ntot
943  complex(spc), allocatable :: xsum(:,:,:)
944  integer :: nproc_space_comm
945 #endif
946 
947 ! *************************************************************************
948 
949  ier=0
950 #if defined HAVE_MPI
951  if (comm /= MPI_COMM_NULL) then
952    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
953    if (nproc_space_comm /= 1) then
954      n1 = size(xval,dim=1)
955      n2 = size(xval,dim=2)
956      n3 = size(xval,dim=3)
957 
958      ABI_STAT_MALLOC(xsum,(n1,n2,n3), ier)
959      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
960      xsum = (0_sp,0_sp) ! See notes
961 
962      !This product of dimensions can be greater than a 32-bit integer
963      !We use an INT64 to store it. If it is too large, we switch to an
964      !alternate routine because MPI<4 doesn't handle 64-bit counts.
965      ntot=int(n1*n2*n3,kind=int64)
966 
967 !    Accumulate xval on all proc. in comm
968      if (ntot<=xmpi_maxint32_64) then
969        call MPI_reduce(xval,xsum,n1*n2*n3,MPI_COMPLEX,MPI_SUM,master,comm,ier)
970      else
971        call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
972        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
973        call xmpi_largetype_free(my_dt,my_op)
974      end if
975 
976      xval (:,:,:) = xsum(:,:,:)
977      ABI_FREE(xsum)
978    end if
979  end if
980 #endif
981 
982 end subroutine xmpi_sum_master_c3cplx

ABINIT/xmpi_sum_master_c3dpc [ Functions ]

NAME

  xmpi_sum_master_c3dpc

FUNCTION

  Reduces values on all processes to a single value.
  Target: three-dimensional double complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1286 subroutine xmpi_sum_master_c3dpc(xval,master,comm,ier)
1287 
1288 !Arguments-------------------------
1289  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:,:)
1290  integer,intent(in) :: master
1291  integer,intent(in) :: comm
1292  integer,intent(out) :: ier
1293 
1294 !Local variables-------------------
1295 #if defined HAVE_MPI
1296  integer :: my_dt,my_op,n1,n2,n3
1297  integer(kind=int64) :: ntot
1298  complex(dpc) , allocatable :: xsum(:,:,:)
1299  integer :: nproc_space_comm
1300 #endif
1301 
1302 ! *************************************************************************
1303 
1304  ier=0
1305 #if defined HAVE_MPI
1306  if (comm /= MPI_COMM_NULL) then
1307    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1308    if (nproc_space_comm /= 1) then
1309      n1 = size(xval,dim=1)
1310      n2 = size(xval,dim=2)
1311      n3 = size(xval,dim=3)
1312 
1313      ABI_STAT_MALLOC(xsum,(n1,n2,n3), ier)
1314      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
1315      xsum = (0_dp,0_dp) ! See notes
1316 
1317      !This product of dimensions can be greater than a 32-bit integer
1318      !We use an INT64 to store it. If it is too large, we switch to an
1319      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1320      ntot=int(n1*n2*n3,kind=int64)
1321 
1322 !    Accumulate xval on all proc. in comm
1323      if (ntot<=xmpi_maxint32_64) then
1324        call MPI_reduce(xval,xsum,n1*n2*n3,MPI_DOUBLE_COMPLEX,MPI_SUM,master,comm,ier)
1325      else
1326        call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
1327        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
1328        call xmpi_largetype_free(my_dt,my_op)
1329      end if
1330 
1331      xval (:,:,:) = xsum(:,:,:)
1332      ABI_FREE(xsum)
1333    end if
1334  end if
1335 #endif
1336 end subroutine xmpi_sum_master_c3dpc

ABINIT/xmpi_sum_master_c4cplx [ Functions ]

NAME

  xmpi_sum_master_c4cplx

FUNCTION

  Reduces values on all processes to a single value.
  Target: four-dimensional complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1005 subroutine xmpi_sum_master_c4cplx(xval,master,comm,ier)
1006 
1007 !Arguments-------------------------
1008  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
1009  integer,intent(in) :: master
1010  integer,intent(in) :: comm
1011  integer,intent(out) :: ier
1012 
1013 !Local variables-------------------
1014 #if defined HAVE_MPI
1015  integer :: my_dt,my_op,n1,n2,n3,n4
1016  integer(kind=int64) :: ntot
1017  integer :: nproc_space_comm
1018  complex(spc), allocatable :: xsum(:,:,:,:)
1019 #endif
1020 
1021 ! *************************************************************************
1022 
1023  ier=0
1024 #if defined HAVE_MPI
1025  if (comm /= MPI_COMM_NULL) then
1026    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1027    if (nproc_space_comm /= 1) then
1028      n1 = size(xval,dim=1)
1029      n2 = size(xval,dim=2)
1030      n3 = size(xval,dim=3)
1031      n4 = size(xval,dim=4)
1032 
1033      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
1034      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
1035      xsum = (0_sp,0_sp) ! See notes
1036 
1037      !This product of dimensions can be greater than a 32-bit integer
1038      !We use an INT64 to store it. If it is too large, we switch to an
1039      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1040      ntot=int(n1*n2*n3*n4,kind=int64)
1041 
1042 !    Accumulate xval on all proc. in comm
1043      if (ntot<=xmpi_maxint32_64) then
1044        call MPI_reduce(xval,xsum,n1*n2*n3*n4,MPI_COMPLEX,MPI_SUM,master,comm,ier)
1045      else
1046        call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
1047        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
1048        call xmpi_largetype_free(my_dt,my_op)
1049      end if
1050 
1051      xval (:,:,:,:) = xsum(:,:,:,:)
1052      ABI_FREE(xsum)
1053    end if
1054  end if
1055 #endif
1056 end subroutine xmpi_sum_master_c4cplx

ABINIT/xmpi_sum_master_c4dpc [ Functions ]

NAME

  xmpi_sum_master_c4dpc

FUNCTION

  Reduces values on all processes to a single value.
  Target: four-dimensional double complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1359 subroutine xmpi_sum_master_c4dpc(xval,master,comm,ier)
1360 
1361 !Arguments-------------------------
1362  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
1363  integer,intent(in) :: master
1364  integer,intent(in) :: comm
1365  integer,intent(out) :: ier
1366 
1367 !Local variables-------------------
1368 #if defined HAVE_MPI
1369  integer :: my_dt,my_op,n1,n2,n3,n4
1370  integer(kind=int64) :: ntot
1371  complex(dpc) , allocatable :: xsum(:,:,:,:)
1372  integer :: nproc_space_comm
1373 #endif
1374 
1375 ! *************************************************************************
1376 
1377  ier=0
1378 #if defined HAVE_MPI
1379  if (comm /= MPI_COMM_NULL) then
1380    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1381    if (nproc_space_comm /= 1) then
1382      n1 = size(xval,dim=1)
1383      n2 = size(xval,dim=2)
1384      n3 = size(xval,dim=3)
1385      n4 = size(xval,dim=4)
1386 
1387      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
1388      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
1389      xsum = (0_dp,0_dp) ! See notes
1390 
1391      !This product of dimensions can be greater than a 32-bit integer
1392      !We use an INT64 to store it. If it is too large, we switch to an
1393      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1394      ntot=int(n1*n2*n3*n4,kind=int64)
1395 
1396 !    Accumulate xval on all proc. in comm
1397      if (ntot<=xmpi_maxint32_64) then
1398        call MPI_reduce(xval,xsum,n1*n2*n3*n4,MPI_DOUBLE_COMPLEX,MPI_SUM,master,comm,ier)
1399      else
1400        call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
1401        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
1402        call xmpi_largetype_free(my_dt,my_op)
1403      end if
1404 
1405      xval (:,:,:,:) = xsum(:,:,:,:)
1406      ABI_FREE(xsum)
1407    end if
1408  end if
1409 #endif
1410 end subroutine xmpi_sum_master_c4dpc

ABINIT/xmpi_sum_master_c5cplx [ Functions ]

NAME

  xmpi_sum_master_c5cplx

FUNCTION

  Reduces values on all processes to a single value.
  Target: five-dimensional single precision complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1081 subroutine xmpi_sum_master_c5cplx(xval,master,comm,ier)
1082 
1083 !Arguments-------------------------
1084  complex(spc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:)
1085  integer,intent(in) :: master
1086  integer,intent(in) :: comm
1087  integer,intent(out) :: ier
1088 
1089 !Local variables-------------------
1090 #if defined HAVE_MPI
1091  integer :: my_dt,my_op,n1,n2,n3,n4,n5
1092  integer(kind=int64) :: ntot
1093  complex(spc),allocatable :: xsum(:,:,:,:,:)
1094  integer :: nproc_space_comm
1095 #endif
1096 
1097 ! *************************************************************************
1098 
1099  ier=0
1100 #if defined HAVE_MPI
1101  if (comm /= MPI_COMM_NULL) then
1102    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1103    if (nproc_space_comm /= 1) then
1104      n1 = size(xval,dim=1)
1105      n2 = size(xval,dim=2)
1106      n3 = size(xval,dim=3)
1107      n4 = size(xval,dim=4)
1108      n5 = size(xval,dim=5)
1109 
1110      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5), ier)
1111      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
1112      xsum = (0_sp,0_sp) ! See notes
1113 
1114      !This product of dimensions can be greater than a 32-bit integer
1115      !We use an INT64 to store it. If it is too large, we switch to an
1116      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1117      ntot=int(n1*n2*n3*n4*n5,kind=int64)
1118 
1119 !    Accumulate xval on all proc. in comm
1120      if (ntot<=xmpi_maxint32_64) then
1121        call MPI_reduce(xval,xsum,n1*n2*n3*n4*n5,MPI_COMPLEX,MPI_SUM,master,comm,ier)
1122      else
1123        call xmpi_largetype_create(ntot,MPI_COMPLEX,my_dt,my_op,MPI_SUM)
1124        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
1125        call xmpi_largetype_free(my_dt,my_op)
1126      end if
1127 
1128      xval = xsum
1129      ABI_FREE(xsum)
1130    end if
1131  end if
1132 #endif
1133 end subroutine xmpi_sum_master_c5cplx

ABINIT/xmpi_sum_master_c5dpc [ Functions ]

NAME

  xmpi_sum_master_c5dpc

FUNCTION

  Reduces values on all processes to a single value.
  Target: five-dimensional double complex arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

1433 subroutine xmpi_sum_master_c5dpc(xval,master,comm,ier)
1434 
1435 !Arguments-------------------------
1436  complex(dpc), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:)
1437  integer,intent(in) :: master
1438  integer,intent(in) :: comm
1439  integer,intent(out) :: ier
1440 
1441 !Local variables-------------------
1442 #if defined HAVE_MPI
1443  integer :: my_dt,my_op,n1,n2,n3,n4,n5
1444  integer(kind=int64) :: ntot
1445  complex(dpc),allocatable :: xsum(:,:,:,:,:)
1446  integer :: nproc_space_comm
1447 #endif
1448 
1449 ! *************************************************************************
1450 
1451  ier=0
1452 #if defined HAVE_MPI
1453  if (comm /= MPI_COMM_NULL) then
1454    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
1455    if (nproc_space_comm /= 1) then
1456      n1 = size(xval,dim=1)
1457      n2 = size(xval,dim=2)
1458      n3 = size(xval,dim=3)
1459      n4 = size(xval,dim=4)
1460      n5 = size(xval,dim=5)
1461 
1462      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5), ier)
1463      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
1464      xsum = (0_dp,0_dp) ! See notes
1465 
1466      !This product of dimensions can be greater than a 32-bit integer
1467      !We use an INT64 to store it. If it is too large, we switch to an
1468      !alternate routine because MPI<4 doesn't handle 64-bit counts.
1469      ntot=int(n1*n2*n3*n4*n5,kind=int64)
1470 
1471 !    Accumulate xval on all proc. in comm
1472      if (ntot<=xmpi_maxint32_64) then
1473        call MPI_reduce(xval,xsum,n1*n2*n3*n4*n5,MPI_DOUBLE_COMPLEX,MPI_SUM,master,comm,ier)
1474      else
1475        call xmpi_largetype_create(ntot,MPI_DOUBLE_COMPLEX,my_dt,my_op,MPI_SUM)
1476        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
1477        call xmpi_largetype_free(my_dt,my_op)
1478      end if
1479 
1480      xval (:,:,:,:,:) = xsum(:,:,:,:,:)
1481      ABI_FREE(xsum)
1482    end if
1483  end if
1484 #endif
1485 end subroutine xmpi_sum_master_c5dpc

ABINIT/xmpi_sum_master_dp [ Functions ]

NAME

  xmpi_sum_master_dp

FUNCTION

  Reduces values on all processes to a single value.
  Target: double precision real scalars.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer (scalar)

SOURCE

 97 subroutine xmpi_sum_master_dp(xval,master,comm,ier)
 98 
 99 !Arguments-------------------------
100  real(dp),intent(inout) :: xval
101  integer ,intent(in) :: master
102  integer ,intent(in) :: comm
103  integer ,intent(out)   :: ier
104 
105 !Local variables-------------------
106 #if defined HAVE_MPI
107  real(dp) :: xsum
108  integer :: nproc_space_comm
109 #endif
110 
111 ! *************************************************************************
112 
113  ier=0
114 #if defined HAVE_MPI
115  if (comm /= MPI_COMM_NULL) then
116    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
117    if (nproc_space_comm /= 1) then
118 !    Accumulate xval on all proc. in comm
119      call MPI_REDUCE(xval,xsum,1,MPI_DOUBLE_PRECISION,MPI_SUM,master,comm,ier)
120      xval = xsum
121    end if
122  end if
123 #endif
124 end subroutine xmpi_sum_master_dp
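
  A short, hypothetical sketch of the scalar call (same assumptions as the
  sketch in the NOTES of the generic xmpi_sum_master entry above; in addition
  it assumes the helper xmpi_comm_rank from m_xmpi):

    subroutine demo_scalar_sum(etotal, master, comm)
      use defs_basis, only : dp
      use m_xmpi,     only : xmpi_sum_master, xmpi_comm_rank
      real(dp),intent(inout) :: etotal   ! per-process contribution on entry
      integer,intent(in) :: master, comm
      integer :: ier
      call xmpi_sum_master(etotal, master, comm, ier)
      ! Only rank master holds the summed value after the call.
      if (xmpi_comm_rank(comm) == master) write(*,*) 'total = ', etotal
    end subroutine demo_scalar_sum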

ABINIT/xmpi_sum_master_dp1d [ Functions ]

NAME

  xmpi_sum_master_dp1d

FUNCTION

  Reduces values on all processes to a single value.
  Target: double precision one-dimensional arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

220 subroutine xmpi_sum_master_dp1d(xval,master,comm,ier)
221 
222 !Arguments-------------------------
223  real(dp), DEV_CONTARRD intent(inout) :: xval(:)
224  integer ,intent(in) :: master
225  integer ,intent(in) :: comm
226  integer ,intent(out) :: ier
227 
228 !Local variables-------------------
229 #if defined HAVE_MPI
230  integer :: n1
231  real(dp) , allocatable :: xsum(:)
232  integer :: nproc_space_comm
233 #endif
234 
235 ! *************************************************************************
236 
237  ier=0
238 #if defined HAVE_MPI
239  if (comm /= MPI_COMM_NULL) then
240    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
241    if (nproc_space_comm /= 1) then
242      n1 = size(xval,dim=1)
243 !    Accumulate xval on all proc. in comm
244      ABI_STAT_MALLOC(xsum,(n1), ier)
245      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
246      xsum = zero ! See notes
247      call MPI_REDUCE(xval,xsum,n1,MPI_DOUBLE_PRECISION,MPI_SUM,master,comm,ier)
248      xval (:) = xsum(:)
249      ABI_FREE(xsum)
250    end if
251  end if
252 #endif
253 end subroutine xmpi_sum_master_dp1d

ABINIT/xmpi_sum_master_dp2d [ Functions ]

NAME

  xmpi_sum_master_dp2d

FUNCTION

  Reduces values on all processes to a single value.
  Target: double precision two-dimensional arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

276 subroutine xmpi_sum_master_dp2d(xval,master,comm,ier)
277 
278 !Arguments-------------------------
279  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:)
280  integer ,intent(in) :: master
281  integer ,intent(in) :: comm
282  integer ,intent(out) :: ier
283 
284 !Local variables-------------------
285 #if defined HAVE_MPI
286  integer :: my_dt,my_op,n1,n2
287  integer(kind=int64) :: ntot
288  real(dp) , allocatable :: xsum(:,:)
289  integer :: nproc_space_comm
290 #endif
291 
292 ! *************************************************************************
293 
294  ier=0
295 #if defined HAVE_MPI
296  if (comm /= MPI_COMM_NULL) then
297    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
298    if (nproc_space_comm /= 1) then
299      n1 = size(xval,dim=1)
300      n2 = size(xval,dim=2)
301 
302      ABI_STAT_MALLOC(xsum,(n1,n2), ier)
303      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
304      xsum = zero ! See notes
305 
306      !This product of dimensions can be greater than a 32-bit integer
307      !We use an INT64 to store it. If it is too large, we switch to an
308      !alternate routine because MPI<4 doesn't handle 64-bit counts.
309      ntot=int(n1*n2,kind=int64)
310 
311 !    Accumulate xval on all proc. in comm
312      if (ntot<=xmpi_maxint32_64) then
313        call MPI_reduce(xval,xsum,n1*n2,MPI_DOUBLE_PRECISION,MPI_SUM,master,comm,ier)
314      else
315        call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
316        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
317        call xmpi_largetype_free(my_dt,my_op)
318      end if
319 
320      xval (:,:) = xsum(:,:)
321      ABI_FREE(xsum)
322    end if
323  end if
324 #endif
325 end subroutine xmpi_sum_master_dp2d

ABINIT/xmpi_sum_master_dp3d [ Functions ]

NAME

  xmpi_sum_master_dp3d

FUNCTION

  Reduces values on all processes to a single value.
  Target: double precision three-dimensional arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

348 subroutine xmpi_sum_master_dp3d(xval,master,comm,ier)
349 
350 !Arguments-------------------------
351  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:)
352  integer ,intent(in) :: master
353  integer ,intent(in) :: comm
354  integer ,intent(out) :: ier
355 
356 !Local variables-------------------
357 #if defined HAVE_MPI
358  integer :: my_dt,my_op,n1,n2,n3
359  integer(kind=int64) :: ntot
360  real(dp) , allocatable :: xsum(:,:,:)
361  integer :: nproc_space_comm
362 #endif
363 
364 ! *************************************************************************
365 
366  ier=0
367 #if defined HAVE_MPI
368  if (comm /= MPI_COMM_NULL) then
369    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
370    if (nproc_space_comm /= 1) then
371      n1 = size(xval,dim=1)
372      n2 = size(xval,dim=2)
373      n3 = size(xval,dim=3)
374      ABI_STAT_MALLOC(xsum,(n1,n2,n3), ier)
375      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
376      xsum = zero ! See notes
377 
378 
379      !This product of dimensions can be greater than a 32-bit integer
380      !We use an INT64 to store it. If it is too large, we switch to an
381      !alternate routine because MPI<4 doesn't handle 64-bit counts.
382      ntot=int(n1*n2*n3,kind=int64)
383 
384 !    Accumulate xval on all proc. in comm
385      if (ntot<=xmpi_maxint32_64) then
386        call MPI_reduce(xval,xsum,n1*n2*n3,MPI_DOUBLE_PRECISION,MPI_SUM,master,comm,ier)
387      else
388        call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
389        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
390        call xmpi_largetype_free(my_dt,my_op)
391      end if
392 
393      xval (:,:,:) = xsum(:,:,:)
394      ABI_FREE(xsum)
395    end if
396  end if
397 #endif
398 
399 end subroutine xmpi_sum_master_dp3d

ABINIT/xmpi_sum_master_dp4d [ Functions ]

NAME

  xmpi_sum_master_dp4d

FUNCTION

  Reduces values on all processes to a single value.
  Target: double precision four-dimensional arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

422 subroutine xmpi_sum_master_dp4d(xval,master,comm,ier)
423 
424 !Arguments-------------------------
425  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
426  integer ,intent(in) :: master
427  integer ,intent(in) :: comm
428  integer ,intent(out) :: ier
429 
430 !Local variables-------------------
431 #if defined HAVE_MPI
432  integer :: my_dt,my_op,n1,n2,n3,n4
433  integer(kind=int64) :: ntot
434  real(dp) , allocatable :: xsum(:,:,:,:)
435  integer :: nproc_space_comm
436 #endif
437 
438 ! *************************************************************************
439 
440  ier=0
441 #if defined HAVE_MPI
442  if (comm /= MPI_COMM_NULL) then
443    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
444    if (nproc_space_comm /= 1) then
445      n1 = size(xval,dim=1)
446      n2 = size(xval,dim=2)
447      n3 = size(xval,dim=3)
448      n4 = size(xval,dim=4)
449      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
450      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
451      xsum = zero ! See notes
452 
453      !This product of dimensions can be greater than a 32-bit integer
454      !We use an INT64 to store it. If it is too large, we switch to an
455      !alternate routine because MPI<4 doesn't handle 64-bit counts.
456      ntot=int(n1*n2*n3*n4,kind=int64)
457 
458 !    Accumulate xval on all proc. in comm
459      if (ntot<=xmpi_maxint32_64) then
460        call MPI_reduce(xval,xsum,n1*n2*n3*n4,MPI_DOUBLE_PRECISION,MPI_SUM,master,comm,ier)
461      else
462        call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
463        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
464        call xmpi_largetype_free(my_dt,my_op)
465      end if
466 
467      xval (:,:,:,:) = xsum(:,:,:,:)
468      ABI_FREE(xsum)
469    end if
470  end if
471 #endif
472 
473 end subroutine xmpi_sum_master_dp4d

ABINIT/xmpi_sum_master_dp5d [ Functions ]

NAME

  xmpi_sum_master_dp5d

FUNCTION

  Reduces values on all processes to a single value.
  Target: double precision five-dimensional arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

496 subroutine xmpi_sum_master_dp5d(xval,master,comm,ier)
497 
498 !Arguments ------------------------------------
499  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:)
500  integer ,intent(in) :: master
501  integer ,intent(in) :: comm
502  integer ,intent(out)   :: ier
503 
504 !Local variables-------------------------------
505 #if defined HAVE_MPI
506  integer :: my_dt,my_op,n1,n2,n3,n4,n5
507  integer(kind=int64) :: ntot
508  real(dp), allocatable :: xsum(:,:,:,:,:)
509  integer :: nproc_space_comm
510 #endif
511 
512 ! *************************************************************************
513 
514  ier=0
515 #if defined HAVE_MPI
516  if (comm /= MPI_COMM_NULL) then
517    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
518    if (nproc_space_comm /= 1) then
519      n1 = size(xval,dim=1)
520      n2 = size(xval,dim=2)
521      n3 = size(xval,dim=3)
522      n4 = size(xval,dim=4)
523      n5 = size(xval,dim=5)
524 
525      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5), ier)
526      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
527      xsum = zero ! See notes
528 
529      !This product of dimensions can be greater than a 32-bit integer
530      !We use an INT64 to store it. If it is too large, we switch to an
531      !alternate routine because MPI<4 doesn't handle 64-bit counts.
532      ntot=int(n1*n2*n3*n4*n5,kind=int64)
533 
534 !    Accumulate xval on all proc. in comm
535      if (ntot<=xmpi_maxint32_64) then
536        call MPI_reduce(xval,xsum,n1*n2*n3*n4*n5,MPI_DOUBLE_PRECISION,MPI_SUM,master,comm,ier)
537      else
538        call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
539        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
540        call xmpi_largetype_free(my_dt,my_op)
541      end if
542 
543      xval (:,:,:,:,:) = xsum(:,:,:,:,:)
544      ABI_FREE(xsum)
545    end if
546  end if
547 #endif
548 
549 end subroutine xmpi_sum_master_dp5d

ABINIT/xmpi_sum_master_dp6d [ Functions ]

NAME

  xmpi_sum_master_dp6d

FUNCTION

  Reduces values on all processes to a single value.
  Target: double precision six-dimensional arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

572 subroutine xmpi_sum_master_dp6d(xval,master,comm,ier)
573 
574 !Arguments ------------------------------------
575  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:)
576  integer ,intent(in) :: master
577  integer ,intent(in) :: comm
578  integer ,intent(out) :: ier
579 
580 !Local variables-------------------------------
581 #if defined HAVE_MPI
582  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6
583  integer(kind=int64) :: ntot
584  real(dp), allocatable :: xsum(:,:,:,:,:,:)
585  integer :: nproc_space_comm
586 #endif
587 
588 ! *************************************************************************
589 
590  ier=0
591 #if defined HAVE_MPI
592  if (comm /= MPI_COMM_NULL) then
593    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
594    if (nproc_space_comm /= 1) then
595      n1 = size(xval,dim=1)
596      n2 = size(xval,dim=2)
597      n3 = size(xval,dim=3)
598      n4 = size(xval,dim=4)
599      n5 = size(xval,dim=5)
600      n6 = size(xval,dim=6)
601 
602      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6), ier)
603      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
604      xsum = zero ! See notes
605 
606      !This product of dimensions can be greater than a 32-bit integer
607      !We use an INT64 to store it. If it is too large, we switch to an
608      !alternate routine because MPI<4 doesn't handle 64-bit counts.
609      ntot=int(n1*n2*n3*n4*n5*n6,kind=int64)
610 
611 !    Accumulate xval on all proc. in comm
612      if (ntot<=xmpi_maxint32_64) then
613        call MPI_reduce(xval,xsum,n1*n2*n3*n4*n5*n6,MPI_DOUBLE_PRECISION,MPI_SUM,master,comm,ier)
614      else
615        call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
616        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
617        call xmpi_largetype_free(my_dt,my_op)
618      end if
619 
620      xval (:,:,:,:,:,:) = xsum(:,:,:,:,:,:)
621      ABI_FREE(xsum)
622    end if
623  end if
624 #endif
625 
626 end subroutine xmpi_sum_master_dp6d

ABINIT/xmpi_sum_master_dp7d [ Functions ]

NAME

  xmpi_sum_master_dp7d

FUNCTION

  Reduces values on all processes to a single value.
  Target: double precision seven-dimensional arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

649 subroutine xmpi_sum_master_dp7d(xval,master,comm,ier)
650 
651 !Arguments ------------------------------------
652  real(dp), DEV_CONTARRD intent(inout) :: xval(:,:,:,:,:,:,:)
653  integer ,intent(in) :: master
654  integer ,intent(in) :: comm
655  integer ,intent(out) :: ier
656 
657 !Local variables-------------------------------
658 #if defined HAVE_MPI
659  integer :: my_dt,my_op,n1,n2,n3,n4,n5,n6,n7
660  integer(kind=int64) :: ntot
661  real(dp), allocatable :: xsum(:,:,:,:,:,:,:)
662  integer :: nproc_space_comm
663 #endif
664 
665 ! *************************************************************************
666 
667  ier=0
668 #if defined HAVE_MPI
669  if (comm /= MPI_COMM_NULL) then
670    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
671    if (nproc_space_comm /= 1) then
672      n1 = size(xval,dim=1)
673      n2 = size(xval,dim=2)
674      n3 = size(xval,dim=3)
675      n4 = size(xval,dim=4)
676      n5 = size(xval,dim=5)
677      n6 = size(xval,dim=6)
678      n7 = size(xval,dim=7)
679 
680      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4,n5,n6,n7), ier)
681      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
682      xsum = zero ! See notes
683 
684      !This product of dimensions can be greater than a 32-bit integer
685      !We use an INT64 to store it. If it is too large, we switch to an
686      !alternate routine because MPI<4 doesn't handle 64-bit counts.
687      ntot=int(n1*n2*n3*n4*n5*n6*n7,kind=int64)
688 
689 !    Accumulate xval on all proc. in comm
690      if (ntot<=xmpi_maxint32_64) then
691        call MPI_reduce(xval,xsum,n1*n2*n3*n4*n5*n6*n7,MPI_DOUBLE_PRECISION,MPI_SUM,master,comm,ier)
692      else
693        call xmpi_largetype_create(ntot,MPI_DOUBLE_PRECISION,my_dt,my_op,MPI_SUM)
694        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
695        call xmpi_largetype_free(my_dt,my_op)
696      end if
697 
698      xval (:,:,:,:,:,:,:) = xsum(:,:,:,:,:,:,:)
699      ABI_FREE(xsum)
700    end if
701  end if
702 #endif
703 
704 end subroutine xmpi_sum_master_dp7d

ABINIT/xmpi_sum_master_int [ Functions ]

NAME

  xmpi_sum_master_int

FUNCTION

  Reduces values on all processes to a single value.
  Target: integer scalars.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer (scalar)

SOURCE

47 subroutine xmpi_sum_master_int(xval,master,comm,ier)
48 
49 !Arguments-------------------------
50  integer,intent(inout) :: xval
51  integer ,intent(in) :: master
52  integer ,intent(in) :: comm
53  integer ,intent(out)   :: ier
54 
55 !Local variables-------------------
56 #if defined HAVE_MPI
57  integer :: xsum
58  integer :: nproc_space_comm
59 #endif
60 
61 ! *************************************************************************
62 
63  ier=0
64 #if defined HAVE_MPI
65  if (comm /= MPI_COMM_NULL) then
66    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
67    if (nproc_space_comm /= 1) then
68 !    Accumulate xval on all proc. in comm
69      call MPI_REDUCE(xval,xsum,1,MPI_INTEGER,MPI_SUM,master,comm,ier)
70      xval = xsum
71    end if
72  end if
73 #endif
74 end subroutine xmpi_sum_master_int

ABINIT/xmpi_sum_master_int2d [ Functions ]

NAME

  xmpi_sum_master_int2d

FUNCTION

  Reduces values on all processes to a single value.
  Target: two-dimensional integer arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

148 subroutine xmpi_sum_master_int2d(xval,master,comm,ier)
149 
150 !Arguments ------------------------------------
151  integer, DEV_CONTARRD intent(inout) :: xval(:,:)
152  integer,intent(in) :: master,comm
153  integer,intent(out) :: ier
154 
155 !Local variables-------------------------------
156 #if defined HAVE_MPI
157  integer :: my_dt,my_op,n1,n2
158  integer(kind=int64) :: ntot
159  integer, allocatable :: xsum(:,:)
160  integer :: nproc_space_comm
161 #endif
162 
163 ! *************************************************************************
164 
165  ier=0
166 #if defined HAVE_MPI
167  if (comm /= MPI_COMM_NULL) then
168    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
169    if (nproc_space_comm /= 1) then
170      n1 = size(xval,dim=1)
171      n2 = size(xval,dim=2)
172 
173      ABI_STAT_MALLOC(xsum,(n1,n2), ier)
174      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
175      xsum = 0 ! See notes
176 
177      !This product of dimensions can be greater than a 32-bit integer
178      !We use an INT64 to store it. If it is too large, we switch to an
179      !alternate routine because MPI<4 doesn't handle 64-bit counts.
180      ntot=int(n1*n2,kind=int64)
181 
182 !    Accumulate xval on all proc. in comm
183      if (ntot<=xmpi_maxint32_64) then
184        call MPI_reduce(xval,xsum,n1*n2,MPI_INTEGER,MPI_SUM,master,comm,ier)
185      else
186        call xmpi_largetype_create(ntot,MPI_INTEGER,my_dt,my_op,MPI_SUM)
187        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
188        call xmpi_largetype_free(my_dt,my_op)
189      end if
190 
191      xval = xsum
192      ABI_FREE(xsum)
193    end if
194  end if
195 #endif
196 
197 end subroutine xmpi_sum_master_int2d

ABINIT/xmpi_sum_master_int4d [ Functions ]

NAME

  xmpi_sum_master_int4d

FUNCTION

  Reduces values on all processes to a single value.
  Target: four-dimensional integer arrays.

INPUTS

  master= master MPI node
  comm= MPI communicator

OUTPUT

  ier= exit status, a non-zero value meaning there is an error

SIDE EFFECTS

  xval= buffer array

SOURCE

727 subroutine xmpi_sum_master_int4d(xval,master,comm,ier)
728 
729 !Arguments ------------------------------------
730  integer, DEV_CONTARRD intent(inout) :: xval(:,:,:,:)
731  integer,intent(in) :: master
732  integer,intent(in) :: comm
733  integer,intent(out) :: ier
734 
735 !Local variables-------------------------------
736 #if defined HAVE_MPI
737  integer :: my_dt,my_op,n1,n2,n3,n4
738  integer(kind=int64) :: ntot
739  integer, allocatable :: xsum(:,:,:,:)
740  integer :: nproc_space_comm
741 #endif
742 
743 ! *************************************************************************
744 
745  ier=0
746 #if defined HAVE_MPI
747  if (comm /= MPI_COMM_NULL) then
748    call MPI_COMM_SIZE(comm,nproc_space_comm,ier)
749    if (nproc_space_comm /= 1) then
750      n1 = size(xval,dim=1)
751      n2 = size(xval,dim=2)
752      n3 = size(xval,dim=3)
753      n4 = size(xval,dim=4)
754 
755      ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ier)
756      if (ier /= 0) call xmpi_abort(msg='error allocating xsum')
757      xsum = 0 ! See notes
758 
759      !This product of dimensions can be greater than a 32-bit integer
760      !We use an INT64 to store it. If it is too large, we switch to an
761      !alternate routine because MPI<4 doesn't handle 64-bit counts.
762      ntot=int(n1*n2*n3*n4,kind=int64)
763 
764 !    Accumulate xval on all proc. in comm
765      if (ntot<=xmpi_maxint32_64) then
766        call MPI_reduce(xval,xsum,n1*n2*n3*n4,MPI_INTEGER,MPI_SUM,master,comm,ier)
767      else
768        call xmpi_largetype_create(ntot,MPI_INTEGER,my_dt,my_op,MPI_SUM)
769        call MPI_reduce(xval,xsum,1,my_dt,my_op,master,comm,ier)
770        call xmpi_largetype_free(my_dt,my_op)
771      end if
772 
773      xval (:,:,:,:) = xsum(:,:,:,:)
774      ABI_FREE(xsum)
775    end if
776  end if
777 #endif
778 
779 end subroutine xmpi_sum_master_int4d