gnu: hdf5: Remove use of deprecated MPI1 symbols.

* gnu/packages/patches/hdf5-1.8-mpi-deprecations.patch,
  gnu/packages/patches/hdf5-mpi-deprecations.patch: New files.
* gnu/local.mk (dist_patch_DATA): Add them.
* gnu/packages/maths.scm (hdf5, hdf5-1.10)[source]: Use them.
Eric Bavier 2019-02-06 15:32:22 -06:00
parent 2bdf26f1c8
commit 7fedc3fdfb
4 changed files with 236 additions and 2 deletions
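
For reference, each patch maps a removed MPI-1 symbol to its MPI-2 replacement: MPI_Address becomes MPI_Get_address, MPI_Type_struct becomes MPI_Type_create_struct, MPI_Type_hindexed becomes MPI_Type_create_hindexed, and MPI_NULL_COPY_FN becomes MPI_COMM_NULL_COPY_FN. The following minimal sketch is not HDF5 code (the struct and names are illustrative); it only shows the replacement calls computing displacements and committing a derived datatype, as the testpar/t_cache.c hunks below do:

#include <mpi.h>

struct mssg { int req; long len; };   /* stand-in for HDF5's struct mssg_t */

static int build_mssg_type(MPI_Datatype *out)
{
    struct mssg sample;               /* used to compute displacements */
    MPI_Aint base, displs[2];
    int block_len[2] = { 1, 1 };
    MPI_Datatype types[2] = { MPI_INT, MPI_LONG };

    /* MPI_Get_address replaces the removed MPI_Address. */
    if (MPI_SUCCESS != MPI_Get_address(&sample, &base) ||
        MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0]) ||
        MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[1]))
        return -1;
    displs[0] -= base;                /* make displacements struct-relative */
    displs[1] -= base;

    /* MPI_Type_create_struct replaces the removed MPI_Type_struct. */
    if (MPI_SUCCESS != MPI_Type_create_struct(2, block_len, displs, types, out))
        return -1;
    return MPI_Type_commit(out);
}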

gnu/local.mk

@@ -873,6 +873,8 @@ dist_patch_DATA = \
 %D%/packages/patches/hdf4-reproducibility.patch \
 %D%/packages/patches/hdf4-shared-fortran.patch \
 %D%/packages/patches/hdf5-config-date.patch \
+%D%/packages/patches/hdf5-mpi-deprecations.patch \
+%D%/packages/patches/hdf5-1.8-mpi-deprecations.patch \
 %D%/packages/patches/hdf-eos2-build-shared.patch \
 %D%/packages/patches/hdf-eos2-remove-gctp.patch \
 %D%/packages/patches/hdf-eos2-fortrantests.patch \

gnu/packages/maths.scm

@@ -764,7 +764,8 @@ incompatible with HDF5.")
"/src/hdf5-" version ".tar.bz2")))
(sha256
(base32 "0f3jfbqpaaq21ighi40qzs52nb52kc2d2yjk541rjmsx20b3ih2r"))
(patches (list (search-patch "hdf5-config-date.patch")))))
(patches (search-patches "hdf5-config-date.patch"
"hdf5-1.8-mpi-deprecations.patch"))))
(build-system gnu-build-system)
(inputs
`(("zlib" ,zlib)))
@@ -877,7 +878,8 @@ extremely large and complex data collections.")
"/src/hdf5-" version ".tar.bz2")))
(sha256
(base32 "1pr85fa1sh2ky6ai2hs3f21lp252grl2cq3wbyi4rh7dm83gyrqj"))
(patches (list (search-patch "hdf5-config-date.patch")))))))
(patches (search-patches "hdf5-config-date.patch"
"hdf5-mpi-deprecations.patch"))))))
(define-public hdf-java
(package

gnu/packages/patches/hdf5-1.8-mpi-deprecations.patch (new file)

@@ -0,0 +1,169 @@
--- a/src/H5.c
+++ b/src/H5.c
@@ -138,7 +138,7 @@
if (mpi_initialized && !mpi_finalized) {
int key_val;
- if(MPI_SUCCESS != (mpi_code = MPI_Comm_create_keyval(MPI_NULL_COPY_FN,
+ if(MPI_SUCCESS != (mpi_code = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,
(MPI_Comm_delete_attr_function *)H5_mpi_delete_cb,
&key_val, NULL)))
HMPI_GOTO_ERROR(FAIL, "MPI_Comm_create_keyval failed", mpi_code)
--- hdf5-1.8.19/testpar/t_cache.c
+++ hdf5-1.8.19/testpar/t_cache.c
@@ -1187,20 +1187,20 @@
struct mssg_t sample; /* used to compute displacements */
/* setup the displacements array */
- if ( ( MPI_SUCCESS != MPI_Address(&sample.req, &displs[0]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.src, &displs[1]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.dest, &displs[2]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.mssg_num, &displs[3]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.base_addr, &displs[4]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.len, &displs[5]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.ver, &displs[6]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.count, &displs[7]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.magic, &displs[8]) ) ) {
+ if ( ( MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]) ) ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n",
world_mpi_rank, fcn_name);
}
@@ -1215,14 +1215,14 @@
if ( success ) {
- result = MPI_Type_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
+ result = MPI_Type_create_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
if ( result != MPI_SUCCESS ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Type_struct() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n",
world_mpi_rank, fcn_name);
}
}
--- hdf5-1.8.19/testpar/t_mpi.c
+++ hdf5-1.8.19/testpar/t_mpi.c
@@ -279,7 +279,7 @@
printf("Skipped GB file range test "
"because MPI_Offset cannot support it\n");
}else{
- buf = HDmalloc(MB);
+ buf = (char *)HDmalloc(MB);
VRFY((buf!=NULL), "malloc succeed");
/* open a new file. Remove it first in case it exists. */
@@ -624,7 +624,7 @@
and this platform.
1. Details for the test:
-1) Create two derived datatypes with MPI_Type_hindexed:
+1) Create two derived datatypes with MPI_Type_create_hindexed:
datatype1:
count = 1, blocklens = 1, offsets = 0,
base type = MPI_BYTE(essentially a char)
@@ -633,7 +633,7 @@
base type = MPI_BYTE
2) Using these two derived datatypes,
- Build another derived datatype with MPI_Type_struct:
+ Build another derived datatype with MPI_Type_create_struct:
advtype: derived from datatype1 and datatype2
advtype:
count = 2, blocklens[0] = 1, blocklens[1]=1,
@@ -676,10 +676,9 @@
int mpi_err_strlen;
int mpi_err;
int i;
- int nerrors = 0; /* number of errors */
MPI_Datatype etype,filetype;
MPI_Datatype adv_filetype,bas_filetype[2];
- MPI_Datatype etypenew, filetypenew;
+ MPI_Datatype filetypenew;
MPI_Offset disp;
MPI_Status Status;
MPI_Aint adv_disp[2];
@@ -715,7 +714,7 @@
blocklens[0] = 1;
offsets[0] = 0;
- if((mpi_err= MPI_Type_hindexed(count,blocklens,offsets,MPI_BYTE,&filetype))
+ if((mpi_err= MPI_Type_create_hindexed(count,blocklens,offsets,MPI_BYTE,&filetype))
!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
@@ -731,7 +730,7 @@
count = 1;
blocklens[0]=1;
offsets[0] = 1;
- if((mpi_err= MPI_Type_hindexed(count,blocklens,offsets,MPI_BYTE,&filetypenew))
+ if((mpi_err= MPI_Type_create_hindexed(count,blocklens,offsets,MPI_BYTE,&filetypenew))
!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
@@ -752,10 +751,10 @@
bas_filetype[0] = filetype;
bas_filetype[1] = filetypenew;
- if((mpi_err= MPI_Type_struct(outcount,adv_blocklens,adv_disp,bas_filetype,&adv_filetype))
+ if((mpi_err= MPI_Type_create_struct(outcount,adv_blocklens,adv_disp,bas_filetype,&adv_filetype))
!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_struct failed (%s)\n", mpi_err_str);
+ printf("MPI_Type_create_struct failed (%s)\n", mpi_err_str);
return 1;
}
if((mpi_err=MPI_Type_commit(&adv_filetype))!=MPI_SUCCESS){
@@ -842,7 +841,7 @@
processes are needed.
1. Details for the test:
-1) Create one derived datatype with MPI_Type_hindexed:
+1) Create one derived datatype with MPI_Type_create_hindexed:
2) Choosing at least two processes to contribute none for IO with
the buf size inside MPI_Write_at_all to 0.
@@ -898,7 +897,7 @@
offsets[1] = (mpi_size+mpi_rank)*count;
if(count !=0) {
- if((mpi_err = MPI_Type_hindexed(2,
+ if((mpi_err = MPI_Type_create_hindexed(2,
blocklens,
offsets,
etype,
@@ -914,7 +913,7 @@
return 1;
} /* end if */
- if((mpi_err = MPI_Type_hindexed(2,
+ if((mpi_err = MPI_Type_create_hindexed(2,
blocklens,
offsets,
etype,
@@ -1098,7 +1097,7 @@
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("Failed to turn off atexit processing. Continue.\n", mpi_rank);
+ printf("Failed to turn off atexit processing. Continue.\n");
};
H5open();
if (parse_options(argc, argv) != 0){

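The t_mpi.c hunks above replace MPI_Type_hindexed with MPI_Type_create_hindexed, whose argument list is unchanged apart from const-qualification in later standards. A minimal sketch of the same pattern (assumed names, not HDF5 code), selecting two single bytes at byte displacements 0 and 1 of a buffer:

#include <mpi.h>

static int make_byte_pair_type(MPI_Datatype *out)
{
    int blocklens[2] = { 1, 1 };
    MPI_Aint offsets[2] = { 0, 1 };   /* absolute byte displacements */

    /* MPI_Type_create_hindexed replaces the removed MPI_Type_hindexed. */
    if (MPI_SUCCESS != MPI_Type_create_hindexed(2, blocklens, offsets,
                                                MPI_BYTE, out))
        return -1;
    return MPI_Type_commit(out);
}
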
gnu/packages/patches/hdf5-mpi-deprecations.patch (new file)

@@ -0,0 +1,61 @@
--- a/src/H5.c
+++ b/src/H5.c
@@ -138,7 +138,7 @@
if (mpi_initialized && !mpi_finalized) {
int key_val;
- if(MPI_SUCCESS != (mpi_code = MPI_Comm_create_keyval(MPI_NULL_COPY_FN,
+ if(MPI_SUCCESS != (mpi_code = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,
(MPI_Comm_delete_attr_function *)H5_mpi_delete_cb,
&key_val, NULL)))
HMPI_GOTO_ERROR(FAIL, "MPI_Comm_create_keyval failed", mpi_code)
--- hdf5-1.10.4/testpar/t_cache.c
+++ hdf5-1.10.4/testpar/t_cache.c
@@ -1217,20 +1217,20 @@
struct mssg_t sample; /* used to compute displacements */
/* setup the displacements array */
- if ( ( MPI_SUCCESS != MPI_Address(&sample.req, &displs[0]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.src, &displs[1]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.dest, &displs[2]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.mssg_num, &displs[3]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.base_addr, &displs[4]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.len, &displs[5]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.ver, &displs[6]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.count, &displs[7]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.magic, &displs[8]) ) ) {
+ if ( ( MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]) ) ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n",
world_mpi_rank, FUNC);
}
@@ -1245,14 +1245,14 @@
if ( success ) {
- result = MPI_Type_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
+ result = MPI_Type_create_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
if ( result != MPI_SUCCESS ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Type_struct() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n",
world_mpi_rank, FUNC);
}
}
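
Both patches also make the same one-line change in src/H5.c: the communicator keyval is registered with MPI_COMM_NULL_COPY_FN, the MPI-2 predefined no-op copy callback, instead of the removed MPI-1 name MPI_NULL_COPY_FN. A minimal sketch of that registration, where delete_cb is a hypothetical stand-in for HDF5's H5_mpi_delete_cb:

#include <mpi.h>
#include <stddef.h>

static int delete_cb(MPI_Comm comm, int keyval, void *attr, void *extra)
{
    (void)comm; (void)keyval; (void)attr; (void)extra;
    return MPI_SUCCESS;   /* per-communicator teardown work would go here */
}

static int register_keyval(int *key_val)
{
    /* MPI_COMM_NULL_COPY_FN replaces the removed MPI_NULL_COPY_FN. */
    return MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, delete_cb,
                                  key_val, NULL);
}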