MACSio  0.9
Multi-purpose, Application-Centric, Scalable I/O Proxy App
macsio_hdf5.c
1 /*
2 Copyright (c) 2015, Lawrence Livermore National Security, LLC.
3 Produced at the Lawrence Livermore National Laboratory.
4 Written by Mark C. Miller
5 
6 LLNL-CODE-676051. All rights reserved.
7 
8 This file is part of MACSio
9 
10 Please also read the LICENSE file at the top of the source code directory or
11 folder hierarchy.
12 
13 This program is free software; you can redistribute it and/or modify it under
14 the terms of the GNU General Public License (as published by the Free Software
15 Foundation) version 2, dated June 1991.
16 
17 This program is distributed in the hope that it will be useful, but WITHOUT
18 ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS
19 FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General
20 Public License for more details.
21 
22 You should have received a copy of the GNU General Public License along with
23 this program; if not, write to the Free Software Foundation, Inc., 59 Temple
24 Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26 
27 #include <limits.h>
28 #include <math.h>
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <string.h>
32 
33 #include <macsio_clargs.h>
34 #include <macsio_iface.h>
35 #include <macsio_log.h>
36 #include <macsio_main.h>
37 #include <macsio_mif.h>
38 #include <macsio_utils.h>
39 
40 #ifdef HAVE_MPI
41 #include <mpi.h>
42 #endif
43 
44 #ifdef HAVE_SILO
45 #include <silo.h> /* for the Silo block based VFD option */
46 #endif
47 
48 #include <H5pubconf.h>
49 #include <hdf5.h>
50 #include <H5Tpublic.h>
51 
52 /* Disable debugging messages */
53 
64 #ifdef HAVE_ZFP
65 
103 #include <zfp.h>
104 
105 #define PUSH_ERR(func, minor, str) \
106  H5Epush1(__FILE__, func, __LINE__, H5E_PLINE, minor, str)
107 #define H5Z_class_t_vers 2
108 #define ZFP_H5FILTER_ID 299
109 
110 static htri_t H5Z_can_apply_zfp(hid_t dcpl_id, /* dataset creation property list */
111  hid_t type_id, /* datatype */
112  hid_t space_id); /* a dataspace describing a chunk */
113 
114 static herr_t H5Z_set_local_zfp(hid_t dcpl_id, /* dataset creation property list */
115  hid_t type_id, /* datatype */
116  hid_t space_id); /* dataspace describing the chunk */
117 
118 static size_t H5Z_filter_zfp(unsigned int flags,
119  size_t cd_nelmts,
120  const unsigned int cd_values[],
121  size_t nbytes,
122  size_t *buf_size,
123  void **buf);
124 
125 static int H5Z_register_zfp(void);
126 
127 /* write a 64 bit unsigned integer to a buffer in big endian order. */
128 static void zfp_write_uint64(void* buf, uint64_t num) {
129  uint8_t* b = static_cast<uint8_t*>(buf);
130  uint64_t pow28 = (uint64_t)1 << 8;
131  for (int ii = 7; ii >= 0; ii--) {
132  b[ii] = num % pow28;
133  num = num / pow28;
134  }
135 }
136 
137 /* read a 64 bit unsigned integer from a buffer in big endian order. */
138 static uint64_t zfp_read_uint64(void* buf) {
139  uint8_t* b = static_cast<uint8_t*>(buf);
140  uint64_t num = 0, pow28 = (uint64_t)1 << 8, cp = 1;
141  for (int ii = 7; ii >= 0; ii--) {
142  num += b[ii] * cp;
143  cp *= pow28;
144  }
145  return num;
146 }
147 
148 /* write a 64 bit signed integer to a buffer in big endian order. */
149 static void zfp_write_int64(void* buf, int64_t num) {
150  uint64_t pow2x = (uint64_t)1 << 63;
151  uint64_t num_uint = (num + pow2x);
152  zfp_write_uint64(buf, num_uint);
153 }
154 
155 /* read a 64 bit signed integer from a buffer in big endian order. */
156 static int64_t zfp_read_int64(void* buf) {
157  uint64_t num_uint = zfp_read_uint64(buf);
158  uint64_t pow2x = (uint64_t)1 << 63;
159  return (num_uint -pow2x);
160 }
161 
162 /* write a 32 bit unsigned integer to a buffer in big endian order. */
163 static void zfp_write_uint32(void* buf, uint32_t num) {
164  uint8_t* b = static_cast<uint8_t*>(buf);
165  uint32_t pow28 = (uint32_t)1 << 8;
166  for (int ii = 3; ii >= 0; ii--) {
167  b[ii] = num % pow28;
168  num = num / pow28;
169  }
170 }
171 
172 /* read a 32 bit unsigned integer from a buffer in big endian order. */
173 static uint32_t zfp_read_uint32(void* buf) {
174  uint8_t* b = static_cast<uint8_t*>(buf);
175  uint32_t num = 0, pow28 = (uint32_t)1 << 8, cp = 1;
176  for (int ii = 3; ii >= 0; ii--) {
177  num += b[ii] * cp;
178  cp *= pow28;
179  }
180  return num;
181 }
182 
183 /* write a 32 bit signed integer to a buffer in big endian order. */
184 static void zfp_write_int32(void* buf, int32_t num) {
185  uint32_t pow2x = (uint32_t)1 << 31;
186  uint32_t num_uint = (num + pow2x);
187  zfp_write_uint32(buf, num_uint);
188 }
189 
190 /* read a 32 bit signed integer from a buffer in big endian order. */
191 static int32_t zfp_read_int32(void* buf) {
192  uint32_t num_uint = zfp_read_uint32(buf);
193  uint32_t pow2x = (uint32_t)1 << 31;
194  return (num_uint -pow2x);
195 }
196 
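The four helpers above byte-serialize the compression parameters that H5Z_filter_zfp() later in this file packs into a 16-byte chunk header (minbits, maxbits, maxprec, minexp). A minimal round-trip sketch of that header layout, assuming only the helpers defined above plus <assert.h>; the parameter values are illustrative, and this function is not part of the plugin:

/* Sketch (not part of the plugin): pack and unpack the 16-byte header the same
   way H5Z_filter_zfp() does further below. */
static void zfp_header_roundtrip_example(void)
{
    uint8_t hdr[16];
    zfp_write_uint32(hdr +  0, 1024u);    /* minbits */
    zfp_write_uint32(hdr +  4, 1024u);    /* maxbits */
    zfp_write_uint32(hdr +  8, 64u);      /* maxprec */
    zfp_write_int32 (hdr + 12, -1074);    /* minexp  */
    assert(zfp_read_uint32(hdr +  0) == 1024u);
    assert(zfp_read_uint32(hdr +  8) == 64u);
    assert(zfp_read_int32 (hdr + 12) == -1074);
}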
197 const H5Z_class2_t ZFP_H5Filter[1] = {{
198  H5Z_CLASS_T_VERS, /* H5Z_class_t version */
199  (H5Z_filter_t)(ZFP_H5FILTER_ID), /* Filter id number */
200  1, /* encoder_present flag (set to true) */
201  1, /* decoder_present flag (set to true) */
202  "Lindstrom-ZFP compression; See http://computation.llnl.gov/casc/zfp/",
203  /* Filter name for debugging */
204  (H5Z_can_apply_func_t) H5Z_can_apply_zfp, /* The "can apply" callback */
205  (H5Z_set_local_func_t) H5Z_set_local_zfp, /* The "set local" callback */
206  (H5Z_func_t) H5Z_filter_zfp, /* The actual filter function */
207 }};
208 
219 static htri_t
220 H5Z_can_apply_zfp(hid_t dcpl_id, hid_t type_id, hid_t space_id)
221 {
222  hid_t dclass;
223  size_t dsize;
224  hid_t dntype;
225  size_t ndims;
226  unsigned int flags;
227 
228  size_t cd_nelmts_in = 5;
229  unsigned int cd_values_in[] = {0,0,0,0,0};
230 
231  /* check object identifier */
232  /* ----------------------- */
233 
234  /* Check if object exists as datatype - try to get pointer to memory referenced by id */
235  if(H5Iis_valid(type_id) <= 0) {
236  PUSH_ERR("H5Z_can_apply_zfp", H5E_CALLBACK, "no valid type");
237  return -1;
238  }
239 
240 #if 0
241  /* TODO: print datatype name if debug-mode */
242  ssize_t dname_size = 0;
243  if(0 >= (dname_size = H5Iget_name(type_id, NULL, 0))) {
244  PUSH_ERR("H5Z_can_apply_zfp", H5E_CALLBACK, "no valid datatype name");
245  return -1;
246  }
247  dname_size = dname_size+1;
248  char dname[dname_size];
249  H5Iget_name( type_id, (char *)(&dname), (size_t)dname_size );
250  fprintf(stdout," H5Z_can_apply_zfp: dname = %d, '%.*s' \n",(int)dname_size, (int)dname_size, dname);
251 #endif
252 
253  /* check datatype ( which describes elements of a dataset ) */
254  /* -------------- */
255 
256  /* check datatype's class - try to get datatype class identifier
257  * typedef enum H5T_class_t {
258  * H5T_NO_CLASS = -1, error
259  * H5T_INTEGER = 0, integer types
260  * H5T_FLOAT = 1, floating-point types
261  */
262  if(H5T_NO_CLASS == (dclass = H5Tget_class(type_id))) {
263  PUSH_ERR("H5Z_can_apply_zfp", H5E_CALLBACK, "no valid datatype class");
264  return -1;
265  }
266 #ifndef NDEBUG
267  fprintf(stdout," H5Z_can_apply_zfp: dclass = %d", dclass);
268  if(dclass != H5T_FLOAT) {
269  fprintf(stdout," ... NOT SUPPORTED => skip compression\n");
270  return 0;
271  };
272  fprintf(stdout,"\n");
273 #else
274  if(dclass != H5T_FLOAT) {return 0;}; /* only support floats */
275 #endif
276 
277  /* check datatype's native-type - must be double or float */
278 #if 0
279 /* TODO: this check results in registering a new native type instead of testing for a type
280  dntype = H5Tget_native_type(type_id, H5T_DIR_DESCEND); //H5T_DIR_ASCEND);
281  if( H5T_NATIVE_FLOAT == dntype) { fprintf(stdout,"\nzfp_h5_can_apply: dntype = %d \n", dntype); }
282  else if(H5T_NATIVE_DOUBLE == dntype) { fprintf(stdout,"\nzfp_h5_can_apply: dntype = %d \n", dntype); }
283  else {
284  PUSH_ERR("H5Z_can_apply_zfp", H5E_CALLBACK, "no valid datatype type");
285  }
286 */
287 #endif
288 
289  /* check datatype's byte-size per value - must be single(==4) or double(==8) */
290  if(0 == (dsize = H5Tget_size(type_id))) {
291  PUSH_ERR("H5Z_can_apply_zfp", H5E_CALLBACK, "no valid datatype size");
292  return -1;
293  }
294 #ifndef NDEBUG
295  fprintf(stdout," H5Z_can_apply_zfp: dsize = %d ", (int)dsize);
296  if(dsize != 4 && dsize != 8) {
297  fprintf(stdout," ... NOT SUPPORTED => skip compression\n");
298  return 0;
299  }
300  fprintf(stdout,"\n");
301 #else
302  if(dsize != 4 && dsize != 8) { return 0;}; /* only support 4byte and 8byte floats */
303 #endif
304 
305  /* check chunk */
306  /* -------------- */
307 
308  if(0 == (ndims = H5Sget_simple_extent_ndims(space_id))) {
309  PUSH_ERR("H5Z_can_apply_zfp", H5E_CALLBACK, "no valid no. space dimensions");
310  return -1;
311  }
312  hsize_t dims[ndims];
313  H5Sget_simple_extent_dims(space_id, dims, NULL);
314 
315 #ifndef NDEBUG
316  fprintf(stdout," H5Z_can_apply_zfp: ndims = %d ", (int)ndims);
317  for (size_t ii=0; ii < ndims; ii++) { fprintf(stdout," %d,",(int)dims[ii]); }
318  if(ndims != 1 && ndims != 2 && ndims != 3) {
319  fprintf(stdout," ... NOT SUPPORTED => skip compression\n");
320  return 0;
321  }
322  fprintf(stdout,"\n");
323 #else
324  if(ndims != 1 && ndims != 2 && ndims != 3) { return 0;}; /* only support 1d, 2d and 3d arrays */
325 #endif
326 
327  /* check filter */
328  /* ------------ */
329 
330  /* get current filter values */
331  if(H5Pget_filter_by_id2(dcpl_id, ZFP_H5FILTER_ID,
332  &flags, &cd_nelmts_in, cd_values_in,
333  0, NULL, NULL) < 0) { return -1; }
334 
335  /* check no. elements */
336  /* -------------- */
337  long dcount = 1;
338  int dcount_threshold = cd_values_in[4];
339  if(dcount_threshold <= 0) { dcount_threshold = 64; }
340  for ( size_t ii=0; ii < ndims; ii++) { dcount = dcount *dims[ii]; }
341 
342 #ifndef NDEBUG
343  fprintf(stdout," H5Z_can_apply_zfp: dcount = %ld ", dcount);
344  if(dcount < dcount_threshold) {
345  fprintf(stdout," ... TOO SMALL => skip compression\n");
346  return 0;
347  }
348  fprintf(stdout,"\n");
349 #else
350  if(dcount < dcount_threshold) { return 0; } /* only support datasets with count >= threshold */
351 #endif
352 
353 #ifndef NDEBUG
354  fprintf(stdout," H5Z_can_apply_zfp: cd_values = [");
355  for (size_t ii=0; ii < cd_nelmts_in; ii++) { fprintf(stdout," %d,",cd_values_in[ii]); }
356  fprintf(stdout,"]\n");
357 #endif
358  return 1;
359 
360  cleanupAndFail:
361  return 0;
362 }
363 
372 static herr_t
373 H5Z_set_local_zfp(hid_t dcpl_id, hid_t type_id, hid_t space_id)
374 {
375  size_t dsize;
376  size_t ndims;
377 
378  unsigned int flags;
379  size_t cd_nelmts = 5;
380  size_t cd_nelmts_max = 12;
381  unsigned int cd_values_in[] = {0,0,0,0,0};
382  unsigned int cd_values_out[] = {0,0,0,0,0,0,0,0,0,0,0,0};
383  hsize_t dims[6];
384 
385  /* check datatype's byte-size per value - must be single(==4) or double(==8) */
386  if(0 == (dsize = H5Tget_size(type_id))) {
387  PUSH_ERR("H5Z_set_local_zfp", H5E_CALLBACK, "no valid datatype size");
388  return -1;
389  }
390 
391  /* check chunk */
392  /* -------------- */
393 
394  if(0 == (ndims = H5Sget_simple_extent_ndims(space_id))) {
395  PUSH_ERR("H5Z_set_local_zfp", H5E_CALLBACK, "no valid no. space dimensions");
396  return -1;
397  }
398  H5Sget_simple_extent_dims(space_id, dims, NULL);
399 
400  /* check filter */
401  /* ------------ */
402 
403  /* get current filter values */
404  if(H5Pget_filter_by_id2(dcpl_id, ZFP_H5FILTER_ID,
405  &flags, &cd_nelmts, cd_values_in, /*cd_nelmts (inout) */
406  0, NULL, NULL) < 0) { return -1; }
407 
408  /* correct the number of parameters and fill missing ones with defaults:
409  ignore value-ids larger than 5,
410  add zero for non-existent value-ids <= 4,
411  add the default threshold for value-id == 5 */
412  if(cd_nelmts != 5) {
413  if(cd_nelmts < 5) cd_values_in[4] = 64; /* set default threshold = 64 (already used and not required any more) */
414  if(cd_nelmts < 4) cd_values_in[3] = -1074; /* set default minexp = -1074 */
415  if(cd_nelmts < 3) cd_values_in[2] = 0; /* set default maxprec = 0 (set default in compression filter) */
416  if(cd_nelmts < 2) cd_values_in[1] = 0; /* set default maxbits = 0 (set default in compression filter) */
417  if(cd_nelmts < 1) cd_values_in[0] = 0; /* set default minbits = 0 (set default in compression filter) */
418  cd_nelmts = 5;
419  }
420 
421  /* modify cd_values to pass them to filter-func */
422  /* -------------------------------------------- */
423 
424  /* First 4+ndims slots reserved. Move any passed options to higher addresses. */
425  size_t valshift = 4 +ndims;
426  for (size_t ii = 0; ii < cd_nelmts && ii +valshift < cd_nelmts_max; ii++) {
427  cd_values_out[ii + valshift] = cd_values_in[ii];
428  }
429 
430  int16_t version = ZFP_VERSION;
431  cd_values_out[0] = version >> 4; /* major version */
432  cd_values_out[1] = version & 0x000f; /* minor version */
433  cd_values_out[2] = dsize; /* bytes per element */
434  cd_values_out[3] = ndims; /* number of dimensions */
435  for (size_t ii = 0; ii < ndims; ii++)
436  cd_values_out[ii +4] = dims[ii]; /* size of dimensions */
437 
438  cd_nelmts = cd_nelmts + valshift;
439 
440  if(H5Pmodify_filter(dcpl_id, ZFP_H5FILTER_ID, flags, cd_nelmts, cd_values_out)) {
441  PUSH_ERR("H5Z_set_local_zfp", H5E_CALLBACK, "modify filter not possible");
442  return -1;
443  }
444 
445  return 1;
446 
447  cleanupAndFail:
448  return 0;
449 }
450 
456 static size_t
457 H5Z_filter_zfp(unsigned int flags,
458  size_t cd_nelmts, /* config data elements */
459  const unsigned int cd_values[], /* config data values */
460  size_t nbytes, /* number of bytes */
461  size_t *buf_size, /* buffer size */
462  void **buf) /* buffer */
463 {
464  unsigned int zfp_major_ver, zfp_minor_ver;
465  size_t dsize;
466  size_t ndims;
467 
468  size_t buf_size_out = -1;
469  size_t nbytes_out = -1;
470  bool dp = false;
471 
472  void* tmp_buf = NULL;
473  void* zfp_buf = NULL;
474  void* out_buf = NULL;
475 
476  /* get config values
477  - from H5Z_set_local_zfp(..) on compression
478  - from chunk-header on reverse */
479  if (cd_nelmts < 4) {
480  PUSH_ERR("H5Z_filter_zfp", H5E_CALLBACK, "Not enough parameters.");
481  return 0;
482  }
483  zfp_major_ver = cd_values[0]; /* not used - we could add it to the header ? */
484  zfp_minor_ver = cd_values[1]; /* not used - we could add it to the header ? */
485  dsize = cd_values[2]; /* bytes per element */
486  ndims = cd_values[3]; /* number of dimensions */
487  hsize_t dims[ndims];
488  for (size_t ii = 0; ii < ndims; ii++)
489  dims[ii] = cd_values[ii +4]; /* size of dimensions */
490 
491  /* get size of out_buf
492  -----------------------*/
493 
494  /* size of floating-point type in bytes */
495  if(dsize == 8) { dp = true; }
496  else if(dsize == 4) {dp = false; }
497  else {
498  PUSH_ERR("H5Z_filter_zfp", H5E_CALLBACK, "unknown precision type");
499  return 0;
500  }
501  size_t typesize = dp ? sizeof(double) : sizeof(float);
502 
503  /* effective array dimensions */
504  uint nx = 0;
505  uint ny = 0;
506  uint nz = 0;
507  if(ndims > 0) nx = dims[0];
508  if(ndims > 1) ny = dims[1];
509  if(ndims > 2) nz = dims[2];
510 
511  /* initialize zfp parameters */
512  zfp_params params;
513  zfp_init(&params);
514  zfp_set_type(&params, dp ? ZFP_TYPE_DOUBLE : ZFP_TYPE_FLOAT);
515  if(ndims > 0) zfp_set_size_1d(&params, nx);
516  if(ndims > 1) zfp_set_size_2d(&params, nx, ny);
517  if(ndims > 2) zfp_set_size_3d(&params, nx, ny, nz);
518 
519  /* decompress data */
520  if (flags & H5Z_FLAG_REVERSE) {
521 
522  /* read 16byte header = (minbits, maxbits, maxprec, minexp) */
523  tmp_buf = *buf;
524  params.minbits = zfp_read_uint32((char*) tmp_buf + 0);
525  params.maxbits = zfp_read_uint32((char*) tmp_buf + 4);
526  params.maxprec = zfp_read_uint32((char*) tmp_buf + 8);
527  params.minexp = zfp_read_int32 ((char*) tmp_buf +12);
528 
529  /* allocate for uncompressed */
530  size_t buf_size_in = std::max(params.nx, 1u)
531  * std::max(params.ny, 1u)
532  * std::max(params.nz, 1u)
533  * ((params.type == ZFP_TYPE_DOUBLE) ? sizeof(double) : sizeof(float));
534  out_buf = malloc(buf_size_in);
535  if (out_buf == NULL) {
536  PUSH_ERR("H5Z_filter_zfp", H5E_CALLBACK, "Could not allocate output buffer.");
537  return 0;
538  }
539 
540 #ifndef NDEBUG
541  fprintf(stdout, " H5Z_filter_zfp: decompress: [%d, (%d, %d, %d), %d, %d, %d, %d] = %d \n",
542  dp, nx, ny, nz,
543  params.minbits, params.maxbits, params.maxprec, params.minexp,
544  (int)buf_size_in);
545 #endif
546 
547  /* buf[16+] -> out_buf */
548  zfp_buf = (char*) tmp_buf +16;
549 
550 #if 0
551  /* decompress 1D, 2D, or 3D floating-point array */
552  // int /* nonzero on success */
553  // zfp_decompress(
554  // const zfp_params* params, /* array meta data and compression parameters */
555  // void* out, /* decompressed floating-point data */
556  // const void* in, /* compressed stream */
557  // size_t insize /* bytes allocated for compressed stream */
558  // );
559 #endif
560  if (zfp_decompress(&params, out_buf, zfp_buf, buf_size_in) == 0) {
561  PUSH_ERR("H5Z_filter_zfp", H5E_CALLBACK, "decompression failed");
562  free(out_buf);
563  return 0;
564  }
565  nbytes_out = buf_size_in;
566 
567  /* compress data */
568  } else {
569 
570  /* compression parameter (0 == no compression) */
571  uint minbits = cd_values[4+ndims +0]; /* minimum number of bits per 4^d values in d dimensions (= maxbits for fixed rate) */
572  uint maxbits = cd_values[4+ndims +1]; /* maximum number of bits per 4^d values in d dimensions */
573  uint maxprec = cd_values[4+ndims +2]; /* maximum number of bits of precision per value (0 for fixed rate) */
574  int minexp = cd_values[4+ndims +3]; /* minimum bit plane coded (soft error tolerance = 2^minexp; -1024 for fixed rate) */
575 
576  /* correct compression parameters if zero initialized */
577  uint blocksize = 1u << (2 * ndims); // number of floating-point values per block
578  if(minbits <= 0 || minbits > 4096) minbits = blocksize * CHAR_BIT * typesize; /* {1, ..., 4096} == 12 bits */
579  if(maxbits <= 0 || maxbits > 4096) maxbits = blocksize * CHAR_BIT * typesize; /* {1, ..., 4096} == 12 bits */
580  if(maxprec <= 0 || maxprec > 64) maxprec = CHAR_BIT * typesize; /* {1, ..., 64} == 6 bits */
581  if(minexp < -1074) minexp = -1074; /* {-1074, ..., 1023} == 12 bits */
582  if(minexp > 1023) minexp = 1023;
583 
584  /* set compression parameters */
585  params.minbits = minbits;
586  params.maxbits = maxbits;
587  params.maxprec = maxprec;
588  params.minexp = minexp;
589 
590  /* max.(!) size of compressed data */
591  size_t buf_size_maxout = zfp_estimate_compressed_size(&params);
592  if (buf_size_maxout == 0) {
593  std::cerr << "invalid compression parameters" << std::endl;
594  return 0;
595  }
596 
597  /* allocate for header + compressed data */
598  tmp_buf = malloc(buf_size_maxout +16);
599  if (tmp_buf == NULL) {
600  PUSH_ERR("H5Z_filter_zfp", H5E_CALLBACK, "Could not allocate output buffer.");
601  return 0;
602  }
603 
604  /* ptr to data (skipping header) */
605  zfp_buf = (char*) tmp_buf +16;
606 
607 #if 0
608  /* compress 1D, 2D, or 3D floating-point array */
609  // size_t /* byte size of compressed stream (0 on failure) */
610  // zfp_compress(
611  // const zfp_params* params, /* array meta data and compression parameters */
612  // const void* in, /* uncompressed floating-point data */
613  // void* out, /* compressed stream (must be large enough) */
614  // size_t outsize /* bytes allocated for compressed stream */
615  // );
616 #endif
617  buf_size_out = zfp_compress(&params, *buf, zfp_buf, buf_size_maxout);
618  if (buf_size_out == 0) {
619  PUSH_ERR("H5Z_filter_zfp", H5E_CALLBACK, "compression failed");
620  free(tmp_buf); /* out_buf is not allocated in the compress branch */
621  return 0;
622  }
623 
624 #ifndef NDEBUG
625  fprintf(stdout, " H5Z_filter_zfp: compress(out): [%d, (%d, %d, %d), %d, %d, %d, %d] = %d \n",
626  dp, nx, ny, nz,
627  params.minbits, params.maxbits, params.maxprec, params.minexp,
628  (int)buf_size_out);
629 #endif
630 
631  /* add 16byte header = (minbits, maxbits, maxprec, minexp) */
632  zfp_write_uint32((char*) tmp_buf + 0, (uint32_t) minbits);
633  zfp_write_uint32((char*) tmp_buf + 4, (uint32_t) maxbits);
634  zfp_write_uint32((char*) tmp_buf + 8, (uint32_t) maxprec);
635  zfp_write_int32 ((char*) tmp_buf +12, ( int32_t) minexp);
636 
637  nbytes_out = buf_size_out +16;
638 
639  /* realloc (free partly) if required */
640  if(buf_size_out < buf_size_maxout) {
641  out_buf = realloc(tmp_buf, nbytes_out); /* free out_buf partly (which was not used by ZFP::compress(..)) */
642  if (out_buf == NULL) {
643  PUSH_ERR("H5Z_filter_zfp", H5E_CALLBACK, "Could not reallocate output buffer.");
644  return 0;
645  }
646  } else {
647  out_buf = tmp_buf;
648  }
649  }
650 
651  /* flip buffer and return
652  -------------------------*/
653  free(*buf);
654  *buf = out_buf;
655  *buf_size = nbytes_out;
656 
657  return nbytes_out;
658 }
659 
660 static int
661 H5Z_register_zfp(void)
662 {
663  return H5Zregister(ZFP_H5Filter);
664 }
665 
666 #endif /* HAVE_ZFP */
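Once the filter is registered via H5Z_register_zfp() (called from register_this_interface() at the bottom of this file), an application can attach it to any chunked dataset. The following is a hedged sketch, not taken from the plugin, using the same five cd_values (minbits, maxbits, maxprec, minexp, minsize) that make_dcpl() constructs later; the dataset name, chunk sizes, and parameter values are placeholders:

/* Sketch only: create a chunked, ZFP-filtered dataset.  file_id and space_id
   (a 2-D simple dataspace) are assumed to exist in the caller; zero entries in
   cd_values are replaced with defaults by H5Z_filter_zfp() itself, and the
   last entry is the element-count threshold checked in H5Z_can_apply_zfp(). */
static hid_t zfp_dataset_example(hid_t file_id, hid_t space_id)
{
    hsize_t chunk[2] = {64, 64};
    unsigned int cd_values[5] = {0, 0, 0, (unsigned int) -1074, 1024};
                              /* minbits, maxbits, maxprec, minexp, minsize */
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    hid_t ds;
    H5Pset_chunk(dcpl, 2, chunk);
    H5Pset_filter(dcpl, ZFP_H5FILTER_ID, H5Z_FLAG_OPTIONAL, 5, cd_values);
    ds = H5Dcreate1(file_id, "example_var", H5T_NATIVE_DOUBLE, space_id, dcpl);
    H5Pclose(dcpl);
    return ds;
}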
667 
670 /* the name you want to assign to the interface */
671 static char const *iface_name = "hdf5";
672 static char const *iface_ext = "h5";
673 
674 static int use_log = 0;
675 static int no_collective = 0;
676 static int no_single_chunk = 0;
677 static int silo_block_size = 0;
678 static int silo_block_count = 0;
679 static int sbuf_size = -1;
680 static int mbuf_size = -1;
681 static int rbuf_size = -1;
682 static int lbuf_size = 0;
683 static const char *filename;
684 static hid_t fid;
685 static hid_t dspc = -1;
686 static int show_errors = 0;
687 static char compression_alg_str[64];
688 static char compression_params_str[512];
689 
690 static hid_t make_fapl()
691 {
692  hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
693  herr_t h5status = 0;
694 
695  if (sbuf_size >= 0)
696  h5status |= H5Pset_sieve_buf_size(fapl_id, sbuf_size);
697 
698  if (mbuf_size >= 0)
699  h5status |= H5Pset_meta_block_size(fapl_id, mbuf_size);
700 
701  if (rbuf_size >= 0)
702  h5status |= H5Pset_small_data_block_size(fapl_id, rbuf_size);
703 
704 #if 0
705  if (silo_block_size && silo_block_count)
706  {
707  h5status |= H5Pset_fapl_silo(fapl_id);
708  h5status |= H5Pset_silo_block_size_and_count(fapl_id, (hsize_t) silo_block_size,
709  silo_block_count);
710  }
711  else
712  if (use_log)
713  {
714  int flags = H5FD_LOG_LOC_IO|H5FD_LOG_NUM_IO|H5FD_LOG_TIME_IO|H5FD_LOG_ALLOC;
715 
716  if (lbuf_size > 0)
717  flags = H5FD_LOG_ALL;
718 
719  h5status |= H5Pset_fapl_log(fapl_id, "macsio_hdf5_log.out", flags, lbuf_size);
720  }
721 #endif
722 
723  if (h5status < 0)
724  {
725  if (fapl_id >= 0)
726  H5Pclose(fapl_id);
727  return 0;
728  }
729 
730  return fapl_id;
731 }
732 
733 static int
734 get_tokval(char const *src_str, char const *token_to_match, void *val_ptr)
735 {
736  int toklen;
737  char dummy[16];
738  void *val_ptr_tmp = &dummy[0];
739 
740  if (!src_str) return 0;
741  if (!token_to_match) return 0;
742 
743  toklen = strlen(token_to_match)-2;
744 
745  if (strncasecmp(src_str, token_to_match, toklen))
746  return 0;
747 
748  /* First, check matching sscanf *without* risk of writing to val_ptr */
749  if (sscanf(&src_str[toklen], &token_to_match[toklen], val_ptr_tmp) != 1)
750  return 0;
751 
752  sscanf(&src_str[toklen], &token_to_match[toklen], val_ptr);
753  return 1;
754 }
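get_tokval() matches in two passes: a throw-away sscanf() into a local dummy buffer verifies that the value parses, and only then does a second sscanf() write through the caller's pointer, so a malformed value never clobbers the destination. A small sketch (not in the plugin) of how make_dcpl() below applies it to individual "key=value" tokens; the token strings here are just examples:

/* Sketch: parse example tokens the way make_dcpl() does after strsep()'ing
   the --compression params string on commas. */
static void get_tokval_example(void)
{
    int gzip_level = -1;
    float zfp_rate = -1;
    get_tokval("level=9", "level=%d", &gzip_level);  /* matches; gzip_level becomes 9 */
    get_tokval("rate=18.5", "rate=%f", &zfp_rate);   /* matches; zfp_rate becomes 18.5f */
    get_tokval("rate=junk", "rate=%f", &zfp_rate);   /* value fails to parse; returns 0
                                                        and zfp_rate is left untouched */
}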
755 
756 static hid_t make_dcpl(char const *alg_str, char const *params_str, hid_t space_id, hid_t dtype_id)
757 {
758  int shuffle = -1;
759  int minsize = -1;
760  int gzip_level = -1;
761  int zfp_precision = -1;
762  unsigned int szip_pixels_per_block = 0;
763  float zfp_rate = -1;
764  float zfp_accuracy = -1;
765  char szip_method[64], szip_chunk_str[64];
766  char *token, *string, *tofree;
767  int ndims;
768  hsize_t dims[4], maxdims[4];
769  hid_t retval = H5Pcreate(H5P_DATASET_CREATE);
770 
771  szip_method[0] = '\0';
772  szip_chunk_str[0] = '\0';
773 
774  /* Initially, set contiguous layout. May reset to chunked later */
775  H5Pset_layout(retval, H5D_CONTIGUOUS);
776 
777  if (!alg_str || !strlen(alg_str))
778  return retval;
779 
780  ndims = H5Sget_simple_extent_ndims(space_id);
781  H5Sget_simple_extent_dims(space_id, dims, maxdims);
782 
783  /* We can make a pass through params string without being specific about
784  algorithm because there are presently no symbol collisions there */
785  tofree = string = strdup(params_str);
786  while ((token = strsep(&string, ",")) != NULL)
787  {
788  if (get_tokval(token, "minsize=%d", &minsize))
789  continue;
790  if (get_tokval(token, "shuffle=%d", &shuffle))
791  continue;
792  if (get_tokval(token, "level=%d", &gzip_level))
793  continue;
794  if (get_tokval(token, "rate=%f", &zfp_rate))
795  continue;
796  if (get_tokval(token, "precision=%d", &zfp_precision))
797  continue;
798  if (get_tokval(token, "accuracy=%f", &zfp_accuracy))
799  continue;
800  if (get_tokval(token, "method=%s", szip_method))
801  continue;
802  if (get_tokval(token, "block=%u", &szip_pixels_per_block))
803  continue;
804  if (get_tokval(token, "chunk=%s", szip_chunk_str))
805  continue;
806  }
807  free(tofree);
808 
809  /* check for minsize compression threshold */
810  minsize = minsize != -1 ? minsize : 1024;
811  if (H5Sget_simple_extent_npoints(space_id) < minsize)
812  return retval;
813 
814  /*
815  * Ok, now handle various properties related to compression
816  */
817 
818  /* Initially, as a default in case nothing else is selected,
819  set chunk size equal to dataset size (e.g. single chunk) */
820  H5Pset_chunk(retval, ndims, dims);
821 
822  if (!strncasecmp(alg_str, "gzip", 4))
823  {
824  if (shuffle == -1 || shuffle == 1)
825  H5Pset_shuffle(retval);
826  H5Pset_deflate(retval, gzip_level!=-1?gzip_level:9);
827  }
828 #ifdef HAVE_ZFP
829  else if (!strncasecmp(alg_str, "lindstrom-zfp", 13))
830  {
831  int i;
832  zfp_params params;
833  unsigned int cd_values[32];
834 
835  /* We don't shuffle zfp. That is handled internally to zfp */
836 
837  zfp_init(&params);
838  params.type = H5Tget_size(dtype_id) == 4 ? ZFP_TYPE_FLOAT : ZFP_TYPE_DOUBLE;
839  if (ndims >= 1) params.nx = dims[0];
840  if (ndims >= 2) params.ny = dims[1];
841  if (ndims >= 3) params.nz = dims[2];
842  if (zfp_rate != -1)
843  zfp_set_rate(&params, (double) zfp_rate);
844  else if (zfp_precision != -1)
845  zfp_set_precision(&params, zfp_precision);
846  else if (zfp_accuracy != -1)
847  zfp_set_accuracy(&params, (double) zfp_accuracy);
848  else
849  zfp_set_rate(&params, 0.0); /* default rate-constrained */
850 
851  i = 0;
852  cd_values[i++] = (unsigned int) params.minbits;
853  cd_values[i++] = (unsigned int) params.maxbits;
854  cd_values[i++] = (unsigned int) params.maxprec;
855  cd_values[i++] = (unsigned int) params.minexp;
856  cd_values[i++] = (unsigned int) minsize;
857  H5Pset_filter(retval, ZFP_H5FILTER_ID, H5Z_FLAG_OPTIONAL, i, cd_values);
858  }
859 #endif
860  else if (!strncasecmp(alg_str, "szip", 4))
861  {
862 #ifdef HAVE_SZIP
863  unsigned int method = H5_SZIP_NN_OPTION_MASK;
864  int const szip_max_blocks_per_scanline = 128; /* from szip lib */
865 
866  if (shuffle == -1 || shuffle == 1)
867  H5Pset_shuffle(retval);
868 
869  if (szip_pixels_per_block == 0)
870  szip_pixels_per_block = 32;
871  if (!strcasecmp(szip_method, "ec"))
872  method = H5_SZIP_EC_OPTION_MASK;
873 
874  H5Pset_szip(retval, method, szip_pixels_per_block);
875 
876  if (strlen(szip_chunk_str))
877  {
878  hsize_t chunk_dims[3] = {0, 0, 0};
879  int i, vals[3];
880  int nvals = sscanf(szip_chunk_str, "%d:%d:%d", &vals[0], &vals[1], &vals[2]);
881  if (nvals == ndims)
882  {
883  for (i = 0; i < ndims; i++)
884  chunk_dims[i] = vals[i];
885  }
886  else if (nvals == ndims-1)
887  {
888  chunk_dims[0] = szip_max_blocks_per_scanline * szip_pixels_per_block;
889  for (i = 1; i < ndims; i++)
890  chunk_dims[i] = vals[i-1];
891  }
892  for (i = 0; i < ndims; i++)
893  {
894  if (chunk_dims[i] > dims[i]) chunk_dims[i] = dims[i];
895  if (chunk_dims[i] == 0) chunk_dims[i] = dims[i];
896  }
897  H5Pset_chunk(retval, ndims, chunk_dims);
898  }
899 #else
900  static int have_issued_warning = 0;
901  if (!have_issued_warning)
902  MACSIO_LOG_MSG(Warn, ("szip compressor not available in this build"));
903  have_issued_warning = 1;
904 #endif
905  }
906 
907  return retval;
908 }
909 
910 static int process_args(int argi, int argc, char *argv[])
911 {
912  const MACSIO_CLARGS_ArgvFlags_t argFlags = {MACSIO_CLARGS_WARN, MACSIO_CLARGS_TOMEM};
913 
914  char *c_alg = compression_alg_str;
915  char *c_params = compression_params_str;
916 
917  MACSIO_CLARGS_ProcessCmdline(0, argFlags, argi, argc, argv,
918  "--show_errors", "",
919  "Show low-level HDF5 errors",
920  &show_errors,
921  "--compression %s %s", MACSIO_CLARGS_NODEFAULT,
922  "The first string argument is the compression algorithm name. The second\n"
923  "string argument is a comma-separated set of params of the form\n"
924  "'param1=val1,param2=val2,param3=val3'. The various algorithm names and\n"
925  "their parameter meanings are described below. Note that some parameters are\n"
926  "not specific to any algorithm. Those are described first followed by\n"
927  "individual algorithm-specific parameters.\n"
928  "\n"
929  "minsize=%d : min. size of dataset (in terms of a count of values)\n"
930  " upon which compression will even be attempted. Default is 1024.\n"
931  "shuffle=<int>: Boolean (zero or non-zero) to indicate whether to use\n"
932  " HDF5's byte shuffling filter *prior* to compression. Default depends\n"
933  " on algorithm. By default, shuffling is NOT used for lindstrom-zfp but IS\n"
934  " used with all other algorithms.\n"
935  "\n"
936 #ifdef HAVE_ZFP
937  "\"lindstrom-zfp\"\n"
938  " Use Peter Lindstrom's ZFP compression (computation.llnl.gov/casc/zfp)\n"
939  " The following options are *mutually*exclusive*. In any command-line\n"
940  " specifying more than one of the following options, only the last\n"
941  " specified will be honored.\n"
942  " rate=%f : target # bits per compressed output datum. Fractional values\n"
943  " are permitted. 0 selects defaults: 4 bits/flt or 8 bits/dbl.\n"
944  " Use this option to hit a target compressed size but where error\n"
945  " varies. OTOH, use one of the following two options for fixed\n"
946  " error but amount of compression, if any, varies.\n"
947  " precision=%d : # bits of precision to preserve in each input datum.\n"
948  " accuracy=%f : absolute error tolerance in each output datum.\n"
949  " In many respects, 'precision' represents a sort of relative error\n"
950  " tolerance while 'accuracy' represents an absolute tolerance.\n"
951  " See http://en.wikipedia.org/wiki/Accuracy_and_precision.\n"
952  "\n"
953 #endif
954  "\"szip\"\n"
955  " method=%s : specify 'ec' for entropy coding or 'nn' for nearest\n"
956  " neighbor. Default is 'nn'\n"
957  " block=%d : (pixels-per-block) must be an even integer <= 32. See\n"
958  " H5Pset_szip in HDF5 documentation for more information.\n"
959  " Default is 32.\n"
960  " chunk=%d:%d : colon-separated dimensions specifying chunk size in\n"
961  " each dimension higher than the first (fastest varying) dimension.\n"
962  "\n"
963  "\"gzip\"\n"
964  " level=%d : A value in the range [1,9], inclusive, trading off time to\n"
965  " compress with amount of compression. Level=1 results in best speed\n"
966  " but worst compression whereas level=9 results in best compression\n"
967  " but worst speed. Values outside [1,9] are clamped. Default is 9.\n"
968  "\n"
969  "Examples:\n"
970  " --compression lindstrom-zfp rate=18.5\n"
971  " --compression gzip minsize=1024,level=9\n"
972  " --compression szip shuffle=0,method=nn,block=16\n"
973  "\n",
974  &c_alg, &c_params,
975  "--no_collective", "",
976  "Use independent, not collective, I/O calls in SIF mode.",
977  &no_collective,
978  "--no_single_chunk", "",
979  "Do not single chunk the datasets (currently ignored).",
980  &no_single_chunk,
981  "--sieve_buf_size %d", MACSIO_CLARGS_NODEFAULT,
982  "Specify sieve buffer size (see H5Pset_sieve_buf_size)",
983  &sbuf_size,
984  "--meta_block_size %d", MACSIO_CLARGS_NODEFAULT,
985  "Specify size of meta data blocks (see H5Pset_meta_block_size)",
986  &mbuf_size,
987  "--small_block_size %d", MACSIO_CLARGS_NODEFAULT,
988  "Specify threshold size for data blocks considered to be 'small'\n"
989  "(see H5Pset_small_data_block_size)",
990  &rbuf_size,
991  "--log", "",
992  "Use logging Virtual File Driver (see H5Pset_fapl_log)",
993  &use_log,
994 #ifdef HAVE_SILO
995  "--silo_fapl %d %d", MACSIO_CLARGS_NODEFAULT,
996  "Use Silo's block-based VFD and specify block size and block count",
997  &silo_block_size, &silo_block_count,
998 #endif
999  MACSIO_CLARGS_END_OF_ARGS);
1000 
1001  if (!show_errors)
1002  H5Eset_auto1(0,0);
1003  return 0;
1004 }
1005 
1006 static void main_dump_sif(json_object *main_obj, int dumpn, double dumpt)
1007 {
1008 #ifdef HAVE_MPI
1009  int ndims;
1010  int i, v, p;
1011  char const *mesh_type = json_object_path_get_string(main_obj, "clargs/part_type");
1012  char fileName[256];
1013  int use_part_count;
1014 
1015  hid_t h5file_id;
1016  hid_t fapl_id = make_fapl();
1017  hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);
1018  hid_t dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
1019  hid_t null_space_id = H5Screate(H5S_NULL);
1020  hid_t fspace_nodal_id, fspace_zonal_id;
1021  hsize_t global_log_dims_nodal[3];
1022  hsize_t global_log_dims_zonal[3];
1023 
1024  MPI_Info mpiInfo = MPI_INFO_NULL;
1025 
1026 #warning WE ARE DOING SIF SLIGHTLY WRONG, DUPLICATING SHARED NODES
1027 #warning INCLUDE ARGS FOR ISTORE AND K_SYM
1028 #warning INCLUDE ARG PROCESS FOR HINTS
1029 #warning FAPL PROPS: ALIGNMENT
1030 #if H5_HAVE_PARALLEL
1031  H5Pset_fapl_mpio(fapl_id, MACSIO_MAIN_Comm, mpiInfo);
1032 #endif
1033 
1034 #warning FOR MIF, NEED A FILEROOT ARGUMENT OR CHANGE TO FILEFMT ARGUMENT
1035  /* Construct name for the HDF5 file */
1036  sprintf(fileName, "%s_hdf5_%03d.%s",
1037  json_object_path_get_string(main_obj, "clargs/filebase"),
1038  dumpn,
1039  json_object_path_get_string(main_obj, "clargs/fileext"));
1040 
1041  h5file_id = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
1042 
1043  /* Create an HDF5 Dataspace for the global whole of mesh and var objects in the file. */
1044  ndims = json_object_path_get_int(main_obj, "clargs/part_dim");
1045  json_object *global_log_dims_array =
1046  json_object_path_get_array(main_obj, "problem/global/LogDims");
1047  json_object *global_parts_log_dims_array =
1048  json_object_path_get_array(main_obj, "problem/global/PartsLogDims");
1049  /* Note that global zonal array is smaller in each dimension by one *ON*EACH*BLOCK*
1050  in the associated dimension. */
1051  for (i = 0; i < ndims; i++)
1052  {
1053  int parts_log_dims_val = JsonGetInt(global_parts_log_dims_array, "", i);
1054  global_log_dims_nodal[ndims-1-i] = (hsize_t) JsonGetInt(global_log_dims_array, "", i);
1055  global_log_dims_zonal[ndims-1-i] = global_log_dims_nodal[ndims-1-i] -
1056  JsonGetInt(global_parts_log_dims_array, "", i);
1057  }
1058  fspace_nodal_id = H5Screate_simple(ndims, global_log_dims_nodal, 0);
1059  fspace_zonal_id = H5Screate_simple(ndims, global_log_dims_zonal, 0);
1060 
1061  /* Get the list of vars on the first part as a guide to loop over vars */
1062  json_object *part_array = json_object_path_get_array(main_obj, "problem/parts");
1063  json_object *first_part_obj = json_object_array_get_idx(part_array, 0);
1064  json_object *first_part_vars_array = json_object_path_get_array(first_part_obj, "Vars");
1065 
1066  /* Dataset transfer property list used in all H5Dwrite calls */
1067 #if H5_HAVE_PARALLEL
1068  if (no_collective)
1069  H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_INDEPENDENT);
1070  else
1071  H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
1072 #endif
1073 
1074 
1075  /* Loop over vars and then over parts */
1076  /* currently assumes all vars exist on all ranks. but not all parts */
1077  for (v = -1; v < json_object_array_length(first_part_vars_array); v++) /* -1 start is for Mesh */
1078  {
1079 
1080 #warning SKIPPING MESH
1081  if (v == -1) continue; /* All ranks skip mesh (coords) for now */
1082 
1083  /* Inspect the first part's var object for name, datatype, etc. */
1084  json_object *var_obj = json_object_array_get_idx(first_part_vars_array, v);
1085  char const *varName = json_object_path_get_string(var_obj, "name");
1086  char *centering = strdup(json_object_path_get_string(var_obj, "centering"));
1087  json_object *dataobj = json_object_path_get_extarr(var_obj, "data");
1088 #warning JUST ASSUMING TWO TYPES NOW. CHANGE TO A FUNCTION
1089  hid_t dtype_id = json_object_extarr_type(dataobj)==json_extarr_type_flt64?
1090  H5T_NATIVE_DOUBLE:H5T_NATIVE_INT;
1091  hid_t fspace_id = H5Scopy(strcmp(centering, "zone") ? fspace_nodal_id : fspace_zonal_id);
1092  hid_t dcpl_id = make_dcpl(compression_alg_str, compression_params_str, fspace_id, dtype_id);
1093 
1094  /* Create the file dataset (using old-style H5Dcreate API here) */
1095 #warning USING DEFAULT DCPL: LATER ADD COMPRESSION, ETC.
1096 
1097  hid_t ds_id = H5Dcreate1(h5file_id, varName, dtype_id, fspace_id, dcpl_id);
1098  H5Sclose(fspace_id);
1099  H5Pclose(dcpl_id);
1100 
1101  /* Loop to make write calls for this var for each part on this rank */
1102 #warning USE NEW MULTI-DATASET API WHEN AVAILABLE TO AGGLOMERATE ALL PARTS INTO ONE CALL
1103  use_part_count = (int) ceil(json_object_path_get_double(main_obj, "clargs/avg_num_parts"));
1104  for (p = 0; p < use_part_count; p++)
1105  {
1106  json_object *part_obj = json_object_array_get_idx(part_array, p);
1107  json_object *var_obj = 0;
1108  hid_t mspace_id = H5Scopy(null_space_id);
1109  void const *buf = 0;
1110 
1111  fspace_id = H5Scopy(null_space_id);
1112 
1113  /* this rank actually has something to contribute to the H5Dwrite call */
1114  if (part_obj)
1115  {
1116  int i;
1117  hsize_t starts[3], counts[3];
1118  json_object *vars_array = json_object_path_get_array(part_obj, "Vars");
1119  json_object *mesh_obj = json_object_path_get_object(part_obj, "Mesh");
1120  json_object *var_obj = json_object_array_get_idx(vars_array, v);
1121  json_object *extarr_obj = json_object_path_get_extarr(var_obj, "data");
1122  json_object *global_log_origin_array =
1123  json_object_path_get_array(part_obj, "GlobalLogOrigin");
1124  json_object *global_log_indices_array =
1125  json_object_path_get_array(part_obj, "GlobalLogIndices");
1126  json_object *mesh_dims_array = json_object_path_get_array(mesh_obj, "LogDims");
1127  for (i = 0; i < ndims; i++)
1128  {
1129  starts[ndims-1-i] =
1130  json_object_get_int(json_object_array_get_idx(global_log_origin_array,i));
1131  counts[ndims-1-i] =
1132  json_object_get_int(json_object_array_get_idx(mesh_dims_array,i));
1133  if (!strcmp(centering, "zone"))
1134  {
1135  counts[ndims-1-i]--;
1136  starts[ndims-1-i] -=
1137  json_object_get_int(json_object_array_get_idx(global_log_indices_array,i));
1138  }
1139  }
1140 
1141  /* set selection of filespace */
1142  fspace_id = H5Dget_space(ds_id);
1143  H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, starts, 0, counts, 0);
1144 
1145  /* set dataspace of data in memory */
1146  mspace_id = H5Screate_simple(ndims, counts, 0);
1147  buf = json_object_extarr_data(extarr_obj);
1148  }
1149 
1150  H5Dwrite(ds_id, dtype_id, mspace_id, fspace_id, dxpl_id, buf);
1151  H5Sclose(fspace_id);
1152  H5Sclose(mspace_id);
1153 
1154  }
1155 
1156  H5Dclose(ds_id);
1157  free(centering);
1158  }
1159 
1160  H5Sclose(fspace_nodal_id);
1161  H5Sclose(fspace_zonal_id);
1162  H5Sclose(null_space_id);
1163  H5Pclose(dxpl_id);
1164  H5Pclose(fapl_id);
1165  H5Fclose(h5file_id);
1166 
1167 #endif
1168 }
1169 
1170 typedef struct _user_data {
1171  hid_t groupId;
1172 } user_data_t;
1173 
1174 static void *CreateHDF5File(const char *fname, const char *nsname, void *userData)
1175 {
1176  hid_t *retval = 0;
1177  hid_t h5File = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
1178  if (h5File >= 0)
1179  {
1180 #warning USE NEWER GROUP CREATION SETTINGS OF HDF5
1181  if (nsname && userData)
1182  {
1183  user_data_t *ud = (user_data_t *) userData;
1184  ud->groupId = H5Gcreate1(h5File, nsname, 0);
1185  }
1186  retval = (hid_t *) malloc(sizeof(hid_t));
1187  *retval = h5File;
1188  }
1189  return (void *) retval;
1190 }
1191 
1192 static void *OpenHDF5File(const char *fname, const char *nsname,
1193  MACSIO_MIF_ioFlags_t ioFlags, void *userData)
1194 {
1195  hid_t *retval = 0;
1196  hid_t h5File = H5Fopen(fname, ioFlags.do_wr ? H5F_ACC_RDWR : H5F_ACC_RDONLY, H5P_DEFAULT);
1197  if (h5File >= 0)
1198  {
1199  if (ioFlags.do_wr && nsname && userData)
1200  {
1201  user_data_t *ud = (user_data_t *) userData;
1202  ud->groupId = H5Gcreate1(h5File, nsname, 0);
1203  }
1204  retval = (hid_t *) malloc(sizeof(hid_t));
1205  *retval = h5File;
1206  }
1207  return (void *) retval;
1208 }
1209 
1210 static void CloseHDF5File(void *file, void *userData)
1211 {
1212  if (userData)
1213  {
1214  user_data_t *ud = (user_data_t *) userData;
1215  H5Gclose(ud->groupId);
1216  }
1217  H5Fclose(*((hid_t*) file));
1218  free(file);
1219 }
1220 
1221 static void write_mesh_part(hid_t h5loc, json_object *part_obj)
1222 {
1223 #warning WE ARE SKIPPING THE MESH (COORDS) OBJECT PRESENTLY
1224  int i;
1225  json_object *vars_array = json_object_path_get_array(part_obj, "Vars");
1226 
1227  for (i = 0; i < json_object_array_length(vars_array); i++)
1228  {
1229  int j;
1230  hsize_t var_dims[3];
1231  hid_t fspace_id, ds_id, dcpl_id;
1232  json_object *var_obj = json_object_array_get_idx(vars_array, i);
1233  json_object *data_obj = json_object_path_get_extarr(var_obj, "data");
1234  char const *varname = json_object_path_get_string(var_obj, "name");
1235  int ndims = json_object_extarr_ndims(data_obj);
1236  void const *buf = json_object_extarr_data(data_obj);
1237  hid_t dtype_id = json_object_extarr_type(data_obj)==json_extarr_type_flt64?
1238  H5T_NATIVE_DOUBLE:H5T_NATIVE_INT;
1239 
1240  for (j = 0; j < ndims; j++)
1241  var_dims[j] = json_object_extarr_dim(data_obj, j);
1242 
1243  fspace_id = H5Screate_simple(ndims, var_dims, 0);
1244  dcpl_id = make_dcpl(compression_alg_str, compression_params_str, fspace_id, dtype_id);
1245  ds_id = H5Dcreate1(h5loc, varname, dtype_id, fspace_id, dcpl_id);
1246  H5Dwrite(ds_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
1247  H5Dclose(ds_id);
1248  H5Pclose(dcpl_id);
1249  H5Sclose(fspace_id);
1250  }
1251 }
1252 
1253 static void main_dump_mif(json_object *main_obj, int numFiles, int dumpn, double dumpt)
1254 {
1255  int size, rank;
1256  hid_t *h5File_ptr;
1257  hid_t h5File;
1258  hid_t h5Group;
1259  char fileName[256];
1260  int i, len;
1261  int *theData;
1262  user_data_t userData;
1263  MACSIO_MIF_ioFlags_t ioFlags = {MACSIO_MIF_WRITE,
1264  JsonGetInt(main_obj, "clargs/exercise_scr")&0x1};
1265 
1266 #warning MAKE WHOLE FILE USE HDF5 1.8 INTERFACE
1267 #warning SET FILE AND DATASET PROPERTIES
1268 #warning DIFFERENT MPI TAGS FOR DIFFERENT PLUGINS AND CONTEXTS
1269  MACSIO_MIF_baton_t *bat = MACSIO_MIF_Init(numFiles, ioFlags, MACSIO_MAIN_Comm, 3,
1270  CreateHDF5File, OpenHDF5File, CloseHDF5File, &userData);
1271 
1272  rank = json_object_path_get_int(main_obj, "parallel/mpi_rank");
1273  size = json_object_path_get_int(main_obj, "parallel/mpi_size");
1274 
1275  /* Construct name for the HDF5 file */
1276  sprintf(fileName, "%s_hdf5_%05d_%03d.%s",
1277  json_object_path_get_string(main_obj, "clargs/filebase"),
1278  MACSIO_MIF_RankOfGroup(bat, rank),
1279  dumpn,
1280  json_object_path_get_string(main_obj, "clargs/fileext"));
1281 
1282  h5File_ptr = (hid_t *) MACSIO_MIF_WaitForBaton(bat, fileName, 0);
1283  h5File = *h5File_ptr;
1284  h5Group = userData.groupId;
1285 
1286  json_object *parts = json_object_path_get_array(main_obj, "problem/parts");
1287 
1288  for (int i = 0; i < json_object_array_length(parts); i++)
1289  {
1290  char domain_dir[256];
1291  json_object *this_part = json_object_array_get_idx(parts, i);
1292  hid_t domain_group_id;
1293 
1294  snprintf(domain_dir, sizeof(domain_dir), "domain_%07d",
1295  json_object_path_get_int(this_part, "Mesh/ChunkID"));
1296 
1297  domain_group_id = H5Gcreate1(h5File, domain_dir, 0);
1298 
1299  write_mesh_part(domain_group_id, this_part);
1300 
1301  H5Gclose(domain_group_id);
1302  }
1303 
1304  /* If this is the 'root' processor, also write Silo's multi-XXX objects */
1305 #if 0
1306  if (rank == 0)
1307  WriteMultiXXXObjects(main_obj, siloFile, bat);
1308 #endif
1309 
1310  /* Hand off the baton to the next processor. This winds up closing
1311  * the file so that the next processor that opens it can be assured
1312  * of getting a consistent and up to date view of the file's contents. */
1313  MACSIO_MIF_HandOffBaton(bat, h5File_ptr);
1314 
1315  /* We're done using MACSIO_MIF, so finish it off */
1316  MACSIO_MIF_Finish(bat);
1317 
1318 }
1319 
1320 static void main_dump(int argi, int argc, char **argv, json_object *main_obj,
1321  int dumpn, double dumpt)
1322 {
1323  int rank, size, numFiles;
1324 
1325 #warning SET ERROR MODE OF HDF5 LIBRARY
1326 
1327  /* Without this barrier, I get strange behavior with Silo's MACSIO_MIF interface */
1328  mpi_errno = MPI_Barrier(MACSIO_MAIN_Comm);
1329 
1330  /* process cl args */
1331  process_args(argi, argc, argv);
1332 
1333  rank = json_object_path_get_int(main_obj, "parallel/mpi_rank");
1334  size = json_object_path_get_int(main_obj, "parallel/mpi_size");
1335 
1336 #warning MOVE TO A FUNCTION
1337  /* determine the parallel file mode (SIF or MIF) and the file count */
1338  json_object *parfmode_obj = json_object_path_get_array(main_obj, "clargs/parallel_file_mode");
1339  if (parfmode_obj)
1340  {
1341  json_object *modestr = json_object_array_get_idx(parfmode_obj, 0);
1342  json_object *filecnt = json_object_array_get_idx(parfmode_obj, 1);
1343 #warning ERRORS NEED TO GO TO LOG FILES AND ERROR BEHAVIOR NEEDS TO BE HONORED
1344  if (!strcmp(json_object_get_string(modestr), "SIF"))
1345  {
1346  main_dump_sif(main_obj, dumpn, dumpt);
1347  }
1348  else
1349  {
1350  numFiles = json_object_get_int(filecnt);
1351  main_dump_mif(main_obj, numFiles, dumpn, dumpt);
1352  }
1353  }
1354  else
1355  {
1356  char const * modestr = json_object_path_get_string(main_obj, "clargs/parallel_file_mode");
1357  if (!strcmp(modestr, "SIF"))
1358  {
1359  float avg_num_parts = json_object_path_get_double(main_obj, "clargs/avg_num_parts");
1360  if (avg_num_parts == (float ((int) avg_num_parts)))
1361  main_dump_sif(main_obj, dumpn, dumpt);
1362  else
1363  {
1364 #warning CURRENTLY, SIF CAN WORK ONLY ON WHOLE PART COUNTS
1365  MACSIO_LOG_MSG(Die, ("HDF5 plugin cannot currently handle SIF mode where "
1366  "there are different numbers of parts on each MPI rank. "
1367  "Set --avg_num_parts to an integral value." ));
1368  }
1369  }
1370  else if (!strcmp(modestr, "MIFMAX"))
1371  numFiles = json_object_path_get_int(main_obj, "parallel/mpi_size");
1372  else if (!strcmp(modestr, "MIFAUTO"))
1373  {
1374  /* Call utility to determine optimal file count */
1375 #warning ADD UTILITY TO DETERMINE OPTIMAL FILE COUNT
1376  }
1377  main_dump_mif(main_obj, numFiles, dumpn, dumpt);
1378  }
1379 }
1380 
1381 static int register_this_interface()
1382 {
1383  MACSIO_IFACE_Handle_t iface;
1384 
1385  if (strlen(iface_name) >= MACSIO_IFACE_MAX_NAME)
1386  MACSIO_LOG_MSG(Die, ("Interface name \"%s\" too long", iface_name));
1387 
1388 #warning DO HDF5 LIB WIDE (DEFAULT) INITIALIZATIONS HERE
1389 
1390  /* Populate information about this plugin */
1391  strcpy(iface.name, iface_name);
1392  strcpy(iface.ext, iface_ext);
1393  iface.dumpFunc = main_dump;
1394  iface.processArgsFunc = process_args;
1395 
1396  /* Register custom compression methods with HDF5 library */
1397  H5dont_atexit();
1398 #ifdef HAVE_ZFP
1399  H5Z_register_zfp();
1400 #endif
1401 
1402  /* Register this plugin */
1403  if (!MACSIO_IFACE_Register(&iface))
1404  MACSIO_LOG_MSG(Die, ("Failed to register interface \"%s\"", iface_name));
1405 
1406  return 0;
1407 }
1408 
1409 /* this one statement is the only statement requiring compilation by
1410  a C++ compiler. That is because it involves initialization and non
1411  constant expressions (a function call in this case). This function
1412  call is guaranteed to occur during *initialization* (that is before
1413  even 'main' is called) and so will have the effect of populating the
1414  iface_map array merely by virtue of the fact that this code is linked
1415  with a main. */
1416 static int dummy = register_this_interface();
1417 
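For readers unfamiliar with the idiom described in the comment above, here is a hedged, stand-alone sketch with hypothetical names; the non-constant file-scope initializer is exactly what forces this one statement to be compiled as C++:

/* Hypothetical sketch of the registration-at-load idiom: the initializer is a
   function call, so when compiled as C++ it runs before main(), registering
   the plugin simply because this object file is linked into the executable. */
static int register_example_plugin(void)
{
    /* ... add an entry to some global interface table ... */
    return 0;
}
static int example_plugin_registered = register_example_plugin();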