Chombo + EB + MF  3.2
CH_HDF5.H
Go to the documentation of this file.
1 #ifdef CH_LANG_CC
2 /*
3  * _______ __
4  * / ___/ / ___ __ _ / / ___
5  * / /__/ _ \/ _ \/ V \/ _ \/ _ \
6  * \___/_//_/\___/_/_/_/_.__/\___/
7  * Please refer to Copyright.txt, in Chombo's root directory.
8  */
9 #endif
10 
11 #ifndef _CH_HDF5_H_
12 #define _CH_HDF5_H_
13 
14 #define CHOFFSET(object, member) (int)((char*)&(object.member) - (char*)&object)
15 
16 #ifdef CH_USE_HDF5 // if you don't have CH_USE_HDF5, then this file is useless
17 
18 #include <iostream>
19 using std::cout;
20 using std::endl;
21 
22 #ifdef CH_MPI
23 #include "mpi.h"
24 #endif
25 
26 #include "LevelData.H"
27 #include "HDF5Portable.H"
28 // hdf5 #defines inline.... duh!
29 #undef inline
30 #include <string>
31 #include <map>
32 #include "RealVect.H"
33 #include "CH_Timer.H"
34 #include "LoadBalance.H"
35 #include "LayoutIterator.H"
36 #include "Vector.H"
37 #include "memtrack.H"
38 #include "FluxBox.H"
39 #include "EdgeDataBox.H"
40 #include "NodeFArrayBox.H"
41 
42 #ifdef CH_MULTIDIM
43 #include "BoxTools_ExternC_Mangler.H" // Generated by lib/utils/multidim/mangle_externs.sh
44 #endif
45 #include "NamespaceHeader.H"
46 #ifdef H5_USE_16_API
47 #define H516
48 #endif
49 
50 template <class T>
51 void read(T& item, Vector<Vector<char> >& a_allocatedBuffers, const Box& box,
52  const Interval& comps)
53 {
54  Vector<void*> b(a_allocatedBuffers.size());
55  for (int i=0; i<b.size(); ++i) b[i]= &(a_allocatedBuffers[i][0]);
56  read(item, b, box, comps);
57 }
58 
namespace CH_HDF5
{
  /// Bit flags selecting special I/O behavior; combine with operator|.
  enum IOPolicy
  {
    IOPolicyDefault           = 0,      // internal linearization and
                                        // independent I/O
    IOPolicyMultiDimHyperslab = (1<<0), // HDF5 will internally linearize the
                                        // data
    IOPolicyCollectiveWrite   = (1<<1)  // HDF5 will collectively write data
  };
}
69 
70 using std::map;
71 
72 class HDF5Handle;
73 
74 // CH_HDF5.H
75 // ============
76 
77 /// user-friendly function to write out data on a AMR level
78 /**
79  all data is IN data.
80  a_handle: handle open and ready.
81  a_data: data, what else do you want to know ?
82  a_dx: the grid spacing at this level
83  a_dt: the timestep size that was last completed
84  a_time: the time of this level (might not be the same as other levels)
85  a_domain: the problem domain, represented at this level of refinement
86  a_refRatio:the refinement of a_level+1 wrt a_level. for vis systems it
87  would probably help if you use 1 for a_level==max_level.
88 */
89 template <class T>
90 int writeLevel(HDF5Handle& a_handle,
91  const int& a_level,
92  const T& a_data,
93  const Real& a_dx,
94  const Real& a_dt,
95  const Real& a_time,
96  const Box& a_domain,
97  const int& a_refRatio,
98  const IntVect& outputGhost = IntVect::Zero,
99  const Interval& comps = Interval());
100 
101 template <class T>
102 int readLevel(HDF5Handle& a_handle,
103  const int& a_level,
104  LevelData<T>& a_data,
105  Real& a_dx,
106  Real& a_dt,
107  Real& a_time,
108  Box& a_domain,
109  int& a_refRatio,
110  const Interval& a_comps = Interval(),
111  bool setGhost = false);
112 
113 template <class T>
114 int readLevel(HDF5Handle& a_handle,
115  const int& a_level,
116  LevelData<T>& a_data,
117  RealVect& a_dx,
118  Real& a_dt,
119  Real& a_time,
120  Box& a_domain,
121  IntVect& a_refRatio,
122  const Interval& a_comps = Interval(),
123  bool setGhost = false);
124 
125 // More basic HDF5 functions. These can be used at a persons own
126 // discretion. Refer to the User's Guide for a description of how these
127 // functions interact with the convenience functions writeLevel, etc.
128 
129 /// writes BoxLayout to HDF5 file.
130 /**
131  Writes BoxLayout to HDF5 file. Only one BoxLayout per group is permitted, this operation overwrites
132  previous entries.
133  This operation assumes boxes are cell-centered.\\
134  returns: success: 0\\
135  HDF5 error: negative error code.\\
136 */
137 int write(HDF5Handle& a_handle,
138  const BoxLayout& a_layout,
139  const std::string& name = "boxes");
140 
141 /// writes a BoxLayoutData<T> to an HDF5 file.
142 /**
143  writes a BoxLayoutData<T> to an HDF5 file.\\
144  returns: success: 0\\
145  HDF5 error: negative error code.\\
146 */
147 template <class T>
148 int write(HDF5Handle& a_handle,
149  const BoxLayoutData<T>& a_data,
150  const std::string& a_name,
151  IntVect outputGhost = IntVect::Zero,
152  const Interval& comps = Interval(),
153  bool newForm = false);
154 
155 /// writes a LevelData<T> to an HDF5 file.
156 /**
157  Writes a LevelData<T> to an HDF5 file.
158  the DisjointBoxLayout is not written out with the data, the user is required to
159  handle that object seperately. (see the "read" interface below).\\
160  returns: success: 0\\
161  HDF5 error: negative error code.\\
162 */
163 template <class T>
164 int write(HDF5Handle& a_handle,
165  const LevelData<T>& a_data,
166  const std::string& a_name,
167  const IntVect& outputGhost = IntVect::Zero,
168  const Interval& comps = Interval());
169 
170 /// reads Vector<Box> from location specified by a_handle.
171 /**
172  Reads BoxLayout from the group specified by a_handle.
173  Only one BoxLayout per group is permitted, this operation overwrites.
174  This operation assumes boxes are cell-centered.\\
175  returns: success: 0\\
176  HDF5 error: negative error code.\\
177 
178  Arg name refers to the name of an HDF5 dataset in which the boxes you want
179  are stored in a particular format, which is the one used to output DataLayouts
180  to HDF5. Here is an example of that format (as shown by h5dump):
181 
182  DATASET "datalayout" {
183  DATATYPE H5T_COMPOUND {
184  H5T_STD_I32LE "lo_i";
185  H5T_STD_I32LE "lo_j";
186  H5T_STD_I32LE "hi_i";
187  H5T_STD_I32LE "hi_j";
188  }
189  DATASPACE SIMPLE { ( 2 ) / ( 2 ) }
190  DATA {
191  (0): {
192  0,
193  0,
194  1,
195  0
196  },
197  (1): {
198  2,
199  0,
200  2,
201  1
202  }
203  }
204  }
205 
206 */
207 int read(HDF5Handle& a_handle,
208  Vector<Box>& boxes,
209  const std::string& name = "boxes");
210 
211 /// reads the set of Boxes out from the level_* groups of a Chombo HDF5 AMR file
212 /**
213  goes to all groups named level_n for level n = 0 to numLevel and fills the
214  vector of boxes. CH_HDF5.Handle must be set to the root group (the default location when the
215  file is just opened.
216 
217  A handy *get everything* version of int read(HDF5Handle& a_handle, Vector<Box>& boxes)
218 */
219 int readBoxes(HDF5Handle& a_handle,
220  Vector<Vector<Box> >& boxes);
221 
222 /// FArrayBox-at-a-time read function. FArrayBox gets redefined in the function. Reads data field named by a_dataName.
223 /** FArrayBox gets redefined in the function.
224  it will be sized to the box at the [level,boxNumber] position and have components
225  the go from [0,a_components.size()-1]
226 
227  some meta-data has to be read again and again internally to do this function, but
228  it should make some out-of-core tools possible.
229 */
230 
231 int readFArrayBox(HDF5Handle& a_handle,
232  FArrayBox& a_fab,
233  int a_level,
234  int a_boxNumber,
235  const Interval& a_components,
236  const std::string& a_dataName = "data" );
237 
238 /// read BoxLayoutData named a_name from location specified by a_handle.
239 /**
240  Read BoxLayoutData named a_name from location specified by a_handle. User must supply the correct BoxLayout for this function if redefineData == true. \\
241  returns: success: 0\\
242  bad location: 1\\
243  HDF5 error: negative error code.\\
244 */
245 template <class T>
246 int read(HDF5Handle& a_handle,
247  BoxLayoutData<T>& a_data,
248  const std::string& a_name,
249  const BoxLayout& a_layout,
250  const Interval& a_comps = Interval(),
251  bool redefineData = true);
252 
253 /// read LevelData named a_name from location specified by a_handle.
254 /**
255 
256  Read LevelData named a_name from location specified by a_handle.
257  User must supply the correct BoxLayout for this function.
258 
259  Arg a_name is significant: the HDF5 group to which a_handle is set is
260  is assumed to contain a dataset called <a_name>:datatype=<some integer>,
261  a dataset called <a_name>:offsets=<some integer>, and a subgroup named
262  <a_name>_attributes. You will have all these items if you dumped your
263  LevelData out using the corresponding write() function defined here.
264 
265  If arg redefineData==false, then the user must pass in a valid LevelData.
266  Otherwise, this function figures out how many components and ghost cells there
267  are, and allocates the correct amount of space. The actual FArray data held
268  by the LevelData gets filled in here, regardless of redefineData; "redefine"
269  alludes to the family of define() functions.
270 
271  returns: success: 0\\
272  bad location: 1\\
273  HDF5 error: negative error code.\\
274 */
275 template <class T>
276 int read(HDF5Handle& a_handle,
277  LevelData<T>& a_data,
278  const std::string& a_name,
279  const DisjointBoxLayout& a_layout,
280  const Interval& a_comps = Interval(),
281  bool redefineData = true);
282 
283 /// Handle to a particular group in an HDF file.
284 /**
285  HDF5Handle is a handle to a particular group in an HDF file. Upon
286  construction, group is defined to be the root. All data is
287  written and read assuming the native representations for data on
288  the architecture it is running on. When a file is opened, these
289  settings are checked and an error is flagged when things don't
290  match up. It is the USER'S responsibility to close() this object
291  when it is no longer needed.
292 
293 */
295 {
296 public:
297  ///
298  /**
299  Enumeration of opening modes for HDF5 files. \\
300 
301  CREATE: file is created if it didn't exist, or an existing file of
302  the same name is clobbered.\\
303 
304  CREATE_SERIAL: in serial execution this is equivalent to CREATE. In parallel this opens
305  a file just on this particular calling processor. Working with LevelData and this kind of
306  file is difficult to get right and most users will not use this mode. \\
307 
308  OPEN_RDONLY: existing file is opened in read-only mode. If the
309  file doesn't already exist then open fails and isOpen() returns
310  false.\\
311 
312  OPEN_RDWR: existing file is opened in read-write mode. If the file
313  doesn't already exist then open fails and isOpen() returns false.\\
314 
315  */
316  enum mode
317  {
322  };
323 
324  /// {\bf constructor}
325 
326  ///
327  /**
328  Default constructor. User must call open() prior to using
329  constructed object.
330  */
331  HDF5Handle();
332 
333  ///
334  /** Opens file and sets the current group to the root "/" group. \\
335 
336  if mode == CREATE, then file is created if it didn't exist, or an
337  existing file of the same name is clobbered.\\
338 
339  if mode == OPEN_*, then existing file is opened, if the file doesn't
340  already exist then open fails and isOpen() returns false.\\
341 
342  Writes basic information such as SpaceDim and testReal to a global
343  group. The global group is named Chombo_global in the call signature
344  where no globalGroupName is passed.\\
345  */
346  HDF5Handle(
347  const std::string& a_filename,
348  mode a_mode,
349  const char *a_globalGroupName="Chombo_global");
350 
351  // HDF5Handle(const std::string& a_filename, mode a_mode);
352 
353  ~HDF5Handle();
354 
355  /// {\bf File functions}
356 
357  ///
358  /**
359  Opens file and sets the current group of this HDF5Handle to the
360  root "/" group. File that this HDF5Handle previously pointed at is
361  NOT closed, that is the users responsibility.\\
362 
363  if mode == CREATE, then file is created if it didn't exist, or an
364  existing file of the same name is clobbered.\\
365 
366  if mode == OPEN_*, then existing file is opened, if the file doesn't
367  already exist then open fails and isOpen() returns false.\\
368 
369  Writes basic information such as SpaceDim and testReal to a global
370  group. The global group is named Chombo_global in the call signature
371  where no globalGroupName is passed.\\
372 
373  returns:\\
374  0 on success\\
375  negative number if file open failed (return code from HDF5)\\
376  1 file does not appear to contain datacheck info, probably not a Chombo file\\
377  2 on data bit size differences between code and file.\\
378 
379  aborts on SpaceDim not matching between code and file\\
380 
381  */
382  int open(
383  const std::string& a_filename,
384  mode a_mode,
385  const char *a_globalGroupName="Chombo_global");
386 
387  // int open(const std::string& a_filename, mode a_mode);
388 
389  ///
390  /**
391  A NULL or failed constructed HDF5Handle will return false.
392  */
393  bool isOpen() const;
394 
395  ///
396  /**
397  Closes the file. Must be called to close file. Files are not
398  automatically closed.
399 
400  */
401  void close();
402 
403  /// {\bf Group functions}
404 
405  ///
406  /**
407  Sets the current group to be "/level_x" where x=a_level.
408  */
409  void setGroupToLevel(int a_level);
410 
411  ///
412  /**
413  Set group to users choice, referenced from file root.
414  groupAbsPath will look like a Unix file path:
415  "/mySpecialData/group1/" "/" is the root group of the
416  file. returns a negative value on failure
417 
418  */
419  int setGroup(const std::string& groupAbsPath);
420 
421  ///
422  /**
423  Add the indicated string to the group path. For example, if
424  getGroup() returns "/foo" then after pushGroup("bar"), getGroup()
425  will return "/foo/bar".
426  Return value is whatever the internal setGroup returns.
427  */
428  int pushGroup( const std::string& grp );
429 
430  ///
431  /**
432  Pop off the last element of the group path. For example, "/foo/bar"
433  becomes "/foo". It's an error to call this function if the group,
434  going in, is "/".
435  Return value is whatever the internal setGroup returns, when we call
436  it to reset the group's path.
437  */
438  int popGroup();
439 
440  ///
441  /**
442  Returns name of current group. groupAbsPath will look like a Unix
443  file path: "/mySpecialData/group1/" "/" is the root group of the
444  file.
445 
446  */
447  const std::string& getGroup() const;
448 
449  HDF5Handle::mode openMode() const {return m_mode;}
450  const hid_t& fileID() const;
451  const hid_t& groupID() const;
452  static hid_t box_id;
453  static hid_t intvect_id;
454  static hid_t realvect_id;
455  static map<std::string, std::string> groups;
456 
457 private:
458 
459  HDF5Handle(const HDF5Handle&);
461 
462  hid_t m_fileID;
465  bool m_isOpen;
466  std::string m_filename; // keep around for debugging
467  std::string m_group;
468  int m_level;
469 
470  // static hid_t file_access;
471  static bool initialized;
472  static void initialize();
473 
474 };
475 
476 /// data to be added to HDF5 files.
477 /**
478  HDF5HeaderData is a wrapper for some data maps to be added to HDF5
479  files. instead of an overdose of access functions, the maps are
480  made public and they can be manipulated by the user at will. They
481  maintain type safety.
482 
483  to add a Real data entry, a user can simply program as follows:
484 
485  <PRE>
486  Real dx;
487  .
488  .
489  HDF5HeaderData metaData;
490  metaData.m_real["dx"] = dx;
491  </PRE>
492 
493  If "dx" already existed, then it is overwritten, otherwise an entry is
494  created and added with the new value;
495 
496  To search for entries, the user does the following:
497  <PRE>
498 
499  HDF5HeaderData metaData;
500  HDF5Handle currentStep(filename);
501  currentStep.setGroupToLevel(0);
502  metaData.readFromFile(currentStep);
503  if (metaData.m_intvect.find("ghost") != metaData.m_intvect.end())
504  ghost = metaData.m_intvect["ghost"];
505  else
506  ghost = defaultGhostIntVect;
507 
508  </PRE>
509 
510  A user can skip the check for existence if they have reason to "know" the
511  data will be there. It is just good coding practice.
512 
513  To erase an entry, you can use:
514  <PRE>
515  metaData.m_real.erase("dx");
516  </PRE>
517 
518 */
520 {
521 public:
522 
523  ///
524  /**
525  Writes this HDF5HeaderData's current attribute list to the
526  current group in 'file.' Returns 0 on success, returns the
527  error code from HDF5 on failure.
528 
529  */
530  int writeToFile(HDF5Handle& file) const;
531 
532  ///
533  /**
534  Reads into this HDF5HeaderData's attribute list from file. Read
535  process is add/change, does not remove key-value pairs. Reads
536  from current group. Returns 0 on success, positive number if a
537  particular member of group caused an error, negative on general
538  error.
539 
540  */
541  int readFromFile(HDF5Handle& file);
542 
543  ///
544  void clear();
545 
546  ///
547  map<std::string, Real> m_real;
548 
549  ///
550  map<std::string, int> m_int;
551 
552  ///
553  map<std::string, std::string> m_string;
554 
555  ///
556  map<std::string, IntVect> m_intvect;
557 
558  ///
559  map<std::string, Box> m_box;
560 
561  ///
562  map<std::string, RealVect> m_realvect;
563 
564  //users should not need these functions in general
565 
566  int writeToLocation(hid_t loc_id) const;
567  int readFromLocation(hid_t loc_id);
568 
569  /// useful for debugging. dumps contents to std::cout
570  void dump() const;
571 
572 private:
573  static herr_t attributeScan(hid_t loc_id, const char *name, void *opdata);
574 };
575 
576 extern "C"
577 {
578 #ifdef H516
579  herr_t HDF5HeaderDataattributeScan(hid_t loc_id, const char *name, void *opdata);
580 #else
581  herr_t HDF5HeaderDataattributeScan(hid_t loc_id, const char *name, const H5A_info_t* info, void *opdata);
582 #endif
583 }
584 
585 std::ostream& operator<<(std::ostream& os, const HDF5HeaderData& data);
586 
587 /// Methods for writing multiple LevelData to an HDF5 file.
588 /**
589  While the write functions can be used to write a single LevelData, they
590  do not support writing intervals from multiple LevelData. This is often
591  the case when you want to add diagnostic information to the output file.
592  The purpose of this class is to handle writing multiple LevelData to
593  a single file. The dataset for the LevelData is created during
594  construction, when the boxes are written. Any number of intervals from
595  LevelData can then be written.
596 
597  Special behavior can be selected with the policy flags:
598  <ul>
599  <li> CH_HDF5::IOPolicyMultiDimHyperslab - The memory dataspace will be
600  set up so that HDF5 linearizes the data. Using this requires
601  T.dataPtr() and contiguous memory (so things like T=FluxBox will
602  not work).
603  <li> CH_HDF5::IOPolicyCollectiveWrite - The write for each dataset will
604  be collective. The write is always parallel but otherwise will
605  be done independently. May or may not provide a speedup but it
606  should work. Rumor has it the BG requires this.
607  </ul>
608 
609  With MPI-everywhere on x86 architectures, it is highly likely that
610  the default policy CH_HDF5::IOPolicyDefault gives the best performance
611  (i.e., use internal linearization and independent I/O). In some limited
612  testing (2 procs, 20 cores each):
613  IOPolicyDefault - 6.77
614  IOPolicyCollectiveWrite - 33.33
615  IOPolicyMultiDimHyperslab - 168.14
616  IOPolicyCollectiveWrite | IOPolicyMultiDimHyperslab - 30.94
617  which is difficult to make sense of.
618 
619  \note
620  <ul>
621  <li> Users are resposible for matching the total number of components
622  used to allocate the dataset match the number of levelData
623  added to the dataset
624  <li> Only a single datatype is supported per file
625  <li> Since the dataset must be created in advance, T can only be
626  statically allocatable (T::preAllocatable() == 0)
627  <li> API 1.6 is not supported
628  </ul>
629 */
630 template <class T>
632 {
633 
634 public:
635 
636  /// Constructor writes boxes and allocates dataset for LevelData
637  WriteMultiData(HDF5Handle& a_handle,
638  const BoxLayout& a_layout,
639  const int a_numIntv,
640  const string& a_name,
641  const int a_policyFlags = CH_HDF5::IOPolicyDefault,
642  const IntVect& a_outputGhost = IntVect::Zero,
643  const bool a_newForm = false);
644 
645  /// Destructor
647  {
648  H5Dclose(m_dataSet);
649  }
650 
651 private:
652 
653  // Copy and assignment not allowed
656 
657 public:
658 
659  /// Write an interval of LevelData to the dataset
660  int writeData(const BoxLayoutData<T>& a_data,
661  const Interval& a_intvMem,
662  const Interval& a_intvFile);
663 
664 protected:
665 
666  const int m_policyFlags; ///< Policies
667  const IntVect m_outputGhost; ///< Number of ghost cells written
668  const bool m_newForm; ///< ?
669 
670  char m_dataname[128]; ///< Name for level data dataset
671  hid_t m_dataSet; ///< Dataset for level data
672  Interval m_allIntvFile; ///< Interval for components in file
673  long m_maxBoxPerProc; ///< Maximum boxes written by any proc
674 
675  // Both of these vectors are size 1 in the outermost dimension since
676  // only 1 type is allowed.
678  ///< Offset in file for each process to
679  ///< write to
680  Vector<hid_t> m_types; ///< Type of data written
681 };
682 
683 //=============================================================================
684 //
685 // end of declarations.
686 //
687 //=============================================================================
688 
689 #if ( H5_VERS_MAJOR == 1 && H5_VERS_MINOR > 6 )
690 typedef hsize_t ch_offset_t;
691 #else
692 #if ( H5_VERS_MAJOR == 1 && H5_VERS_MINOR == 6 && H5_VERS_RELEASE >= 4 )
693 typedef hsize_t ch_offset_t;
694 #else
695 typedef hssize_t ch_offset_t;
696 #endif
697 #endif
698 
699 template<class T>
700 hid_t H5Type(const T* dummy);
701 
702 template< >
703 hid_t H5Type(const int* dummy);
704 
705 template< >
706 hid_t H5Type(const long long* dummy);
707 
708 template< >
709 hid_t H5Type(const float* dummy);
710 
711 template< >
712 hid_t H5Type(const double* dummy);
713 
714 template< >
715 hid_t H5Type(const Box* dummy);
716 
717 template< >
718 hid_t H5Type(const RealVect* dummy);
719 
720 template< >
721 hid_t H5Type(const IntVect* dummy);
722 
// Fallback H5Type: reached only when no specialization exists for T.
// Aborts via MayDay::Error; the return value is never meaningful.
template<class T>
hid_t H5Type(const T* dummy)
{
  // no such definition;
  MayDay::Error(" H5Type(const T* dummy)");
  return -4;
}
730 
731 void createData(hid_t& a_dataset,
732  hid_t& a_dataspace,
733  HDF5Handle& handle,
734  const std::string& name,
735  hid_t type,
736  hsize_t size);
737 
/// Creates an HDF5 dataset and dataspace named 'name', sized for 'size'
/// elements of the HDF5 type matching T (selected via the H5Type
/// specializations above); forwards to the non-template createData.
template <class T>
void createDataset(hid_t& a_dataset,
                   hid_t& a_dataspace,
                   HDF5Handle& handle,
                   const std::string& name,
                   const T* dummy,
                   hsize_t size)
{
  createData(a_dataset, a_dataspace, handle, name, H5Type(dummy), size);

}
749 
750 void writeDataset(hid_t a_dataset,
751  hid_t a_dataspace,
752  const void* start,
753  ch_offset_t off,
754  hsize_t count);
755 
756 void readDataset(hid_t a_dataset,
757  hid_t a_dataspace,
758  void* start,
759  ch_offset_t off,
760  hsize_t count);
761 
762 // non-user code used in implementation of communication
763 
765 {
768  void operator=(const OffsetBuffer& rhs);
769 };
770 
771 ostream& operator<<(ostream& os, const OffsetBuffer& ob);
772 
773 #include "NamespaceFooter.H"
774 
775 #include "BaseNamespaceHeader.H"
776 
777 #include "NamespaceVar.H"
778 
779 //OffsetBuffer specialization of linearSize
780 template < >
781 int linearSize(const CH_XDIR::OffsetBuffer& a_input);
782 
783 //OffsetBuffer specialization of linearIn
784 template < >
785 void linearIn(CH_XDIR::OffsetBuffer& a_outputT, const void* const a_inBuf);
786 
787 //OffsetBuffer specialization of linearOut
788 template < >
789 void linearOut(void* const a_outBuf, const CH_XDIR::OffsetBuffer& a_inputT);
790 
791 template < > int linearSize(const Vector<CH_XDIR::OffsetBuffer>& a_input);
792 template < > void linearIn(Vector<CH_XDIR::OffsetBuffer>& a_outputT, const void* const inBuf);
793 template < > void linearOut(void* const a_outBuf, const Vector<CH_XDIR::OffsetBuffer>& a_inputT);
794 
795 #include "BaseNamespaceFooter.H"
796 #include "NamespaceHeader.H"
797 
798 // First, template specializations for read/write for FArrayBox.
799 
// BaseFab<int> is written as a single dataset of native ints.
template <>
inline void dataTypes(Vector<hid_t>& a_types, const BaseFab<int>& dummy)
{
  a_types.resize(1);
  a_types[0] = H5T_NATIVE_INT;
}
806 
// BaseFab<char> is written as a single dataset of native chars.
template <>
inline void dataTypes(Vector<hid_t>& a_types, const BaseFab<char>& dummy)
{
  a_types.resize(1);
  a_types[0] = H5T_NATIVE_CHAR;
}
813 
814 /* since many compilers choke on the proper template specialization syntax
815  I am forced to pass in a dummy specialization argument
816 */
// FArrayBox is written as a single dataset of native Reals
// (H5T_NATIVE_REAL follows Chombo's Real precision setting).
template <>
inline void dataTypes(Vector<hid_t>& a_types, const FArrayBox& dummy)
{
  a_types.resize(1);
  a_types[0] = H5T_NATIVE_REAL;
}
823 
// FluxBox is written as a single dataset of native Reals.
template <>
inline void dataTypes(Vector<hid_t>& a_types, const FluxBox& dummy)
{
  a_types.resize(1);
  a_types[0] = H5T_NATIVE_REAL;
}
830 
// EdgeDataBox is written as a single dataset of native Reals.
template <>
inline void dataTypes(Vector<hid_t>& a_types, const EdgeDataBox& dummy)
{
  a_types.resize(1);
  a_types[0] = H5T_NATIVE_REAL;
}
837 
// NodeFArrayBox is written as a single dataset of native Reals.
template <>
inline void dataTypes(Vector<hid_t>& a_types, const NodeFArrayBox& dummy)
{
  a_types.resize(1);
  a_types[0] = H5T_NATIVE_REAL;
}
844 
845 
// Element count for a cell-centered BaseFab<int> over 'box' and the
// components in 'comps'.
template <>
inline void dataSize(const BaseFab<int>& item, Vector<int>& a_sizes,
                     const Box& box, const Interval& comps)
{
  a_sizes[0] = box.numPts() * comps.size();
}
852 
// Element count for a cell-centered BaseFab<char> over 'box' and the
// components in 'comps'.
template <>
inline void dataSize(const BaseFab<char>& item, Vector<int>& a_sizes,
                     const Box& box, const Interval& comps)
{
  a_sizes[0] = box.numPts() * comps.size();
}
859 
// Element count for a cell-centered FArrayBox over 'box' and the
// components in 'comps'.
template <>
inline void dataSize(const FArrayBox& item, Vector<int>& a_sizes,
                     const Box& box, const Interval& comps)
{
  a_sizes[0] = box.numPts() * comps.size();
}
866 
867 template <>
868 inline void dataSize(const FluxBox& item, Vector<int>& a_sizes,
869  const Box& box, const Interval& comps)
870 {
871  int& t = a_sizes[0];
872  t = 0;
873  for (int dir =0; dir<CH_SPACEDIM; dir++)
874  {
875  Box edgeBox(surroundingNodes(box,dir));
876  t += edgeBox.numPts()*comps.size();
877  }
878 }
879 
880 
881 template <>
882 inline void dataSize(const EdgeDataBox& item, Vector<int>& a_sizes,
883  const Box& box, const Interval& comps)
884 {
885  int& t = a_sizes[0];
886  t = 0;
887  for (int dir =0; dir<CH_SPACEDIM; dir++)
888  {
889  Box edgeBox(surroundingNodes(box));
890  edgeBox.enclosedCells(dir);
891  t += edgeBox.numPts()*comps.size();
892  }
893 }
894 
895 
896 template <>
897 inline void dataSize(const NodeFArrayBox& item, Vector<int>& a_sizes,
898  const Box& box, const Interval& comps)
899 {
900  Box boxNodes = surroundingNodes(box);
901  a_sizes[0] = boxNodes.numPts() * comps.size();
902 }
903 
904 
905 
906 template <>
907 inline const char* name(const FArrayBox& a_dummySpecializationArg)
908 {
909  // Attempt to get rid of warnings on IBM...
910  //static const char* name = "FArrayBox";
911  const char* name = "FArrayBox";
912  return name;
913 }
914 
915 template <>
916 inline const char* name(const BaseFab<int>& a_dummySpecializationArg)
917 {
918  // Attempt to get rid of warnings on IBM...
919  //static const char* name = "BaseFab<int>";
920  const char* name = "BaseFab<int>";
921  return name;
922 }
923 
924 template <>
925 inline const char* name(const BaseFab<char>& a_dummySpecializationArg)
926 {
927  // Attempt to get rid of warnings on IBM...
928  //static const char* name = "BaseFab<int>";
929  const char* name = "BaseFab<char>";
930  return name;
931 }
932 //
933 // now, generic, non-binary portable version of template functions
934 // for people who just want ot rely on linearIn/linearOut
935 
// Generic fallback: any T with linearIn/linearOut is stored as a raw
// byte stream (not binary-portable across platforms).
template <class T>
inline void dataTypes(Vector<hid_t>& a_types, const T& dummy)
{
  a_types.resize(1);
  a_types[0] = H5T_NATIVE_CHAR;
}
942 
// Generic fallback: byte count is whatever T reports for linearizing
// itself over 'box' and 'comps'.
template <class T>
inline void dataSize(const T& item, Vector<int>& a_sizes,
                     const Box& box, const Interval& comps)
{
  a_sizes[0] = item.size(box, comps);
}
949 
// Generic fallback: linearize 'item' into the single preallocated buffer.
template <class T>
inline void write(const T& item, Vector<void*>& a_allocatedBuffers,
                  const Box& box, const Interval& comps)
{
  item.linearOut(a_allocatedBuffers[0], box, comps);
}
956 
// Generic fallback: reconstruct 'item' from the single linear buffer.
template <class T>
inline void read(T& item, Vector<void*>& a_allocatedBuffers,
                 const Box& box, const Interval& comps)
{
  item.linearIn(a_allocatedBuffers[0], box, comps);
}
963 
// Generic fallback type-name tag for types without a name specialization.
template <class T>
inline const char* name(const T& a_dummySpecializationArg)
{
  return "unknown";
}
970 
971 template <class T>
972 void getOffsets(Vector<Vector<long long> >& offsets, const BoxLayoutData<T>& a_data,
973  int types, const Interval& comps, const IntVect& outputGhost)
974 {
975  CH_TIME("getOffsets");
976  const BoxLayout& layout = a_data.boxLayout();
977  {
978  CH_TIMELEAF("offsets.resize");
979  offsets.resize(types, Vector<long long>(layout.size()+1));
980  // offsets.resize(layout.size() + 1, Vector<long long>(types));
981  }
982  for (int t=0; t<types; t++) offsets[t][0] = 0;
983  Vector<int> thisSize(types);
984  if (T::preAllocatable()==0)
985  { // static preAllocatable
986  T dummy;
987  unsigned int index = 1;
988  for (LayoutIterator it(layout.layoutIterator()); it.ok(); ++it)
989  {
990  Box region = layout[it()];
991  region.grow(outputGhost);
992  {
993  CH_TIMELEAF("dataSize");
994  dataSize(dummy, thisSize, region, comps);
995  }
996  for (unsigned int i=0; i<thisSize.size(); ++i)
997  {
998  CH_TIMELEAF("offsets calc");
999  //offsets[index][i] = offsets[index-1][i] + thisSize[i];
1000  offsets[i][index] = offsets[i][index-1] + thisSize[i];
1001  }
1002  ++index;
1003  }
1004  }
1005  else
1006  { // symmetric and dynamic preallocatable need two pass I/O
1007  OffsetBuffer buff;
1008  //int index = 0;
1009  for (DataIterator dit(a_data.dataIterator()); dit.ok(); ++dit)
1010  {
1011  int index = a_data.boxLayout().index(dit());
1012  //int index = dit().intCode();
1013  buff.index.push_back(index);
1014  Box region = layout[dit()];
1015  region.grow(outputGhost);
1016  {
1017  CH_TIMELEAF("dataSize");
1018  dataSize(a_data[dit()], thisSize, region, comps);
1019  }
1020  buff.offsets.push_back(thisSize);
1021  }
1022  Vector<OffsetBuffer> gathering(numProc());
1023  {
1024  CH_TIMELEAF("gather");
1025  gather(gathering, buff, uniqueProc(SerialTask::compute));
1026  }
1027  {
1028  CH_TIMELEAF("broadcast");
1030  }
1031  // pout() << gathering<<endl;
1032  for (int i=0; i<numProc(); ++i)
1033  {
1034  OffsetBuffer& offbuf = gathering[i];
1035  for (int num=0; num<offbuf.index.size(); num++)
1036  {
1037  int index = offbuf.index[num];
1038  for (unsigned int j=0; j<types; ++j)
1039  {
1040  CH_TIMELEAF("offsets calc");
1041  //offsets[index+1][j] = offbuf.offsets[num][j];
1042  offsets[j][index+1] = offbuf.offsets[num][j];
1043  }
1044  }
1045  }
1046  for (int i=0; i<layout.size(); i++)
1047  {
1048  for (unsigned int j=0; j<types; ++j)
1049  {
1050  CH_TIMELEAF("offsets calc");
1051  //offsets[i+1][j] += offsets[i][j];
1052  offsets[j][i+1] += offsets[j][i];
1053  }
1054  }
1055  }
1056 
1057  // pout() << offsets<<endl;
1058 }
1059 
1060 //==================================================================
1061 //
1062 // Statically pre-allocatable version only requires BoxLayout, not
1063 // the data
1064 //
1065 template <class T>
1067  const BoxLayout a_layout,
1068  int a_numTypes,
1069  const Interval& a_comps,
1070  const IntVect& a_outputGhost)
1071 {
1072  CH_TIME("getOffsets (prealloc)");
1073  CH_assert(T::preAllocatable() == 0);
1074 
1075  a_offsets.resize(a_numTypes, Vector<long long>(a_layout.size()+1));
1076  for (int t = 0; t != a_numTypes; ++t)
1077  {
1078  a_offsets[t][0] = 0;
1079  }
1080  Vector<int> thisSize(a_numTypes);
1081  T dummy;
1082  unsigned int index = 1;
1083  for (LayoutIterator it(a_layout.layoutIterator()); it.ok(); ++it)
1084  {
1085  Box region = a_layout[it()];
1086  region.grow(a_outputGhost);
1087  dataSize(dummy, thisSize, region, a_comps);
1088  for (int i = 0; i != thisSize.size(); ++i) // Loop over (1) types
1089  {
1090  //offsets[index][i] = offsets[index-1][i] + thisSize[i];
1091  a_offsets[i][index] = a_offsets[i][index-1] + thisSize[i];
1092  }
1093  ++index;
1094  }
1095 }
1096 
1097 //==================================================================
1098 //
1099 // Now, linear IO routines for a BoxLayoutData of T
1100 //
/// Write a BoxLayoutData<T> into the currently-open HDF5 group as 1-D datasets.
/**
   For each HDF5 datatype T linearizes into, this creates (collectively,
   on all ranks) a flat dataset named "<a_name>:datatype=<i>" (or
   "<a_name>Regular"/"Mask" when newForm is true) plus a companion
   "<a_name>:offsets=<i>" dataset holding the exclusive prefix sum of
   per-box linear sizes.  Rank 0 writes the offset table; every rank then
   linearizes its own boxes and writes them into the matching hyperslabs.
   Optional compile-time paths: AGGREGATE_BOXES_ (aggregated collective
   buffering, one H5Dwrite per type) and TRY_MPI_COLLECTIVES_ (MPI-IO
   collective transfers, requiring equal H5Dwrite counts on all ranks).

   \param[in] a_handle     open handle positioned at the target group
   \param[in] a_data       data to write
   \param[in] a_name       base name for the datasets
   \param[in] outputGhost  ghost cells included in each box's output
   \param[in] in_comps     components to write; empty => all of a_data's
   \param[in] newForm      alternate dataset-naming scheme (single type only)
   \return 0 on success, otherwise a negative HDF5 error code
*/
template <class T>
int write(HDF5Handle& a_handle, const BoxLayoutData<T>& a_data,
          const std::string& a_name, IntVect outputGhost,
          const Interval& in_comps, bool newForm)
{
  CH_TIME("write_Level");
  int ret = 0;

  Interval comps(in_comps);
  if ( comps.size() == 0) comps = a_data.interval();
  T dummy; // used for preallocatable methods for dumb compilers.
  Vector<hid_t> types;
  dataTypes(types, dummy);

  Vector<Vector<long long> > offsets;
  Vector<long long> bufferCapacity(types.size(), 1); // noel (was 0)
  Vector<void*> buffers(types.size(), NULL);

  // Global prefix sums of linearized box sizes (gather/broadcast inside).
  getOffsets(offsets, a_data, types.size(), comps, outputGhost);

  // create datasets collectively.
  hsize_t flatdims[1];
  char dataname[100];
  Vector<hid_t> dataspace(types.size());
  Vector<hid_t> dataset(types.size());

  herr_t err;
  hsize_t count[1];
  ch_offset_t offset[1];
  // newForm naming only supports a single datatype.
  CH_assert(!(newForm && types.size() != 1));

  for (unsigned int i=0; i<types.size(); ++i)
    {
      // Total flat length for this type = last prefix-sum entry.
      flatdims[0] = offsets[i][offsets[i].size()-1];
      if (newForm)
        {
          if (a_name == "M")
            {
              strcpy(dataname, "Mask");
            }
          else
            {
              sprintf(dataname, "%sRegular", a_name.c_str());
            }
        }
      else
        {
          sprintf(dataname, "%s:datatype=%i",a_name.c_str(), i);
        }
      {
        CH_TIME("H5Screate");
        dataspace[i] = H5Screate_simple(1, flatdims, NULL);
      }
      CH_assert(dataspace[i] >=0);
      {
        CH_TIME("H5Dcreate");
#ifdef H516
        dataset[i] = H5Dcreate(a_handle.groupID(), dataname,
                               types[i],
                               dataspace[i], H5P_DEFAULT);
#else
        dataset[i] = H5Dcreate2(a_handle.groupID(), dataname,
                                types[i],
                                dataspace[i], H5P_DEFAULT,
                                H5P_DEFAULT, H5P_DEFAULT);
#endif
      }
      CH_assert(dataset[i] >= 0);
    }

  // Create and (rank 0 only) write the offset-table datasets.
  hid_t offsetspace, offsetData;
  for (unsigned int i=0; i<types.size(); ++i)
    {
      flatdims[0] = offsets[i].size();
      if (newForm)
        {
          if (a_name == "M")
            {
              strcpy(dataname, "MaskOffsets");
            }
          else
            {
              sprintf(dataname, "%sOffsets",a_name.c_str());
            }
        }
      else
        {
          sprintf(dataname, "%s:offsets=%i",a_name.c_str(), i);
        }
      {
        CH_TIME("H5S_H5D_offsets_create");
        offsetspace = H5Screate_simple(1, flatdims, NULL);
        CH_assert(offsetspace >= 0);
#ifdef H516
        offsetData = H5Dcreate(a_handle.groupID(), dataname,
                               H5T_NATIVE_LLONG, offsetspace,
                               H5P_DEFAULT);
#else
        offsetData = H5Dcreate2(a_handle.groupID(), dataname,
                                H5T_NATIVE_LLONG, offsetspace,
                                H5P_DEFAULT,
                                H5P_DEFAULT,H5P_DEFAULT);
#endif
        CH_assert(offsetData >= 0);
      }
      if (procID() == 0)
        {
          CH_TIME("WriteOffsets");
          hid_t memdataspace = H5Screate_simple(1, flatdims, NULL);
          CH_assert(memdataspace >= 0);
          err = H5Dwrite(offsetData, H5T_NATIVE_LLONG, memdataspace, offsetspace,
                         H5P_DEFAULT, &(offsets[i][0]));
          CH_assert(err >= 0);
          H5Sclose(memdataspace);
        }
      {
        CH_TIME("H5S_H5D_offsets_close");
        H5Sclose(offsetspace);
        H5Dclose(offsetData);
      }
    }

  // write BoxLayoutData attributes into Dataset[0]
  if (!newForm)
    {
      CH_TIME("WriteAttributes");
      HDF5HeaderData info;
      info.m_int["comps"] = comps.size();
      info.m_string["objectType"] = name(dummy);
      info.m_intvect["outputGhost"] = outputGhost;
      info.m_intvect["ghost"] = outputGhost;
      std::string group = a_handle.getGroup();
      a_handle.setGroup(group+"/"+a_name+"_attributes");
      info.writeToFile(a_handle);
      a_handle.setGroup(group);
    }

  // collective operations finished, now perform parallel writes
  // to specified hyperslabs.

  Vector<size_t> type_size(types.size());
  for (unsigned int i=0; i<types.size(); ++i)
    {
      type_size[i] = H5Tget_size(types[i]);
    }

  // NOTE(review): thisSize is never used in the remainder of this function.
  Vector<int> thisSize(types.size());

  // Hooks for aggregated collective buffering (ACB).
  // Devendran, et al. Collective I/O Optimizations for Adaptive Mesh
  // Refinement Data Writes on Lustre File System, CUG 2016 for more on
  // ACB.
  // Comment out next line if you don't want ACB.
  // Also, comment out the line "#define TRY_MPI_COLLECTIVES_ 0" below if
  // you don't want MPI-IO collective buffering.
  // In general, you should turn on collective buffering if you use
  // ACB.
#ifdef CH_MPI
//#define AGGREGATE_BOXES_ 1
#endif
#ifdef AGGREGATE_BOXES_
  // pout() << "Turning on aggregated collective buffering (ACB)." << endl;
  // For each type types[i], aggCount[i] is the total number of data points for all
  // the boxes on this processor.
  Vector<hsize_t> aggCount(types.size(), 0);
  // size of aggregated buffers; initialize to 0 so we can add up
  // the box sizes to compute the size of the buffers
  Vector<long long> aggBufferSize(types.size(), 0);
  Vector<void*> aggBuffers(types.size(), NULL);
#endif

  // step 1, create buffer big enough to hold the biggest linearized T
  // that I will have to output.
  {
    CH_TIME("ComputeBufferCapacity");
    for (DataIterator it = a_data.dataIterator(); it.ok(); ++it)
      {
        unsigned int index = a_data.boxLayout().index(it());
        for (unsigned int i=0; i<types.size(); ++i)
          {
            // Byte size of this box for this type, from the prefix sums.
            long long size = (offsets[i][index+1] - offsets[i][index])
              * type_size[i];
            CH_assert(size >= 0);
            if (size > bufferCapacity[i]) // grow buffer if necessary.....
              {
                bufferCapacity[i] = size;
              }
#ifdef AGGREGATE_BOXES_
            aggBufferSize[i] += size;
#endif
          }
      }
  }
  for (unsigned int i=0; i<types.size(); ++i)
    {
      CH_TIME("mallocMT_buffers");
      buffers[i] = mallocMT(bufferCapacity[i]);
      if (buffers[i] == NULL)
        {
          pout() << " i=" << i
                 << " types.size() = " << types.size()
                 << " bufferCapacity[i] = " << (int)bufferCapacity[i]
                 << endl;
          MayDay::Error("memory error in buffer allocation write");
        }
#ifdef AGGREGATE_BOXES_
      aggBuffers[i] = mallocMT(aggBufferSize[i]);
      if (aggBuffers[i] == NULL)
        {
          MayDay::Error("memory error in aggregate buffer allocation in write");
        }
#endif
    }

#ifdef CH_MPI
//#define TRY_MPI_COLLECTIVES_ 1
#endif
#ifdef TRY_MPI_COLLECTIVES_
  // pout() << "Turned on MPI-IO collective buffering." << endl;
  // MPI collective buffering requires all processes call H5Dwrite collectively.
  // In particular, the processes must issue the same number of H5Dwrite calls,
  // or the application will hang when collective is turned on.
  // In the case where we do a separate write for each box, gather the
  // maximum number of boxes assigned to any process, so we know how many
  // H5Dwrites to do.
  DataIterator dit = a_data.dataIterator();
  int maxNumBoxes = dit.size();
  int sendBuf = maxNumBoxes;
  int result = MPI_Allreduce(&sendBuf, &maxNumBoxes, 1, MPI_INT,MPI_MAX, Chombo_MPI::comm);
  if (result != MPI_SUCCESS)
    {
      MayDay::Error("Couldn't collect maximum number of boxes!");
    }
  // Set dataset transfer property to collective mode. This is how we turn
  // on MPI-IO collective in HDF5.
  hid_t DXPL = H5Pcreate(H5P_DATASET_XFER);
  if(!(a_handle.openMode()==HDF5Handle::CREATE_SERIAL)) // can't set MPI collective if file was created for serial IO
    H5Pset_dxpl_mpio(DXPL, H5FD_MPIO_COLLECTIVE);
#endif /*end TRY_MPI_COLLECTIVES_ */

  // Step 2. actually a) write each of my T objects into the
  // buffer, then b) write that buffered data out to the
  // write position in the data file using hdf5 hyperslab functions.
  {
    CH_TIME("linearize_H5Dwrite");
#ifdef AGGREGATE_BOXES_
    // the first non-empty hyperslab needs to be handled separately from
    // the others, so keep track of the first non-empty hyperslab
    bool isFirstHyperslab = true;

    // bufferLoc keeps track of last location written to in the aggregrated
    // buffer
    Vector<void*> bufferLoc(types.size(), NULL);
    // Set the bufferLoc to the beginning of the aggregated buffer
    for(unsigned int i=0; i<types.size(); ++i)
      {
        bufferLoc[i] = aggBuffers[i];
      }
#endif
    for (DataIterator it = a_data.dataIterator(); it.ok(); ++it)
      {
        const T& data = a_data[it()];
        unsigned int index = a_data.boxLayout().index(it());
        Box box = a_data.box(it());
        box.grow(outputGhost);
        // First, linearize the box, and put data into the buffer
        {
          CH_TIMELEAF("linearize");
#ifdef AGGREGATE_BOXES_
          // Under aggregation, we need to be careful to write to
          // the buffer starting at bufferLoc.
          for(unsigned int i=0; i<types.size(); i++)
            {
              data.linearOut(bufferLoc[i], box, comps);
              char* tempLoc = ((char*)bufferLoc[i]) + data.size(box,comps);
              bufferLoc[i] = (void*)tempLoc;
            }
#else
          write(data, buffers, box, comps); //write T to buffer
#endif
        }
        // Next select HDF5 hyperslabs to specify where to write in HDF5 file
        // BUG: The hyperslab union generation is wrong if types.size() > 1
        // because isFirstHyperslab isn't independent for different sizes.
        // We don't make a fix because types.size() is almost always 1 (and
        // will probably be set to 1 in future redesigns).
        for (unsigned int i=0; i<types.size(); ++i)
          {
            offset[0] = offsets[i][index];
            count[0] = offsets[i][index+1] - offset[0];
#ifdef AGGREGATE_BOXES_
            // Under aggregation, we may be sending multiple boxes to HDF5.
            // Because boxes on a process are not necessarily written to
            // contiguous locations in the HDF5 file, we need to specify
            // multiple file locations to HDF5. This is done using a
            // union of hyperslabs.

            aggCount[i] += count[0];
            if (isFirstHyperslab)
              {
                // If the box is non-empty, select the first hyperslab
                // and set isFirstHyperslab to false. Otherwise,
                // we haven't encountered the first non-empty
                // hyperslab, so keep isFirstHyperslab equal to true.
                if(count[0] > 0)
                  {
                    err = H5Sselect_hyperslab(dataspace[i], H5S_SELECT_SET,
                                              offset, NULL,
                                              count, NULL);
                    CH_assert(err >= 0);
                    isFirstHyperslab = false;
                  }
                else // must explicitly tell HDF5 we are doing an empty write
                  {
                    H5Sselect_none(dataspace[i]);
                  }
              }
            else
              {
                if(count[0] > 0)
                  {
                    // H5S_SELECT_OR creates a union of the selected
                    // hyperslab with the already selected hyperslabs
                    // in dataspace
                    err = H5Sselect_hyperslab(dataspace[i], H5S_SELECT_OR,
                                              offset, NULL,
                                              count, NULL);
                    CH_assert(err >= 0);
                  }
                // else don't do H5Sselect_none in case it overrides
                // the already existing union of hyperslabs.
              }
#else
            // Without aggregation, we create a simple hyperslab for the box.
            hid_t memdataspace=0;
            if (count[0] > 0)
              {
                err = H5Sselect_hyperslab(dataspace[i], H5S_SELECT_SET,
                                          offset, NULL,
                                          count, NULL);
                CH_assert(err >= 0);
                memdataspace = H5Screate_simple(1, count, NULL);
                CH_assert(memdataspace >= 0);
              }
            else // must explicitly tell HDF5 we are doing an empty write
              {
                H5Sselect_none(dataspace[i]);
                // NOTE(review): memdataspace is still 0 here (no memory
                // dataspace was created), so this call fails and its
                // error is ignored — confirm intended.
                H5Sselect_none(memdataspace);
              }

            // Write out box if we are NOT performing aggregation.
            // (Under aggregation, one single write call is issued at the
            // end of the function.)
            {
              CH_TIMELEAF("H5Dwrite");
#ifdef TRY_MPI_COLLECTIVES_
              err = H5Dwrite(dataset[i], types[i], memdataspace, dataspace[i],
                             DXPL, buffers[i]);
#else
              err = H5Dwrite(dataset[i], types[i], memdataspace, dataspace[i],
                             H5P_DEFAULT, buffers[i]);
#endif
            }
            CH_assert(err >= 0);
            H5Sclose(memdataspace);
            if (err < 0)
              {
                ret = err;
                pout() << "Before goto cleanup" << endl;
                goto cleanup;
              }
#endif // end of ifdef AGGREGATE_BOXES_
          } // end of loop over types
      } // end of loop over data iterator

    // Under aggregation, now we issue one write call to send all boxes to HDF5
    // If aggregation is turned off, but MPI collective is turned on, we may
    // have to do empty writes to make sure all processes call H5Dwrite
    // the same number of times.
#ifdef AGGREGATE_BOXES_
    for(unsigned int i=0; i<types.size(); ++i)
      {
        if(aggCount[i] > 0)
          {
            hid_t memdataspace = 0;
            memdataspace = H5Screate_simple(1, &(aggCount[i]), NULL);
            CH_assert(memdataspace >= 0);
            {
              CH_TIMELEAF("H5Dwrite");
#ifdef TRY_MPI_COLLECTIVES_
              err = H5Dwrite(dataset[i], types[i], memdataspace, dataspace[i],
                             DXPL, aggBuffers[i]);
#else
              err = H5Dwrite(dataset[i], types[i], memdataspace, dataspace[i],
                             H5P_DEFAULT, aggBuffers[i]);
#endif
            }
            H5Sclose(memdataspace);
            if (err < 0)
              {
                ret = err;
                pout() << "Error! goto cleanup" << endl;
                goto cleanup;
              }
          }
        //else aggCount[i] is 0, and this processor has no data to write.
        // For MPI collectives, still have to do an empty write call
#ifdef TRY_MPI_COLLECTIVES_
        else
          {
            hid_t memdataspace = 0;
            memdataspace = H5Screate_simple(1, &(aggCount[i]), NULL);
            H5Sselect_none(memdataspace);
            H5Sselect_none(dataspace[i]);
            err = H5Dwrite(dataset[i], types[i], memdataspace, dataspace[i],
                           DXPL, aggBuffers[i]);
            if (err < 0)
              {
                ret = err;
                pout() << "Before goto cleanup" << endl;
                goto cleanup;
              }
          }
#endif
      } // end for loop over types
#else // not using aggregated collective buffering
#ifdef TRY_MPI_COLLECTIVES_
    // MPI collectives expects all processes to make the same number of H5Dwrite calls,
    // or it will hang. So, call H5Dwrite with empty data
    // First create memdataspace according to example in
    // https://www.hdfgroup.org/ftp/HDF5/examples/parallel/coll_test.c
    hid_t memdataspace = 0;
    // Setting first argument to 1 b/c that's the value used for non-empty writes.
    // (See H5Sselect_hyperslab code above.)
    memdataspace = H5Screate_simple(1, count, NULL);
    H5Sselect_none(memdataspace);
    int nBoxes = a_data.dataIterator().size();
    // Pad this rank up to the global maximum number of boxes with empty writes.
    for(int iwrite = nBoxes; iwrite < maxNumBoxes; iwrite++)
      {
        for (unsigned int i=0; i<types.size(); ++i)
          {
            H5Sselect_none(dataspace[i]);
            // for debugging: buffers has junk data in it (but none of that data should
            // be written out here)
            err = H5Dwrite(dataset[i], types[i], memdataspace, dataspace[i],
                           DXPL, buffers[i]);
            if (err < 0)
              {
                ret = err;
                pout() << "Before goto cleanup" << endl;
                goto cleanup;
              }
          }
      }
    H5Sclose(memdataspace);

#endif // end of #ifdef TRY_MPI_COLLECTIVES_
#endif // end of #ifdef AGGREGATE_BOXES_

  } // end of region for CH_TIME("linearize_H5Dwrite")

#ifdef TRY_MPI_COLLECTIVES_
  H5Pclose(DXPL);
#endif

  // OK, clean up data structures

 cleanup:
  for (unsigned int i=0; i<types.size(); ++i)
    {
      {
        CH_TIME("freeMT");
        freeMT(buffers[i]);
#ifdef AGGREGATE_BOXES_
        freeMT(aggBuffers[i]);
#endif
      }
      {
        CH_TIME("H5Sclose");
        H5Sclose(dataspace[i]);
      }
      {
        CH_TIME("H5Dclose");
        H5Dclose(dataset[i]);
      }
    }
  return ret;

}
1599 
1600 template <class T>
1601 int write(HDF5Handle& a_handle, const LevelData<T>& a_data,
1602  const std::string& a_name, const IntVect& outputGhost, const Interval& in_comps)
1603 {
1604  CH_TIMERS("Write Level");
1605  CH_TIMER("calc minimum in outputGhost",t1);
1606  CH_TIMER("writeToFile",t2);
1607  CH_TIMER("setGroup",t3);
1608  HDF5HeaderData info;
1609  info.m_intvect["ghost"] = a_data.ghostVect();
1610  IntVect og(outputGhost);
1611  CH_START(t1);
1612  og.min(a_data.ghostVect());
1613  CH_STOP(t1);
1614  info.m_intvect["outputGhost"] = og;
1615  std::string group = a_handle.getGroup();
1616  a_handle.setGroup(group+"/"+a_name+"_attributes");
1617  CH_START(t2);
1618  info.writeToFile(a_handle);
1619  CH_STOP(t2);
1620  CH_START(t3);
1621  a_handle.setGroup(group);
1622  CH_STOP(t3);
1623  return write(a_handle, (const BoxLayoutData<T>&)a_data, a_name, og, in_comps);
1624 }
1625 
1626 template <class T>
1627 int read(HDF5Handle& a_handle, LevelData<T>& a_data, const std::string& a_name,
1628  const DisjointBoxLayout& a_layout, const Interval& a_comps, bool a_redefineData)
1629 {
1630  if (a_redefineData)
1631  {
1632  HDF5HeaderData info;
1633  std::string group = a_handle.getGroup();
1634  if (a_handle.setGroup(group+"/"+a_name+"_attributes"))
1635  {
1636  std::string message = "error opening "
1637  +a_handle.getGroup()+"/"+a_name+"_attributes" ;
1638  MayDay::Warning(message.c_str());
1639  return 1;
1640  }
1641  info.readFromFile(a_handle);
1642  a_handle.setGroup(group);
1643  int ncomp = info.m_int["comps"];
1644  IntVect ghost = info.m_intvect["ghost"];
1645  if (a_comps.end() > 0 && ncomp < a_comps.end())
1646  {
1647  MayDay::Error("attempt to read component interval that is not available");
1648  }
1649  if (a_comps.size() == 0)
1650  a_data.define(a_layout, ncomp, ghost);
1651  else
1652  a_data.define(a_layout, a_comps.size(), ghost);
1653  }
1654  return read(a_handle, (BoxLayoutData<T>&)a_data, a_name, a_layout, a_comps, false);
1655 
1656 }
1657 
/// Read a BoxLayoutData<T> from flat datasets written by write().
/**
   Opens each "<a_name>:datatype=<i>" dataset and its companion
   "<a_name>:offsets=<i>" table, reads the attributes group for the stored
   component count and outputGhost, optionally (re)defines a_data on
   a_layout, then reads each local box's bytes from the matching hyperslab
   into a growable buffer and de-linearizes them into a_data.

   \return 0 on success; a negative hid_t on dataset/dataspace open
           failure; 1 if the attributes group cannot be opened; the stored
           (non-positive) "comps" value if it is invalid; or a negative
           HDF5 error code from a failed read.
*/
template <class T>
int read(HDF5Handle& a_handle, BoxLayoutData<T>& a_data, const std::string& a_name,
         const BoxLayout& a_layout, const Interval& a_comps, bool a_redefineData)
{
  CH_TIME("read");
  int ret = 0; // return value;

  herr_t err;

  char dataname[100];
  hsize_t count[1];
  ch_offset_t offset[1];
  Vector<Vector<long long> > offsets;

  T dummy;
  Vector<hid_t> types;
  dataTypes(types, dummy);
  Vector<hid_t> dataspace(types.size());
  Vector<hid_t> dataset(types.size());
  offsets.resize(types.size(), Vector<long long>(a_layout.size() +1));

  //Vector<int> bufferCapacity(types.size(), 500);
  //Vector<void*> buffers(types.size(), NULL);
  // Growable scratch buffers, one per datatype, initially 500 bytes each.
  Vector<Vector<char> > buffers(types.size(), 500);

  // Open the flat data datasets and their file dataspaces.
  for (unsigned int i=0; i<types.size(); ++i)
    {
      sprintf(dataname, "%s:datatype=%i",a_name.c_str(), i);
#ifdef H516
      dataset[i] = H5Dopen(a_handle.groupID(), dataname);
#else
      dataset[i] = H5Dopen2(a_handle.groupID(), dataname, H5P_DEFAULT);
#endif
      if (dataset[i] < 0)
        {
          MayDay::Warning("dataset open failure"); return dataset[i];
        }
      dataspace[i] = H5Dget_space(dataset[i]);
      if (dataspace[i] < 0)
        {
          MayDay::Warning("dataspace open failure"); return dataspace[i];
        }
    }

  // Read the per-box offset (prefix-sum) tables.
  hid_t offsetspace, offsetData;
  hsize_t flatdims[1];
  for (unsigned int i=0; i<types.size(); ++i)
    {
      flatdims[0] = offsets[i].size();
      sprintf(dataname, "%s:offsets=%i",a_name.c_str(), i);
      offsetspace = H5Screate_simple(1, flatdims, NULL);
      CH_assert(offsetspace >= 0);
#ifdef H516
      offsetData = H5Dopen(a_handle.groupID(), dataname);
#else
      offsetData = H5Dopen2(a_handle.groupID(), dataname,H5P_DEFAULT);
#endif
      CH_assert(offsetData >= 0);
      hid_t memdataspace = H5Screate_simple(1, flatdims, NULL);
      CH_assert(memdataspace >= 0);
      err = H5Dread(offsetData, H5T_NATIVE_LLONG, memdataspace, offsetspace,
                    H5P_DEFAULT, &(offsets[i][0]));
      CH_assert(err >=0);
      H5Sclose(memdataspace);
      H5Sclose(offsetspace);
      H5Dclose(offsetData);
    }

  // Read the stored attributes (component count, outputGhost).
  HDF5HeaderData info;
  std::string group = a_handle.getGroup();
  if (a_handle.setGroup(a_handle.getGroup()+"/"+a_name+"_attributes"))
    {
      // note: this message omits the "_attributes" suffix of the group
      // that was actually opened.
      std::string message = "error opening "+a_handle.getGroup()+"/"+a_name ;
      MayDay::Warning(message.c_str());
      return 1;
    }

  info.readFromFile(a_handle);
  a_handle.setGroup(group);
  int ncomps = info.m_int["comps"];
  IntVect outputGhost(IntVect::Zero); // backwards file compatible mode.
  if (info.m_intvect.find("outputGhost") != info.m_intvect.end())
    {
      outputGhost = info.m_intvect["outputGhost"];
    }
  if (ncomps <= 0)
    {
      MayDay::Warning("ncomps <= 0 in read");
      return ncomps;
    }

  if (a_redefineData)
    {
      if (a_comps.size() != 0)
        {
          a_data.define(a_layout, a_comps.size());
        }
      else
        {
          a_data.define(a_layout, ncomps);
        }
    }

  // De-linearization always uses the full stored component range.
  Interval comps(0, ncomps-1);
  //huh?
  // if (a_comps.size() != 0) comps = Interval(0, a_comps.size());

  // getOffsets(offsets, a_data, types.size(), comps);

  Vector<size_t> type_size(types.size());
  for (unsigned int i=0; i<types.size(); ++i)
    {
      type_size[i] = H5Tget_size(types[i]);

    }

  // NOTE(review): thisSize is never used in the remainder of this function.
  Vector<int> thisSize(types.size());
  DataIterator it = a_data.dataIterator();
  for ( ; it.ok(); ++it)
    {
      CH_TIMELEAF("H5Dread");
      T& data = a_data[it()];
      unsigned int index = a_data.boxLayout().index(it());
      Box box = a_data.box(it());

      for (unsigned int i=0; i<types.size(); ++i)
        {
          // File location and length of this box's bytes for this type.
          offset[0] = offsets[i][index];
          count[0] = offsets[i][index+1] - offset[0];
          if (count[0] > 0)
            {
              size_t size = count[0] * type_size[i];
              // Grow the scratch buffer geometrically until it fits.
              while (size > buffers[i].size())
                {
                  buffers[i].resize(2*buffers[i].size());
                }

              err = H5Sselect_hyperslab(dataspace[i], H5S_SELECT_SET,
                                        offset, NULL,
                                        count, NULL);
              CH_assert(err >= 0);
              hid_t memdataspace = H5Screate_simple(1, count, NULL);
              CH_assert(memdataspace >= 0);
              err = H5Dread(dataset[i], types[i], memdataspace, dataspace[i],
                            H5P_DEFAULT, &(buffers[i][0]));
              CH_assert(err >= 0);
              H5Sclose(memdataspace);
              if (err < 0)
                {
                  ret = err;
                  goto cleanup;
                }
            }
        }
      // De-linearize the buffered bytes into the destination holder.
      box.grow(outputGhost);
      read(data, buffers, box, comps);
    }
// if (it.size()==0)
//   {
//     // try doing an empty H5Dread operation to make the collective system happier.
//     for (unsigned int i=0; i<types.size(); ++i)
//       {
//         hid_t filespace, memspace;
//         H5Sselect_none(filespace);
//         H5Sselect_none(memspace);
//         err = H5Dread(dataset[i], types[i], memspace, filespace,
//                       H5P_DEFAULT, 0);
//         H5Sclose(filespace);
//         H5Sclose(memspace);
//       }
//   }

 cleanup:
  for (unsigned int i=0; i<types.size(); ++i)
    {
      // freeMT(buffers[i]);
      H5Sclose(dataspace[i]);
      H5Dclose(dataset[i]);
    }
  return ret;
}
1840 
1841 template <class T>
1842 int writeLevel(HDF5Handle& a_handle,
1843  const int& a_level,
1844  const T& a_data,
1845  const Real& a_dx,
1846  const Real& a_dt,
1847  const Real& a_time,
1848  const Box& a_domain,
1849  const int& a_refRatio,
1850  const IntVect& outputGhost,
1851  const Interval& comps)
1852 {
1853  int error;
1854  char levelName[10];
1855  std::string currentGroup = a_handle.getGroup();
1856  sprintf(levelName, "/level_%i",a_level);
1857  error = a_handle.setGroup(currentGroup + levelName);
1858  if (error != 0) return 1;
1859 
1860  HDF5HeaderData meta;
1861  meta.m_real["dx"] = a_dx;
1862  meta.m_real["dt"] = a_dt;
1863  meta.m_real["time"] = a_time;
1864  meta.m_box["prob_domain"] = a_domain;
1865  meta.m_int["ref_ratio"] = a_refRatio;
1866 
1867  error = meta.writeToFile(a_handle);
1868  if (error != 0) return 2;
1869 
1870  error = write(a_handle, a_data.boxLayout());
1871  if (error != 0) return 3;
1872 
1873  error = write(a_handle, a_data, "data", outputGhost, comps);
1874  if (error != 0) return 4;
1875 
1876  a_handle.setGroup(currentGroup);
1877 
1878  return 0;
1879 }
1880 
1881 template <class T>
1882 int writeLevel(HDF5Handle& a_handle,
1883  const int& a_level,
1884  const T& a_data,
1885  const RealVect& a_dx, // dx for each direction
1886  const Real& a_dt,
1887  const Real& a_time,
1888  const Box& a_domain,
1889  const IntVect& a_refRatios, // ref ratio for each direction
1890  const IntVect& outputGhost,
1891  const Interval& comps)
1892 {
1893  int error;
1894  char levelName[10];
1895  std::string currentGroup = a_handle.getGroup();
1896  sprintf(levelName, "/level_%i",a_level);
1897  error = a_handle.setGroup(currentGroup + levelName);
1898  if (error != 0) return 1;
1899 
1900  HDF5HeaderData meta;
1901  meta.m_realvect["vec_dx"] = a_dx;
1902  meta.m_real["dt"] = a_dt;
1903  meta.m_real["time"] = a_time;
1904  meta.m_box["prob_domain"] = a_domain;
1905  meta.m_intvect["vec_ref_ratio"] = a_refRatios;
1906 
1907  error = meta.writeToFile(a_handle);
1908  if (error != 0) return 2;
1909 
1910  error = write(a_handle, a_data.boxLayout());
1911  if (error != 0) return 3;
1912 
1913  error = write(a_handle, a_data, "data", outputGhost, comps);
1914  if (error != 0) return 4;
1915 
1916  a_handle.setGroup(currentGroup);
1917 
1918  return 0;
1919 }
1920 
1921 template <class T>
1922 int readLevel(HDF5Handle& a_handle,
1923  const int& a_level,
1924  LevelData<T>& a_data,
1925  Real& a_dx,
1926  Real& a_dt,
1927  Real& a_time,
1928  Box& a_domain,
1929  int& a_refRatio,
1930  const Interval& a_comps,
1931  bool setGhost)
1932 { CH_TIME("readLevel");
1933  HDF5HeaderData header;
1934  header.readFromFile(a_handle);
1935  //unused
1936  // int nComp = header.m_int["num_components"];
1937 
1938  int error;
1939  char levelName[10];
1940  std::string currentGroup = a_handle.getGroup();
1941  sprintf(levelName, "/level_%i",a_level);
1942  error = a_handle.setGroup(currentGroup + levelName);
1943  if (error != 0) return 1;
1944 
1945  HDF5HeaderData meta;
1946  error = meta.readFromFile(a_handle);
1947  if (error != 0) return 2;
1948  a_dx = meta.m_real["dx"];
1949  a_dt = meta.m_real["dt"];
1950  a_time = meta.m_real["time"];
1951  a_domain = meta.m_box["prob_domain"];
1952  a_refRatio = meta.m_int["ref_ratio"];
1953  Vector<Box> boxes;
1954  { CH_TIME("readLevel read boxes");
1955  error = read(a_handle, boxes);
1956  }
1957  Vector<int> procIDs;
1958  { CH_TIME("readLevel LoadBalance");
1959  LoadBalance(procIDs, boxes);
1960  }
1961 
1962  DisjointBoxLayout layout;
1963  { CH_TIME("readLevel define layout");
1964  layout.define(boxes, procIDs, a_domain);
1965  }
1966 
1967  { CH_TIME("readLevel close layout");
1968  layout.close();
1969  }
1970  if (error != 0) return 3;
1971 
1972  { CH_TIME("readLevel read general");
1973  error = read(a_handle, a_data, "data", layout, a_comps, true);
1974  }
1975 
1976  if (error != 0) return 4;
1977 
1978  a_handle.setGroup(currentGroup);
1979 
1980  return 0;
1981 }
1982 
1983 template <class T>
1984 int readLevel(HDF5Handle& a_handle,
1985  const int& a_level,
1986  LevelData<T>& a_data,
1987  RealVect& a_dx,
1988  Real& a_dt,
1989  Real& a_time,
1990  Box& a_domain,
1991  IntVect& a_refRatio,
1992  const Interval& a_comps,
1993  bool setGhost)
1994 {
1995  HDF5HeaderData header;
1996  header.readFromFile(a_handle);
1997  //unused
1998  // int nComp = header.m_int["num_components"];
1999 
2000  int error;
2001  char levelName[10];
2002  std::string currentGroup = a_handle.getGroup();
2003  sprintf(levelName, "/level_%i",a_level);
2004  error = a_handle.setGroup(currentGroup + levelName);
2005  if (error != 0) return 1;
2006 
2007  HDF5HeaderData meta;
2008  error = meta.readFromFile(a_handle);
2009  if (error != 0) return 2;
2010  // a_dx = meta.m_realvect["vec_dx"];
2011  // Allow for vec_dx to be absent, as it will be in isotropic files.
2012  if (meta.m_realvect.find("vec_dx") != meta.m_realvect.end())
2013  {
2014  a_dx = meta.m_realvect["vec_dx"];
2015  }
2016  else
2017  { // vec_dx is not present, so get dx.
2018  Real dxScalar = meta.m_real["dx"];
2019  a_dx = dxScalar * RealVect::Unit;
2020  }
2021  a_dt = meta.m_real["dt"];
2022  a_time = meta.m_real["time"];
2023  a_domain = meta.m_box["prob_domain"];
2024  // a_refRatio = meta.m_intvect["vec_ref_ratio"];
2025  // Allow for vec_ref_ratio to be absent, as it will be in isotropic files.
2026  if (meta.m_intvect.find("vec_ref_ratio") != meta.m_intvect.end())
2027  {
2028  a_refRatio = meta.m_intvect["vec_ref_ratio"];
2029  }
2030  else
2031  { // vec_ref_ratio is not present, so get ref_ratio.
2032  int refRatioScalar = meta.m_int["ref_ratio"];
2033  a_refRatio = refRatioScalar * IntVect::Unit;
2034  }
2035  Vector<Box> boxes;
2036  error = read(a_handle, boxes);
2037  Vector<int> procIDs;
2038  LoadBalance(procIDs, boxes);
2039 
2040  DisjointBoxLayout layout(boxes, procIDs, a_domain);
2041 
2042  layout.close();
2043  if (error != 0) return 3;
2044 
2045  error = read(a_handle, a_data, "data", layout, a_comps, true);
2046 
2047  if (error != 0) return 4;
2048 
2049  a_handle.setGroup(currentGroup);
2050 
2051  return 0;
2052 }
2053 
2054 
2055 /*******************************************************************************
2056  *
2057  * Class WriteMultiData: member definitions
2058  *
2059  ******************************************************************************/
2060 
2061 /*--------------------------------------------------------------------*/
2062 // Constructor writes boxes and allocates dataset for LevelData
2063 /** Write boxes, processors, attributes and creates a dataset for all
2064  * the level data to be written. The dataset is closed on
2065  * destruction.
2066  * \param[in] a_handle
2067  * Chombo HDF5 handle holding open file for
2068  * writing
2069  * \param[in] a_layout
2070  * Box layout for all data to be written
2071  * \param[in] a_numIntv
2072  * Total number of components that will be
2073  * written from all BoxLayoutData or LevelData
2074  * \param[in] a_name Name of the dataset for the level data.
2075  * \param[in] a_policyFlags
2076  * Either
2077  * CH_HDF5::IOPolicyDefault - Use internal
2078  * T.linearOut(...) to linearize data and
2079  * processes independently write to file,
2080  * but still in parallel.
2081  * or a union (|) of the following flags:
2082  * CH_HDF5::IOPolicyMultiDimHyperslab - The
2083  * memory dataspace will be set up so that
2084  * HDF5 linearizes the data. Using this
2085  * requires T.dataPtr() and contiguous memory
2086  * (so things like T=FluxBox will not work).
2087  * CH_HDF5::IOPolicyCollectiveWrite - The write
2088  * for each dataset will be collective.
2089  * \param[in] a_outputGhost
2090  * Number of ghost cells that will be written to
2091  * the data file. Any data written must have
2092  * this many ghosts
2093  * \param[in] a_newForm
2094  * If true, use new-form dataset naming ("<name>Regular",
 * "<name>Offsets", or "Mask"/"MaskOffsets" when the name
 * is "M") and omit the "comps"/"objectType" attributes
2095  *//*-----------------------------------------------------------------*/
2096 
2097 template <class T>
2099  const BoxLayout& a_layout,
2100  const int a_numIntv,
2101  const string& a_name,
2102  const int a_policyFlags,
2103  const IntVect& a_outputGhost,
2104  const bool a_newForm)
2105  :
2106  m_policyFlags(a_policyFlags),
2107  m_outputGhost(a_outputGhost),
2108  m_newForm(a_newForm)
2109 {
2110  CH_TIME("WriteMultiData::constructor");
// Only types whose linear size can be computed without the actual data
// (preAllocatable() == 0) are supported: the offset table must be known
// before any data is written.
2111  CH_assert(T::preAllocatable() == 0);
2112 #ifdef H516
2113  CH_assert(false); // API 1.6 not supported
2114 #endif
2115 
2116 //--Write the boxes
2117 
2118  write(a_handle, a_layout);
2119 
2120 //--Create the dataset for the level data
2121 
2122  T dummy; // Used for preallocatable methods for dumb compilers.
2123  dataTypes<T>(m_types, dummy);
2124  // Only allow one type
2125  CH_assert(m_types.size() == 1);
2126 
// The file interval covers all a_numIntv components that will ever be
// written through this object, even if writeData() is later called with
// smaller sub-intervals.
2127  m_allIntvFile.define(0, a_numIntv-1);
2128  getOffsets<T>(m_offsets,
2129  a_layout,
2130  m_types.size(),
2131  m_allIntvFile,
2132  m_outputGhost);
2133 
2134  hsize_t flatdims[1];
2135  // Create dataset for data
2136  {
2137  hid_t dataspace;
// Total flattened size of the level data is the final entry of the
// offset table.
2138  flatdims[0] = m_offsets[0].back();
2139  if (m_newForm)
2140  {
2141  if (a_name == "M")
2142  {
2143  strcpy(m_dataname, "Mask");
2144  }
2145  else
2146  {
// NOTE(review): sprintf into the fixed 128-char m_dataname can overflow for
// long a_name values -- snprintf(m_dataname, sizeof(m_dataname), ...) would
// be safer; confirm before changing (same applies below and to offsetname).
2147  sprintf(m_dataname, "%sRegular", a_name.c_str());
2148  }
2149  }
2150  else
2151  {
2152  sprintf(m_dataname, "%s:datatype=%i", a_name.c_str(), 0);
2153  }
2154  {
2155  CH_TIME("H5Screate");
2156  dataspace = H5Screate_simple(1, flatdims, NULL);
2157  }
2158  CH_assert(dataspace >=0);
2159  {
2160  CH_TIME("H5Dcreate");
2161  m_dataSet = H5Dcreate2(a_handle.groupID(), m_dataname,
2162  m_types[0],
2163  dataspace, H5P_DEFAULT,
2164  H5P_DEFAULT, H5P_DEFAULT);
2165  }
2166  CH_assert(m_dataSet >= 0);
2167  {
2168  CH_TIME("H5S_H5D_data_close");
// The dataspace handle is no longer needed once the dataset exists; the
// dataset itself (m_dataSet) stays open until the destructor.
2169  H5Sclose(dataspace);
2170  }
2171  }
2172 
2173  // Create dataset for offsets
2174  {
2175  hid_t offsetspace, offsetdataset;
2176  char offsetname[128];
2177  flatdims[0] = m_offsets[0].size(); // Number of boxes
2178  if (m_newForm)
2179  {
2180  if (a_name == "M")
2181  {
2182  strcpy(offsetname, "MaskOffsets");
2183  }
2184  else
2185  {
2186  sprintf(offsetname, "%sOffsets", a_name.c_str());
2187  }
2188  }
2189  else
2190  {
2191  sprintf(offsetname, "%s:offsets=%i", a_name.c_str(), 0);
2192  }
2193  {
2194  CH_TIME("H5S_H5D_offsets_create");
2195  offsetspace = H5Screate_simple(1, flatdims, NULL);
2196  CH_assert(offsetspace >= 0);
2197  offsetdataset = H5Dcreate2(a_handle.groupID(), offsetname,
2198  H5T_NATIVE_LLONG, offsetspace,
2199  H5P_DEFAULT,
2200  H5P_DEFAULT,H5P_DEFAULT);
2201  CH_assert(offsetdataset >= 0);
2202  }
// The offset table is computed from the layout on every rank, so only
// rank 0 needs to write it.
2203  if (procID() == 0)
2204  // Write the offsets
2205  {
2206  herr_t err;
2207  CH_TIME("WriteOffsets");
2208  hid_t memdataspace = H5Screate_simple(1, flatdims, NULL);
2209  CH_assert(memdataspace >= 0);
2210  err = H5Dwrite(offsetdataset, H5T_NATIVE_LLONG, memdataspace,
2211  offsetspace, H5P_DEFAULT, &(m_offsets[0][0]));
2212  CH_assert(err >= 0);
2213  H5Sclose(memdataspace);
2214  }
2215  {
2216  CH_TIME("H5S_H5D_offsets_close");
2217  H5Sclose(offsetspace);
2218  H5Dclose(offsetdataset);
2219  }
2220  }
2221 
2222  // Write attributes
2223  {
2224  CH_TIME("WriteAttributes");
2225  HDF5HeaderData info;
2226  info.m_intvect["ghost"] = m_outputGhost;
2227  info.m_intvect["outputGhost"] = m_outputGhost;
2228  if (!m_newForm)
2229  {
2230  info.m_int["comps"] = m_allIntvFile.size();
2231  info.m_string["objectType"] = name(dummy);
2232  }
// Attributes live in a sibling group "<group>/<a_name>_attributes"; the
// handle's original group is restored before returning.
2233  std::string group = a_handle.getGroup();
2234  a_handle.setGroup(group+"/"+a_name+"_attributes");
2235  info.writeToFile(a_handle);
2236  a_handle.setGroup(group);
2237  }
2238 
2239  // Get the maximum boxes per process
2240  {
// writeData() loops every rank to this global maximum so that all ranks
// issue the same number of H5Dwrite calls (needed for collective writes).
2241  m_maxBoxPerProc = a_layout.dataIterator().size();
2242 #ifdef CH_MPI
2243  long myCountBoxes = m_maxBoxPerProc;
2244  MPI_Allreduce(&myCountBoxes,
2245  &m_maxBoxPerProc,
2246  1, MPI_LONG, MPI_MAX, Chombo_MPI::comm);
2247 #endif
2248  }
2249 }
2250 
2251 /*--------------------------------------------------------------------*/
2252 // Write an interval of LevelData to the dataset
2253 /** Call this as many times as you want to write level data. User is
2254  * responsible for making sure 'a_intvFile' is consistent with the
2255  * 'a_numIntv' specified during construction.
2256  * \param[in] a_data Level data to write
2257  * \param[in] a_intvMem
2258  * Interval of a_data to read
2259  * \param[in] a_intvFile
2260  * Interval to write to file
2261  *//*-----------------------------------------------------------------*/
2262 
2263 template <class T>
2264 int
2266  const Interval& a_intvMem,
2267  const Interval& a_intvFile)
2268 {
2269  CH_TIME("WriteMultiData::writeData");
// Sanity checks: the memory interval must lie within the data's own
// component range and the file interval within the total interval given
// at construction.
2270  CH_assert(a_intvMem.size() > 0);
2271  CH_assert(a_intvMem.size() == a_intvFile.size());
2272  CH_assert(a_intvMem.begin() >= a_data.interval().begin() &&
2273  a_intvMem.end() <= a_data.interval().end());
2274  CH_assert(a_intvFile.begin() >= m_allIntvFile.begin() &&
2275  a_intvFile.end() <= m_allIntvFile.end());
2276 
2277  int ret = 0;
2278 
// memCount/memOffset carry one extra slot: index 0 is the component axis,
// indices 1..SpaceDim are the spatial dimensions in reversed (row-major)
// order, as HDF5 expects.
2279  hsize_t fileCount[1], memCount[SpaceDim+1];
2280  ch_offset_t fileOffset[1], memOffset[SpaceDim+1]; // Same type as hsize_t
2281  herr_t err;
2282 
2283 //--Open the file dataspace
2284 
2285  hid_t fileDataSpace = H5Dget_space(m_dataSet);
2286 
2287 //--Data transfer policies (independent or collective)
2288 
2289  hid_t DXPL = H5Pcreate(H5P_DATASET_XFER);
2290 #ifdef CH_MPI
2291  if (m_policyFlags & CH_HDF5::IOPolicyCollectiveWrite)
2292  {
2293  H5Pset_dxpl_mpio(DXPL, H5FD_MPIO_COLLECTIVE);
2294  }
2295 #endif
2296 
2297  if (m_policyFlags & CH_HDF5::IOPolicyMultiDimHyperslab)
2298 
2299 //--Perform parallel writes from specified hyperslabs (when defining hyperslabs
2300 //--in memory, remember that HDF5 assumes row-ordering).
2301 
2302  {
2303  CH_TIME("hyperslab_H5Dwrite");
2304  DataIterator dit = a_data.dataIterator();
2305  const void* buffer;
// Loop to the global maximum box count so every rank issues the same
// number of H5Dwrite calls (required if the transfer is collective).
2306  for (int iBox = 0; iBox != m_maxBoxPerProc; ++iBox)
2307  {
2308  hid_t memDataSpace;
2309  if (dit.ok())
2310  {
2311  const T& data = a_data[dit];
2312  unsigned globalIdx = a_data.boxLayout().index(dit());
2313  Box dbox = data.box(); // Data box
2314  Box rbox = a_data.box(dit()); // Read box
2315  rbox.grow(m_outputGhost);
// The memory dataspace spans the full allocation (all components, whole
// data box); the hyperslab selected below restricts it to a_intvMem and
// the (possibly smaller) rbox.
2316  // Create the dataspace in memory
2317  memCount[0] = a_data.nComp();
2318  D_TERM6(memCount[SpaceDim-0] = dbox.size(0);,
2319  memCount[SpaceDim-1] = dbox.size(1);,
2320  memCount[SpaceDim-2] = dbox.size(2);,
2321  memCount[SpaceDim-3] = dbox.size(3);,
2322  memCount[SpaceDim-4] = dbox.size(4);,
2323  memCount[SpaceDim-5] = dbox.size(5);)
2324  memDataSpace = H5Screate_simple(SpaceDim+1, memCount, NULL);
2325  // Select the hyperslab from the memory dataspace
2326  memCount[0] = a_intvMem.size();
2327  D_TERM6(memCount[SpaceDim-0] = rbox.size(0);,
2328  memCount[SpaceDim-1] = rbox.size(1);,
2329  memCount[SpaceDim-2] = rbox.size(2);,
2330  memCount[SpaceDim-3] = rbox.size(3);,
2331  memCount[SpaceDim-4] = rbox.size(4);,
2332  memCount[SpaceDim-5] = rbox.size(5);)
2333  memOffset[0] = a_intvMem.begin();
2334  D_TERM6(
2335  memOffset[SpaceDim-0] = rbox.smallEnd(0) - dbox.smallEnd(0);,
2336  memOffset[SpaceDim-1] = rbox.smallEnd(1) - dbox.smallEnd(1);,
2337  memOffset[SpaceDim-2] = rbox.smallEnd(2) - dbox.smallEnd(2);,
2338  memOffset[SpaceDim-3] = rbox.smallEnd(3) - dbox.smallEnd(3);,
2339  memOffset[SpaceDim-4] = rbox.smallEnd(4) - dbox.smallEnd(4);,
2340  memOffset[SpaceDim-5] = rbox.smallEnd(5) - dbox.smallEnd(5);)
2341  err = H5Sselect_hyperslab(memDataSpace, H5S_SELECT_SET,
2342  memOffset, NULL, memCount, NULL);
2343  CH_assert(err >= 0);
2344  // Create the hyperslab in the file dataspace
2345  fileOffset[0] = m_offsets[0][globalIdx];
2346  fileCount[0] = m_offsets[0][globalIdx + 1] - fileOffset[0];
2347  if (fileCount[0] > 0) // Else catches more processes than boxes
2348  {
2349  // Revise offsets based on selection of interval
2350  const hsize_t dpnts = rbox.numPts(); // Points per comp
2351  fileOffset[0] += dpnts*a_intvFile.begin();
2352  fileCount[0] = dpnts*a_intvFile.size();
2353  CH_assert(fileOffset[0] + fileCount[0] <=
2354  m_offsets[0][globalIdx+1]);
2355  err = H5Sselect_hyperslab(fileDataSpace, H5S_SELECT_SET,
2356  fileOffset, NULL, fileCount, NULL);
2357  CH_assert(err >= 0);
2358  }
2359  else // More processes than boxes
2360  {
// Empty selections keep this rank in the (possibly collective) write
// without contributing data.
2361  H5Sselect_none(memDataSpace);
2362  H5Sselect_none(fileDataSpace);
2363  }
2364  buffer = data.dataPtr();
2365  ++dit;
2366  }
2367  else // Does not have a box to participate in collective :(
2368  {
2369  std::memset(memCount, 0, (SpaceDim+1)*sizeof(hsize_t));
2370  memDataSpace = H5Screate_simple(SpaceDim+1, memCount, NULL);
2371  H5Sselect_none(memDataSpace);
2372  H5Sselect_none(fileDataSpace);
2373  buffer = 0;
2374  }
2375  // Write
2376  err = H5Dwrite(m_dataSet, m_types[0], memDataSpace, fileDataSpace,
2377  DXPL, buffer);
2378  CH_assert(err >= 0);
2379  H5Sclose(memDataSpace);
// On error, fall through to the common cleanup so DXPL and the file
// dataspace are still released.
2380  if (err < 0)
2381  {
2382  ret = err;
2383  goto cleanup;
2384  }
2385  }
2386  }
2387  else
2388 
2389 //--Perform parallel writes with 1-D hyperslabs and using internal mechanisms
2390 //--for linearization.
2391 
2392  {
2393  CH_TIME("linearize_H5Dwrite");
2394  void* linearBuffer;
2395 
2396  // Step 1: create a buffer to linearize T
2397  {
2398  CH_TIMELEAF("allocateLinearBuffer");
// Size the buffer for this rank's largest box (over all components)...
2399  long long bufferSize = 0;
2400  for (DataIterator dit = a_data.dataIterator(); dit.ok(); ++dit)
2401  {
2402  unsigned globalIdx = a_data.boxLayout().index(dit());
2403  {
2404  const long long allIntvSize =
2405  (m_offsets[0][globalIdx + 1] - m_offsets[0][globalIdx]);
2406  CH_assert(allIntvSize >= 0);
2407  if (allIntvSize > bufferSize)
2408  {
2409  bufferSize = allIntvSize;
2410  }
2411  }
2412  }
// ...then scale from the full file interval down to the interval actually
// written in this call, and convert element count to bytes.
2413  // Get buffer size in bytes, and adjusted for write interval
2414  bufferSize = ((bufferSize/m_allIntvFile.size())*a_intvMem.size())*
2415  H5Tget_size(m_types[0]);
2416  linearBuffer = mallocMT(bufferSize);
2417  if (linearBuffer == NULL)
2418  {
2419  pout() << " bufferCapacity = " << (int)bufferSize << endl;
2420  MayDay::Error("Memory error in buffer allocation write");
2421  }
2422  }
2423 
2424  // Step 2: linearize the data and then write to file
2425  DataIterator dit = a_data.dataIterator();
// As above, loop to the global maximum so all ranks participate in every
// (possibly collective) H5Dwrite call.
2426  for (int iBox = 0; iBox != m_maxBoxPerProc; ++iBox)
2427  {
2428  hid_t memDataSpace;
2429  if (dit.ok())
2430  {
2431  const T& data = a_data[dit];
2432  unsigned globalIdx = a_data.boxLayout().index(dit());
2433  Box rbox = a_data.box(dit()); // Read box
2434  rbox.grow(m_outputGhost);
2435  {
2436  CH_TIMELEAF("linearize");
// T::linearOut flattens the selected components over rbox into the buffer.
2437  data.linearOut(linearBuffer, rbox, a_intvMem);
2438  }
2439  const hsize_t dpnts = rbox.numPts(); // Points per comp
2440  // Create the dataspace in memory
2441  memCount[0] = dpnts*a_intvMem.size();
2442  memDataSpace = H5Screate_simple(1, memCount, NULL);
2443  // Create the hyperslab in the file dataspace
2444  fileOffset[0] = m_offsets[0][globalIdx];
2445  fileCount[0] = m_offsets[0][globalIdx + 1] - fileOffset[0];
2446  if (fileCount[0] > 0) // Else catches more processes than boxes
2447  {
2448  // Revise offsets based on selection of interval
2449  fileOffset[0] += dpnts*a_intvFile.begin();
2450  fileCount[0] = dpnts*a_intvFile.size();
2451  CH_assert(fileOffset[0] + fileCount[0] <=
2452  m_offsets[0][globalIdx+1]);
2453  err = H5Sselect_hyperslab(fileDataSpace, H5S_SELECT_SET,
2454  fileOffset, NULL, fileCount, NULL);
2455  CH_assert(err >= 0);
2456  }
2457  else // More processes than boxes
2458  {
2459  H5Sselect_none(memDataSpace);
2460  H5Sselect_none(fileDataSpace);
2461  }
2462  ++dit;
2463  }
2464  else // Does not have a box to participate in collective :(
2465  {
2466  memCount[0] = 0;
// NOTE(review): this creates a rank-(SpaceDim+1) dataspace while only
// memCount[0] is set to 0 -- entries 1..SpaceDim are uninitialized here
// (unlike the memset in the hyperslab branch above), and the rest of this
// branch uses rank-1 memory dataspaces. The extent is still 0 elements
// since dim 0 is 0, but this looks like it should be rank 1; confirm.
2467  memDataSpace = H5Screate_simple(SpaceDim+1, memCount, NULL);
2468  H5Sselect_none(memDataSpace);
2469  H5Sselect_none(fileDataSpace);
2470  }
2471  // Collective write
2472  err = H5Dwrite(m_dataSet, m_types[0], memDataSpace, fileDataSpace,
2473  DXPL, linearBuffer);
2474  CH_assert(err >= 0);
2475  H5Sclose(memDataSpace);
// On error, release the linear buffer here before jumping to the common
// cleanup (which does not know about it).
2476  if (err < 0)
2477  {
2478  ret = err;
2479  freeMT(linearBuffer);
2480  goto cleanup;
2481  }
2482  }
2483  freeMT(linearBuffer);
2484  }
2485 
// Common exit: always release the transfer property list and the file
// dataspace; m_dataSet itself stays open for further writeData calls.
2486  cleanup: ;
2487  H5Pclose(DXPL);
2488  H5Sclose(fileDataSpace);
2489  // m_dataSet closed in destructor
2490  return ret;
2491 }
2492 
2493 #include "NamespaceFooter.H"
2494 
2495 #else // CH_USE_HDF5
2496 
2497 // this is the only thing needed when HDF is not used
2498 #define HOFFSET(S,M) (offsetof(S,M))
2499 
2500 #endif // CH_USE_HDF5 not defined
2501 #endif // CH_HDF5_H
std::ostream & pout()
Use this in place of std::cout for program output.
int open(const std::string &a_filename, mode a_mode, const char *a_globalGroupName="Chombo_global")
{ File functions}
A FArrayBox-like container for edge-centered fluxes.
Definition: EdgeDataBox.H:21
#define CH_TIMERS(name)
Definition: CH_Timer.H:133
map< std::string, int > m_int
Definition: CH_HDF5.H:550
virtual void define(const BoxLayout &boxes, int comps, const DataFactory< T > &factory=DefaultDataFactory< T >())
Definition: BoxLayoutDataI.H:87
static herr_t attributeScan(hid_t loc_id, const char *name, void *opdata)
IntVect & min(const IntVect &p)
Definition: IntVect.H:1136
#define D_TERM6(a, b, c, d, e, f)
Definition: CHArray.H:40
#define freeMT(a_a)
Definition: memtrack.H:160
#define CH_SPACEDIM
Definition: SPACE.H:51
#define CH_assert(cond)
Definition: CHArray.H:37
int popGroup()
data to be added to HDF5 files.
Definition: CH_HDF5.H:519
const hid_t & fileID() const
mode m_mode
Definition: CH_HDF5.H:464
int readBoxes(HDF5Handle &a_handle, Vector< Vector< Box > > &boxes)
reads the set of Boxes out from the level_* groups of a Chombo HDF5 AMR file
mode
Definition: CH_HDF5.H:316
Definition: CH_HDF5.H:64
void writeDataset(hid_t a_dataset, hid_t a_dataspace, const void *start, ch_offset_t off, hsize_t count)
A not-necessarily-disjoint collective of boxes.
Definition: BoxLayout.H:145
one dimensional dynamic array
Definition: Vector.H:53
int nComp() const
Definition: BoxLayoutData.H:306
map< std::string, Real > m_real
Definition: CH_HDF5.H:547
const IntVect m_outputGhost
Number of ghost cells written.
Definition: CH_HDF5.H:667
int readFArrayBox(HDF5Handle &a_handle, FArrayBox &a_fab, int a_level, int a_boxNumber, const Interval &a_components, const std::string &a_dataName="data")
FArrayBox-at-a-time read function. FArrayBox gets redefined in the function. Reads data field named b...
int writeToLocation(hid_t loc_id) const
std::string m_group
Definition: CH_HDF5.H:467
#define mallocMT(a_a)
Definition: memtrack.H:159
LayoutIterator layoutIterator() const
Iterator that processes through ALL the boxes in a BoxLayout.
#define CH_START(tpointer)
Definition: CH_Timer.H:145
void getOffsets(Vector< Vector< long long > > &offsets, const BoxLayoutData< T > &a_data, int types, const Interval &comps, const IntVect &outputGhost)
Definition: CH_HDF5.H:972
int size() const
Definition: DataIterator.H:218
void readDataset(hid_t a_dataset, hid_t a_dataspace, void *start, ch_offset_t off, hsize_t count)
IntVect size() const
size functions
Definition: Box.H:1819
void close()
void setGroupToLevel(int a_level)
{ Group functions}
Definition: DataIterator.H:190
void linearOut(void *const a_outBuf, const CH_XDIR::OffsetBuffer &a_inputT)
int readLevel(HDF5Handle &a_handle, const int &a_level, LevelData< T > &a_data, Real &a_dx, Real &a_dt, Real &a_time, Box &a_domain, int &a_refRatio, const Interval &a_comps=Interval(), bool setGhost=false)
Definition: CH_HDF5.H:1922
Definition: CH_HDF5.H:321
unsigned int size() const
Returns the total number of boxes in the BoxLayout.
std::ostream & operator<<(std::ostream &os, const HDF5HeaderData &data)
herr_t HDF5HeaderDataattributeScan(hid_t loc_id, const char *name, const H5A_info_t *info, void *opdata)
int LoadBalance(Vector< Vector< int > > &a_procAssignments, Real &a_effRatio, const Vector< Vector< Box > > &a_Grids, const Vector< Vector< long > > &a_ComputeLoads, const Vector< int > &a_RefRatios, int a_nProc=numProc())
int writeData(const BoxLayoutData< T > &a_data, const Interval &a_intvMem, const Interval &a_intvFile)
Write an interval of LevelData to the dataset.
Definition: CH_HDF5.H:2265
unsigned int numProc()
number of parallel processes
int uniqueProc(const SerialTask::task &a_task)
An Iterator based on a BoxLayout object.
Definition: LayoutIterator.H:35
void createDataset(hid_t &a_dataset, hid_t &a_dataspace, HDF5Handle &handle, const std::string &name, const T *dummy, hsize_t size)
Definition: CH_HDF5.H:739
int size() const
Definition: Interval.H:75
hid_t m_dataSet
Dataset for level data.
Definition: CH_HDF5.H:671
void dataTypes(Vector< hid_t > &a_types, const BaseFab< int > &dummy)
Definition: CH_HDF5.H:801
void dataSize(const BaseFab< int > &item, Vector< int > &a_sizes, const Box &box, const Interval &comps)
Definition: CH_HDF5.H:847
const int SpaceDim
Definition: SPACE.H:38
Definition: CH_HDF5.H:318
map< std::string, RealVect > m_realvect
Definition: CH_HDF5.H:562
#define CH_TIMER(name, tpointer)
Definition: CH_Timer.H:63
static const RealVect Unit
Definition: RealVect.H:427
Definition: EBInterface.H:45
static hid_t realvect_id
Definition: CH_HDF5.H:454
Box & enclosedCells()
void resize(unsigned int isize)
Definition: Vector.H:346
static void initialize()
Definition: CH_HDF5.H:320
virtual void close()
map< std::string, IntVect > m_intvect
Definition: CH_HDF5.H:556
Interval m_allIntvFile
Interval for components in file.
Definition: CH_HDF5.H:672
A FArrayBox-like container for face-centered fluxes.
Definition: FluxBox.H:22
void gather(Vector< T > &a_outVec, const T &a_input, int a_dest)
Definition: SPMDI.H:197
void linearIn(CH_XDIR::OffsetBuffer &a_outputT, const void *const a_inBuf)
IOPolicy
Definition: CH_HDF5.H:61
char m_dataname[128]
Name for level data dataset.
Definition: CH_HDF5.H:670
Methods for writing multiple LevelData to an HDF5 file.
Definition: CH_HDF5.H:631
void push_back(const T &in)
Definition: Vector.H:295
Vector< hid_t > m_types
Type of data written.
Definition: CH_HDF5.H:680
static bool initialized
Definition: CH_HDF5.H:471
#define CH_TIME(name)
Definition: CH_Timer.H:82
Structure for passing component ranges in code.
Definition: Interval.H:23
Definition: CH_HDF5.H:66
static const IntVect Unit
Definition: IntVect.H:663
const char * name(const FArrayBox &a_dummySpecializationArg)
Definition: CH_HDF5.H:907
int linearSize(const CH_XDIR::OffsetBuffer &a_input)
new code
Definition: BoxLayoutData.H:170
long m_maxBoxPerProc
Maximum boxes written by any proc.
Definition: CH_HDF5.H:673
std::string m_filename
Definition: CH_HDF5.H:466
HDF5Handle::mode openMode() const
Definition: CH_HDF5.H:449
Interval interval() const
Definition: BoxLayoutData.H:312
Data on a BoxLayout.
Definition: BoxLayoutData.H:97
double Real
Definition: REAL.H:33
Box surroundingNodes(const Box &b, int dir)
Definition: Box.H:2161
unsigned int index(const LayoutIndex &index) const
Definition: BoxLayout.H:724
Definition: SPMD.H:281
Definition: CH_HDF5.H:764
virtual void define(const DisjointBoxLayout &dp, int comps, const IntVect &ghost=IntVect::Zero, const DataFactory< T > &a_factory=DefaultDataFactory< T >())
Definition: LevelDataI.H:80
void operator=(const OffsetBuffer &rhs)
int pushGroup(const std::string &grp)
bool isOpen() const
Vector< int > index
Definition: CH_HDF5.H:766
Definition: CH_HDF5.H:319
A BoxLayout that has a concept of disjointedness.
Definition: DisjointBoxLayout.H:30
void createData(hid_t &a_dataset, hid_t &a_dataspace, HDF5Handle &handle, const std::string &name, hid_t type, hsize_t size)
static void Error(const char *const a_msg=m_nullString, int m_exitCode=CH_DEFAULT_ERROR_CODE)
Print out message to cerr and exit with the specified exit code.
int write(HDF5Handle &a_handle, const BoxLayout &a_layout, const std::string &name="boxes")
writes BoxLayout to HDF5 file.
int begin() const
Definition: Interval.H:99
const BoxLayout & boxLayout() const
Definition: LayoutData.H:107
const int m_policyFlags
Policies.
Definition: CH_HDF5.H:666
static const IntVect Zero
Definition: IntVect.H:658
static hid_t box_id
Definition: CH_HDF5.H:452
const hid_t & groupID() const
Vector< Vector< int > > offsets
Definition: CH_HDF5.H:767
hssize_t ch_offset_t
Definition: CH_HDF5.H:695
static void Warning(const char *const a_msg=m_nullString)
Print out message to cerr and continue.
A Rectangular Domain on an Integer Lattice.
Definition: Box.H:469
A Real vector in SpaceDim-dimensional space.
Definition: RealVect.H:41
void define(int a_firstComp, int a_lastComp)
Definition: Interval.H:52
size_t numPts() const
hid_t m_fileID
Definition: CH_HDF5.H:462
int writeToFile(HDF5Handle &file) const
WriteMultiData & operator=(const WriteMultiData)
Vector< Vector< long long > > m_offsets
Definition: CH_HDF5.H:677
hid_t H5Type(const T *dummy)
Definition: CH_HDF5.H:724
map< std::string, Box > m_box
Definition: CH_HDF5.H:559
#define CH_STOP(tpointer)
Definition: CH_Timer.H:150
Handle to a particular group in an HDF file.
Definition: CH_HDF5.H:294
void dump() const
useful for debugging. dumps contents to std::cout
An integer Vector in SpaceDim-dimensional space.
Definition: CHArray.H:42
Definition: FArrayBox.H:45
const std::string & getGroup() const
DataIterator dataIterator() const
Definition: LayoutDataI.H:78
size_t size() const
Definition: Vector.H:192
HDF5Handle()
{ constructor}
WriteMultiData(HDF5Handle &a_handle, const BoxLayout &a_layout, const int a_numIntv, const string &a_name, const int a_policyFlags=CH_HDF5::IOPolicyDefault, const IntVect &a_outputGhost=IntVect::Zero, const bool a_newForm=false)
Constructor writes boxes and allocates dataset for LevelData.
Definition: CH_HDF5.H:2098
Definition: CH_HDF5.H:63
#define CH_TIMELEAF(name)
Definition: CH_Timer.H:100
Box & grow(int i)
grow functions
Definition: Box.H:2263
virtual bool ok() const
return true if this iterator is still in its Layout
Definition: LayoutIterator.H:117
int setGroup(const std::string &groupAbsPath)
virtual void define(const Vector< Box > &a_boxes, const Vector< int > &a_procIDs)
void broadcast(T &a_inAndOut, int a_src)
broadcast to every process
Definition: SPMDI.H:207
int writeLevel(HDF5Handle &a_handle, const int &a_level, const T &a_data, const Real &a_dx, const Real &a_dt, const Real &a_time, const Box &a_domain, const int &a_refRatio, const IntVect &outputGhost=IntVect::Zero, const Interval &comps=Interval())
user-friendly function to write out data on a AMR level
Definition: CH_HDF5.H:1842
int readFromLocation(hid_t loc_id)
int end() const
Definition: Interval.H:104
map< std::string, std::string > m_string
Definition: CH_HDF5.H:553
static hid_t intvect_id
Definition: CH_HDF5.H:453
const IntVect & smallEnd() const
{ Accessors}
Definition: Box.H:1770
int procID()
local process ID
Box box(const DataIndex &a_index) const
Definition: LayoutDataI.H:66
int readFromFile(HDF5Handle &file)
HDF5Handle & operator=(const HDF5Handle &)
bool m_isOpen
Definition: CH_HDF5.H:465
static map< std::string, std::string > groups
Definition: CH_HDF5.H:455
void read(T &item, Vector< Vector< char > > &a_allocatedBuffers, const Box &box, const Interval &comps)
Definition: CH_HDF5.H:51
hid_t m_currentGroupID
Definition: CH_HDF5.H:463
const bool m_newForm
?
Definition: CH_HDF5.H:668
#define H5T_NATIVE_REAL
Definition: REAL.H:35
int m_level
Definition: CH_HDF5.H:468
const IntVect & ghostVect() const
Definition: LevelData.H:170
~WriteMultiData()
Destructor.
Definition: CH_HDF5.H:646
A wrapper for an FArrayBox to contain NODE-centered data.
Definition: NodeFArrayBox.H:122