DIY  3.0
data-parallel out-of-core C++ library
 All Classes Namespaces Functions Typedefs Groups Pages
bov.hpp
1 #ifndef DIY_IO_BOV_HPP
2 #define DIY_IO_BOV_HPP
3 
4 #include <vector>
5 #include <algorithm>
6 #include <numeric>
7 
8 #include "../types.hpp"
9 #include "../mpi.hpp"
10 
11 namespace diy
12 {
13 namespace io
14 {
15  // Reads and writes subsets of a block of values into specified block bounds
16  class BOV
17  {
18  public:
19  typedef std::vector<int> Shape;
20  public:
21  BOV(mpi::io::file& f):
22  f_(f), offset_(0) {}
23 
24  template<class S>
25  BOV(mpi::io::file& f,
26  const S& shape = S(),
27  mpi::io::offset offset = 0):
28  f_(f), offset_(offset) { set_shape(shape); }
29 
30  void set_offset(mpi::io::offset offset) { offset_ = offset; }
31 
32  template<class S>
33  void set_shape(const S& shape)
34  {
35  shape_.clear();
36  stride_.clear();
37  for (unsigned i = 0; i < shape.size(); ++i)
38  {
39  shape_.push_back(shape[i]);
40  stride_.push_back(1);
41  }
42  for (int i = shape_.size() - 2; i >= 0; --i)
43  stride_[i] = stride_[i+1] * shape_[i+1];
44  }
45 
46  const Shape& shape() const { return shape_; }
47 
48  template<class T>
49  void read(const DiscreteBounds& bounds, T* buffer, bool collective = false, int chunk = 1) const;
50 
51  template<class T>
52  void write(const DiscreteBounds& bounds, const T* buffer, bool collective = false, int chunk = 1);
53 
54  template<class T>
55  void write(const DiscreteBounds& bounds, const T* buffer, const DiscreteBounds& core, bool collective = false, int chunk = 1);
56 
57  protected:
58  mpi::io::file& file() { return f_; }
59 
60  private:
61  mpi::io::file& f_;
62  Shape shape_;
63  std::vector<size_t> stride_;
64  size_t offset_;
65  };
66 }
67 }
68 
// Reads the subarray of the file selected by `bounds` into `buffer`.
//
// bounds     - min/max corners (inclusive) of the box to read, in the global
//              array coordinates established by set_shape()
// buffer     - destination; must hold the product of the box extents
//              elements of T (times `chunk` elements each, if chunk != 1)
// collective - if true, uses MPI_File_read_all (every rank must call)
// chunk      - number of T values treated as one element; see comment below
template<class T>
void
diy::io::BOV::
read(const DiscreteBounds& bounds, T* buffer, bool collective, int chunk) const
{
  int dim = shape_.size();
  int total = 1;
  std::vector<int> subsizes;
  // Extents of the requested box per dimension (bounds are inclusive).
  for (int i = 0; i < dim; ++i)
  {
    subsizes.push_back(bounds.max[i] - bounds.min[i] + 1);
    total *= subsizes.back();
  }

  MPI_Datatype T_type;
  if (chunk == 1)
    T_type = mpi::detail::get_mpi_datatype<T>();
  else
  {
    // create an MPI struct of size chunk to read the data in those chunks
    // (this allows to work around MPI-IO weirdness where crucial quantities
    // are ints, which are too narrow of a type)
    int array_of_blocklengths[] = { chunk };
    MPI_Aint array_of_displacements[] = { 0 };
    MPI_Datatype array_of_types[] = { mpi::detail::get_mpi_datatype<T>() };
    MPI_Type_create_struct(1, array_of_blocklengths, array_of_displacements, array_of_types, &T_type);
    MPI_Type_commit(&T_type);
  }

  // File-side type: the requested box as a subarray of the full C-ordered
  // array; counts below are in units of T_type (i.e. in chunks).
  MPI_Datatype fileblk;
  MPI_Type_create_subarray(dim, (int*) &shape_[0], &subsizes[0], (int*) &bounds.min[0], MPI_ORDER_C, T_type, &fileblk);
  MPI_Type_commit(&fileblk);

  // View begins at offset_; the cast of "native" accommodates pre-MPI-3
  // signatures that take a non-const char*.
  MPI_File_set_view(f_.handle(), offset_, T_type, fileblk, (char*)"native", MPI_INFO_NULL);

  mpi::status s;
  if (!collective)
    MPI_File_read(f_.handle(), buffer, total, T_type, &s.s);
  else
    MPI_File_read_all(f_.handle(), buffer, total, T_type, &s.s);

  // Free only derived types; the predefined datatype used when chunk == 1
  // must not be freed.
  if (chunk != 1)
    MPI_Type_free(&T_type);
  MPI_Type_free(&fileblk);
}
114 
115 template<class T>
116 void
117 diy::io::BOV::
118 write(const DiscreteBounds& bounds, const T* buffer, bool collective, int chunk)
119 {
120  write(bounds, buffer, bounds, collective, chunk);
121 }
122 
123 template<class T>
124 void
125 diy::io::BOV::
126 write(const DiscreteBounds& bounds, const T* buffer, const DiscreteBounds& core, bool collective, int chunk)
127 {
128  int dim = shape_.size();
129  std::vector<int> subsizes;
130  std::vector<int> buffer_shape, buffer_start;
131  for (int i = 0; i < dim; ++i)
132  {
133  buffer_shape.push_back(bounds.max[i] - bounds.min[i] + 1);
134  buffer_start.push_back(core.min[i] - bounds.min[i]);
135  subsizes.push_back(core.max[i] - core.min[i] + 1);
136  }
137 
138  MPI_Datatype T_type;
139  if (chunk == 1)
140  T_type = mpi::detail::get_mpi_datatype<T>();
141  else
142  {
143  // assume T is a binary block and create an MPI struct of appropriate size
144  int array_of_blocklengths[] = { chunk };
145  MPI_Aint array_of_displacements[] = { 0 };
146  MPI_Datatype array_of_types[] = { mpi::detail::get_mpi_datatype<T>() };
147  MPI_Type_create_struct(1, array_of_blocklengths, array_of_displacements, array_of_types, &T_type);
148  MPI_Type_commit(&T_type);
149  }
150 
151  MPI_Datatype fileblk, subbuffer;
152  MPI_Type_create_subarray(dim, (int*) &shape_[0], &subsizes[0], (int*) &bounds.min[0], MPI_ORDER_C, T_type, &fileblk);
153  MPI_Type_create_subarray(dim, (int*) &buffer_shape[0], &subsizes[0], (int*) &buffer_start[0], MPI_ORDER_C, T_type, &subbuffer);
154  MPI_Type_commit(&fileblk);
155  MPI_Type_commit(&subbuffer);
156 
157  MPI_File_set_view(f_.handle(), offset_, T_type, fileblk, (char*)"native", MPI_INFO_NULL);
158 
159  mpi::status s;
160  if (!collective)
161  MPI_File_write(f_.handle(), (void*)buffer, 1, subbuffer, &s.s);
162  else
163  MPI_File_write_all(f_.handle(), (void*)buffer, 1, subbuffer, &s.s);
164 
165  if (chunk != 1)
166  MPI_Type_free(&T_type);
167  MPI_Type_free(&fileblk);
168  MPI_Type_free(&subbuffer);
169 }
170 
171 #endif
Definition: bov.hpp:16
Wraps MPI file IO.
Definition: io.hpp:16
Definition: types.hpp:16