Canoe
Comprehensive Atmosphere N' Ocean Engine
communication.hpp
#ifndef SRC_SNAP_IMPLICIT_COMMUNICATION_HPP_
#define SRC_SNAP_IMPLICIT_COMMUNICATION_HPP_

// C/C++
#include <cstring>
#include <functional>
#include <string>
#include <vector>

// athena
#include <athena/globals.hpp>
#include <athena/hydro/hydro.hpp>
#include <athena/mesh/mesh.hpp>

// canoe
#include <impl.hpp>

// snap
#include "implicit_solver.hpp"

// MPI headers
#ifdef MPI_PARALLEL
#include <mpi.h>
#endif

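//! \brief Pack one or more local arrays into buffer_[k][j] and ship the packed
//! data to the neighbor block \p nb.
//!
//! The overloads below differ only in how many arrays are concatenated into the
//! buffer. If the neighbor lives on another MPI rank, the buffer is posted with
//! a non-blocking MPI_Isend; otherwise it is copied directly into the receiving
//! MeshBlock's solver buffer (looked up through FindMeshBlock).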
template <typename T>
void ImplicitSolver::SendBuffer(T const &a, int k, int j, NeighborBlock nb) {
  int s1 = a.size();
  // size_t phy = k << 10 | j << 2 | 0;
  std::string phy = std::to_string(k) + "x" + std::to_string(j) + "x1";

  memcpy(buffer_[k][j], a.data(), s1 * sizeof(Real));

  if (nb.snb.rank != Globals::my_rank) {  // MPI boundary
#ifdef MPI_PARALLEL
    int tag = CreateMPITag(nb.snb.gid, pmy_block_->gid, phy);
    MPI_Isend(buffer_[k][j], s1, MPI_ATHENA_REAL, nb.snb.rank, tag,
              MPI_COMM_WORLD, &req_send_data1_[k][j]);
#endif
  } else {  // local boundary
    MeshBlock *pbl = pmy_block_->pmy_mesh->FindMeshBlock(bblock.snb.gid);
    std::memcpy(pbl->pimpl->phevi->buffer_[k][j], buffer_[k][j],
                s1 * sizeof(Real));
  }
}

template <typename T1, typename T2>
void ImplicitSolver::SendBuffer(T1 const &a, T2 const &b, int k, int j,
                                NeighborBlock nb) {
  int s1 = a.size(), s2 = b.size();
  // size_t phy = k << 10 | j << 2 | 1;
  std::string phy = std::to_string(k) + "x" + std::to_string(j) + "x2";

  memcpy(buffer_[k][j], a.data(), s1 * sizeof(Real));
  memcpy(buffer_[k][j] + s1, b.data(), s2 * sizeof(Real));

  if (nb.snb.rank != Globals::my_rank) {  // MPI boundary
#ifdef MPI_PARALLEL
    int tag = CreateMPITag(nb.snb.gid, pmy_block_->gid, phy);
    MPI_Isend(buffer_[k][j], s1 + s2, MPI_ATHENA_REAL, nb.snb.rank, tag,
              MPI_COMM_WORLD, &req_send_data2_[k][j]);
#endif
  } else {  // local boundary
    MeshBlock *pbl = pmy_block_->pmy_mesh->FindMeshBlock(bblock.snb.gid);
    std::memcpy(pbl->pimpl->phevi->buffer_[k][j], buffer_[k][j],
                (s1 + s2) * sizeof(Real));
  }
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6>
void ImplicitSolver::SendBuffer(T1 const &a, T2 const &b, T3 const &c,
                                T4 const &d, T5 const &e, T6 const &f, int k,
                                int j, NeighborBlock nb) {
  int s1 = a.size(), s2 = b.size(), s3 = c.size(), s4 = d.size();
  int s5 = e.size(), s6 = f.size();
  // size_t phy = k << 10 | j << 2 | 2;
  std::string phy = std::to_string(k) + "x" + std::to_string(j) + "x6";

  Real *it = buffer_[k][j];
  memcpy(it, a.data(), s1 * sizeof(Real));
  it += s1;
  memcpy(it, b.data(), s2 * sizeof(Real));
  it += s2;
  memcpy(it, c.data(), s3 * sizeof(Real));
  it += s3;
  memcpy(it, d.data(), s4 * sizeof(Real));
  it += s4;
  memcpy(it, e.data(), s5 * sizeof(Real));
  it += s5;
  memcpy(it, f.data(), s6 * sizeof(Real));

  int st = s1 + s2 + s3 + s4 + s5 + s6;

  if (nb.snb.rank != Globals::my_rank) {  // MPI boundary
#ifdef MPI_PARALLEL
    int tag = CreateMPITag(nb.snb.gid, pmy_block_->gid, phy);
    MPI_Isend(buffer_[k][j], st, MPI_ATHENA_REAL, nb.snb.rank, tag,
              MPI_COMM_WORLD, &req_send_data6_[k][j]);
#endif
  } else {  // local boundary
    MeshBlock *pbl = pmy_block_->pmy_mesh->FindMeshBlock(bblock.snb.gid);
    std::memcpy(pbl->pimpl->phevi->buffer_[k][j], buffer_[k][j],
                st * sizeof(Real));
  }
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7>
void ImplicitSolver::SendBuffer(T1 const &a, T2 const &b, T3 const &c,
                                T4 const &d, T5 const &e, T6 const &f,
                                T7 const &g, int k, int j, NeighborBlock nb) {
  int s1 = a.size(), s2 = b.size(), s3 = c.size(), s4 = d.size();
  int s5 = e.size(), s6 = f.size(), s7 = g.size();
  std::string phy = std::to_string(k) + "x" + std::to_string(j) + "x7";

  Real *it = buffer_[k][j];
  memcpy(it, a.data(), s1 * sizeof(Real));
  it += s1;
  memcpy(it, b.data(), s2 * sizeof(Real));
  it += s2;
  memcpy(it, c.data(), s3 * sizeof(Real));
  it += s3;
  memcpy(it, d.data(), s4 * sizeof(Real));
  it += s4;
  memcpy(it, e.data(), s5 * sizeof(Real));
  it += s5;
  memcpy(it, f.data(), s6 * sizeof(Real));
  it += s6;
  memcpy(it, g.data(), s7 * sizeof(Real));

  int st = s1 + s2 + s3 + s4 + s5 + s6 + s7;

  if (nb.snb.rank != Globals::my_rank) {  // MPI boundary
#ifdef MPI_PARALLEL
    int tag = CreateMPITag(nb.snb.gid, pmy_block_->gid, phy);
    MPI_Isend(buffer_[k][j], st, MPI_ATHENA_REAL, nb.snb.rank, tag,
              MPI_COMM_WORLD, &req_send_data7_[k][j]);
#endif
  } else {  // local boundary
    MeshBlock *pbl = pmy_block_->pmy_mesh->FindMeshBlock(bblock.snb.gid);
    std::memcpy(pbl->pimpl->phevi->buffer_[k][j], buffer_[k][j],
                st * sizeof(Real));
  }
}

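//! \brief Unpack data sent by SendBuffer from the neighbor block \p nb.
//!
//! For an MPI neighbor the matching message is received into buffer_[k][j]
//! with a blocking MPI_Recv; for a local neighbor the buffer has already been
//! filled by the sender, so the data are copied straight into the destination
//! arrays.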
template <typename T>
void ImplicitSolver::RecvBuffer(T &a, int k, int j, NeighborBlock nb) {
  int s1 = a.size();
  // size_t phy = k << 10 | j << 2 | 0;
  std::string phy = std::to_string(k) + "x" + std::to_string(j) + "x1";
#ifdef MPI_PARALLEL
  MPI_Status status;
#endif

  if (nb.snb.rank != Globals::my_rank) {  // MPI boundary
#ifdef MPI_PARALLEL
    int tag = CreateMPITag(pmy_block_->gid, nb.snb.gid, phy);
    MPI_Recv(buffer_[k][j], s1, MPI_ATHENA_REAL, nb.snb.rank, tag,
             MPI_COMM_WORLD, &status);
#endif
  }  // local boundary

  memcpy(a.data(), buffer_[k][j], s1 * sizeof(Real));
}

template <typename T1, typename T2>
void ImplicitSolver::RecvBuffer(T1 &a, T2 &b, int k, int j, NeighborBlock nb) {
  int s1 = a.size(), s2 = b.size();
  // size_t phy = k << 10 | j << 2 | 1;
  std::string phy = std::to_string(k) + "x" + std::to_string(j) + "x2";
#ifdef MPI_PARALLEL
  MPI_Status status;
#endif

  if (nb.snb.rank != Globals::my_rank) {  // MPI boundary
#ifdef MPI_PARALLEL
    int tag = CreateMPITag(pmy_block_->gid, nb.snb.gid, phy);
    MPI_Recv(buffer_[k][j], s1 + s2, MPI_ATHENA_REAL, nb.snb.rank, tag,
             MPI_COMM_WORLD, &status);
#endif
  }  // local boundary

  memcpy(a.data(), buffer_[k][j], s1 * sizeof(Real));
  memcpy(b.data(), buffer_[k][j] + s1, s2 * sizeof(Real));
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6>
void ImplicitSolver::RecvBuffer(T1 &a, T2 &b, T3 &c, T4 &d, T5 &e, T6 &f, int k,
                                int j, NeighborBlock nb) {
  int s1 = a.size(), s2 = b.size(), s3 = c.size(), s4 = d.size();
  int s5 = e.size(), s6 = f.size();
  // size_t phy = k << 10 | j << 2 | 2;
  std::string phy = std::to_string(k) + "x" + std::to_string(j) + "x6";
#ifdef MPI_PARALLEL
  MPI_Status status;
#endif

  int st = s1 + s2 + s3 + s4 + s5 + s6;

  if (nb.snb.rank != Globals::my_rank) {  // MPI boundary
#ifdef MPI_PARALLEL
    int tag = CreateMPITag(pmy_block_->gid, nb.snb.gid, phy);
    MPI_Recv(buffer_[k][j], st, MPI_ATHENA_REAL, nb.snb.rank, tag,
             MPI_COMM_WORLD, &status);
#endif
  }  // local boundary

  Real *it = buffer_[k][j];
  memcpy(a.data(), it, s1 * sizeof(Real));
  it += s1;
  memcpy(b.data(), it, s2 * sizeof(Real));
  it += s2;
  memcpy(c.data(), it, s3 * sizeof(Real));
  it += s3;
  memcpy(d.data(), it, s4 * sizeof(Real));
  it += s4;
  memcpy(e.data(), it, s5 * sizeof(Real));
  it += s5;
  memcpy(f.data(), it, s6 * sizeof(Real));
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7>
void ImplicitSolver::RecvBuffer(T1 &a, T2 &b, T3 &c, T4 &d, T5 &e, T6 &f, T7 &g,
                                int k, int j, NeighborBlock nb) {
  int s1 = a.size(), s2 = b.size(), s3 = c.size(), s4 = d.size();
  int s5 = e.size(), s6 = f.size(), s7 = g.size();
  // size_t phy = k << 10 | j << 2 | 3;
  std::string phy = std::to_string(k) + "x" + std::to_string(j) + "x7";
#ifdef MPI_PARALLEL
  MPI_Status status;
#endif

  int st = s1 + s2 + s3 + s4 + s5 + s6 + s7;

  if (nb.snb.rank != Globals::my_rank) {  // MPI boundary
#ifdef MPI_PARALLEL
    int tag = CreateMPITag(pmy_block_->gid, nb.snb.gid, phy);
    MPI_Recv(buffer_[k][j], st, MPI_ATHENA_REAL, nb.snb.rank, tag,
             MPI_COMM_WORLD, &status);
#endif
  }  // local boundary

  Real *it = buffer_[k][j];
  memcpy(a.data(), it, s1 * sizeof(Real));
  it += s1;
  memcpy(b.data(), it, s2 * sizeof(Real));
  it += s2;
  memcpy(c.data(), it, s3 * sizeof(Real));
  it += s3;
  memcpy(d.data(), it, s4 * sizeof(Real));
  it += s4;
  memcpy(e.data(), it, s5 * sizeof(Real));
  it += s5;
  memcpy(f.data(), it, s6 * sizeof(Real));
  it += s6;
  memcpy(g.data(), it, s7 * sizeof(Real));
}

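//! \brief Copy the per-cell coefficient arrays a, b (and c, d in the second
//! overload) for columns i = il..iu into coefficients_ at (k, j, i), packed
//! back to back along the last dimension.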
template <typename T1, typename T2>
void ImplicitSolver::SaveCoefficients(std::vector<T1> &a, std::vector<T2> &b,
                                      int k, int j, int il, int iu) {
  for (int i = il; i <= iu; ++i) {
    int s1 = a[i].size(), s2 = b[i].size();
    memcpy(&coefficients_(k, j, i, 0), a[i].data(), s1 * sizeof(Real));
    memcpy(&coefficients_(k, j, i, s1), b[i].data(), s2 * sizeof(Real));
  }
}

template <typename T1, typename T2, typename T3, typename T4>
void ImplicitSolver::SaveCoefficients(std::vector<T1> &a, std::vector<T2> &b,
                                      std::vector<T3> &c, std::vector<T4> &d,
                                      int k, int j, int il, int iu) {
  for (int i = il; i <= iu; ++i) {
    int s1 = a[i].size(), s2 = b[i].size(), s3 = c[i].size(), s4 = d[i].size();
    memcpy(&coefficients_(k, j, i, 0), a[i].data(), s1 * sizeof(Real));
    memcpy(&coefficients_(k, j, i, s1), b[i].data(), s2 * sizeof(Real));
    memcpy(&coefficients_(k, j, i, s1 + s2), c[i].data(), s3 * sizeof(Real));
    memcpy(&coefficients_(k, j, i, s1 + s2 + s3), d[i].data(),
           s4 * sizeof(Real));
  }
}

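//! \brief Inverse of SaveCoefficients: restore the per-cell coefficient arrays
//! from coefficients_(k, j, i, ...) for columns i = il..iu.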
template <typename T1, typename T2>
void ImplicitSolver::LoadCoefficients(std::vector<T1> &a, std::vector<T2> &b,
                                      int k, int j, int il, int iu) {
  for (int i = il; i <= iu; ++i) {
    int s1 = a[i].size(), s2 = b[i].size();
    memcpy(a[i].data(), &coefficients_(k, j, i, 0), s1 * sizeof(Real));
    memcpy(b[i].data(), &coefficients_(k, j, i, s1), s2 * sizeof(Real));
  }
}

template <typename T1, typename T2, typename T3, typename T4>
void ImplicitSolver::LoadCoefficients(std::vector<T1> &a, std::vector<T2> &b,
                                      std::vector<T3> &c, std::vector<T4> &d,
                                      int k, int j, int il, int iu) {
  for (int i = il; i <= iu; ++i) {
    int s1 = a[i].size(), s2 = b[i].size(), s3 = c[i].size(), s4 = d[i].size();
    memcpy(a[i].data(), &coefficients_(k, j, i, 0), s1 * sizeof(Real));
    memcpy(b[i].data(), &coefficients_(k, j, i, s1), s2 * sizeof(Real));
    memcpy(c[i].data(), &coefficients_(k, j, i, s1 + s2), s3 * sizeof(Real));
    memcpy(d[i].data(), &coefficients_(k, j, i, s1 + s2 + s3),
           s4 * sizeof(Real));
  }
}

/*template<typename T>
void inline ImplicitSolver::SaveForcingJacobian(T &phi, int k, int j, int i) {
  if (mydir == X1DIR) {
  } else if (mydir == X2DIR) {
    memcpy(jacobian_[j][i][k], phi.data(), phi.size()*sizeof(Real));
  } else {
    memcpy(jacobian_[i][k][j], phi.data(), phi.size()*sizeof(Real));
  }
}

template<typename T>
void inline ImplicitSolver::LoadForcingJacobian(T &phi, int k, int j, int i,
                                                CoordinateDirection dir) {
  Eigen::Matrix<Real,5,5> tmp;

  if (dir == X1DIR) {
    memcpy(tmp.data(), jacobian_[k][j][i], tmp.size()*sizeof(Real));
  } else if (dir == X2DIR) {
    memcpy(tmp.data(), jacobian_[j][i][k], tmp.size()*sizeof(Real));
    tmp = p2_*tmp*p3_;
  } else {  // X3DIR
    memcpy(tmp.data(), jacobian_[i][k][j], tmp.size()*sizeof(Real));
    tmp = p3_*tmp*p2_;
  }

  for (int i = 0; i < 5; ++i)
    for (int j = 0; j < 5; ++j)
      phi(i, j) = tmp(i, j);
}*/

#endif  // SRC_SNAP_IMPLICIT_COMMUNICATION_HPP_
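The sketch below illustrates how a SendBuffer/RecvBuffer pair is typically used while sweeping a single (k, j) column: pack local arrays and post them to one neighbor, then receive and unpack the matching arrays from the other neighbor. It is only a rough sketch under stated assumptions: the free function ExchangeColumnExample, the Eigen vector types, and the accessibility of the two calls from outside the solver are illustrative and not part of communication.hpp.

// Minimal sketch (assumed names; declarations from this header assumed in scope).
#include <Eigen/Core>

void ExchangeColumnExample(ImplicitSolver *solver, NeighborBlock top,
                           NeighborBlock bot, int k, int j) {
  Eigen::Matrix<Real, 5, 1> send_a, send_b;  // data computed for this column
  Eigen::Matrix<Real, 5, 1> recv_a, recv_b;  // data expected from the neighbor

  // ... fill send_a and send_b from the local solve ...

  // Pack both arrays into the solver's buffer and post the send
  // (non-blocking MPI_Isend for a remote neighbor, direct memcpy otherwise).
  solver->SendBuffer(send_a, send_b, k, j, top);

  // Wait for the matching message from the opposite neighbor and unpack it.
  solver->RecvBuffer(recv_a, recv_b, k, j, bot);
}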
Referenced declarations:
int CreateMPITag(int lid, int bufid, std::string phy)
NeighborBlock bblock
void RecvBuffer(T &a, int k, int j, NeighborBlock nb)
AthenaArray<Real> coefficients_
void LoadCoefficients(std::vector<T1> &a, std::vector<T2> &b, int k, int j, int il, int iu)
void SaveCoefficients(std::vector<T1> &a, std::vector<T2> &b, int k, int j, int il, int iu)
MeshBlock const *pmy_block_
void SendBuffer(T const &a, int k, int j, NeighborBlock nb)