-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathtile.h
268 lines (198 loc) · 6.29 KB
/
tile.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
#pragma once

#include <algorithm>
#include <array>
#include <cassert>   // assert() in Tile::wrap
#include <cstdint>   // uint64_t Tile::cid
#include <iostream>
#include <tuple>
#include <vector>

#include <mpi4cpp/mpi.h>

#include "common.h"
#include "internals.h"
#include "cellular_automata.h"
namespace corgi {
namespace mpi = mpi4cpp::mpi;
// Data storage struct for communication members
//
// This is a general D-independent class to make MPI communications
// easier so that we always communicate only this object and use it
// to reconstruct the tile afterwards (see Tile::load_metainfo).
//
// NOTE(review): member order appears to be mirrored by a custom MPI
// datatype (mpi_tile_t, declared elsewhere) — do not reorder fields.
struct Communication {

  /// my index
  int cid;

  /// (i,j,k) indices
  std::array<int, 3> indices;

  /// MPI rank of who owns me
  int owner;

  /// If I am a virtual tile, who do I share the values the most.
  int top_virtual_owner;

  /// how many times do I have to be sent to others
  int communications = 0;

  /// How many virtual neighbors do I have
  int number_of_virtual_neighbors = 0;

  /// tile boundaries (fixed at 3 components regardless of D; unused
  /// trailing entries stay untouched for D < 3)
  std::array<double, 3> mins;
  std::array<double, 3> maxs;

  /// tile type listing
  //bool local = false;

  /// my virtual owners; not communicated along the metainfo
  //std::vector<int> virtual_owners;
};
/*! \brief Tile object
*
* This is the smallest storage unit of the framework. Internally this should host
* a mesh/grid/particles/etc.
*/
template<std::size_t D>
class Tile
// public std::enable_shared_from_this<Tile<D>>
{
public:
/// unique tile ID
uint64_t cid;
// Order here is fixed for mpi_tile_t
Communication communication;
// my virtual owners
std::vector<int> virtual_owners;
/// coarse mpi_grid grid indices
corgi::internals::tuple_of<D, size_t> index;
/// Global grid dimensions (needed for wrapping boundaries)
std::array<size_t, D> lengths;
/// tile boundaries
std::array<double, D> mins;
std::array<double, D> maxs;
// using default ctor
// TODO: are there side effects?
Tile() = default;
/*! \brief *virtual* base class destructor
* NOTE: this needs to be virtual so that child classes can be
* destroyed.
*/
virtual ~Tile() = default;
/// load tile metainfo from Communication object
void load_metainfo(Communication cm)
{
communication = cm;
cid = cm.cid;
// create temporary array of correct size
std::array<size_t, D> ind2;
for(size_t i=0; i<D; i++) ind2[i] = static_cast<size_t>(cm.indices[i]);
// cast into tuple
index = corgi::internals::into_tuple(ind2);
for(size_t i=0; i<D; i++) mins[i] = cm.mins[i];
for(size_t i=0; i<D; i++) maxs[i] = cm.maxs[i];
}
/// general N-dim implementation of wrap
size_t wrap(int ind, size_t d)
{
auto N = static_cast<int>(lengths[d]);
assert(N < 1e6); // FIXME: debug catch preventing ridiculously large while loops
assert(N > 0); // FIXME: debug catch preventing ridiculously large while loops
while (ind < 0) { ind += N; }
while (ind >= N) { ind -= N; }
return static_cast<size_t>(ind);
}
/// return index of tiles in relative to my position
public:
template<typename... Indices>
corgi::internals::enable_if_t< (sizeof...(Indices) == D) &&
corgi::internals::are_integral<Indices...>::value,
const corgi::internals::tuple_of<D, size_t> >
neighs(Indices... indices_rel)
{
std::array<int, D> rel = {{static_cast<int>(indices_rel)...}};
//std::array<size_t, D> cur = {{index}};
auto cur = corgi::internals::into_array(index);
for(size_t i=0; i<D; i++) {
cur[i] = static_cast<size_t>(
wrap( static_cast<int>(rel[i]) +
static_cast<int>(cur[i]), i)
);
}
auto ret = corgi::internals::into_tuple(cur);
return ret;
}
/// auxiliary function to unpack tuples
private:
template <size_t... Is>
corgi::internals::tuple_of<D, size_t> neighs_impl(
corgi::internals::tuple_of<D, int>& tuple,
std::index_sequence<Is...> /*unused*/)
{
return neighs( std::get<Is>(tuple)... );
}
public:
/// unpack tuple into variadic argument list
template<typename Indices = std::make_index_sequence<D>>
corgi::internals::tuple_of<D, size_t> neighs(
corgi::internals::tuple_of<D, int>& indices)
{
return neighs_impl(indices, Indices{} );
}
// end of neighs + auxiliary functions
//--------------------------------------------------
/// Return full Moore neighborhood around me
std::vector< corgi::internals::tuple_of<D, size_t> > nhood()
{
std::vector< corgi::internals::tuple_of<D, size_t> > nh;
for(auto& reli : corgi::ca::moore_neighborhood<D>() ){
nh.push_back( neighs(reli) );
}
return nh;
}
// --------------------------------------------------
// (optional) tile geometry
/// set tile minimum limits
void set_tile_mins(std::array<double, D> bbox)
{
mins = std::move(bbox);
for(size_t i=0; i<D; i++) communication.mins[i] = bbox[i];
}
/// set tile minimum limits
void set_tile_maxs(std::array<double, D> bbox)
{
maxs = std::move(bbox);
for(size_t i=0; i<D; i++) communication.maxs[i] = bbox[i];
}
// --------------------------------------------------
/// dummy MPI data send function
virtual std::vector<mpi::request>
send_data(
mpi::communicator& /*comm*/,
int dest,
int /*mode*/,
int /*tag*/)
{
std::vector<mpi::request> reqs;
std::cout << "send to " << dest << "\n";
return reqs;
}
/// dummy MPI data recv function
virtual std::vector<mpi::request>
recv_data(
mpi::communicator& /*comm*/,
int orig,
int /*mode*/,
int /*tag*/)
{
std::vector<mpi::request> reqs;
std::cout << "recv from " << orig << "\n";
return reqs;
}
/// dummy MPI data recv function for extra data
virtual std::vector<mpi::request>
recv_extra_data(
mpi::communicator& /*comm*/,
int orig,
int /*mode*/,
int /*tag*/)
{
std::vector<mpi::request> reqs;
std::cout << "recv from " << orig << "\n";
return reqs;
}
/// Local computational work estimate for this tile
virtual double get_work()
{
return 1.0;
}
}; // end of Tile class
} // end of namespace corgi