MeshPartitioning.h
// Copyright (C) 2008-2013 Niclas Jansson, Ola Skavhaug, Anders Logg,
// Garth N. Wells and Chris Richardson
//
// This file is part of DOLFIN.
//
// DOLFIN is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// DOLFIN is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
//
// Modified by Garth N. Wells, 2010
// Modified by Kent-Andre Mardal, 2011
// Modified by Chris Richardson, 2013
//
// First added:  2008-12-01
// Last changed: 2014-06-20

#ifndef __MESH_PARTITIONING_H
#define __MESH_PARTITIONING_H

#include <cstdint>
#include <map>
#include <utility>
#include <vector>
#include <boost/multi_array.hpp>
#include <dolfin/log/log.h>
#include <dolfin/common/Set.h>
#include "CellType.h"
#include "DistributedMeshTools.h"
#include "LocalMeshValueCollection.h"
#include "Mesh.h"

namespace dolfin
{
  // Developer note: MeshFunction and MeshValueCollection cannot
  // appear in the implementations of the templated functions in this
  // file, as that would create a circular dependency. The functions
  // are therefore templated over these types.

  template <typename T> class MeshFunction;
  template <typename T> class MeshValueCollection;
  class LocalMeshData;

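  // Editor's note on the workaround above (added for clarity, not in
  // the original header): the implementations below never name the
  // real MeshValueCollection class directly. build_mesh_value_collection()
  // takes it as the template parameter 'MeshValueCollection', so its
  // members are only looked up at instantiation time, in a translation
  // unit that has included MeshValueCollection.h. The forward
  // declarations are therefore sufficient here.
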
  /// This class partitions and distributes a mesh based on
  /// partitioned local mesh data. The local mesh data will also be
  /// repartitioned and redistributed during the computation of the
  /// mesh partitioning.
  ///
  /// After partitioning, each process has a local mesh and some data
  /// that couples the meshes together.

  class MeshPartitioning
  {
  public:

    /// Build a distributed mesh from a local mesh on process 0
    static void build_distributed_mesh(Mesh& mesh);

    /// Build a distributed mesh from a local mesh on process 0, with
    /// distribution of cells supplied (destination processes for each
    /// cell)
    static void
    build_distributed_mesh(Mesh& mesh, const std::vector<int>& cell_partition,
                           const std::string ghost_mode);

    /// Build a distributed mesh from 'local mesh data' that is
    /// distributed across processes
    static void build_distributed_mesh(Mesh& mesh, const LocalMeshData& data,
                                       const std::string ghost_mode);
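
    // Editor's sketch: ghost_mode selects the ghosting strategy. In
    // DOLFIN it is normally taken from the global parameter system;
    // the accepted values are "none", "shared_facet" and
    // "shared_vertex". For example:
    //
    //   parameters["ghost_mode"] = "shared_vertex";
    //   UnitSquareMesh mesh(32, 32);  // partitioned and ghosted when
    //                                 // run under MPI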

    /// Build a MeshValueCollection based on LocalMeshValueCollection
    template<typename T>
    static void
    build_distributed_value_collection(MeshValueCollection<T>& values,
                                       const LocalMeshValueCollection<T>& local_data,
                                       const Mesh& mesh);

  private:

    // Compute cell partitioning from local mesh data. Returns a
    // 'cell -> process' vector for cells in LocalMeshData, and a map
    // 'local cell index -> processes' to which ghost cells must be
    // sent
    static
    void partition_cells(const MPI_Comm& mpi_comm,
                         const LocalMeshData& mesh_data,
                         const std::string partitioner,
                         std::vector<int>& cell_partition,
                         std::map<std::int64_t, std::vector<int>>& ghost_procs);
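
    // Editor's sketch of the two outputs (hypothetical values for a
    // process holding four cells in LocalMeshData):
    //
    //   std::vector<int> cell_partition = {0, 0, 1, 1};  // destination
    //                                                    // process per cell
    //   std::map<std::int64_t, std::vector<int>> ghost_procs
    //     = {{2, {0, 1}}};  // local cell 2 must also be sent to
    //                       // processes 0 and 1 as a ghost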

    // Build a distributed mesh from local mesh data with a computed
    // partition
    static void build(Mesh& mesh, const LocalMeshData& data,
                      const std::vector<int>& cell_partition,
                      const std::map<std::int64_t, std::vector<int>>& ghost_procs,
                      const std::string ghost_mode);

    // FIXME: Improve this docstring
    // Distribute a layer of cells attached by vertex to the boundary,
    // updating new_mesh_data and shared_cells. Used when ghosting by
    // vertex.
    static
    void distribute_cell_layer(MPI_Comm mpi_comm,
                               const int num_regular_cells,
                               const std::int64_t num_global_vertices,
                               std::map<std::int32_t, std::set<unsigned int>>& shared_cells,
                               boost::multi_array<std::int64_t, 2>& cell_vertices,
                               std::vector<std::int64_t>& global_cell_indices,
                               std::vector<int>& cell_partition);

    // FIXME: make clearer what goes in and what comes out
    // Reorder cells by the Gibbs-Poole-Stockmeyer algorithm (via
    // SCOTCH). Returns the tuple (new_shared_cells, new_cell_vertices,
    // new_global_cell_indices).
    static
    void reorder_cells_gps(MPI_Comm mpi_comm,
                           const unsigned int num_regular_cells,
                           const CellType& cell_type,
                           const std::map<std::int32_t, std::set<unsigned int>>& shared_cells,
                           const boost::multi_array<std::int64_t, 2>& cell_vertices,
                           const std::vector<std::int64_t>& global_cell_indices,
                           std::map<std::int32_t, std::set<unsigned int>>& reordered_shared_cells,
                           boost::multi_array<std::int64_t, 2>& reordered_cell_vertices,
                           std::vector<std::int64_t>& reordered_global_cell_indices);

    // FIXME: make clearer what goes in and what comes out
    // Reorder vertices by the Gibbs-Poole-Stockmeyer algorithm (via
    // SCOTCH). Returns the pair (new_vertex_indices,
    // new_vertex_global_to_local).
    static
    void
    reorder_vertices_gps(MPI_Comm mpi_comm,
                         const std::int32_t num_regular_vertices,
                         const std::int32_t num_regular_cells,
                         const int num_cell_vertices,
                         const boost::multi_array<std::int64_t, 2>& cell_vertices,
                         const std::vector<std::int64_t>& vertex_indices,
                         const std::map<std::int64_t, std::int32_t>& vertex_global_to_local,
                         std::vector<std::int64_t>& reordered_vertex_indices,
                         std::map<std::int64_t, std::int32_t>& reordered_vertex_global_to_local);
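
    // Editor's note (added for context): Gibbs-Poole-Stockmeyer
    // computes a bandwidth-reducing ordering of a graph. Applied to
    // the cell and vertex connectivity it renumbers entities so that
    // neighbours receive nearby indices, which improves data locality
    // in later mesh traversals.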

    // FIXME: Update, making clear exactly what is computed
    // This function takes the partition computed by the partitioner
    // (which tells us to which process each of the local cells stored
    // in LocalMeshData on this process belongs) and sends the cells
    // to the appropriate owning process. Ghost cells are also sent,
    // along with the list of sharing processes. A new LocalMeshData
    // object is populated with the redistributed cells. Returns the
    // number of non-ghost cells on this process.
    static
    std::int32_t
    distribute_cells(const MPI_Comm mpi_comm,
                     const LocalMeshData& data,
                     const std::vector<int>& cell_partition,
                     const std::map<std::int64_t, std::vector<int>>& ghost_procs,
                     boost::multi_array<std::int64_t, 2>& new_cell_vertices,
                     std::vector<std::int64_t>& new_global_cell_indices,
                     std::vector<int>& new_cell_partition,
                     std::map<std::int32_t, std::set<unsigned int>>& shared_cells);
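
    // Editor's sketch of a call (hypothetical naming; assumes the
    // convention used elsewhere in this class that regular cells come
    // first and ghost cells follow):
    //
    //   const std::int32_t num_regular_cells
    //     = distribute_cells(comm, data, cell_partition, ghost_procs,
    //                        new_cell_vertices, new_global_cell_indices,
    //                        new_cell_partition, shared_cells);
    //   // rows [0, num_regular_cells) of new_cell_vertices: owned cells
    //   // remaining rows: ghost cells received from other processes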

    // FIXME: Improve explanation
    // Utility to convert received_vertex_indices into vertex sharing
    // information
    static void build_shared_vertices(MPI_Comm mpi_comm,
                                      std::map<std::int32_t, std::set<unsigned int>>& shared_vertices,
                                      const std::map<std::int64_t, std::int32_t>& vertex_global_to_local_indices,
                                      const std::vector<std::vector<std::size_t>>& received_vertex_indices);

    // FIXME: make clear what is computed
    // Distribute vertices and vertex sharing information
    static void
    distribute_vertices(const MPI_Comm mpi_comm,
                        const LocalMeshData& mesh_data,
                        const std::vector<std::int64_t>& vertex_indices,
                        boost::multi_array<double, 2>& new_vertex_coordinates,
                        std::map<std::int64_t, std::int32_t>& vertex_global_to_local_indices,
                        std::map<std::int32_t, std::set<unsigned int>>& shared_vertices_local);

    // Compute the local->global and global->local maps for all local
    // vertices on this process, from the global vertex indices on each
    // local cell. Returns the number of regular (non-ghosted) vertices.
    static std::int32_t compute_vertex_mapping(MPI_Comm mpi_comm,
                                               const std::int32_t num_regular_cells,
                                               const boost::multi_array<std::int64_t, 2>& cell_vertices,
                                               std::vector<std::int64_t>& vertex_indices,
                                               std::map<std::int64_t, std::int32_t>& vertex_global_to_local);
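
    // Editor's sketch of the invariant between the two outputs
    // (illustrative values):
    //
    //   vertex_indices         = {17, 4, 256};  // local -> global index
    //   vertex_global_to_local = {{4, 1}, {17, 0}, {256, 2}};
    //   // so vertex_global_to_local[vertex_indices[i]] == i for all i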

    // FIXME: Improve pre-conditions explanation
    // Build local mesh from the cell and vertex data held by this
    // process
    static void build_local_mesh(Mesh& mesh,
                                 const std::vector<std::int64_t>& global_cell_indices,
                                 const boost::multi_array<std::int64_t, 2>& cell_global_vertices,
                                 const CellType::Type cell_type,
                                 const int tdim,
                                 const std::int64_t num_global_cells,
                                 const std::vector<std::int64_t>& vertex_indices,
                                 const boost::multi_array<double, 2>& vertex_coordinates,
                                 const int gdim,
                                 const std::int64_t num_global_vertices,
                                 const std::map<std::int64_t, std::int32_t>& vertex_global_to_local_indices);

    // Create and attach distributed MeshDomains from local_data
    static void build_mesh_domains(Mesh& mesh, const LocalMeshData& local_data);

    // Build a distributed MeshValueCollection from local value data,
    // where each entry has the form ((cell_index, local_entity_index),
    // value)
    template<typename T, typename MeshValueCollection>
    static void build_mesh_value_collection(const Mesh& mesh,
      const std::vector<std::pair<std::pair<std::size_t, std::size_t>, T>>& local_value_data,
      MeshValueCollection& mesh_values);
  };
  //---------------------------------------------------------------------------
  template<typename T>
  void MeshPartitioning::build_distributed_value_collection(MeshValueCollection<T>& values,
    const LocalMeshValueCollection<T>& local_data, const Mesh& mesh)
  {
    // Extract data
    const std::vector<std::pair<std::pair<std::size_t, std::size_t>, T>>& local_values
      = local_data.values();

    // Build MeshValueCollection from local data
    build_mesh_value_collection(mesh, local_values, values);
  }
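  // Editor's usage sketch (hypothetical; how the per-process local
  // data object is obtained depends on the caller, e.g. a mesh-data
  // reader):
  //
  //   // LocalMeshValueCollection<std::size_t> local_data = ...;
  //   MeshValueCollection<std::size_t> values;
  //   MeshPartitioning::build_distributed_value_collection(values,
  //                                                        local_data,
  //                                                        mesh);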
  //---------------------------------------------------------------------------
  template<typename T, typename MeshValueCollection>
  void MeshPartitioning::build_mesh_value_collection(const Mesh& mesh,
    const std::vector<std::pair<std::pair<std::size_t, std::size_t>, T>>& local_value_data,
    MeshValueCollection& mesh_values)
  {
    // Get MPI communicator
    const MPI_Comm mpi_comm = mesh.mpi_comm();

    // Get topological dimensions
    const std::size_t D = mesh.topology().dim();
    const std::size_t dim = mesh_values.dim();
    mesh.init(dim);

    // This is required for old-style mesh data that uses (cell index,
    // local entity index)
    mesh.init(dim, D);

    // Clear MeshValueCollection values
    mesh_values.clear();

    // Initialise global entity numbering
    DistributedMeshTools::number_entities(mesh, D);

    // Get mesh value collection used for marking
    MeshValueCollection& markers = mesh_values;

    // Get local mesh data for domains
    const std::vector<std::pair<std::pair<std::size_t, std::size_t>, T>>&
      ldata = local_value_data;

    // Check that the mesh has global entity indices for dimension D
    if (!mesh.topology().have_global_indices(D))
    {
      dolfin_error("MeshPartitioning.h",
                   "build mesh value collection",
                   "Mesh does not have global entity indices");
    }

    // Get global indices on local process
    const auto& global_entity_indices = mesh.topology().global_indices(D);

    // Add local (to this process) data to domain marker
    std::vector<std::size_t> off_process_global_cell_entities;

    // Build and populate a local map for global_entity_indices
    std::map<std::size_t, std::size_t> map_of_global_entity_indices;
    for (std::size_t i = 0; i < global_entity_indices.size(); i++)
      map_of_global_entity_indices[global_entity_indices[i]] = i;

    for (std::size_t i = 0; i < ldata.size(); ++i)
    {
      const std::map<std::int32_t, std::set<unsigned int>>& sharing_map
        = mesh.topology().shared_entities(D);

      const std::size_t global_cell_index = ldata[i].first.first;
      std::map<std::size_t, std::size_t>::const_iterator data
        = map_of_global_entity_indices.find(global_cell_index);
      if (data != map_of_global_entity_indices.end())
      {
        const std::size_t local_cell_index = data->second;
        const std::size_t entity_local_index = ldata[i].first.second;
        const T value = ldata[i].second;
        markers.set_value(local_cell_index, entity_local_index, value);

        // If shared with other processes, add to off-process list
        if (sharing_map.find(local_cell_index) != sharing_map.end())
          off_process_global_cell_entities.push_back(global_cell_index);
      }
      else
        off_process_global_cell_entities.push_back(global_cell_index);
    }

    // Get destinations and local cell index at destination for
    // off-process cells
    const std::map<std::size_t, std::set<std::pair<std::size_t, std::size_t>>>
      entity_hosts
      = DistributedMeshTools::locate_off_process_entities(off_process_global_cell_entities,
                                                          D, mesh);

    // Number of MPI processes
    const std::size_t num_processes = MPI::size(mpi_comm);

    // Pack data to send to appropriate process
    std::vector<std::vector<std::size_t>> send_data0(num_processes);
    std::vector<std::vector<T>> send_data1(num_processes);
    std::map<std::size_t, std::set<std::pair<std::size_t, std::size_t>>>::const_iterator entity_host;

    {
      // Build a convenience map in order to speed up the loop over
      // local data
      std::map<std::size_t, std::set<std::size_t>> map_of_ldata;
      for (std::size_t i = 0; i < ldata.size(); ++i)
        map_of_ldata[ldata[i].first.first].insert(i);

      for (entity_host = entity_hosts.begin(); entity_host != entity_hosts.end();
           ++entity_host)
      {
        const std::size_t host_global_cell_index = entity_host->first;
        const std::set<std::pair<std::size_t, std::size_t>>& processes_data
          = entity_host->second;

        // Loop over local data
        std::map<std::size_t, std::set<std::size_t>>::const_iterator ldata_it
          = map_of_ldata.find(host_global_cell_index);
        if (ldata_it != map_of_ldata.end())
        {
          for (std::set<std::size_t>::const_iterator it = ldata_it->second.begin();
               it != ldata_it->second.end(); it++)
          {
            const std::size_t local_entity_index = ldata[*it].first.second;
            const T domain_value = ldata[*it].second;

            std::set<std::pair<std::size_t, std::size_t>>::const_iterator process_data;
            for (process_data = processes_data.begin();
                 process_data != processes_data.end(); ++process_data)
            {
              const std::size_t proc = process_data->first;
              const std::size_t local_cell_entity = process_data->second;
              send_data0[proc].push_back(local_cell_entity);
              send_data0[proc].push_back(local_entity_index);
              send_data1[proc].push_back(domain_value);
            }
          }
        }
      }
    }
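
    // Editor's note on the packed layout above: send_data0 holds
    // flattened (local_cell_entity, local_entity_index) pairs and
    // send_data1 holds one value per pair, so after the exchange below
    // received_data0 is exactly twice the length of received_data1
    // (checked by the dolfin_assert).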

    // Send/receive data
    std::vector<std::size_t> received_data0;
    std::vector<T> received_data1;
    MPI::all_to_all(mpi_comm, send_data0, received_data0);
    MPI::all_to_all(mpi_comm, send_data1, received_data1);
    dolfin_assert(2*received_data1.size() == received_data0.size());

    // Add received data to mesh domain
    for (std::size_t i = 0; i < received_data1.size(); ++i)
    {
      const std::size_t local_cell_entity = received_data0[2*i];
      const std::size_t local_entity_index = received_data0[2*i + 1];
      const T value = received_data1[i];
      dolfin_assert(local_cell_entity < mesh.num_cells());
      markers.set_value(local_cell_entity, local_entity_index, value);
    }
  }
  //---------------------------------------------------------------------------

}

#endif