domain_distribution_impl.hpp
/*
 * Copyright (c) 2011-2015: G-CSC, Goethe University Frankfurt
 * Author: Sebastian Reiter
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__UG__domain_distribution_impl__
#define __H__UG__domain_distribution_impl__

#include "domain_distribution.h"

#ifdef UG_PARALLEL
    #include "pcl/pcl.h"
#endif


namespace ug
{

template <typename TDomain>
static bool PartitionDomain_RegularGrid(TDomain& domain, PartitionMap& partitionMap,
                                        int numCellsX, int numCellsY, int numCellsZ,
                                        bool surfaceOnly)
{
    PROFILE_FUNC_GROUP("parallelization");
//  prepare the partition map and a vertex position attachment accessor
    SmartPtr<MultiGrid> pMG = domain.grid();
    partitionMap.assign_grid(*pMG);

    #ifdef UG_PARALLEL

        SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

    //  a distributed grid manager is required
        if(!domain.distributed_grid_manager()){
            UG_LOG("A distributed grid manager is required in the given domain.\n");
            return false;
        }

        typedef typename TDomain::position_attachment_type TAPos;
        Grid::VertexAttachmentAccessor<TAPos> aaPos(*pMG,
                                                    domain.position_attachment());

    //  this callback allows us to only distribute surface elements which are not ghosts
        IsRegularSurfaceElem cbConsiderElem(*domain.distributed_grid_manager());

    //  we need a process to which elements that are not considered will be sent;
    //  those elements should stay on the current process.
        int localProc = pcl::ProcRank();

        int bucketSubset = partitionMap.find_target_proc(localProc);
        if(bucketSubset == -1)
            bucketSubset = (int)partitionMap.num_target_procs();

    //  partition the grid
        if(pMG->num<Volume>() > 0){
            if(!surfaceOnly)
                PartitionElements_RegularGrid<Volume>(
                                        partitionHandler,
                                        pMG->begin<Volume>(), pMG->end<Volume>(),
                                        numCellsX, numCellsY, numCellsZ, aaPos,
                                        ConsiderAll(), bucketSubset);
            else
                PartitionElements_RegularGrid<Volume>(
                                        partitionHandler,
                                        pMG->begin<Volume>(), pMG->end<Volume>(),
                                        numCellsX, numCellsY, numCellsZ, aaPos,
                                        cbConsiderElem, bucketSubset);
        }
        else if(pMG->num<Face>() > 0){
            if(!surfaceOnly)
                PartitionElements_RegularGrid<Face>(
                                        partitionHandler,
                                        pMG->begin<Face>(), pMG->end<Face>(),
                                        numCellsX, numCellsY, numCellsZ, aaPos,
                                        ConsiderAll(), bucketSubset);
            else
                PartitionElements_RegularGrid<Face>(
                                        partitionHandler,
                                        pMG->begin<Face>(), pMG->end<Face>(),
                                        numCellsX, numCellsY, numCellsZ, aaPos,
                                        cbConsiderElem, bucketSubset);
        }
        else if(pMG->num<Edge>() > 0){
            if(!surfaceOnly)
                PartitionElements_RegularGrid<Edge>(
                                        partitionHandler,
                                        pMG->begin<Edge>(), pMG->end<Edge>(),
                                        numCellsX, numCellsY, numCellsZ, aaPos,
                                        ConsiderAll(), bucketSubset);
            else
                PartitionElements_RegularGrid<Edge>(
                                        partitionHandler,
                                        pMG->begin<Edge>(), pMG->end<Edge>(),
                                        numCellsX, numCellsY, numCellsZ, aaPos,
                                        cbConsiderElem, bucketSubset);
        }
        else if(pMG->num<Vertex>() > 0){
            if(!surfaceOnly)
                PartitionElements_RegularGrid<Vertex>(
                                        partitionHandler,
                                        pMG->begin<Vertex>(), pMG->end<Vertex>(),
                                        numCellsX, numCellsY, numCellsZ, aaPos,
                                        ConsiderAll(), bucketSubset);
            else
                PartitionElements_RegularGrid<Vertex>(
                                        partitionHandler,
                                        pMG->begin<Vertex>(), pMG->end<Vertex>(),
                                        numCellsX, numCellsY, numCellsZ, aaPos,
                                        cbConsiderElem, bucketSubset);
        }
        else{
            LOG("partitioning could not be performed - "
                << "grid doesn't contain any elements!\n");
            return false;
        }

    //  if elements have been assigned to the bucket subset, we have to make sure
    //  that the local process is also present in the process-map
        if(!partitionHandler.empty(bucketSubset)){
            if(bucketSubset >= (int)partitionMap.num_target_procs())
                partitionMap.add_target_proc(localProc);
        }

        return true;
    #endif

    UG_LOG("WARNING: PartitionDomain_RegularGrid is currently only implemented for");
    UG_LOG(" parallel environments.\n");
    return false;
}
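
//  A minimal usage sketch (not from the original source): "dom" stands for an
//  already loaded domain object and the cell counts are illustrative. In a run
//  with four processes, each of the 2*2*1 grid cells is mapped to one target
//  process of the partition map before the domain is distributed.
//
//      ug::PartitionMap pm;
//      for(int p = 0; p < 4; ++p)              // one target process per grid cell
//          pm.add_target_proc(p);
//      if(ug::PartitionDomain_RegularGrid(dom, pm, 2, 2, 1, true))
//          ug::DistributeDomain(dom, pm, true);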

template <typename TDomain>
static bool
PartitionDomain_MetisKWay(TDomain& domain, PartitionMap& partitionMap,
                          int numPartitions, size_t baseLevel,
                          int hWeight, int vWeight)
{
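//  hWeight and vWeight are forwarded unchanged to PartitionMultiGrid_MetisKway
//  below; presumably they scale the edge weights of the graph handed to METIS
//  for horizontal connections (neighbors on the same grid level) and vertical
//  connections (parent/child relations in the multigrid hierarchy), respectively.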
    PROFILE_FUNC_GROUP("parallelization");
//  prepare the partition map
    SmartPtr<MultiGrid> pMG = domain.grid();
    partitionMap.assign_grid(*pMG);

#ifdef UG_PARALLEL

    SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

//  we need a process to which elements that are not considered will be sent;
//  those elements should stay on the current process.
    int localProc = pcl::ProcRank();

    int bucketSubset = partitionMap.find_target_proc(localProc);
    if(bucketSubset == -1)
        bucketSubset = (int)partitionMap.num_target_procs();

//  call the actual partitioning routine
    if(pMG->num<Volume>() > 0){
        PartitionMultiGrid_MetisKway<Volume>(partitionHandler, *pMG, numPartitions,
                                             baseLevel, hWeight, vWeight);
    //  assign all elements below baseLevel to bucketSubset
        for(size_t lvl = 0; lvl < baseLevel; ++lvl)
            partitionHandler.assign_subset(pMG->begin<Volume>(lvl), pMG->end<Volume>(lvl),
                                           bucketSubset);
    }
    else if(pMG->num<Face>() > 0){
        PartitionMultiGrid_MetisKway<Face>(partitionHandler, *pMG, numPartitions,
                                           baseLevel, hWeight, vWeight);
    //  assign all elements below baseLevel to bucketSubset
        for(size_t lvl = 0; lvl < baseLevel; ++lvl)
            partitionHandler.assign_subset(pMG->begin<Face>(lvl), pMG->end<Face>(lvl),
                                           bucketSubset);
    }
    else if(pMG->num<Edge>() > 0){
        PartitionMultiGrid_MetisKway<Edge>(partitionHandler, *pMG, numPartitions,
                                           baseLevel, hWeight, vWeight);
    //  assign all elements below baseLevel to bucketSubset
        for(size_t lvl = 0; lvl < baseLevel; ++lvl)
            partitionHandler.assign_subset(pMG->begin<Edge>(lvl), pMG->end<Edge>(lvl),
                                           bucketSubset);
    }

    if(!partitionHandler.empty(bucketSubset)){
        if(bucketSubset >= (int)partitionMap.num_target_procs())
            partitionMap.add_target_proc(localProc);
    }

    return true;
#else
    UG_LOG("WARNING in PartitionDomain_MetisKWay: Only available in parallel builds.\n");
    return false;
#endif
}

template <typename TDomain>
static bool
PartitionDomain_MetisKWay(TDomain& domain, PartitionMap& partitionMap,
                          int numPartitions, size_t baseLevel,
                          SmartPtr<PartitionWeighting> weightFct)
{
    PROFILE_FUNC_GROUP("parallelization");
//  prepare the partition map
    SmartPtr<MultiGrid> pMG = domain.grid();
    partitionMap.assign_grid(*pMG);

#ifdef UG_PARALLEL

    SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

    PartitionWeighting& wFct = *weightFct;
    wFct.set_subset_handler(domain.subset_handler().operator->());
//  we need a process to which elements that are not considered will be sent;
//  those elements should stay on the current process.
    int localProc = pcl::ProcRank();

    int bucketSubset = partitionMap.find_target_proc(localProc);
    if(bucketSubset == -1)
        bucketSubset = (int)partitionMap.num_target_procs();

//  call the actual partitioning routine
    if(pMG->num<Volume>() > 0){
    //  do not use boost::function<...> f = wFct, since this would slice wFct
    //  and lose the properties of the derived weighting object
        boost::function<int (Volume*, Volume*)> f = boost::ref(wFct);
        PartitionMultiGrid_MetisKway<Volume>(partitionHandler, *pMG, numPartitions, baseLevel, f);
    //  assign all elements below baseLevel to bucketSubset
        for(size_t lvl = 0; lvl < baseLevel; ++lvl)
            partitionHandler.assign_subset(pMG->begin<Volume>(lvl), pMG->end<Volume>(lvl),
                                           bucketSubset);
    }
    else if(pMG->num<Face>() > 0){
        boost::function<int (Face*, Face*)> f = boost::ref(wFct);
        PartitionMultiGrid_MetisKway<Face>(partitionHandler, *pMG, numPartitions, baseLevel, f);
    //  assign all elements below baseLevel to bucketSubset
        for(size_t lvl = 0; lvl < baseLevel; ++lvl)
            partitionHandler.assign_subset(pMG->begin<Face>(lvl), pMG->end<Face>(lvl),
                                           bucketSubset);
    }
    else if(pMG->num<Edge>() > 0){
        boost::function<int (Edge*, Edge*)> f = boost::ref(wFct);
        PartitionMultiGrid_MetisKway<Edge>(partitionHandler, *pMG, numPartitions, baseLevel, f);
    //  assign all elements below baseLevel to bucketSubset
        for(size_t lvl = 0; lvl < baseLevel; ++lvl)
            partitionHandler.assign_subset(pMG->begin<Edge>(lvl), pMG->end<Edge>(lvl),
                                           bucketSubset);
    }

    if(!partitionHandler.empty(bucketSubset)){
        if(bucketSubset >= (int)partitionMap.num_target_procs())
            partitionMap.add_target_proc(localProc);
    }

    return true;
#else
    UG_LOG("WARNING in PartitionDomain_MetisKWay: Only available in parallel builds.\n");
    return false;
#endif
}
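
//  A usage sketch for the weighted overload (illustrative, not from the original
//  source; "dom" denotes an already loaded domain and the default-constructed
//  PartitionWeighting is assumed to yield uniform weights - derived weighting
//  classes can be passed the same way):
//
//      SmartPtr<ug::PartitionWeighting> w(new ug::PartitionWeighting());
//      ug::PartitionMap pm;
//      for(int p = 0; p < pcl::NumProcs(); ++p)
//          pm.add_target_proc(p);
//      ug::PartitionDomain_MetisKWay(dom, pm, pcl::NumProcs(), 0, w);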


template <typename TDomain>
static bool
PartitionDomain_LevelBased(TDomain& domain, PartitionMap& partitionMap,
                           int numPartitions, size_t level)
{
    PROFILE_FUNC_GROUP("parallelization");
//  prepare the partition map
    SmartPtr<MultiGrid> pMG = domain.grid();
    partitionMap.assign_grid(*pMG);
    SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

//  call the actual partitioning routine
    switch(domain.domain_info().element_type()){
        case VOLUME:
            PartitionMultiGridLevel_MetisKway<Volume>(partitionHandler, *pMG, numPartitions, level);
            break;

        case FACE:
            PartitionMultiGridLevel_MetisKway<Face>(partitionHandler, *pMG, numPartitions, level);
            break;

        case EDGE:
            PartitionMultiGridLevel_MetisKway<Edge>(partitionHandler, *pMG, numPartitions, level);
            break;

        default:
            UG_THROW("Partitioning only works for element types EDGE, FACE, and VOLUME!");
            break;
    }

    return true;
}


template <typename TDomain>
static bool
PartitionDistributedDomain_LevelBased(TDomain& domain, PartitionMap& partitionMap,
                                      int numPartitions, size_t level)
{
    PROFILE_FUNC_GROUP("parallelization");
//  prepare the partition map
    SmartPtr<MultiGrid> pMG = domain.grid();
    partitionMap.assign_grid(*pMG);
    SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

//  call the actual partitioning routine
    switch(domain.domain_info().element_type()){
        case VOLUME:
            PartitionMultiGridLevel_ParmetisKway<Volume>(partitionHandler, *pMG, numPartitions, level);
            break;

        case FACE:
            PartitionMultiGridLevel_ParmetisKway<Face>(partitionHandler, *pMG, numPartitions, level);
            break;

        case EDGE:
            PartitionMultiGridLevel_ParmetisKway<Edge>(partitionHandler, *pMG, numPartitions, level);
            break;

        default:
            UG_THROW("Partitioning only works for element types EDGE, FACE, and VOLUME!");
            break;
    }

    return true;
}


template <typename TDomain>
static bool DistributeDomain(TDomain& domainOut,
                             PartitionMap& partitionMap,
                             bool createVerticalInterfaces)
{
    PROFILE_FUNC_GROUP("parallelization");
//todo  Use a process-communicator to restrict communication

//  make sure that the input is fine
    typedef typename TDomain::grid_type GridType;
    SmartPtr<GridType> pGrid = domainOut.grid();
    SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

    if(partitionHandler.grid() != pGrid.get()){
        partitionMap.assign_grid(*pGrid);
    }

#ifdef UG_PARALLEL

    typedef typename TDomain::position_attachment_type position_attachment_type;

//  used to check whether all processes are correctly prepared for redistribution
    //bool performDistribution = true;

//  make sure that the number of subsets and target processes match
//  THIS MAKES NO SENSE FOR PARALLEL REDISTRIBUTION - IT IS CLEAR THAT SOME
//  PROCS WON'T DELIVER TO ALL PROCS IN THE MAP.
/*  const int numSubs = partitionHandler.num_subsets();
    const int numTargetProcs = (int)partitionMap.num_target_procs();
    if(numSubs > numTargetProcs){
        UG_LOG("ERROR in RedistributeDomain: More partitions than target processes.\n");
        performDistribution = false;
    }
    else if(numSubs < numTargetProcs){
        UG_LOG("ERROR in RedistributeDomain: More target processes than partitions.\n");
        performDistribution = false;
    }
*/

//todo: check whether all target-processes in partitionMap are in the valid range.

    PCL_PROFILE(RedistributeDomain);

//todo  Use a process-communicator to restrict communication
/*
    if(!pcl::AllProcsTrue(performDistribution))
        return false;
*/

//  data serialization
    SPVertexDataSerializer posSerializer =
            GeomObjAttachmentSerializer<Vertex, position_attachment_type>::
                create(*pGrid, domainOut.position_attachment());

    SPGridDataSerializer shSerializer = SubsetHandlerSerializer::
                create(*domainOut.subset_handler());

    GridDataSerializationHandler serializer;
    serializer.add(posSerializer);
    serializer.add(shSerializer);

    std::vector<std::string> additionalSHNames = domainOut.additional_subset_handler_names();
    for(size_t i = 0; i < additionalSHNames.size(); ++i){
        SmartPtr<ISubsetHandler> sh = domainOut.additional_subset_handler(additionalSHNames[i]);
        if(sh.valid()){
            shSerializer = SubsetHandlerSerializer::create(*sh);
            serializer.add(shSerializer);
        }
    }

//  now call redistribution
    DistributeGrid(*pGrid, partitionHandler, serializer, createVerticalInterfaces,
                   &partitionMap.get_target_proc_vec());

    PCL_PROFILE_END();
#endif

//  in the serial case there's nothing to do.
    return true;
}
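
//  A typical end-to-end workflow sketch (illustrative, not from the original
//  source; "dom" stands for an already loaded, possibly distributed domain):
//  the partition map receives one target entry per process, the top grid level
//  is partitioned, and the result is distributed with vertical interfaces so
//  that a hierarchically distributed multigrid can be built on top.
//
//      ug::PartitionMap pm;
//      for(int p = 0; p < pcl::NumProcs(); ++p)
//          pm.add_target_proc(p);
//      ug::PartitionDistributedDomain_LevelBased(dom, pm,
//                                                pcl::NumProcs(),
//                                                dom.grid()->num_levels() - 1);
//      ug::DistributeDomain(dom, pm, true);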

}// end of namespace

#endif