parallel_dual_graph_impl.hpp
/*
 * Copyright (c) 2013-2015: G-CSC, Goethe University Frankfurt
 * Author: Sebastian Reiter
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__UG__parallel_dual_graph_impl__
#define __H__UG__parallel_dual_graph_impl__

#include "parallel_dual_graph.h"
#include "compol_gather_vec_attachment.h"
#include "compol_copy_attachment.h"
#include "../distributed_grid.h"

namespace ug{

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
ParallelDualGraph(MultiGrid* pmg) :
    m_procCom(pcl::PCD_EMPTY),
    m_pMG(NULL)
{
    if(pmg)
        set_grid(pmg);
}

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
~ParallelDualGraph()
{
    if(m_pMG)
        detach_data();
}

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
void ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
set_grid(MultiGrid* pmg)
{
    if(m_pMG == pmg)
        return;

    if(m_pMG)
        detach_data();

    m_pMG = pmg;
    if(m_pMG)
        attach_data();
}

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
num_graph_vertices()
{
    if(m_adjacencyMapStructure.empty())
        return 0;

    return (TIndexType)m_adjacencyMapStructure.size() - 1;
}
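
// Note: m_adjacencyMapStructure stores one offset per graph vertex plus a
// terminating entry (assigned at the end of generate_graph), hence the size() - 1.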

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
num_graph_edges()
{
    return (TIndexType)m_adjacencyMap.size();
}

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType* ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
adjacency_map_structure()
{
    UG_ASSERT(!m_adjacencyMapStructure.empty(),
              "Call generate_graph before calling this method!");
    return &m_adjacencyMapStructure.front();
}

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType* ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
adjacency_map()
{
    UG_ASSERT(!m_adjacencyMap.empty(),
              "Call generate_graph before calling this method!");
    return &m_adjacencyMap.front();
}

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType* ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
parallel_offset_map()
{
    UG_ASSERT(!m_nodeOffsetMap.empty(),
              "Call generate_graph before calling this method!");
    return &m_nodeOffsetMap.front();
}
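
/* Note: together, the three accessors above describe the distributed dual graph
 * in the CSR form consumed by parallel graph partitioners: adjacency_map_structure()
 * holds the per-vertex offsets, adjacency_map() the concatenated neighbor lists,
 * and parallel_offset_map() the first global vertex index of each process
 * (corresponding, e.g., to ParMETIS' xadj/adjncy/vtxdist arrays).
 */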


template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
bool ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
was_considered(TGeomBaseObj* o)
{
    UG_ASSERT(m_pMG, "A MultiGrid has to be set!");
    return m_aaElemIndex[o] != -1;
}

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
void ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
attach_data()
{
    assert(m_pMG);
    m_pMG->attach_to<TGeomBaseObj>(m_aElemIndex);
    m_pMG->attach_to<TConnectingObj>(m_aElemIndices);
    m_aaElemIndex.access(*m_pMG, m_aElemIndex);
    m_aaElemIndices.access(*m_pMG, m_aElemIndices);
}

template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
void ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
detach_data()
{
    assert(m_pMG);
    m_pMG->detach_from<TGeomBaseObj>(m_aElemIndex);
    m_pMG->detach_from<TConnectingObj>(m_aElemIndices);
    m_aaElemIndex.invalidate();
    m_aaElemIndices.invalidate();
}


template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
void ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
generate_graph(int level, pcl::ProcessCommunicator procCom)
{
    GDIST_PROFILE_FUNC();
    UG_DLOG(LIB_GRID, 1, "ParallelDualGraph-start generate_graph\n");
    UG_ASSERT(m_pMG, "A MultiGrid has to be set!");

    using namespace std;
    typedef TGeomBaseObj Elem;
    typedef TConnectingObj ConElem;
    typedef typename geometry_traits<Elem>::iterator ElemIterator;
    typedef typename geometry_traits<ConElem>::iterator ConElemIterator;

    MultiGrid& mg = *m_pMG;
    Grid::AttachmentAccessor<TGeomBaseObj, AElemIndex>& aaInd = m_aaElemIndex;
    Grid::AttachmentAccessor<TConnectingObj, AElemIndices>& aaConInds = m_aaElemIndices;

    DistributedGridManager& distGridMgr = *mg.distributed_grid_manager();
    GridLayoutMap& glm = distGridMgr.grid_layout_map();

// init the indices and count ghosts and normal elements on the fly
    size_t numElems = 0;
    m_elems.clear();
    {
        for(ElemIterator iter = mg.begin<Elem>(level);
            iter != mg.end<Elem>(level); ++iter)
        {
            if(distGridMgr.is_ghost(*iter))
                aaInd[*iter] = -1;
            else{
                aaInd[*iter] = numElems++;
                m_elems.push_back(*iter);
            }
        }
    }

// generate a local procCom which only contains processes that actually contain elements
    m_procCom = procCom.create_sub_communicator(numElems > 0);
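    // Restricting the communicator to processes that actually hold elements keeps
    // the following allgather (and any later partitioning on m_procCom) free of
    // empty participants.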

// generate the nodeOffsetMap and determine the localNodeOffset
    UG_DLOG(LIB_GRID, 2, "ParallelDualGraph-generate_graph: gathering element numbers\n");
    m_nodeOffsetMap.clear();
    int localNodeOffset = 0;

    if(!m_procCom.empty()){
        int numElemsTmp = (int)numElems;
        vector<int> elemCounts(m_procCom.size());
        m_procCom.allgather(&numElemsTmp, 1, PCL_DT_INT,
                            &elemCounts.front(), 1, PCL_DT_INT);

        m_nodeOffsetMap.resize(m_procCom.size() + 1);
        int numElemsTotal = 0;
        for(size_t i = 0; i < elemCounts.size(); ++i){
            m_nodeOffsetMap[i] = numElemsTotal;
            numElemsTotal += elemCounts[i];
        }
        m_nodeOffsetMap[elemCounts.size()] = numElemsTotal;
        localNodeOffset = m_nodeOffsetMap[m_procCom.get_local_proc_id()];
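        // Illustration: with local element counts {3, 5, 2} on processes 0..2,
        // m_nodeOffsetMap becomes {0, 3, 8, 10}; process i owns the global
        // vertex indices [m_nodeOffsetMap[i], m_nodeOffsetMap[i+1]).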

        UG_DLOG(LIB_GRID, 2, "ParallelDualGraph-generate_graph: gathering indices of connected elements\n");
        // we have to gather indices of connected elements in connecting elements.
        // This is required since we have to communicate those indices between
        // distributed connecting elements.
        typename Grid::traits<Elem>::secure_container elems;
        for(ConElemIterator iter = mg.begin<ConElem>(level);
            iter != mg.end<ConElem>(level); ++iter)
        {
            ConElem* ce = *iter;
            aaConInds[ce].clear();
            mg.associated_elements(elems, ce);
            for(size_t i = 0; i < elems.size(); ++i){
                if(aaInd[elems[i]] != -1)
                    aaConInds[ce].push_back(localNodeOffset + aaInd[elems[i]]);
            }
        }
    }

// communicate connected elements between horizontal interfaces on this level
    typedef typename GridLayoutMap::Types<ConElem>::Layout::LevelLayout Layout;
    pcl::InterfaceCommunicator<Layout> com;

    ComPol_GatherVecAttachment<Layout, AElemIndices> compolGather(mg, m_aElemIndices);
    if(glm.has_layout<ConElem>(INT_H_SLAVE)){
        com.send_data(glm.get_layout<ConElem>(INT_H_SLAVE).layout_on_level(level),
                      compolGather);
    }
    if(glm.has_layout<ConElem>(INT_H_MASTER)){
        com.receive_data(glm.get_layout<ConElem>(INT_H_MASTER).layout_on_level(level),
                         compolGather);
    }
    com.communicate();

    ComPol_CopyAttachment<Layout, AElemIndices> compolCopy(mg, m_aElemIndices);
    if(glm.has_layout<ConElem>(INT_H_MASTER)){
        com.send_data(glm.get_layout<ConElem>(INT_H_MASTER).layout_on_level(level),
                      compolCopy);
    }
    if(glm.has_layout<ConElem>(INT_H_SLAVE)){
        com.receive_data(glm.get_layout<ConElem>(INT_H_SLAVE).layout_on_level(level),
                         compolCopy);
    }
    com.communicate();
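    // After those two steps each copy of a connecting element (master and slaves
    // alike) holds the complete list of indices of non-ghost elements attached to
    // it on any process: slaves first gathered their entries at the master, which
    // then copied the combined list back to its slaves.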

    UG_DLOG(LIB_GRID, 2, "ParallelDualGraph-generate_graph: building adjacency structure\n");
// init the adjacencyMapStructure
    m_adjacencyMapStructure.resize(numElems + 1);
    m_adjacencyMapStructure[0] = 0;
    m_adjacencyMap.clear();
    m_connections.clear();
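    // The two arrays form a CSR-style graph: the neighbors of local vertex v are
    // m_adjacencyMap[m_adjacencyMapStructure[v] .. m_adjacencyMapStructure[v+1]).
    // Illustration: three elements in a row (0-1 and 1-2 each sharing a side,
    // global indices starting at 0) yield
    //   m_adjacencyMapStructure = {0, 1, 3, 4}
    //   m_adjacencyMap          = {1, 0, 2, 1}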

// generate adjacency structure.
    typename Grid::traits<ConElem>::secure_container conElems;
    if(ConElem::dim == Elem::dim - 1){
        int ind = 0;
        for(ElemIterator iter = mg.begin<Elem>(level); iter != mg.end<Elem>(level); ++iter)
        {
            Elem* elem = *iter;
            int eInd = aaInd[elem];
            if(eInd == -1)
                continue;
            eInd += localNodeOffset;

            // store the first entry at which the connections will be written to the map
            assert(ind < (int)m_adjacencyMapStructure.size());
            m_adjacencyMapStructure[ind] = m_adjacencyMap.size();

            mg.associated_elements(conElems, elem);
            // iterate over the neighbors and push adjacent indices to the adjacencyMap
            for(size_t i_con = 0; i_con < conElems.size(); ++i_con){
                std::vector<int>& conInds = aaConInds[conElems[i_con]];
                for(size_t i = 0; i < conInds.size(); ++i){
                    UG_ASSERT(conInds[i] != -1, "ghosts should be ignored when assigning conInds.");
                    if(conInds[i] != eInd){
                        m_adjacencyMap.push_back(conInds[i]);
                        m_connections.push_back(conElems[i_con]);
                    }
                }
            }
            ++ind;
        }
        assert(ind == (int)m_adjacencyMapStructure.size() - 1);
    }
    else{
        // if ConElem::dim < Elem::dim - 1 then we have to check whether indices
        // have already been added...
        UG_THROW("Currently a dual graph can only be created if elements are"
                 " connected via their sides. Since nearly everything is prepared,"
                 " implementing this step for arbitrary connecting elements shouldn't"
                 " be much work.");
    }

// add the final entry, which marks the end of the last adjacency list
    m_adjacencyMapStructure[m_adjacencyMapStructure.size() - 1] = m_adjacencyMap.size();
    UG_DLOG(LIB_GRID, 1, "ParallelDualGraph-stop generate_graph\n");
}
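
/* A minimal usage sketch (hypothetical calling code; 'Partition' stands in for
 * whatever routine consumes the CSR arrays and is not part of this file):
 *
 *     ParallelDualGraph<Volume, int, Face> pdg(&mg);
 *     pdg.generate_graph(lvl);                       // lvl: the grid level to partition
 *     int* xadj    = pdg.adjacency_map_structure();  // num_graph_vertices()+1 offsets
 *     int* adjncy  = pdg.adjacency_map();            // num_graph_edges() neighbor indices
 *     int* vtxdist = pdg.parallel_offset_map();      // per-process global offsets
 *     // Partition(xadj, adjncy, vtxdist, ...);
 */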

}// end of namespace

#endif