#ifndef __H__UG__parallel_dual_graph_impl__
#define __H__UG__parallel_dual_graph_impl__

#include "../distributed_grid.h"

namespace ug{
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
ParallelDualGraph(MultiGrid* pmg) :
	m_pMG(NULL)
{
	if(pmg)
		set_grid(pmg);
}
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
~ParallelDualGraph()
{
	if(m_pMG)
		detach_data();
}
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
void ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
set_grid(MultiGrid* pmg)
{
	if(m_pMG == pmg)
		return;
	if(m_pMG)
		detach_data();
	m_pMG = pmg;
	if(m_pMG)
		attach_data();
}
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
num_graph_vertices()
{
	if(m_adjacencyMapStructure.empty())
		return 0;
	return (TIndexType)m_adjacencyMapStructure.size() - 1;
}
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
num_graph_edges()
{
	return (TIndexType)m_adjacencyMap.size();
}
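//	note: each undirected dual-graph edge is stored in the adjacency lists of
//	both of its incident nodes (the usual CSR convention, see below), so this
//	value counts directed adjacency entries.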
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType* ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
adjacency_map_structure()
{
	UG_ASSERT(!m_adjacencyMapStructure.empty(),
			  "Call generate_graph before calling this method!");
	return &m_adjacencyMapStructure.front();
}
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType* ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
adjacency_map()
{
	UG_ASSERT(!m_adjacencyMap.empty(),
			  "Call generate_graph before calling this method!");
	return &m_adjacencyMap.front();
}
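/*	Together, adjacency_map_structure() and adjacency_map() encode the dual
	graph in CSR form: the neighbors of graph-node i are the entries
	adjacency_map()[adjacency_map_structure()[i]] up to (exclusively)
	adjacency_map()[adjacency_map_structure()[i+1]].
	Illustrative example: the path graph 0-1-2 would be stored as
	structure = {0, 1, 3, 4} and map = {1, 0, 2, 1}.	*/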
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
TIndexType* ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
parallel_offset_map()
{
	UG_ASSERT(!m_nodeOffsetMap.empty(),
			  "Call generate_graph before calling this method!");
	return &m_nodeOffsetMap.front();
}
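/*	The offset map holds one entry per participating process plus a closing
	sentinel: process p owns the graph nodes in [offsetMap[p], offsetMap[p+1]).
	This matches the 'vtxdist' array expected by parallel partitioners
	such as ParMETIS.	*/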
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
bool ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
was_considered(TGeomBaseObj* o)
{
	UG_ASSERT(m_pMG, "A MultiGrid has to be set!");
	return m_aaElemIndex[o] != -1;
}
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
void ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
attach_data()
{
	UG_ASSERT(m_pMG, "A MultiGrid has to be set!");
//	attach the index-attachments and initialize the accessors
	m_pMG->attach_to<TGeomBaseObj>(m_aElemIndex);
	m_pMG->attach_to<TConnectingObj>(m_aElemIndices);
	m_aaElemIndex.access(*m_pMG, m_aElemIndex);
	m_aaElemIndices.access(*m_pMG, m_aElemIndices);
}
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
void ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
detach_data()
{
	UG_ASSERT(m_pMG, "A MultiGrid has to be set!");
	m_pMG->detach_from<TGeomBaseObj>(m_aElemIndex);
	m_pMG->detach_from<TConnectingObj>(m_aElemIndices);
	m_aaElemIndex.invalidate();
	m_aaElemIndices.invalidate();
}
template <class TGeomBaseObj, class TIndexType, class TConnectingObj>
void ParallelDualGraph<TGeomBaseObj, TIndexType, TConnectingObj>::
generate_graph(int level, pcl::ProcessCommunicator procCom)
{
	GDIST_PROFILE_FUNC();
	UG_ASSERT(m_pMG, "A MultiGrid has to be set!");

	using namespace std;
	typedef TGeomBaseObj	Elem;
	typedef TConnectingObj	ConElem;
	typedef typename geometry_traits<Elem>::iterator	ElemIterator;
	typedef typename geometry_traits<ConElem>::iterator	ConElemIterator;

	MultiGrid& mg = *m_pMG;
	Grid::AttachmentAccessor<Elem, AElemIndex>& aaInd = m_aaElemIndex;
	Grid::AttachmentAccessor<ConElem, AElemIndices>& aaConInds = m_aaElemIndices;
//	assign a consecutive local index to each non-ghost element of the level.
//	ghosts receive index -1 and are thus ignored by the graph.
	size_t numElems = 0;
	m_elems.clear();
	{
		DistributedGridManager& distGridMgr = *mg.distributed_grid_manager();
		for(ElemIterator iter = mg.begin<Elem>(level);
			iter != mg.end<Elem>(level); ++iter)
		{
			if(distGridMgr.is_ghost(*iter))
				aaInd[*iter] = -1;
			else{
				aaInd[*iter] = numElems++;
				m_elems.push_back(*iter);
			}
		}
	}

//	only processes which hold elements on this level participate in the
//	subsequent communication
	m_procCom = procCom.create_sub_communicator(numElems > 0);
	UG_DLOG(LIB_GRID, 2, "ParallelDualGraph-generate_graph: gathering element numbers\n");
	m_nodeOffsetMap.clear();
	int localNodeOffset = 0;

	if(!m_procCom.empty()){
	//	gather the number of elements on each process and accumulate them
	//	into the node offset map
		int numElemsTmp = (int)numElems;
		vector<int> elemCounts(m_procCom.size());
		m_procCom.allgather(&numElemsTmp, 1, PCL_DT_INT,
							&elemCounts.front(), 1, PCL_DT_INT);

		m_nodeOffsetMap.resize(m_procCom.size() + 1);
		int numElemsTotal = 0;
		for(size_t i = 0; i < elemCounts.size(); ++i){
			m_nodeOffsetMap[i] = numElemsTotal;
			numElemsTotal += elemCounts[i];
		}
		m_nodeOffsetMap[elemCounts.size()] = numElemsTotal;
		localNodeOffset = m_nodeOffsetMap[m_procCom.get_local_proc_id()];
	}
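	//	illustrative example: if three participating processes report the
	//	element counts {4, 2, 5}, m_nodeOffsetMap becomes {0, 4, 6, 11} and
	//	process 1 adds localNodeOffset = 4 to each of its local indices.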
	UG_DLOG(LIB_GRID, 2, "ParallelDualGraph-generate_graph: gathering indices of connected elements\n");
//	for each connecting object, collect the global indices of all adjacent
//	non-ghost elements
	{
		typename Grid::traits<Elem>::secure_container elems;
		for(ConElemIterator iter = mg.begin<ConElem>(level);
			iter != mg.end<ConElem>(level); ++iter)
		{
			ConElem* ce = *iter;
			aaConInds[ce].clear();
			mg.associated_elements(elems, ce);
			for(size_t i = 0; i < elems.size(); ++i){
				if(aaInd[elems[i]] != -1)
					aaConInds[ce].push_back(localNodeOffset + aaInd[elems[i]]);
			}
		}
	}
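//	Hedged sketch: the index-lists in aaConInds have to be synchronized
//	across horizontal interfaces, so that connecting objects shared by
//	several processes know the graph-nodes of all their adjacent elements.
//	The gather/copy pattern below is an assumption based on the
//	communication policies this class uses (ComPol_GatherVecAttachment,
//	ComPol_CopyAttachment); constructor signatures are assumed, not verified.
	{
		typedef typename GridLayoutMap::Types<ConElem>::Layout Layout;
		GridLayoutMap& glm = mg.distributed_grid_manager()->grid_layout_map();
		pcl::InterfaceCommunicator<Layout> com;

	//	first gather the index-lists of slaves at their masters...
		ComPol_GatherVecAttachment<Layout, AElemIndices> compolGather(mg, m_aElemIndices);
		if(glm.has_layout<ConElem>(INT_H_SLAVE))
			com.send_data(glm.get_layout<ConElem>(INT_H_SLAVE), compolGather);
		if(glm.has_layout<ConElem>(INT_H_MASTER))
			com.receive_data(glm.get_layout<ConElem>(INT_H_MASTER), compolGather);
		com.communicate();

	//	...then copy the merged lists back to the slaves
		ComPol_CopyAttachment<Layout, AElemIndices> compolCopy(mg, m_aElemIndices);
		if(glm.has_layout<ConElem>(INT_H_MASTER))
			com.send_data(glm.get_layout<ConElem>(INT_H_MASTER), compolCopy);
		if(glm.has_layout<ConElem>(INT_H_SLAVE))
			com.receive_data(glm.get_layout<ConElem>(INT_H_SLAVE), compolCopy);
		com.communicate();
	}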
	UG_DLOG(LIB_GRID, 2, "ParallelDualGraph-generate_graph: building adjacency structure\n");
	m_adjacencyMapStructure.resize(numElems + 1);
	m_adjacencyMapStructure[0] = 0;
	m_adjacencyMap.clear();
	m_connections.clear();
//	the dual graph is currently only built for elements which are connected
//	via their sides
	if(ConElem::dim == Elem::dim - 1){
		int ind = 0;
		typename Grid::traits<ConElem>::secure_container conElems;
		for(ElemIterator iter = mg.begin<Elem>(level);
			iter != mg.end<Elem>(level); ++iter)
		{
			Elem* elem = *iter;
			int eInd = aaInd[elem];
			if(eInd == -1)
				continue;
			eInd += localNodeOffset;

		//	store the offset at which the connections of this element start
			assert(ind < (int)m_adjacencyMapStructure.size());
			m_adjacencyMapStructure[ind] = m_adjacencyMap.size();

		//	push the indices of all neighbors (every entry except eInd itself)
			mg.associated_elements(conElems, elem);
			for(size_t i_con = 0; i_con < conElems.size(); ++i_con){
				std::vector<int>& conInds = aaConInds[conElems[i_con]];
				for(size_t i = 0; i < conInds.size(); ++i){
					UG_ASSERT(conInds[i] != -1,
							  "ghosts should be ignored when assigning conInds.");
					if(conInds[i] != eInd){
						m_adjacencyMap.push_back(conInds[i]);
						m_connections.push_back(conElems[i_con]);
					}
				}
			}
			++ind;
		}
		assert(ind == (int)m_adjacencyMapStructure.size() - 1);
	}
	else{
		UG_THROW("Currently a dual graph can only be created if elements are"
				 " connected via their sides. Since nearly everything is prepared,"
				 " implementing this step for arbitrary connecting elements"
				 " shouldn't be much work.");
	}

//	add the final CSR entry: the total number of adjacency entries
	m_adjacencyMapStructure[m_adjacencyMapStructure.size() - 1] = m_adjacencyMap.size();
}
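/*	Usage sketch (illustrative only; 'mg' is assumed to be a distributed
	MultiGrid and 'idx_t' the index type of the targeted partitioner):

		ParallelDualGraph<Volume, idx_t> pdg(&mg);
		pdg.generate_graph(mg.num_levels() - 1);
		idx_t* xadj    = pdg.adjacency_map_structure();
		idx_t* adjncy  = pdg.adjacency_map();
		idx_t* vtxdist = pdg.parallel_offset_map();
		// xadj/adjncy/vtxdist follow the conventions of parallel
		// partitioners such as ParMETIS.
*/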
}//	end of namespace

#endif