row_sending_scheme.h
/*
 * Copyright (c) 2011-2015: G-CSC, Goethe University Frankfurt
 * Author: Martin Rupp
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__LIB_ALGEBRA__PARALLELIZATION__ROW_SENDING_SCHEME_H_
#define __H__LIB_ALGEBRA__PARALLELIZATION__ROW_SENDING_SCHEME_H_

#include <map>
#include "parallel_nodes.h"
#include "parallelization_util.h"
#include "pcl/pcl.h"
#include "new_layout_creator.h"

namespace ug
{

template<typename matrix_type>
class RowSendingScheme
{
private:
    bool m_bCreateNewNodes;
    typedef std::map<int, BinaryBuffer> BufferMap;
    BufferMap rowsBufferMap;
    const matrix_type &mat;
    typedef typename matrix_type::connection connection;
    std::map<size_t, std::vector<connection> > connections;
    ParallelNodes &PN;
    size_t rowMax, colMax;

public:
    RowSendingScheme(matrix_type &_mat, ParallelNodes &_PN)
        : mat(_mat), PN(_PN)
    {
    }

    void set_create_new_nodes(bool bCreateNewNodes)
    {
        m_bCreateNewNodes = bCreateNewNodes;
    }

    /// serializes the rows of all interfaces in sendLayout and issues the raw sends and receives
    void issue_send(pcl::InterfaceCommunicator<IndexLayout> &communicator,
            const IndexLayout &sendLayout, const IndexLayout &receiveLayout)
    {
        UG_DLOG(LIB_ALG_MATRIX, 4, "*** RowSendingScheme::issue_send: ***\n");
        for(IndexLayout::const_iterator it = sendLayout.begin(); it != sendLayout.end(); ++it)
        {
            const IndexLayout::Interface &interface = sendLayout.interface(it);
            int pid = sendLayout.proc_id(it);
            BinaryBuffer buf;
            for(IndexLayout::Interface::const_iterator iter2 = interface.begin();
                iter2 != interface.end(); ++iter2)
            {
                issue_send(buf, pid, interface.get_element(iter2));
            }
            UG_DLOG(LIB_ALG_MATRIX, 4, "Sending " << buf.write_pos() << " bytes of data to processor " << pid << "\n");
            communicator.send_raw(pid, buf.buffer(), buf.write_pos(), false);
        }

        rowsBufferMap.clear();

        for(IndexLayout::const_iterator it = receiveLayout.begin(); it != receiveLayout.end(); ++it)
        {
            int pid = receiveLayout.proc_id(it);
            UG_DLOG(LIB_ALG_MATRIX, 4, "issue receive from processor " << pid << "\n");
            communicator.receive_raw(pid, rowsBufferMap[pid]);
        }

        PN.issue(communicator);
    }

    void process(const IndexLayout &receiveLayout)
    {
        rowMax = 0; colMax = 0;
        UG_DLOG(LIB_ALG_MATRIX, 4, "*** RowSendingScheme::process: ***\n");
        connections.clear();

        for(IndexLayout::const_iterator it = receiveLayout.begin(); it != receiveLayout.end(); ++it)
        {
            int pid = receiveLayout.proc_id(it);
            const IndexLayout::Interface &interface = receiveLayout.interface(it);

            BinaryBuffer &buf = rowsBufferMap[pid];
            UG_DLOG(LIB_ALG_MATRIX, 4, "rowsBufferMap: received " << buf.write_pos() << " bytes from processor " << pid << "\n");
            for(IndexLayout::Interface::const_iterator iter2 = interface.begin();
                iter2 != interface.end(); ++iter2)
                process(buf, pid, interface.get_element(iter2));
        }

        PN.process();
    }

    void set_rows_in_matrix(matrix_type &mat)
    {
        resize_mat(mat);
        for(typename std::map<size_t, std::vector<connection> >::iterator it = connections.begin();
            it != connections.end(); ++it)
        {
            std::vector<connection> &cons = it->second;
            if(cons.size())
                mat.set_matrix_row(it->first, &cons[0], cons.size());
        }
    }

    void add_rows_to_matrix(matrix_type &mat)
    {
        resize_mat(mat);
        for(typename std::map<size_t, std::vector<connection> >::iterator it = connections.begin();
            it != connections.end(); ++it)
        {
            std::vector<connection> &cons = it->second;
            if(cons.size())
                mat.add_matrix_row(it->first, &cons[0], cons.size());
        }
    }

private:
    void resize_mat(matrix_type &mat)
    {
        size_t cols = std::max(colMax, mat.num_cols());
        size_t rows = std::max(rowMax, mat.num_rows());
        if(rows > mat.num_rows() || cols > mat.num_cols())
            mat.resize_and_keep_values(rows, cols);
    }

    void issue_send(BinaryBuffer &buf, int pid, int localRowIndex)
    {
        size_t num_connections = mat.num_connections(localRowIndex);

        // serialize number of connections
        Serialize(buf, num_connections);
        UG_DLOG(LIB_ALG_MATRIX, 4, "sending to pid " << pid << " row " << localRowIndex << " (" << PN.local_to_global(localRowIndex) << "), " << num_connections << " connections\n");

        for(typename matrix_type::const_row_iterator conn = mat.begin_row(localRowIndex);
            conn != mat.end_row(localRowIndex); ++conn)
        {
            size_t localColIndex = conn.index();
            const AlgebraID &globalColIndex = PN.local_to_global(localColIndex);
            // let ParallelNodes know that process pid will receive this column index
            PN.create_node(globalColIndex, localColIndex, pid);

            // serialize connection
            Serialize(buf, globalColIndex);
            Serialize(buf, conn.value());
            UG_DLOG(LIB_ALG_MATRIX, 4, "  " << localColIndex << " (" << globalColIndex << ") -> " << conn.value() << "\n");
        }
        UG_DLOG(LIB_ALG_MATRIX, 4, "\n");
    }

    void process(BinaryBuffer &buf, int pid, size_t localRowIndex)
    {
        size_t num_connections;
        rowMax = std::max(rowMax, localRowIndex+1);

        // deserialize number of connections
        Deserialize(buf, num_connections);

        UG_DLOG(LIB_ALG_MATRIX, 4, "processing received row " << localRowIndex << ", " << num_connections << " connections\n");

        size_t distanceToMasterOrInner = PN.distance_to_master_or_inner(localRowIndex);

        std::vector<connection> &cons = connections[localRowIndex];
        size_t i = cons.size();
        num_connections += cons.size();
        cons.resize(num_connections);
        size_t j = i;
        for(; i < num_connections; i++)
        {
            AlgebraID globalColIndex;
            Deserialize(buf, globalColIndex);
            Deserialize(buf, cons[j].dValue);
            bool bHasIndex = true;
            if(m_bCreateNewNodes)
                cons[j].iIndex = PN.create_slave_node(globalColIndex, distanceToMasterOrInner+1);
            else
                cons[j].iIndex = PN.get_local_index_if_available(globalColIndex, bHasIndex);

            UG_DLOG(LIB_ALG_MATRIX, 4, "  " << (int)(cons[j].iIndex) << " (" << globalColIndex << ") -> " << cons[j].dValue << "\n");
            if(bHasIndex)
            {
                // if this column index already occurred in the row, accumulate the values
                size_t k;
                for(k = 0; k < j; k++)
                {
                    if(cons[k].iIndex == cons[j].iIndex)
                    {
                        cons[k].dValue += cons[j].dValue;
                        break;
                    }
                }
                if(k == j)
                {
                    colMax = std::max(colMax, cons[j].iIndex+1);
                    j++;
                }
            }
            // connections without a local index are dropped (j is not incremented)
        }
        cons.resize(j);
    }
};

} // namespace ug
#endif /* __H__LIB_ALGEBRA__PARALLELIZATION__ROW_SENDING_SCHEME_H_ */
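
Usage sketch (not part of the original file): the public interface above suggests the call order issue_send, then a communication step, then process, then set_rows_in_matrix or add_rows_to_matrix. The following is a minimal, hypothetical sketch; the names A, pn, com, slaveLayout and masterLayout are assumptions, as is the direction of transfer (slave rows shipped to their masters). pcl::InterfaceCommunicator::communicate() is used to execute the pending raw sends and receives.

// Hypothetical usage: ship slave rows to the master processes and add them there.
// A (matrix_type), pn (ug::ParallelNodes), com (pcl::InterfaceCommunicator<IndexLayout>),
// slaveLayout and masterLayout (IndexLayout) are assumed to be set up elsewhere.
ug::RowSendingScheme<matrix_type> rowScheme(A, pn);
rowScheme.set_create_new_nodes(true);                  // allow new slave nodes for unknown columns
rowScheme.issue_send(com, slaveLayout, masterLayout);  // serialize rows, post sends and receives
com.communicate();                                     // execute the pending communication
rowScheme.process(masterLayout);                       // deserialize rows, merge duplicate columns
rowScheme.add_rows_to_matrix(A);                       // grow A if needed and add the received rows

Note the difference between the two finalization calls: add_rows_to_matrix accumulates the received connections onto the existing entries, while set_rows_in_matrix overwrites the affected rows; which one is appropriate depends on the calling algorithm.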