ug4
Loading...
Searching...
No Matches
parallelization_util.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2010-2015: G-CSC, Goethe University Frankfurt
3 * Authors: Andreas Vogel, Sebastian Reiter
4 *
5 * This file is part of UG4.
6 *
7 * UG4 is free software: you can redistribute it and/or modify it under the
8 * terms of the GNU Lesser General Public License version 3 (as published by the
9 * Free Software Foundation) with the following additional attribution
10 * requirements (according to LGPL/GPL v3 §7):
11 *
12 * (1) The following notice must be displayed in the Appropriate Legal Notices
13 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
14 *
15 * (2) The following notice must be displayed at a prominent place in the
16 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
17 *
18 * (3) The following bibliography is recommended for citation and must be
19 * preserved in all covered files:
20 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
21 * parallel geometric multigrid solver on hierarchically distributed grids.
22 * Computing and visualization in science 16, 4 (2013), 151-164"
23 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
24 * flexible software system for simulating pde based models on high performance
25 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU Lesser General Public License for more details.
31 */
32
33#ifndef __H__LIB_ALGEBRA__PARALLELIZATION__PARALLELIZATION_UTIL__
34#define __H__LIB_ALGEBRA__PARALLELIZATION__PARALLELIZATION_UTIL__
35
#include <map>
#include <utility>
#include <vector>

#include "common/assert.h"
#include "algebra_id.h"
#include "algebra_layouts.h"
// NOTE(review): the extraction dropped internal line 41; judging from the
// ComPol_* classes used below (declared in communication_policies.h per the
// documentation index), the missing include is restored here — confirm.
#include "communication_policies.h"
43
44// additions for profiling (18042011ih)
46#define PROFILE_PARALLELIZATION_UTIL
47#ifdef PROFILE_PARALLELIZATION_UTIL
48 #define PU_PROFILE_FUNC() PROFILE_FUNC()
49 #define PU_PROFILE_BEGIN(name) PROFILE_BEGIN(name)
50 #define PU_PROFILE_END(name) PROFILE_END_(name)
51#else
52 #define PU_PROFILE_FUNC()
53 #define PU_PROFILE_BEGIN(name)
54 #define PU_PROFILE_END(name)
55#endif
56// additions for profiling - end
57
58namespace ug{
59
72
73
75
80template <class TLayout>
82 std::vector<AlgebraID>& idsOut,
83 size_t numIDs,
84 const TLayout& masterLayout,
85 const TLayout& slaveLayout)
86{
87 PROFILE_FUNC_GROUP("algebra parallelization");
88// generate an id for each entry.
89 idsOut.resize(numIDs);
90 int localProc = pcl::ProcRank();
91 for(size_t i = 0; i < numIDs; ++i)
92 idsOut[i] = AlgebraID(localProc, i);
93
94// copy all ids from master to slave interfaces
96
97 communicator.send_data(masterLayout, copyPol);
98 communicator.receive_data(slaveLayout, copyPol);
99 communicator.communicate();
100
101// a set of global ids has now been generated.
102}
103
104
105template <typename TMatrix>
107{
108 PROFILE_FUNC_GROUP("algebra parallelization");
109 using namespace std;
110 vector<AlgebraID> globalIDs;
111 const IndexLayout& masters = mat.layouts()->master();
112 const IndexLayout& slaves = mat.layouts()->slave();
113 pcl::InterfaceCommunicator<IndexLayout>& comm = mat.layouts()->comm();
114
115 GenerateGlobalAlgebraIDs(comm, globalIDs, mat.num_rows(), masters, slaves);
116
117// global ids are applied, now communicate...
118 ComPol_MatAddRowsOverlap0<TMatrix> comPolMatAdd(mat, globalIDs);
119 comm.send_data(slaves, comPolMatAdd);
120 comm.receive_data(masters, comPolMatAdd);
121 comm.communicate();
122}
123
124
125template <typename TMatrix>
127{
128 PROFILE_FUNC_GROUP("algebra parallelization");
129 using namespace std;
130 vector<AlgebraID> globalIDs;
131 const IndexLayout& masters = mat.layouts()->master();
132 const IndexLayout& slaves = mat.layouts()->slave();
133 pcl::InterfaceCommunicator<IndexLayout>& comm = mat.layouts()->comm();
134
135 GenerateGlobalAlgebraIDs(comm, globalIDs, mat.num_rows(), masters, slaves);
136
137// global ids are applied, now communicate...
138 ComPol_MatAddRowsOverlap0<TMatrix> comPolMatAdd(mat, globalIDs);
139 comm.send_data(slaves, comPolMatAdd);
140 comm.receive_data(masters, comPolMatAdd);
141 comm.communicate();
142
143 ComPol_MatCopyRowsOverlap0<TMatrix> comPolMatCopy(mat, globalIDs);
144 comm.send_data(masters, comPolMatCopy);
145 comm.receive_data(slaves, comPolMatCopy);
146 comm.communicate();
147}
148
150
159template <typename TVector>
160void AdditiveToConsistent( TVector* pVec,
161 const IndexLayout& masterLayout, const IndexLayout& slaveLayout,
163{
164 PROFILE_FUNC_GROUP("algebra parallelization");
165 // create a new communicator if required.
167 if(!pCom)
168 pCom = &tCom;
170
171 // step 1: add slave values to master
172 // create the required communication policies
173 ComPol_VecAdd<TVector> cpVecAdd(pVec);
174
175 PU_PROFILE_BEGIN(AdditiveToConsistent_step1);
176 // perform communication
177 com.send_data(slaveLayout, cpVecAdd);
178 com.receive_data(masterLayout, cpVecAdd);
179 com.communicate();
180 PU_PROFILE_END(AdditiveToConsistent_step1);
181 // step 2: copy master values to slaves
182 // create the required communication policies
183 ComPol_VecCopy<TVector> cpVecCopy(pVec);
184
185 PU_PROFILE_BEGIN(AdditiveToConsistent_step2);
186 // perform communication
187 com.send_data(masterLayout, cpVecCopy);
188 com.receive_data(slaveLayout, cpVecCopy);
189 com.communicate();
190 PU_PROFILE_END(AdditiveToConsistent_step2);
191}
192
194
203template <typename TVector>
204void UniqueToConsistent( TVector* pVec,
205 const IndexLayout& masterLayout, const IndexLayout& slaveLayout,
207{
208 PROFILE_FUNC_GROUP("algebra parallelization");
209// create a new communicator if required.
211 if(!pCom)
212 pCom = &tCom;
214
215// step 1: copy master values to slaves
216// create the required communication policies
217 ComPol_VecCopy<TVector> cpVecCopy(pVec);
218
219// perform communication
220 com.send_data(masterLayout, cpVecCopy);
221 com.receive_data(slaveLayout, cpVecCopy);
222 com.communicate();
223}
224
225
227template <typename TVector>
228void CopyValues( TVector* pVec,
229 const IndexLayout& sourceLayout, const IndexLayout& targetLayout,
231{
232 PROFILE_FUNC_GROUP("algebra parallelization");
233// create a new communicator if required.
235 if(!pCom)
236 pCom = &tCom;
238
239// step 1: copy master values to slaves
240// create the required communication policies
241 ComPol_VecCopy<TVector> cpVecCopy(pVec);
242
243// perform communication
244 com.send_data(sourceLayout, cpVecCopy);
245 com.receive_data(targetLayout, cpVecCopy);
246 com.communicate();
247}
248
249
251
260template <typename TVector>
261void AdditiveToUnique( TVector* pVec,
262 const IndexLayout& masterLayout, const IndexLayout& slaveLayout,
264{
265 PROFILE_FUNC_GROUP("algebra parallelization");
266 // create a new communicator if required.
268 if(!pCom)
269 pCom = &tCom;
271
272 // step 1: add slave values to master and set slave values to zero
273 // create the required communication policies
274 ComPol_VecAddSetZero<TVector> cpVecAddSetZero(pVec);
275
276 // perform communication
277 com.send_data(slaveLayout, cpVecAddSetZero);
278 com.receive_data(masterLayout, cpVecAddSetZero);
279 com.communicate();
280}
281
283
288template <typename TVector>
289void SetInterfaceValues(TVector* pVec,
290 const IndexLayout::Interface& interface,
291 typename TVector::value_type val)
292{
293 PROFILE_FUNC_GROUP("algebra parallelization");
294
295// loop over indices
296 for(typename IndexLayout::Interface::const_iterator iter = interface.begin();
297 iter != interface.end(); ++iter)
298 {
299 // get index
300 const size_t index = interface.get_element(iter);
301
302 // set value of vector to zero
303 (*pVec)[index] = val;
304 }
305}
306
307
309
314template <typename TVector>
315void SetLayoutValues( TVector* pVec,
316 const IndexLayout& layout,
317 typename TVector::value_type val)
318{
319 PROFILE_FUNC_GROUP("algebra parallelization");
320// interface iterators
321 typename IndexLayout::const_iterator iter = layout.begin();
322 typename IndexLayout::const_iterator end = layout.end();
323
324// iterate over interfaces
325 for(; iter != end; ++iter)
326 {
327 // get interface
328 const typename IndexLayout::Interface& interface = layout.interface(iter);
329
330 // loop over indices
331 for(typename IndexLayout::Interface::const_iterator iter = interface.begin();
332 iter != interface.end(); ++iter)
333 {
334 // get index
335 const size_t index = interface.get_element(iter);
336
337 // set value of vector to zero
338 (*pVec)[index] = val;
339 }
340 }
341}
342
344
349template <typename TVector>
350void ScaleLayoutValues( TVector* pVec,
351 const IndexLayout& layout,
352 number scale)
353{
354 PROFILE_FUNC_GROUP("algebra parallelization");
355// interface iterators
356 typename IndexLayout::const_iterator iter = layout.begin();
357 typename IndexLayout::const_iterator end = layout.end();
358
359// iterate over interfaces
360 for(; iter != end; ++iter)
361 {
362 // get interface
363 const typename IndexLayout::Interface& interface = layout.interface(iter);
364
365 // loop over indices
366 for(typename IndexLayout::Interface::const_iterator iter = interface.begin();
367 iter != interface.end(); ++iter)
368 {
369 // get index
370 const size_t index = interface.get_element(iter);
371
372 // set value of vector to zero
373 (*pVec)[index] *= scale;
374 }
375 }
376}
377
378
380
387template <typename TVector>
388void ConsistentToUnique( TVector* pVec,
389 const IndexLayout& slaveLayout)
390{
391 PROFILE_FUNC_GROUP("algebra parallelization");
392 SetLayoutValues(pVec, slaveLayout, 0.0);
393}
394
396
405template <typename TVector>
406void VecSubtractOnLayout( TVector* pVec,
407 const IndexLayout& masterLayout, const IndexLayout& slaveLayout,
409{
410 PROFILE_FUNC_GROUP("algebra parallelization");
411 // create a new communicator if required.
413 if(!pCom)
414 pCom = &tCom;
416
417 // step 1: subtract slave values from master
418 // create the required communication policies
419 ComPol_VecSubtract<TVector> cpVecSubtract(pVec);
420
421 // Subtract slave values from master values
422 com.send_data(slaveLayout, cpVecSubtract);
423 com.receive_data(masterLayout, cpVecSubtract);
424 com.communicate();
425
426 // step 2: Copy values to slaves
427 ComPol_VecScaleCopy<TVector> cpVecScaleCopy(pVec, -1.0);
428
429 com.send_data(masterLayout, cpVecScaleCopy);
430 com.receive_data(slaveLayout, cpVecScaleCopy);
431 com.communicate();
432}
433
435
444template <typename TVector>
446 const IndexLayout& masterLayout,
447 const IndexLayout& slaveLayout,
449{
450 PROFILE_FUNC_GROUP("algebra parallelization");
451 // create a new communicator if required.
453 if(!pCom)
454 pCom = &tCom;
456
457 // create the required communication policies
458 ComPol_VecSubtractOnlyOneSlave<TVector> cpVecSubtractOOS(pVec);
459
460 // sending: slaves, receiving: masters; masters subtract the value of only
461 // one slave on reception (according to the policy used)
462 com.send_data(slaveLayout, cpVecSubtractOOS);
463 com.receive_data(masterLayout, cpVecSubtractOOS);
464 com.communicate();
465
466}
467
468
470
481template <typename TVector>
482void VecCopy( TVector* pVec,
483 const IndexLayout& masterLayout, const IndexLayout& slaveLayout,
485{
486 PROFILE_FUNC_GROUP("algebra parallelization");
487 // create a new communicator if required.
489 if(!pCom)
490 pCom = &tCom;
492
493 // copy master values to slaves
494 // create the required communication policies
495 ComPol_VecCopy<TVector> cpVecCopy(pVec);
496
497 // perform communication
498 com.send_data(masterLayout, cpVecCopy);
499 com.receive_data(slaveLayout, cpVecCopy);
500 com.communicate();
501}
502
503// returns the highest referenced index of the elements in the layout.
505
508
527void CommunicateConnections(std::vector<std::vector<int> >& connectionsToProcsOut,
528 std::vector<std::vector<int> >& connectionsToSubDomsOut,
529 IndexLayout& masterLayout,
530 IndexLayout& slaveLayout,
531 int highestReferencedIndex, pcl::IDomainDecompositionInfo& ddinfo);
532
549int BuildOneToManyLayout(IndexLayout& masterLayoutOut,
550 IndexLayout& slaveLayoutOut,
551 int rootProcID,
552 IndexLayout& masterLayout,
553 IndexLayout& slaveLayout,
555 std::vector<int>* pNewMasterIDsOut = NULL);
556
557
558
570 IndexLayout& subdomMastersOut, IndexLayout& subdomSlavesOut,
571 IndexLayout& processMastersOut, IndexLayout& processSlavesOut,
572 IndexLayout& deltaNbrMastersOut, IndexLayout& deltaNbrSlavesOut,
573 IndexLayout& crossPointMastersOut, IndexLayout& crossPointSlavesOut,
574 const IndexLayout& standardMasters, const IndexLayout& standardSlaves,
575 int highestReferencedIndex, pcl::IDomainDecompositionInfo& ddinfo);
577
579
587template <typename TMatrix, typename TVector>
588void MatExtractDiagOnLayout( TVector* pDiagVector,
589 const TMatrix* pMatrix,
590 const IndexLayout& Layout)
591{
592 PROFILE_FUNC_GROUP("algebra parallelization");
593// interface iterator
594 typename IndexLayout::const_iterator iter = Layout.begin();
595 typename IndexLayout::const_iterator end = Layout.end();
596
597 for(; iter != end; ++iter)
598 {
599 // get interface
600 const typename IndexLayout::Interface& interface = Layout.interface(iter);
601
602 for(typename IndexLayout::Interface::const_iterator iter = interface.begin();
603 iter != interface.end(); ++iter)
604 {
605 // get index
606 const size_t index = interface.get_element(iter);
607
608 // copy values
609 const typename TMatrix::value_type& block = (*pMatrix)(index, index);
610 for(size_t beta = 0; beta < (size_t) GetCols(block); ++beta)
611 {
612 BlockRef((*pDiagVector)[index], beta) = BlockRef(block, beta, beta);
613 }
614 }
615 }
616}
617
619
627template <typename TMatrix, typename TVector>
628void MatWriteDiagOnLayout( TMatrix* pMatrix,
629 const TVector* pDiagVector,
630 const IndexLayout& Layout)
631{
632 PROFILE_FUNC_GROUP("algebra parallelization");
633// interface iterator
634 typename IndexLayout::const_iterator iter = Layout.begin();
635 typename IndexLayout::const_iterator end = Layout.end();
636
637 for(; iter != end; ++iter)
638 {
639 // get interface
640 const typename IndexLayout::Interface& interface = Layout.interface(iter);
641
642 for(typename IndexLayout::Interface::const_iterator iter = interface.begin();
643 iter != interface.end(); ++iter)
644 {
645 // get index
646 const size_t index = interface.get_element(iter);
647
648 // copy values
649 typename TMatrix::value_type& block = (*pMatrix)(index, index);
650 for(size_t beta = 0; beta < (size_t) GetCols(block); ++beta)
651 {
652 BlockRef(block, beta, beta) = BlockRef((*pDiagVector)[index], beta);
653 }
654 }
655 }
656}
657
658
660
669template <typename TAlgebra>
670void MatAdditiveToConsistentOnDiag( typename TAlgebra::matrix_type* pMat,
671 const IndexLayout& masterLayout, const IndexLayout& slaveLayout,
673{
674 PROFILE_FUNC_GROUP("algebra parallelization");
675// \todo: We could work on the matrix directly here, without temporary vector
676
677// create a vector of length of the diagonal
678 typename TAlgebra::vector_type vecDiag;
679
680// resize the vector to correct size
681 vecDiag.resize(pMat->num_rows());
682
683// copy diag values
684 MatExtractDiagOnLayout(&vecDiag, pMat, masterLayout);
685 MatExtractDiagOnLayout(&vecDiag, pMat, slaveLayout);
686
687// change vector to consistent
688 AdditiveToConsistent(&vecDiag, masterLayout, slaveLayout, pCom);
689
690// write consistent values back
691 MatWriteDiagOnLayout(pMat, &vecDiag, masterLayout);
692 MatWriteDiagOnLayout(pMat, &vecDiag, slaveLayout);
693}
694
696
707template <typename TVector>
708void VecGather( TVector* pVecDest, const TVector* pVecSrc,
709 const IndexLayout& masterLayoutDest, const IndexLayout& slaveLayoutSrc,
711{
712 PROFILE_FUNC_GROUP("algebra parallelization");
713 // create a new communicator if required.
715 if(!pCom)
716 pCom = &tCom;
718
719 // step 1: add slave values to master
720 // create the required communication policies
721 ComPol_VecAdd<TVector> cpVecAdd(pVecDest, pVecSrc);
722
723 // perform communication
724 com.send_data(slaveLayoutSrc, cpVecAdd);
725 com.receive_data(masterLayoutDest, cpVecAdd);
726 com.communicate();
727}
728
730
741template <typename TVector>
742void VecBroadcast( TVector* pVecDest, const TVector* pVecSrc,
743 const IndexLayout& slaveLayoutDest, const IndexLayout& masterLayoutSrc,
745{
746 PROFILE_FUNC_GROUP("algebra parallelization");
747 // create a new communicator if required.
749 if(!pCom)
750 pCom = &tCom;
752
753 // step 1: copy master values to slaves
754 // create the required communication policies
755 ComPol_VecCopy<TVector> cpVecCopy(pVecDest, pVecSrc);
756
757 // perform communication
758 com.send_data(masterLayoutSrc, cpVecCopy);
759 com.receive_data(slaveLayoutDest, cpVecCopy);
760 com.communicate();
761}
762
763inline bool PrintLayouts(const HorizontalAlgebraLayouts &layout)
764{
765 return TestLayout(layout.proc_comm(), layout.comm(), layout.master(), layout.slave(), true);
766}
767
///	Returns an iterator to the interface of the given process id.
/**	Performs a linear search over the layout's interfaces; returns
 * layout.end() if no interface to pid exists.
 */
template<typename TLayout>
typename TLayout::iterator find_pid(TLayout& layout, int pid)
{
	typename TLayout::iterator cur = layout.begin();
	while(cur != layout.end()){
		if(layout.proc_id(cur) == pid)
			return cur;
		++cur;
	}
	return layout.end();
}
780
785
786
788
789template <class TIndVec>
790void GenerateGlobalConsecutiveIndices(TIndVec& indsOut, size_t numLocalInds,
791 const AlgebraLayouts& layouts);
792
794
800template <class TMatrix>
802 const TMatrix& mat,
803 std::vector<AlgebraID>* algebraIDs = NULL,
804 bool verbose = false);
805}// end of namespace
806
807
810
811#endif /* __H__LIB_ALGEBRA__PARALLELIZATION__PARALLELIZATION_UTIL__ */
location verbose
Definition checkpoint_util.lua:128
Definition smart_pointer.h:108
Definition pcl_domain_decomposition.h:43
Performs communication between interfaces on different processes.
Definition pcl_interface_communicator.h:68
bool communicate(int tag=749345)
sends and receives the collected data.
Definition pcl_interface_communicator_impl.hpp:409
void send_data(int targetProc, const Interface &interface, ICommunicationPolicy< TLayout > &commPol)
collects data that will be send during communicate.
Definition pcl_interface_communicator_impl.hpp:80
void receive_data(int srcProc, const Interface &interface, ICommunicationPolicy< TLayout > &commPol)
registers a communication-policy to receive data on communicate.
Definition pcl_interface_communicator_impl.hpp:188
You may add elements to this interface and iterate over them.
Definition pcl_communication_structs.h:207
iterator end()
Definition pcl_communication_structs.h:293
iterator begin()
Definition pcl_communication_structs.h:292
Element & get_element(iterator iter)
Definition pcl_communication_structs.h:298
Definition pcl_process_communicator.h:70
iterator end(size_t level=0)
returns the iterator to the last interface of the layout.
Definition pcl_communication_structs.h:492
iterator begin(size_t level=0)
returns the iterator to the first interface of the layout.
Definition pcl_communication_structs.h:486
InterfaceMap::const_iterator const_iterator
Definition pcl_communication_structs.h:477
Communication Policy to copy slave couplings to master row.
Definition communication_policies.h:1143
Communication Policy to copy couplings between interfaces.
Definition communication_policies.h:1273
Communication Policy to add values of a vector.
Definition communication_policies.h:319
Communication Policy to add values of a vector and reset value to zero on sending interface.
Definition communication_policies.h:564
Communication Policy to copy values of a vector.
Definition communication_policies.h:88
Communication Policy to copy scaled values of a vector.
Definition communication_policies.h:204
Communication Policy to subtract values of a vector.
Definition communication_policies.h:679
Communication Policy to subtract only one slave value per master of a vector.
Definition communication_policies.h:881
Holds Interfaces and communicators for horizontal communication.
Definition algebra_layouts.h:48
const pcl::ProcessCommunicator & proc_comm() const
returns process communicator
Definition algebra_layouts.h:68
const IndexLayout & master() const
Definition algebra_layouts.h:61
pcl::InterfaceCommunicator< IndexLayout > & comm() const
returns (non-const !!!) communicator
Definition algebra_layouts.h:78
const IndexLayout & slave() const
Definition algebra_layouts.h:63
void ScaleLayoutValues(TVector *pVec, const IndexLayout &layout, number scale)
scales the values of a vector by a given number only on the layout indices
Definition parallelization_util.h:350
void ConsistentToUnique(TVector *pVec, const IndexLayout &slaveLayout)
changes parallel storage type from consistent to unique
Definition parallelization_util.h:388
void GenerateGlobalAlgebraIDs(pcl::InterfaceCommunicator< TLayout > &communicator, std::vector< AlgebraID > &idsOut, size_t numIDs, const TLayout &masterLayout, const TLayout &slaveLayout)
Generates a set of unique global algebra ids.
Definition parallelization_util.h:81
int BuildOneToManyLayout(IndexLayout &masterLayoutOut, IndexLayout &slaveLayoutOut, int rootProcID, IndexLayout &masterLayout, IndexLayout &slaveLayout, pcl::ProcessCommunicator procComm, std::vector< int > *pNewMasterIDsOut)
Definition parallelization_util.cpp:162
void BuildDomainDecompositionLayouts(IndexLayout &subdomMastersOut, IndexLayout &subdomSlavesOut, IndexLayout &processMastersOut, IndexLayout &processSlavesOut, IndexLayout &deltaNbrMastersOut, IndexLayout &deltaNbrSlavesOut, IndexLayout &crossPointMastersOut, IndexLayout &crossPointSlavesOut, const IndexLayout &standardMasters, const IndexLayout &standardSlaves, int highestReferencedIndex, IDomainDecompositionInfo &ddinfo)
Definition parallelization_util.cpp:497
int GetHighestReferencedIndex(IndexLayout &layout)
Definition parallelization_util.cpp:139
void SetLayoutValues(TVector *pVec, const IndexLayout &layout, typename TVector::value_type val)
sets the values of a vector to a given number only on the layout indices
Definition parallelization_util.h:315
void MatAddSlaveRowsToMasterRowOverlap0(TMatrix &mat)
Generates a set of unique global algebra ids.
Definition parallelization_util.h:106
void UniqueToConsistent(TVector *pVec, const IndexLayout &masterLayout, const IndexLayout &slaveLayout, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
changes parallel storage type from unique to consistent
Definition parallelization_util.h:204
void MatMakeConsistentOverlap0(TMatrix &mat)
Generates a set of unique global algebra ids.
Definition parallelization_util.h:126
void AdditiveToUnique(TVector *pVec, const IndexLayout &masterLayout, const IndexLayout &slaveLayout, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
changes parallel storage type from additive to unique
Definition parallelization_util.h:261
void VecSubtractOnLayout(TVector *pVec, const IndexLayout &masterLayout, const IndexLayout &slaveLayout, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
subtracts values of slave layout from master layout and sets slave layouts to negative of difference
Definition parallelization_util.h:406
void VecSubtractOneSlaveFromMaster(TVector *pVec, const IndexLayout &masterLayout, const IndexLayout &slaveLayout, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
subtracts values of only one slave dof per master on layout
Definition parallelization_util.h:445
void SetInterfaceValues(TVector *pVec, const IndexLayout::Interface &interface, typename TVector::value_type val)
sets the values of a vector to a given number only on the interface indices
Definition parallelization_util.h:289
void CopyValues(TVector *pVec, const IndexLayout &sourceLayout, const IndexLayout &targetLayout, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
Copies values from the source to the target layout.
Definition parallelization_util.h:228
void AdditiveToConsistent(TVector *pVec, const IndexLayout &masterLayout, const IndexLayout &slaveLayout, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
changes parallel storage type from additive to consistent
Definition parallelization_util.h:160
pcl::SingleLevelLayout< pcl::OrderedInterface< size_t, std::vector > > IndexLayout
Definition parallel_index_layout.h:53
int ProcRank()
returns the rank of the process
Definition pcl_base.cpp:83
double number
Definition types.h:124
void VecCopy(vector_target_t &target, const vector_source_t &source, typename vector_target_t::value_type fill)
Copy contents between vectors of possibly different types.
Definition math_vector_functions_common_impl.hpp:56
#define PU_PROFILE_END(name)
Definition parallelization_util.h:50
#define PU_PROFILE_BEGIN(name)
Definition parallelization_util.h:49
Definition smart_pointer.h:814
the ug namespace
size_t GetCols(const T &t)
void GenerateGlobalConsecutiveIndices(TIndVec &indsOut, size_t numLocalInds, const AlgebraLayouts &layouts)
Generates a set of global consecutive indices.
Definition parallelization_util_impl.h:44
void TestHorizontalAlgebraLayouts(const TMatrix &mat, std::vector< AlgebraID > *algebraIDs=NULL, bool verbose=false)
Tests layouts by matching master and slave interfaces and by comparing global id's.
Definition parallelization_util_impl.h:121
void MatAdditiveToConsistentOnDiag(typename TAlgebra::matrix_type *pMat, const IndexLayout &masterLayout, const IndexLayout &slaveLayout, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
changes parallel storage type from additive to consistent on diagonal of a matrix
Definition parallelization_util.h:670
void CommunicateConnections(vector< vector< int > > &connectionsToProcsOut, vector< vector< int > > &connectionsToSubDomsOut, const IndexLayout &masterLayout, const IndexLayout &slaveLayout, int highestReferencedIndex, pcl::IDomainDecompositionInfo &ddinfo)
Definition parallelization_util.cpp:70
TLayout::iterator find_pid(TLayout &layout, int pid)
Definition parallelization_util.h:774
double & BlockRef(T &vec, size_t i)
Definition blocks.h:66
void MatWriteDiagOnLayout(TMatrix *pMatrix, const TVector *pDiagVector, const IndexLayout &Layout)
writes diagonal of a matrix for interface indices
Definition parallelization_util.h:628
void VecGather(TVector *pVecDest, const TVector *pVecSrc, const IndexLayout &masterLayoutDest, const IndexLayout &slaveLayoutSrc, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
gathers all values in master indices of a second vector
Definition parallelization_util.h:708
void MatExtractDiagOnLayout(TVector *pDiagVector, const TMatrix *pMatrix, const IndexLayout &Layout)
extracts diagonal of a matrix for interface indices
Definition parallelization_util.h:588
bool PrintLayouts(const HorizontalAlgebraLayouts &layout)
Definition parallelization_util.h:763
SmartPtr< AlgebraLayouts > CreateLocalAlgebraLayouts()
Definition parallelization_util.cpp:936
void VecBroadcast(TVector *pVecDest, const TVector *pVecSrc, const IndexLayout &slaveLayoutDest, const IndexLayout &masterLayoutSrc, pcl::InterfaceCommunicator< IndexLayout > *pCom=NULL)
broadcasts all values from master indices to slave values in a second vector
Definition parallelization_util.h:742
#define PROFILE_FUNC_GROUP(groups)
Definition profiler.h:258
this type is used to identify distributed objects.
Definition algebra_id.h:46