33 #ifndef __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__UZAWA__
34 #define __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__UZAWA__
54 template<
typename TGroupObj,
typename TGr
idFunction>
56 const TGridFunction& c,
57 const std::vector<size_t>& vFullRowCmp,
58 const std::vector<size_t>& vRemainCmp)
61 typedef typename TGridFunction::element_type Element;
62 std::vector<Element*> vElem;
66 typedef typename TGridFunction::template traits<TGroupObj>::const_iterator GroupObjIter;
67 for(GroupObjIter iter = c.template begin<TGroupObj>();
68 iter != c.template end<TGroupObj>(); ++iter)
71 TGroupObj* groupObj = *iter;
75 for(
size_t f = 0; f < vFullRowCmp.size(); ++f)
76 c.inner_dof_indices(groupObj, vFullRowCmp[f], vIndex,
false);
80 template <
typename TGr
idFunction>
90 void init(
const TGridFunction &u,
const std::vector<std::string>& vSchurCmps);
98 template <
typename TGr
idFunction>
100 init(
const TGridFunction &u,
const std::vector<std::string>& vSchurCmps)
105 u.approx_space()->dof_distribution_info();
108 std::vector<DoFIndex> vIndex;
111 std::vector<size_t> vFullRowCmp;
114 std::vector<size_t> vRemainCmp;
117 UG_ASSERT(!vSchurCmps.empty(),
"UzawaBase::init: No components set.");
120 for(
size_t i = 0; i < vSchurCmps.size(); ++i)
121 vFullRowCmp.push_back(ddinfo->fct_id_by_name(vSchurCmps[i].c_str()));
125 for(
size_t f = 0; f < ddinfo->num_fct(); ++f)
126 if(
std::find(vFullRowCmp.begin(), vFullRowCmp.end(), f) == vFullRowCmp.end())
127 vRemainCmp.push_back(f);
137 for(
size_t f = 0; f < vFullRowCmp.size(); ++f)
138 iCarryDoFs += ddinfo->max_fct_dofs(vFullRowCmp[f], d);
139 if(iCarryDoFs == 0)
continue;
143 case VERTEX: ExtractByObject<Vertex, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp);
break;
144 case EDGE: ExtractByObject<Edge, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp);
break;
145 case FACE: ExtractByObject<Face, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp);
break;
146 case VOLUME: ExtractByObject<Volume, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp);
break;
150 UG_DLOG(
SchurDebug, 4,
"Found "<< vIndex.size() <<
" indices ( out of "<< u.size() <<
") for Schur block after dimension "<< d << std::endl) ;
157 for (
size_t i=0; i<vIndex.size(); ++i)
159 UG_ASSERT(vIndex[i][1]==0,
"Assuming CPUAlgebra...")
160 mapping[vIndex[i][0]] =
true;
164 base_type::set_types(mapping,
true);
173 template <
typename TDomain,
typename TAlgebra>
207 for (
size_t i=0; i<vSchurCmp.size(); ++i)
208 { std::cout <<
"Comp = " << vSchurCmp[i] << std::endl; }
215 { std::cout <<
"Comp = " << sSchurCmp << std::endl; }
228 J.template cast_dynamic<MatrixOperator<matrix_type, vector_type> >();
233 UG_ASSERT(pVecU!=NULL,
"Need a GridFunction here!");
250 virtual const char*
name()
const {
return "IUzawaBase";}
318 m_spAss->assemble_mass_matrix(tmpM, c, clevel);
321 my_write_debug(tmpM,
"init_MFull_ForSchurUpdate", clevel, clevel);
322 UG_DLOG(
SchurDebug, 5,
"extract_schur_update on level "<< clevel <<
", " << tmpM);
332 "UZAWA_init_M22_ForSchurUpdate.mat");
343 "UZAWA_init_C22_AfterSchurUpdate.mat");
403 case 1: reset_slice_debug_writers<1>();
break;
404 case 2: reset_slice_debug_writers<2>();
break;
405 case 3: reset_slice_debug_writers<3>();
break;
406 default:
UG_LOG(
"Invalid dimension for debug_write ???");
460 std::stringstream ss;
474 int m_dbgIterCnt = 0;
481 std::stringstream ss;
483 ss <<
"_i" << std::setfill(
'0') << std::setw(3) << m_dbgIterCnt <<
".vec";
509 template <
typename TDomain,
typename TAlgebra>
519 std::vector<IndexLayout::Element> vIndex;
534 template <
typename TDomain,
typename TAlgebra>
540 if(pC == NULL)
UG_THROW(
"UzawaBase: expects correction to be a GridFunction.");
560 init_in_first_step(*pMat, *pC);
570 SmartPtr<vector_type> cRegular(m_slicing.template slice_clone_without_values<vector_type>(*pC, UZAWA_CMP_DEFAULT));
571 SmartPtr<vector_type> cSchur(m_slicing.template slice_clone_without_values<vector_type>(*pC, UZAWA_CMP_SCHUR));
578 my_write_debug(*pC,
"UZAWA_Correction0");
580 if (m_spForwardInverse.valid())
585 m_spForwardInverse->apply(*cRegular, *ff);
587 m_slicing.template set_vector_slice<vector_type>(*cRegular, *pC, UZAWA_CMP_DEFAULT);
588 my_write_debug(*pC,
"UZAWA_Correction1");
591 m_auxMat[B21]->apply_sub(*gg, *cRegular);
595 if (m_SpSchurComplementInverse.valid()) {
598 m_SpSchurComplementInverse->apply(*cSchur, *gg);
599 m_slicing.template set_vector_slice<vector_type>(*cSchur, *pC, UZAWA_CMP_SCHUR);
600 my_write_debug(*pC,
"UZAWA_Correction2");
603 m_auxMat[B12]->apply_sub(*ff, *cSchur);
607 if (m_spBackwardInverse.valid()) {
610 m_spBackwardInverse->apply(*cRegular, *ff);
612 m_slicing.template set_vector_slice<vector_type>(*cRegular, *pC, UZAWA_CMP_DEFAULT);
613 my_write_debug(*pC,
"UZAWA_Correction3");
625 template <
typename TDomain,
typename TAlgebra>
630 m_slicing.get_matrix(K, UZAWA_CMP_DEFAULT, UZAWA_CMP_DEFAULT, *(m_auxMat[AUX_A11].
template cast_static<matrix_type>()) );
631 m_slicing.get_matrix(K, UZAWA_CMP_DEFAULT, UZAWA_CMP_SCHUR, *(m_auxMat[B12].
template cast_static<matrix_type>()) );
632 m_slicing.get_matrix(K, UZAWA_CMP_SCHUR, UZAWA_CMP_DEFAULT, *(m_auxMat[B21].
template cast_static<matrix_type>()) );
633 m_slicing.get_matrix(K, UZAWA_CMP_SCHUR, UZAWA_CMP_SCHUR, *(m_auxMat[AUX_C22].
template cast_static<matrix_type>()) );
642 uint mask_K = K.get_storage_mask();
643 m_auxMat[AUX_A11]->set_storage_type(mask_K);
644 m_auxMat[B12]->set_storage_type(mask_K);
645 m_auxMat[B21]->set_storage_type(mask_K);
646 m_auxMat[AUX_C22]->set_storage_type(mask_K);
652 m_auxMat[AUX_A11]->set_layouts(defaultLayouts);
653 m_auxMat[AUX_C22]->set_layouts(schurLayouts);
659 if (m_spSliceDebugWriter[UZAWA_CMP_DEFAULT].valid()) {
660 m_spSliceDebugWriter[UZAWA_CMP_DEFAULT]->write_matrix(*m_auxMat[AUX_A11],
"UZAWA_init_A11_AfterExtract.mat");
664 if (m_spSliceDebugWriter[UZAWA_CMP_SCHUR].valid()) {
665 m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->write_matrix(*m_auxMat[AUX_C22],
"UZAWA_init_C22_AfterExtract.mat");
670 template <
typename TDomain,
typename TAlgebra>
674 postprocess_block_iterations();
682 template <
typename TDomain,
typename TAlgebra>
689 newInst->set_debug(debug_writer());
692 newInst->m_spForwardInverse = (m_spForwardInverse.valid()) ? m_spForwardInverse->clone().template cast_dynamic<base_type>() :
SPNULL;
693 newInst->m_SpSchurComplementInverse = (m_SpSchurComplementInverse.valid()) ? m_SpSchurComplementInverse->clone().template cast_dynamic<base_type>() :
SPNULL;
694 newInst->m_spBackwardInverse = (m_spBackwardInverse.valid()) ? m_spBackwardInverse->clone().template cast_dynamic<base_type>() :
SPNULL;
697 newInst->m_spSchurUpdateOp = m_spSchurUpdateOp;
698 newInst->m_dSchurUpdateWeight = m_dSchurUpdateWeight;
705 template <
typename TDomain,
typename TAlgebra>
709 std::string basePath = debug_writer()->get_base_dir();
712 m_spSliceDebugWriter[UZAWA_CMP_DEFAULT] ->set_base_dir(basePath.c_str());
715 m_spSliceDebugWriter[UZAWA_CMP_SCHUR] ->set_base_dir(basePath.c_str());
718 template <
typename TDomain,
typename TAlgebra>
723 UG_LOG(
"reset_slice_debug_writers"<< std::endl);
725 if (m_spSliceDebugWriter[UZAWA_CMP_DEFAULT].invalid() ||
726 m_spSliceDebugWriter[UZAWA_CMP_SCHUR].invalid())
return;
728 const std::vector<MathVector<dim> > &positions = (m_spGridFunctionDebugWriter.valid()) ? m_spGridFunctionDebugWriter->template get_positions<dim>() : debug_writer()->template get_positions<dim>();
729 std::vector<MathVector<dim> > cmpPositions[2];
731 m_slicing.get_vector_slice(positions, UZAWA_CMP_DEFAULT, cmpPositions[UZAWA_CMP_DEFAULT]);
732 m_spSliceDebugWriter[UZAWA_CMP_DEFAULT]->template set_positions<dim>(cmpPositions[UZAWA_CMP_DEFAULT]);
734 m_slicing.get_vector_slice(positions, UZAWA_CMP_SCHUR, cmpPositions[UZAWA_CMP_SCHUR]);
735 m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->template set_positions<dim>(cmpPositions[UZAWA_CMP_SCHUR]);
739 template <
typename TDomain,
typename TAlgebra>
742 base_type::set_debug(spDebugWriter);
743 m_spGridFunctionDebugWriter = debug_writer().template cast_dynamic<GridFunctionDebugWriter<TDomain, TAlgebra> >();
745 if (m_spGridFunctionDebugWriter.invalid())
return;
747 create_slice_debug_writers();
749 debug_writer()->update_positions();
750 switch(debug_writer()->get_dim())
752 case 1: reset_slice_debug_writers<1>();
break;
753 case 2: reset_slice_debug_writers<2>();
break;
754 case 3: reset_slice_debug_writers<3>();
break;
755 default:
UG_LOG(
"debug dim ???");
Definition: smart_pointer.h:296
Definition: smart_pointer.h:108
T * get()
returns encapsulated pointer
Definition: smart_pointer.h:197
bool valid() const
returns true if the pointer is valid, false if not.
Definition: smart_pointer.h:206
Debug writer for connection viewer (based on algebraic information + vector positions only)
Definition: algebra_debug_writer.h:46
matrix operator based on the assembling of a problem
Definition: assembled_linear_operator.h:60
TAlgebra::vector_type vector_type
type of vector
Definition: debug_writer.h:360
SmartPtr< IDebugWriter< algebra_type > > debug_writer()
returns the debug writer
Definition: debug_writer.h:391
TAlgebra::matrix_type matrix_type
type of matrix
Definition: debug_writer.h:363
represents numerical solutions on a grid using an algebraic vector
Definition: grid_function.h:121
const GridLevel & grid_level() const
returns the grid level
Definition: grid_function.h:247
dim_traits< dim >::grid_base_object element_type
Definition: grid_function.h:159
dim_traits< dim-1 >::grid_base_object side_type
Definition: grid_function.h:163
Definition: grid_level.h:42
base class for all debug writer
Definition: debug_writer.h:244
describes a linear iterator
Definition: linear_iterator.h:81
describes a linear mapping X->Y
Definition: linear_operator.h:80
describes a linear iterator that is based on a matrix operator
Definition: preconditioner.h:103
virtual bool init(SmartPtr< ILinearOperator< vector_type > > J, const vector_type &u)
implements the ILinearIterator-interface for matrix based preconditioner
Definition: preconditioner.h:192
Definition: matrix_operator.h:49
Wrapper for sequential matrices to handle them in parallel.
Definition: parallel_matrix.h:65
binary_grouping_vector slice_desc_type_vector
Definition: slicing.h:72
matrix_type m_A
Definition: uzawa.h:504
UzawaSlicing< TGridFunction > m_slicing
object for slicing routines
Definition: uzawa.h:425
UzawaBase(const char *sSchurCmp)
Definition: uzawa.h:211
void set_backward_iter(SmartPtr< ILinearIterator< vector_type > > iter)
Definition: uzawa.h:281
void init_block_operators()
allocate block matrix operators
Definition: uzawa.h:290
MatrixOperator< matrix_type, vector_type > matrix_operator_type
Definition: uzawa.h:193
void set_forward_iter(SmartPtr< ILinearIterator< vector_type > > iter)
Definition: uzawa.h:273
static const bool UZAWA_CMP_SCHUR
Definition: uzawa.h:177
static const bool UZAWA_CMP_DEFAULT
Definition: uzawa.h:178
TGridFunction::side_type TSide
Definition: uzawa.h:185
SmartPtr< IDebugWriter< algebra_type > > m_spSliceDebugWriter[2]
Definition: uzawa.h:501
void postprocess_block_iterations()
Definition: uzawa.h:376
void set_schur_operator_update(SmartPtr< AssembledLinearOperator< TAlgebra > > spSchurUpdateOp, double theta=0.0)
Definition: uzawa.h:285
void extract_schur_update(const matrix_type &K, const TGridFunction &c)
Update C22 block by matrix.
Definition: uzawa.h:304
virtual void my_write_debug(const TGridFunction &rGF, std::string name)
Definition: uzawa.h:472
SmartPtr< ILinearIterator< vector_type > > m_spBackwardInverse
iteration for the backward system
Definition: uzawa.h:434
void reset_slice_debug_writers()
Definition: uzawa.h:721
TAlgebra::vector_type vector_type
Definition: uzawa.h:191
SmartPtr< GridFunctionDebugWriter< TDomain, TAlgebra > > m_spGridFunctionDebugWriter
Definition: uzawa.h:500
SmartPtr< ILinearIterator< typename TAlgebra::vector_type > > clone()
clone
Definition: uzawa.h:684
virtual ~UzawaBase()
Definition: uzawa.h:219
virtual bool init(SmartPtr< ILinearOperator< vector_type > > J, const vector_type &u)
Overriding base type.
Definition: uzawa.h:223
void my_write_debug(const matrix_type &mat, std::string name, const GridLevel &glTo, const GridLevel &glFrom)
Definition: uzawa.h:453
void init_block_iterations()
Definition: uzawa.h:347
virtual bool preprocess(SmartPtr< MatrixOperator< matrix_type, vector_type > > pOp)
initializes the preconditioner
Definition: uzawa.h:511
void set_debug(SmartPtr< IDebugWriter< algebra_type > > spDebugWriter)
set debug writer
Definition: uzawa.h:740
SmartPtr< ILinearIterator< vector_type > > m_spForwardInverse
iteration for forward system
Definition: uzawa.h:428
IPreconditioner< TAlgebra > base_type
Definition: uzawa.h:198
void set_schur_iter(SmartPtr< ILinearIterator< vector_type > > iter)
Definition: uzawa.h:277
bool m_bInit
flag indicating, whether operator must be initialized
Definition: uzawa.h:419
BLOCK
Definition: uzawa.h:444
@ B12
Definition: uzawa.h:444
@ B21
Definition: uzawa.h:444
@ AUX_A11
Definition: uzawa.h:444
@ AUX_ARRAY_SIZE
Definition: uzawa.h:444
@ AUX_M22
Definition: uzawa.h:444
@ AUX_C22
Definition: uzawa.h:444
SmartPtr< AssembledLinearOperator< TAlgebra > > m_spSchurUpdateOp
assembly for (additive) Schur complement update
Definition: uzawa.h:437
SmartPtr< matrix_operator_type > m_auxMat[AUX_ARRAY_SIZE]
Definition: uzawa.h:445
virtual bool postprocess()
cleans the operator
Definition: uzawa.h:672
void my_write_debug(const matrix_type &mat, std::string name, const TGridFunction &rTo, const TGridFunction &rFrom)
auxiliary matrices (not cloned!)
Definition: uzawa.h:448
void extract_sub_matrices(const matrix_type &K, const TGridFunction &c)
extract block matrix operators (called once)
Definition: uzawa.h:627
TAlgebra algebra_type
Definition: uzawa.h:190
SmartPtr< ILinearIterator< vector_type > > m_SpSchurComplementInverse
iteration for the Schur complement system
Definition: uzawa.h:431
TAlgebra::matrix_type matrix_type
Definition: uzawa.h:192
GridFunction< TDomain, TAlgebra > TGridFunction
Definition: uzawa.h:183
virtual bool step(SmartPtr< MatrixOperator< matrix_type, vector_type > > pOp, vector_type &c, const vector_type &d)
computes a new correction c = B*d
Definition: uzawa.h:536
virtual bool supports_parallel() const
returns if parallel solving is supported
Definition: uzawa.h:265
std::vector< std::string > m_vSchurCmp
vector of strings identifying components used for Schur complement
Definition: uzawa.h:422
void init_in_first_step(const matrix_type &pMat, const TGridFunction &pC)
Definition: uzawa.h:382
virtual const char * name() const
name
Definition: uzawa.h:250
double m_dSchurUpdateWeight
scaling factor for (additive) Schur complement update
Definition: uzawa.h:440
static const int dim
World dimension.
Definition: uzawa.h:182
void create_slice_debug_writers()
Definition: uzawa.h:707
UzawaBase(const std::vector< std::string > &vSchurCmp)
default constructor
Definition: uzawa.h:203
TGridFunction::element_type TElement
Definition: uzawa.h:184
SlicingData< binary_grouping_vector, 2 > base_type
Definition: uzawa.h:84
void init(const TGridFunction &u, const std::vector< std::string > &vSchurCmps)
Definition: uzawa.h:100
UzawaSlicing(const std::vector< std::string > &vSchurCmps)
Definition: uzawa.h:87
@ PST_UNIQUE
Definition: parallel_storage_type.h:70
bool CheckRowIterators(const TSparseMatrix &A)
Definition: sparsematrix_util.h:1077
void SetDirichletRow(TSparseMatrix &A, size_t i, size_t alpha)
Definition: sparsematrix_util.h:796
int NumProcs()
returns the number of processes
Definition: pcl_base.cpp:91
void CollectUniqueElements(std::vector< typename TLayout::Element > &elemsOut, const TLayout &layout)
writes all elements in the interfaces into the resulting vector. avoids duplicates.
Definition: pcl_layout_util.h:142
const NullSmartPtr SPNULL
The equivalent to NULL for smart pointers.
Definition: smart_pointer.h:90
#define UG_ASSERT(expr, msg)
Definition: assert.h:70
#define UG_THROW(msg)
Definition: error.h:57
#define UG_DLOG(__debugID__, level, msg)
Definition: log.h:298
#define UG_LOG(msg)
Definition: log.h:367
unsigned int uint
Definition: types.h:114
CPUAlgebra::vector_type vector_type
IndexLayout::Interface::iterator find(IndexLayout::Interface &interface, size_t i)
Definition: parallel_index_layout.h:77
std::string GridLevelAppendix(const GridLevel &gl, int minfill)
returns appendix for a grid level
Definition: grid_level.cpp:56
void TokenizeTrimString(const string &str, vector< string > &vToken, const char delimiter)
Definition: string_util.cpp:83
bool MakeConsistent(const ParallelMatrix< matrix_type > &_mat, ParallelMatrix< matrix_type > &newMat)
Definition: parallel_matrix_overlap_impl.h:438
std::vector< bool > binary_grouping_vector
Definition: uzawa.h:51
DebugID SchurDebug
todo: replace DebugID
Definition: schur.h:55
@ VOLUME
Definition: grid_base_objects.h:63
@ VERTEX
Definition: grid_base_objects.h:60
@ EDGE
Definition: grid_base_objects.h:61
@ FACE
Definition: grid_base_objects.h:62
void ExtractByObject(std::vector< DoFIndex > &vIndex, const TGridFunction &c, const std::vector< size_t > &vFullRowCmp, const std::vector< size_t > &vRemainCmp)
Definition: uzawa.h:55
#define PROFILE_FUNC_GROUP(groups)
Definition: profiler.h:258
SmartPtr< T, FreePolicy > make_sp(T *inst)
returns a SmartPtr for the passed raw pointer
Definition: smart_pointer.h:836