uzawa.h
1/*
2 * Copyright (c) 2017-now: G-CSC, Goethe University Frankfurt
3 * Authors: Arne Nägel
4 *
5 * This file is part of UG4.
6 *
7 * UG4 is free software: you can redistribute it and/or modify it under the
8 * terms of the GNU Lesser General Public License version 3 (as published by the
9 * Free Software Foundation) with the following additional attribution
10 * requirements (according to LGPL/GPL v3 §7):
11 *
12 * (1) The following notice must be displayed in the Appropriate Legal Notices
13 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
14 *
15 * (2) The following notice must be displayed at a prominent place in the
16 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
17 *
18 * (3) The following bibliography is recommended for citation and must be
19 * preserved in all covered files:
20 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
21 * parallel geometric multigrid solver on hierarchically distributed grids.
22 * Computing and visualization in science 16, 4 (2013), 151-164"
23 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
24 * flexible software system for simulating pde based models on high performance
25 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU Lesser General Public License for more details.
31 */
32
33#ifndef __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__UZAWA__
34#define __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__UZAWA__
35
36#include <vector>
37#include <string>
38
39
42
43#include "common/util/string_util.h" // string tokenizer
45
46namespace ug{
47
48
49
50
51typedef std::vector<bool> binary_grouping_vector;
52
53
54template<typename TGroupObj, typename TGridFunction>
55void ExtractByObject(std::vector<DoFIndex>& vIndex,
56 const TGridFunction& c,
57 const std::vector<size_t>& vFullRowCmp,
58 const std::vector<size_t>& vRemainCmp)
59{
60 // memory for local algebra
61 typedef typename TGridFunction::element_type Element;
62 std::vector<Element*> vElem;
63
64
65 // loop all grouping objects
66 typedef typename TGridFunction::template traits<TGroupObj>::const_iterator GroupObjIter;
67 for(GroupObjIter iter = c.template begin<TGroupObj>();
68 iter != c.template end<TGroupObj>(); ++iter)
69 {
70 // get grouping obj
71 TGroupObj* groupObj = *iter;
72
73 // get all dof indices on obj associated to cmps
74
75 for(size_t f = 0; f < vFullRowCmp.size(); ++f)
76 c.inner_dof_indices(groupObj, vFullRowCmp[f], vIndex, false);
77 }
78}
79
80template <typename TGridFunction>
81class UzawaSlicing : public SlicingData<binary_grouping_vector, 2> {
82public:
83
84 typedef SlicingData<binary_grouping_vector, 2> base_type;
85
86 /// constructs the slicing object; the index sets are built later in init()
87 UzawaSlicing(const std::vector<std::string>& vSchurCmps)
88 {}
89
90 void init(const TGridFunction &u, const std::vector<std::string>& vSchurCmps);
91
92protected:
93
94};
95
96
97
98template <typename TGridFunction>
99void UzawaSlicing <TGridFunction>::
100init(const TGridFunction &u, const std::vector<std::string>& vSchurCmps)
101{
102 UG_DLOG(SchurDebug, 3, "UzawaSlicing::init" << std::endl);
103
105 ConstSmartPtr<DoFDistributionInfo> ddinfo = u.approx_space()->dof_distribution_info();
106
107 // fill this vector
108 std::vector<DoFIndex> vIndex;
109
110 // ids of selected functions
111 std::vector<size_t> vFullRowCmp;
112
113 // complementing functions
114 std::vector<size_t> vRemainCmp;
115
116 // components must be given
117 UG_ASSERT(!vSchurCmps.empty(), "UzawaSlicing::init: No components set.");
118
119 // get ids of selected functions
120 for(size_t i = 0; i < vSchurCmps.size(); ++i)
121 vFullRowCmp.push_back(ddinfo->fct_id_by_name(vSchurCmps[i].c_str()));
122
123
124 // Obtain remaining (non-Schur) functions.
125 for(size_t f = 0; f < ddinfo->num_fct(); ++f)
126 if(std::find(vFullRowCmp.begin(), vFullRowCmp.end(), f) == vFullRowCmp.end())
127 vRemainCmp.push_back(f);
128
129
130
131 // Extract indices for each type of grouping object (vertex, edge, face, volume).
132 for(int d = VERTEX; d <= VOLUME; ++d)
133 {
134
135 // only extract if carrying dofs
136 int iCarryDoFs = 0;
137 for(size_t f = 0; f < vFullRowCmp.size(); ++f)
138 iCarryDoFs += ddinfo->max_fct_dofs(vFullRowCmp[f], d);
139 if(iCarryDoFs == 0) continue;
140
141 // extract
142 switch(d){
143 case VERTEX: ExtractByObject<Vertex, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp); break;
144 case EDGE: ExtractByObject<Edge, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp); break;
145 case FACE: ExtractByObject<Face, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp); break;
146 case VOLUME: ExtractByObject<Volume, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp); break;
147 default: UG_THROW("wrong dim");
148 }
149
150 UG_DLOG(SchurDebug, 4, "Found "<< vIndex.size() << " indices ( out of "<< u.size() << ") for Schur block after dimension "<< d << std::endl) ;
151 }
152
153
154 // todo: replace by InputIterator
155 base_type::slice_desc_type_vector mapping(u.size(), false);
156
157 for (size_t i=0; i<vIndex.size(); ++i)
158 {
159 UG_ASSERT(vIndex[i][1]==0, "Assuming CPUAlgebra...")
160 mapping[vIndex[i][0]] = true;
161 }
162
163 UG_DLOG(SchurDebug, 3, "UzawaSlicing::init::set_types" << std::endl);
164 base_type::set_types(mapping, true);
165
166}
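The result of init() is a boolean partition of all algebra indices: true marks rows of the Schur components, false the remaining ones. A minimal sketch (the component name "p" and the grid function u are assumptions for illustration):

// UzawaSlicing<TGridFunction> slicing(std::vector<std::string>(1, "p"));
// slicing.init(u, std::vector<std::string>(1, "p"));
// // afterwards every "p" DoF index belongs to the UZAWA_CMP_SCHUR slice,
// // all other indices to the UZAWA_CMP_DEFAULT slice.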
167
168
169
170
173template <typename TDomain, typename TAlgebra>
174class UzawaBase : public IPreconditioner<TAlgebra>
175{
176
177 static const bool UZAWA_CMP_SCHUR = true;
178 static const bool UZAWA_CMP_DEFAULT = false;
179
180 public:
181 /// World dimension
182 static const int dim = TDomain::dim;
183 typedef GridFunction<TDomain, TAlgebra> TGridFunction;
184 typedef typename TGridFunction::element_type TElement;
185 typedef typename TGridFunction::side_type TSide;
186
187
190 typedef TAlgebra algebra_type;
191 typedef typename TAlgebra::vector_type vector_type;
192 typedef typename TAlgebra::matrix_type matrix_type;
193 typedef MatrixOperator<matrix_type, vector_type> matrix_operator_type;
194
196
197 protected:
198 typedef IPreconditioner<TAlgebra> base_type;
199
200
201 public:
203 UzawaBase(const std::vector<std::string>& vSchurCmp)
204 : m_bInit(false), m_vSchurCmp(vSchurCmp), m_slicing(vSchurCmp), m_dSchurUpdateWeight(0.0)
205 {
207 for (size_t i=0; i<vSchurCmp.size(); ++i)
208 { std::cout << "Comp = " << vSchurCmp[i] << std::endl; }
209 }
210
211 UzawaBase(const char *sSchurCmp)
212 : m_bInit(false), m_slicing(m_vSchurCmp), m_dSchurUpdateWeight(0.0)
213 {
214 TokenizeTrimString(sSchurCmp, m_vSchurCmp, ',');
215 { std::cout << "Comp = " << sSchurCmp << std::endl; }
216 }
217
218
219 virtual ~UzawaBase()
220 {}
221
222 /// Overriding base type
223 virtual bool init(SmartPtr<ILinearOperator<vector_type> > J, const vector_type& u)
224 {
225 UG_DLOG(SchurDebug, 4, "UzawaBase::init(J,u)")
226
227 SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp =
228 J.template cast_dynamic<MatrixOperator<matrix_type, vector_type> >();
229
230 const TGridFunction* pVecU = dynamic_cast<const TGridFunction* >(&u);
231
232 UG_ASSERT(pOp.valid(), "Need a matrix based operator here!");
233 UG_ASSERT(pVecU!=NULL, "Need a GridFunction here!");
234 base_type::init(J,u); // calls preprocess
235
236 // Extract sub-matrices afterwards.
237 init_in_first_step(*pOp, *pVecU);
238 m_bInit = true;
239
240 return true;
241
242
243 }
244
246protected:
247 // Interface for IPreconditioner
248
250 virtual const char* name() const {return "IUzawaBase";}
251
252 /// initializes the preconditioner
253 virtual bool preprocess(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp);
254
255 /// computes a new correction c = B*d
256 virtual bool step(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp, vector_type& c, const vector_type& d);
257
258
260 virtual bool postprocess();
261
262public:
263
265 virtual bool supports_parallel() const
266 {
267 UG_ASSERT(m_spForwardInverse.valid() && m_SpSchurComplementInverse.valid(), "Need valid iter");
268 return (m_spForwardInverse->supports_parallel() && m_SpSchurComplementInverse->supports_parallel());
269
270 }
271
272 /// forward approximate inverse
273 void set_forward_iter(SmartPtr<ILinearIterator<vector_type> > iter)
274 { m_spForwardInverse = iter; }
275
276 /// Schur approximate inverse
277 void set_schur_iter(SmartPtr<ILinearIterator<vector_type> > iter)
278 { m_SpSchurComplementInverse = iter; }
279
280 /// backward approximate inverse
281 void set_backward_iter(SmartPtr<ILinearIterator<vector_type> > iter)
282 { m_spBackwardInverse = iter; }
283
284 /// assembly for update of Schur operator
285 void set_schur_operator_update(SmartPtr<AssembledLinearOperator<TAlgebra> > spSchurUpdateOp, double theta = 0.0)
286 { m_spSchurUpdateOp = spSchurUpdateOp; m_dSchurUpdateWeight = theta; }
287
288
289 /// allocate block matrix operators
290 void init_block_operators()
291 {
292 for (size_t i = 0; i < AUX_ARRAY_SIZE; ++i)
293 m_auxMat[i] = make_sp(new matrix_operator_type());
294 }
299
301 void extract_sub_matrices(const matrix_type& K, const TGridFunction& c);
302
303 /// Update C22 block by matrix
304 void extract_schur_update(const matrix_type& K, const TGridFunction& c)
305 {
306 if (m_spSchurUpdateOp.invalid()) return;
307
308
309 const GridLevel clevel = c.grid_level();
310 my_write_debug(K, "init_KFull_ForSchurUpdate", clevel, clevel);
311
312 //Assembling auxiliary matrix
313 SmartPtr<IAssemble<TAlgebra> > m_spAss = m_spSchurUpdateOp->discretization();
314 matrix_type tmpM;
315
316 //m_spAss->ass_tuner()->set_force_regular_grid(true);
317 //m_spAss->assemble_jacobian(tmpM, c, clevel);
318 m_spAss->assemble_mass_matrix(tmpM, c, clevel);
319 //m_spAss->ass_tuner()->set_force_regular_grid(false);
320
321 my_write_debug(tmpM, "init_MFull_ForSchurUpdate", clevel, clevel);
322 UG_DLOG(SchurDebug, 5, "extract_schur_update on level "<< clevel << ", " << tmpM);
323
324
325 /* Matrices C22 and M22 are both additive. Thus, we add both.
326 * (Note, that preconds requiring a consistent diagonal must be called afterwards!)*/
327 m_slicing.get_matrix(tmpM, UZAWA_CMP_SCHUR, UZAWA_CMP_SCHUR,
328 *(m_auxMat[AUX_M22].template cast_static<matrix_type>()) );
329
332 "UZAWA_init_M22_ForSchurUpdate.mat");
333 }
334
337
338 // add matrix
339 MatAddNonDirichlet<matrix_type>(*m_auxMat[AUX_C22], 1.0, *m_auxMat[AUX_C22], m_dSchurUpdateWeight, *m_auxMat[AUX_M22]);
340
343 "UZAWA_init_C22_AfterSchurUpdate.mat");
344 }
345 }
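In matrix terms, this routine augments the extracted Schur block: with the matrix M_{22} assembled from m_spSchurUpdateOp (here via assemble_mass_matrix) and the weight \theta = m_dSchurUpdateWeight, MatAddNonDirichlet forms

    C_{22} \leftarrow C_{22} + \theta \, M_{22}

on all non-Dirichlet rows. Adding a scaled (pressure) mass matrix in this way is a standard stabilization of the Schur block in saddle-point preconditioning.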
346
347 void init_block_iterations()
348 {
349 if (m_spForwardInverse.valid()) m_spForwardInverse->init(m_auxMat[AUX_A11]);
350 if (m_SpSchurComplementInverse.valid()) m_SpSchurComplementInverse->init(m_auxMat[AUX_C22]);
351 if (m_spBackwardInverse.valid()) m_spBackwardInverse->init(m_auxMat[AUX_A11]);
352 }
353
354/*
355 void power_iteration(ConstSmartPtr<vector_type> usol, ConstSmartPtr<vector_type> dsol)
356 {
357 SmartPtr<vector_type> uschur(m_slicing.template slice_clone<vector_type>(*usol, UZAWA_CMP_SCHUR) );
358 SmartPtr<vector_type> dschur(m_slicing.template slice_clone<vector_type>(*dsol, UZAWA_CMP_SCHUR) );
359 SmartPtr<vector_type> ueigen(m_slicing.template slice_clone<vector_type>(*usol, UZAWA_CMP_DEFAULT) );
360 SmartPtr<vector_type> deigen(m_slicing.template slice_clone<vector_type>(*usol, UZAWA_CMP_DEFAULT) );
361
362 // Initialize with random guess
363 ueigen->set_random(-0.5, 0.5);
364
365 // dschur = B12*ueigen
366
367
368 //
369 m_spForwardInverse->apply(*uschur, *dschur);
370
371 // deigen = B21 * uschur
372
373 }
374*/
375
376 void postprocess_block_iterations()
377 {
378
379 }
380
381protected:
382 void init_in_first_step(const matrix_type &pMat, const TGridFunction &pC)
383 {
384 UG_DLOG(SchurDebug, 4, "step-init: Size=" << m_vSchurCmp.size() << std::endl);
385 m_slicing.init(pC, m_vSchurCmp);
386
387 if (debug_writer().valid())
388 {
389
390 if (m_spGridFunctionDebugWriter.valid())
391 {
392 UG_LOG("Valid grid function writer for "<< m_spGridFunctionDebugWriter->grid_level() << " on level " << pC.grid_level());
393
394 GridLevel gLevel = m_spGridFunctionDebugWriter->grid_level();
395 m_spGridFunctionDebugWriter->set_grid_level(pC.grid_level());
396 m_spGridFunctionDebugWriter->update_positions();
397 m_spGridFunctionDebugWriter->set_grid_level(gLevel);
398 }
399
400
401 switch(debug_writer()->get_dim())
402 {
403 case 1: reset_slice_debug_writers<1>(); break;
404 case 2: reset_slice_debug_writers<2>(); break;
405 case 3: reset_slice_debug_writers<3>(); break;
406 default: UG_LOG("Invalid dimension for debug_write ???");
407 }
408 }
409
410 extract_sub_matrices(pMat, pC);
411 extract_schur_update(pMat, pC);
412
413 // Initialize preconditioners.
414 init_block_iterations();
415 }
416protected:
417
418 /// flag indicating, whether operator must be initialized
419 bool m_bInit;
420
421 /// vector of strings identifying components used for Schur complement
422 std::vector<std::string> m_vSchurCmp;
423
424 /// object for slicing routines
425 UzawaSlicing<TGridFunction> m_slicing;
426
427 /// iteration for forward system
428 SmartPtr<ILinearIterator<vector_type> > m_spForwardInverse;
429
430 /// iteration for Schur complement system
431 SmartPtr<ILinearIterator<vector_type> > m_SpSchurComplementInverse;
432
433 /// iteration for backward system
434 SmartPtr<ILinearIterator<vector_type> > m_spBackwardInverse;
435
436 /// assembly for (additive) Schur complement update
437 SmartPtr<AssembledLinearOperator<TAlgebra> > m_spSchurUpdateOp;
438
439 /// scaling factor for (additive) Schur complement update
440 double m_dSchurUpdateWeight;
441
442
443
444 enum BLOCK {AUX_A11 = 0, B12, B21, AUX_C22, AUX_M22, AUX_ARRAY_SIZE};
445 SmartPtr<matrix_operator_type> m_auxMat[AUX_ARRAY_SIZE]; // auxiliary matrices (not cloned!)
446
447
448 void my_write_debug(const matrix_type& mat, std::string name, const TGridFunction& rTo, const TGridFunction& rFrom)
449 {
450 my_write_debug(mat, name, rTo.grid_level(), rFrom.grid_level());
451 }
452
453 void my_write_debug(const matrix_type& mat, std::string name, const GridLevel& glTo, const GridLevel& glFrom)
454 {
455 PROFILE_FUNC_GROUP("debug");
456
457 if(m_spGridFunctionDebugWriter.invalid()) return;
458
459 // build name
460 std::stringstream ss;
461 ss << "UZAWA_" << name << GridLevelAppendix(glTo);
462 if(glFrom != glTo) ss << GridLevelAppendix(glFrom);
463 ss << ".mat";
464
465 // write
466 GridLevel currGL = m_spGridFunctionDebugWriter->grid_level();
467 m_spGridFunctionDebugWriter->set_grid_levels(glFrom, glTo);
468 m_spGridFunctionDebugWriter->write_matrix(mat, ss.str().c_str());
469 m_spGridFunctionDebugWriter->set_grid_level(currGL);
470 }
471
472 virtual void my_write_debug(const TGridFunction& rGF, std::string name)
473 {
474 int m_dbgIterCnt = 0;
475 PROFILE_FUNC_GROUP("debug");
476
477 if(m_spGridFunctionDebugWriter.invalid()) return;
478
479 // build name
480 GridLevel gl = rGF.grid_level();
481 std::stringstream ss;
482 ss << "UZAWA_" << name << GridLevelAppendix(gl);
483 ss << "_i" << std::setfill('0') << std::setw(3) << m_dbgIterCnt << ".vec";
484
485 // write
486 GridLevel currGL = m_spGridFunctionDebugWriter->grid_level();
487 m_spGridFunctionDebugWriter->set_grid_level(gl);
488 m_spGridFunctionDebugWriter->write_vector(rGF, ss.str().c_str());
489 m_spGridFunctionDebugWriter->set_grid_level(currGL);
490 };
491
492protected:
494
495 void set_debug(SmartPtr<IDebugWriter<algebra_type> > spDebugWriter);
496
498 template <int d> void reset_slice_debug_writers();
499
499 void create_slice_debug_writers();
500 SmartPtr<GridFunctionDebugWriter<TDomain, TAlgebra> > m_spGridFunctionDebugWriter;
501 SmartPtr<IDebugWriter<algebra_type> > m_spSliceDebugWriter[2];
502
503#ifdef UG_PARALLEL
504 matrix_type m_A;
505#endif
506};
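A configuration sketch (the smoother objects and the component name "p" are illustrative assumptions; any ILinearIterator-derived solver component can be plugged in):

// SmartPtr<UzawaBase<TDomain, TAlgebra> > uzawa
//     = make_sp(new UzawaBase<TDomain, TAlgebra>("p"));
// uzawa->set_forward_iter(forwardSmoother);     // approximates A11^{-1}
// uzawa->set_schur_iter(schurSmoother);         // approximates the Schur block inverse
// uzawa->set_backward_iter(backwardSmoother);   // optional backward sweep
// uzawa->set_schur_operator_update(spSchurUpdateOp, 1.0); // C22 <- C22 + 1.0 * M22
// // then use 'uzawa' as the preconditioner of a linear solver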
507
508
509template <typename TDomain, typename TAlgebra>
510 bool UzawaBase<TDomain, TAlgebra>::
511 preprocess(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp)
512{
513#ifdef UG_PARALLEL
514 if(pcl::NumProcs() > 1)
515 {
516 // copy original matrix
517 MakeConsistent(*pOp, m_A);
518 // set zero on slaves
519 std::vector<IndexLayout::Element> vIndex;
520 CollectUniqueElements(vIndex, m_A.layouts()->slave());
521 SetDirichletRow(m_A, vIndex);
522 }
523#endif
524
525 // Further preprocessing requires a grid function,
526 // so it is deferred to the first call of step().
527 //m_bInit = false;
528
529 return true;
530}
531
532
534template <typename TDomain, typename TAlgebra>
535 bool UzawaBase<TDomain, TAlgebra>::
536 step(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp, vector_type& c, const vector_type& d)
537{
538 // Check that grid function was passed.
539 TGridFunction* pC = dynamic_cast<TGridFunction*>(&c);
540 if(pC == NULL) UG_THROW("UzawaBase: expects correction to be a GridFunction.");
541
542 const vector_type* pD = &d;
543 const matrix_type* pMat = pOp.get();
544
545#ifdef UG_PARALLEL
546 SmartPtr<vector_type> spDtmp;
547 if(pcl::NumProcs() > 1)
548 {
549 // Make defect unique
550 spDtmp = d.clone();
551 spDtmp->change_storage_type(PST_UNIQUE);
552 pD = spDtmp.get();
553 pMat = &m_A;
554 }
555#endif
556
557 // Check, if initialized.
558 if(!m_bInit)
559 {
560 init_in_first_step(*pMat, *pC);
561 m_bInit = true;
562
563 }
564
565 // Obtain defects.
566 SmartPtr<vector_type> ff(m_slicing.template slice_clone<vector_type>(*pD, UZAWA_CMP_DEFAULT) );
567 SmartPtr<vector_type> gg(m_slicing.template slice_clone<vector_type>(*pD, UZAWA_CMP_SCHUR) );
568
569 // Clear corrections.
570 SmartPtr<vector_type> cRegular(m_slicing.template slice_clone_without_values<vector_type>(*pC, UZAWA_CMP_DEFAULT));
571 SmartPtr<vector_type> cSchur(m_slicing.template slice_clone_without_values<vector_type>(*pC, UZAWA_CMP_SCHUR));
572
573 pC->set(0.0);
574 cRegular->set(0.0);
575 cSchur->set(0.0);
576
577 // Step 1: Solve problem \tilde A11 \tilde u = f
578 my_write_debug(*pC, "UZAWA_Correction0");
579 my_write_debug(*(GridFunction<TDomain, TAlgebra>*) pD, "UZAWA_Defect0");
580 if (m_spForwardInverse.valid())
581 {
582 // Solve problem, also compute g:=(f - A11 \tilde u)
583 // WARNING: Should use exact A11.
584 // m_spForwardInverse->apply_update_defect(*cRegular, *ff);
585 m_spForwardInverse->apply(*cRegular, *ff);
586
587 m_slicing.template set_vector_slice<vector_type>(*cRegular, *pC, UZAWA_CMP_DEFAULT);
588 my_write_debug(*pC, "UZAWA_Correction1");
589
590 // Update g:= (g - B21 \tilde u^k)
591 m_auxMat[B21]->apply_sub(*gg, *cRegular);
592 }
593
594 // Step 2: Solve problem: (\tilde S22) pDelta = (g - B21 \tilde u)
595 if (m_SpSchurComplementInverse.valid()) {
596
597 //m_auxMat[AUX_C22]->apply_sub(*gg, *cRegular);
598 m_SpSchurComplementInverse->apply(*cSchur, *gg);
599 m_slicing.template set_vector_slice<vector_type>(*cSchur, *pC, UZAWA_CMP_SCHUR);
600 my_write_debug(*pC, "UZAWA_Correction2");
601
602 // update defect
603 m_auxMat[B12]->apply_sub(*ff, *cSchur);
604 }
605
606 // Step 3: Solve problem \tilde A11 uDelta^{k+1} = (f - \tilde A11 u^k) - B12 p^k
607 if (m_spBackwardInverse.valid()) {
608
609 // ff has been updated
610 m_spBackwardInverse->apply(*cRegular, *ff);
611 // m_slicing.template add_vector_slice<vector_type>(*cRegular, *pC, UZAWA_CMP_DEFAULT);
612 m_slicing.template set_vector_slice<vector_type>(*cRegular, *pC, UZAWA_CMP_DEFAULT);
613 my_write_debug(*pC, "UZAWA_Correction3");
614 }
615
616
617#ifdef UG_PARALLEL
618 pC->set_storage_type(PST_UNIQUE);
619#endif
620
621 // UG_ASSERT(0, "STOP HERE!")
622 return true;
623}
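In formulas, one application of step() performs the inexact block sweep

    c_u = \tilde A_{11}^{-1} f
    c_p = \tilde S_{22}^{-1} ( g - B_{21} c_u )
    c_u = \tilde A_{11}^{-1} ( f - B_{12} c_p )

where (f, g) are the UZAWA_CMP_DEFAULT/UZAWA_CMP_SCHUR slices of the defect, \tilde A_{11}^{-1} denotes the forward resp. backward iteration and \tilde S_{22}^{-1} the Schur iteration; a stage is skipped when its iteration is not set. As the warning in the comments notes, the first stage uses apply() with the approximate block \tilde A_{11}, so g is updated by the B_{21} coupling only, not by the exact residual of the first solve.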
624
625template <typename TDomain, typename TAlgebra>
626 void UzawaBase<TDomain, TAlgebra>::
627 extract_sub_matrices(const matrix_type& K, const TGridFunction& c)
628{
629 // Copy matrices.
630 m_slicing.get_matrix(K, UZAWA_CMP_DEFAULT, UZAWA_CMP_DEFAULT, *(m_auxMat[AUX_A11].template cast_static<matrix_type>()) );
631 m_slicing.get_matrix(K, UZAWA_CMP_DEFAULT, UZAWA_CMP_SCHUR, *(m_auxMat[B12].template cast_static<matrix_type>()) );
632 m_slicing.get_matrix(K, UZAWA_CMP_SCHUR, UZAWA_CMP_DEFAULT, *(m_auxMat[B21].template cast_static<matrix_type>()) );
633 m_slicing.get_matrix(K, UZAWA_CMP_SCHUR, UZAWA_CMP_SCHUR, *(m_auxMat[AUX_C22].template cast_static<matrix_type>()) );
634
635 UG_DLOG(SchurDebug, 4, "A11 =" << *m_auxMat[AUX_A11] << ", ");
636 UG_DLOG(SchurDebug, 4, "B12 =" << *m_auxMat[B12] << ", ");
637 UG_DLOG(SchurDebug, 4, "B21 =" << *m_auxMat[B21] << ", ");
638 UG_DLOG(SchurDebug, 4, "C22 =" << *m_auxMat[AUX_C22] << std::endl);
639
640#ifdef UG_PARALLEL
641 // Copy storage mask.
642 uint mask_K = K.get_storage_mask();
643 m_auxMat[AUX_A11]->set_storage_type(mask_K);
644 m_auxMat[B12]->set_storage_type(mask_K);
645 m_auxMat[B21]->set_storage_type(mask_K);
646 m_auxMat[AUX_C22]->set_storage_type(mask_K);
647
648 // Extract and set layouts.
649 SmartPtr<AlgebraLayouts> defaultLayouts = m_slicing.create_slice_layouts(K.layouts(), UZAWA_CMP_DEFAULT);
650 SmartPtr<AlgebraLayouts> schurLayouts = m_slicing.create_slice_layouts(K.layouts(), UZAWA_CMP_SCHUR);
651
652 m_auxMat[AUX_A11]->set_layouts(defaultLayouts);
653 m_auxMat[AUX_C22]->set_layouts(schurLayouts);
654
655#endif
656
657
658 // Write matrices for debug, if applicable.
659 if (m_spSliceDebugWriter[UZAWA_CMP_DEFAULT].valid()) {
660 m_spSliceDebugWriter[UZAWA_CMP_DEFAULT]->write_matrix(*m_auxMat[AUX_A11], "UZAWA_init_A11_AfterExtract.mat");
661 }
662 //write_debug(*m_auxMat[B12], "Uzawa_init_B12_AfterExtract");
663 //write_debug(*m_auxMat[B21], "Uzawa_init_B21_AfterExtract");
664 if (m_spSliceDebugWriter[UZAWA_CMP_SCHUR].valid()) {
665 m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->write_matrix(*m_auxMat[AUX_C22], "UZAWA_init_C22_AfterExtract.mat");
666 }
667}
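The four extractions realize the 2x2 block partition of K induced by the slicing,

    K = \begin{pmatrix} A_{11} & B_{12} \\ B_{21} & C_{22} \end{pmatrix}

where the first block row/column collects the UZAWA_CMP_DEFAULT indices and the second the UZAWA_CMP_SCHUR indices.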
668
669
670template <typename TDomain, typename TAlgebra>
671 bool UzawaBase<TDomain, TAlgebra>::
672 postprocess()
673{
674 postprocess_block_iterations();
675 m_bInit = false;
676
677 return true;
678}
679
680
681
682template <typename TDomain, typename TAlgebra>
683 SmartPtr<ILinearIterator<typename TAlgebra::vector_type> >
684 UzawaBase<TDomain, TAlgebra>::clone()
685{
686 SmartPtr<UzawaBase<TDomain, TAlgebra> >
687 newInst(new UzawaBase<TDomain, TAlgebra>(m_vSchurCmp));
688
689 newInst->set_debug(debug_writer());
690
691 // clone approximate inverses
692 newInst->m_spForwardInverse = (m_spForwardInverse.valid()) ? m_spForwardInverse->clone().template cast_dynamic<base_type>() : SPNULL;
693 newInst->m_SpSchurComplementInverse = (m_SpSchurComplementInverse.valid()) ? m_SpSchurComplementInverse->clone().template cast_dynamic<base_type>() : SPNULL;
694 newInst->m_spBackwardInverse = (m_spBackwardInverse.valid()) ? m_spBackwardInverse->clone().template cast_dynamic<base_type>() : SPNULL;
695
696 // todo: need to clone here?
697 newInst->m_spSchurUpdateOp = m_spSchurUpdateOp;
698 newInst->m_dSchurUpdateWeight = m_dSchurUpdateWeight;
699
700 return newInst;
701}
702
703
704
705template <typename TDomain, typename TAlgebra>
706 void UzawaBase<TDomain, TAlgebra>::
707 create_slice_debug_writers()
708{
709 std::string basePath = debug_writer()->get_base_dir();
710
711 m_spSliceDebugWriter[UZAWA_CMP_DEFAULT] = make_sp(new AlgebraDebugWriter<algebra_type>);
712 m_spSliceDebugWriter[UZAWA_CMP_DEFAULT] ->set_base_dir(basePath.c_str());
713
714 m_spSliceDebugWriter[UZAWA_CMP_SCHUR] = make_sp(new AlgebraDebugWriter<algebra_type>);
715 m_spSliceDebugWriter[UZAWA_CMP_SCHUR] ->set_base_dir(basePath.c_str());
716}
717
718template <typename TDomain, typename TAlgebra>
719template<int dim>
720 void UzawaBase<TDomain, TAlgebra>::
721 reset_slice_debug_writers()
722{
723 UG_LOG("reset_slice_debug_writers"<< std::endl);
724
725 if (m_spSliceDebugWriter[UZAWA_CMP_DEFAULT].invalid() ||
726 m_spSliceDebugWriter[UZAWA_CMP_SCHUR].invalid()) return;
727
728 const std::vector<MathVector<dim> > &positions = (m_spGridFunctionDebugWriter.valid()) ? m_spGridFunctionDebugWriter->template get_positions<dim>() : debug_writer()->template get_positions<dim>();
729 std::vector<MathVector<dim> > cmpPositions[2];
730
731 m_slicing.get_vector_slice(positions, UZAWA_CMP_DEFAULT, cmpPositions[UZAWA_CMP_DEFAULT]);
732 m_spSliceDebugWriter[UZAWA_CMP_DEFAULT]->template set_positions<dim>(cmpPositions[UZAWA_CMP_DEFAULT]);
733
734 m_slicing.get_vector_slice(positions, UZAWA_CMP_SCHUR, cmpPositions[UZAWA_CMP_SCHUR]);
735 m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->template set_positions<dim>(cmpPositions[UZAWA_CMP_SCHUR]);
736
737}
738
739template <typename TDomain, typename TAlgebra>
740 void UzawaBase<TDomain, TAlgebra>::set_debug(SmartPtr<IDebugWriter<algebra_type> > spDebugWriter)
741{
742 base_type::set_debug(spDebugWriter);
743 m_spGridFunctionDebugWriter = debug_writer().template cast_dynamic<GridFunctionDebugWriter<TDomain, TAlgebra> >();
744
745 if (m_spGridFunctionDebugWriter.invalid()) return;
746
747 create_slice_debug_writers();
748
749 debug_writer()->update_positions();
750 switch(debug_writer()->get_dim())
751 {
752 case 1: reset_slice_debug_writers<1>(); break;
753 case 2: reset_slice_debug_writers<2>(); break;
754 case 3: reset_slice_debug_writers<3>(); break;
755 default: UG_LOG("debug dim ???");
756 }
757
758}
759
760
761} // namespace ug
762#endif // __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__UZAWA__