mg_solver_impl.hpp
/*
 * Copyright (c) 2010-2015: G-CSC, Goethe University Frankfurt
 * Author: Andreas Vogel
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__UG__LIB_DISC__MULTI_GRID_SOLVER__MG_SOLVER_IMPL__
#define __H__UG__LIB_DISC__MULTI_GRID_SOLVER__MG_SOLVER_IMPL__

#include <iostream>
#include <sstream>
#include <string>

#include "common/error.h"
#include "mg_solver.h"

#ifdef UG_PARALLEL
    #include "pcl/pcl_util.h"
//  the debug barrier is used to eliminate synchronization overhead from
//  profiling stats. It is only used for parallel builds.
//  PCL_DEBUG_BARRIER only has an effect if PCL_DEBUG_BARRIER_ENABLED is defined.
    #define GMG_PARALLEL_DEBUG_BARRIER(comm) PCL_DEBUG_BARRIER(comm)

#else
    #define GMG_PARALLEL_DEBUG_BARRIER(comm)
#endif

#define PROFILE_GMG
#ifdef PROFILE_GMG
    #define GMG_PROFILE_FUNC()      PROFILE_FUNC_GROUP("gmg")
    #define GMG_PROFILE_BEGIN(name) PROFILE_BEGIN_GROUP(name, "gmg")
    #define GMG_PROFILE_END()       PROFILE_END()
#else
    #define GMG_PROFILE_FUNC()
    #define GMG_PROFILE_BEGIN(name)
    #define GMG_PROFILE_END()
#endif
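
//  Usage sketch (illustrative note, not from the original file): the profiling
//  macros above are always used pairwise around a measured section, e.g. as in
//  apply() further below:
//
//      GMG_PROFILE_BEGIN(GMG_Apply_lmgc);
//      lmgc(m_topLev, m_cycleType);
//      GMG_PROFILE_END();   // both expand to nothing if PROFILE_GMG is undefined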

namespace ug{

////////////////////////////////////////////////////////////////////////////////
// Constructor
////////////////////////////////////////////////////////////////////////////////

template <typename TDomain, typename TAlgebra>
AssembledMultiGridCycle<TDomain, TAlgebra>::
AssembledMultiGridCycle() :
    m_spSurfaceMat(NULL), m_pSurfaceSol(nullptr), m_spAss(NULL), m_spApproxSpace(SPNULL),
    m_topLev(GridLevel::TOP), m_surfaceLev(GridLevel::TOP),
    m_baseLev(0), m_cycleType(_V_),
    m_numPreSmooth(2), m_numPostSmooth(2),
    m_LocalFullRefLevel(0), m_GridLevelType(GridLevel::LEVEL),
    m_bUseRAP(false), m_bSmoothOnSurfaceRim(false),
    m_bCommCompOverlap(false),
    m_spPreSmootherPrototype(new Jacobi<TAlgebra>()),
    m_spPostSmootherPrototype(m_spPreSmootherPrototype),
    m_spProjectionPrototype(SPNULL),
    m_spProlongationPrototype(new StdTransfer<TDomain,TAlgebra>()),
    m_spRestrictionPrototype(m_spProlongationPrototype),
    m_spBaseSolver(new LU<TAlgebra>()),
    m_bGatheredBaseIfAmbiguous(true),
    m_bGatheredBaseUsed(true),
    m_ignoreInitForBaseSolver(false),
    m_bMatrixStructureIsConst(false),
    m_pC(nullptr),
    m_spDebugWriter(NULL), m_dbgIterCnt(0)
{};
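
//  Configuration sketch (illustrative, not part of the original file): the
//  defaults set above amount to a V-cycle with 2 pre-/post-smoothing steps, a
//  Jacobi smoother and an LU base solver. User code typically overrides them
//  via the setters also used in clone() below, e.g.:
//
//      SmartPtr<AssembledMultiGridCycle<TDomain, TAlgebra> > gmg =
//          make_sp(new AssembledMultiGridCycle<TDomain, TAlgebra>(spApproxSpace));
//      gmg->set_num_presmooth(3);
//      gmg->set_num_postsmooth(3);
//      gmg->set_base_level(0);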


template <typename TDomain, typename TAlgebra>
AssembledMultiGridCycle<TDomain, TAlgebra>::
AssembledMultiGridCycle(SmartPtr<ApproximationSpace<TDomain> > approxSpace) :
    m_spSurfaceMat(NULL), m_pSurfaceSol(nullptr), m_spAss(NULL), m_spApproxSpace(approxSpace),
    m_topLev(GridLevel::TOP), m_surfaceLev(GridLevel::TOP),
    m_baseLev(0), m_cycleType(_V_),
    m_numPreSmooth(2), m_numPostSmooth(2),
    m_LocalFullRefLevel(0), m_GridLevelType(GridLevel::LEVEL),
    m_bUseRAP(false), m_bSmoothOnSurfaceRim(false),
    m_bCommCompOverlap(false),
    m_spPreSmootherPrototype(new Jacobi<TAlgebra>()),
    m_spPostSmootherPrototype(m_spPreSmootherPrototype),
    m_spProjectionPrototype(new StdInjection<TDomain,TAlgebra>(m_spApproxSpace)),
    m_spProlongationPrototype(new StdTransfer<TDomain,TAlgebra>()),
    m_spRestrictionPrototype(m_spProlongationPrototype),
    m_spBaseSolver(new LU<TAlgebra>()),
    m_bGatheredBaseIfAmbiguous(true),
    m_bGatheredBaseUsed(true),
    m_ignoreInitForBaseSolver(false),
    m_bMatrixStructureIsConst(false),
    m_pC(nullptr),
    m_spDebugWriter(NULL), m_dbgIterCnt(0)
{};

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
set_approximation_space(SmartPtr<ApproximationSpace<TDomain> > approxSpace)
{
    m_spApproxSpace = approxSpace;
    m_spProjectionPrototype = make_sp(new StdInjection<TDomain,TAlgebra>(m_spApproxSpace));
}

template <typename TDomain, typename TAlgebra>
SmartPtr<ILinearIterator<typename TAlgebra::vector_type> >
AssembledMultiGridCycle<TDomain, TAlgebra>::
clone()
{
    SmartPtr<AssembledMultiGridCycle<TDomain, TAlgebra> > clone(
        new AssembledMultiGridCycle<TDomain, TAlgebra>(m_spApproxSpace));

    clone->set_base_level(m_baseLev);
    clone->set_base_solver(m_spBaseSolver);
    clone->set_cycle_type(m_cycleType);
    clone->set_debug(m_spDebugWriter);
    clone->set_discretization(m_spAss);
    clone->set_num_postsmooth(m_numPostSmooth);
    clone->set_num_presmooth(m_numPreSmooth);
    clone->set_projection(m_spProjectionPrototype);
    clone->set_prolongation(m_spProlongationPrototype);
    clone->set_restriction(m_spRestrictionPrototype);
    clone->set_presmoother(m_spPreSmootherPrototype);
    clone->set_postsmoother(m_spPostSmootherPrototype);
    clone->set_surface_level(m_surfaceLev);

    for(size_t i = 0; i < m_vspProlongationPostProcess.size(); ++i)
        clone->add_prolongation_post_process(m_vspProlongationPostProcess[i]);

    for(size_t i = 0; i < m_vspRestrictionPostProcess.size(); ++i)
        clone->add_restriction_post_process(m_vspRestrictionPostProcess[i]);

    return clone;
}

template <typename TDomain, typename TAlgebra>
AssembledMultiGridCycle<TDomain, TAlgebra>::
~AssembledMultiGridCycle()
{};

////////////////////////////////////////////////////////////////////////////////
// apply and init
////////////////////////////////////////////////////////////////////////////////

template <typename TDomain, typename TAlgebra>
bool AssembledMultiGridCycle<TDomain, TAlgebra>::
apply(vector_type& rC, const vector_type& rD)
{
    GMG_PROFILE_FUNC();
    GF* pC = dynamic_cast<GF*>(&rC);
    if(!pC) UG_THROW("GMG::apply: Expect Correction to be grid based.")
    const GF* pD = dynamic_cast<const GF*>(&rD);
    if(!pD) UG_THROW("GMG::apply: Expect Defect to be grid based.")
    GF& c = *pC;
    m_pC = pC;
    const GF& d = *pD;

    try{
//  Check if surface level has been chosen correctly
//  Please note that the approximation space returns the global number of levels,
//  i.e. the maximum of levels among all processes.
    if(m_topLev >= (int)m_spApproxSpace->num_levels())
        UG_THROW("GMG::apply: SurfaceLevel "<<m_topLev<<" does not exist.");

//  Check if base level has been chosen correctly
    if(m_baseLev > m_topLev)
        UG_THROW("GMG::apply: Base level must be smaller or equal to surface level.");

//  debug output
    write_debug(d, "Defect_In");
    write_debug(*m_spSurfaceMat, "SurfaceStiffness", c, c);
    for(int lev = m_baseLev; lev <= m_topLev; ++lev)
    {
        LevData& ld = *m_vLevData[lev];
        ld.n_pre_calls = ld.n_post_calls = ld.n_base_calls
            = ld.n_restr_calls = ld.n_prolong_calls = 0;
    }

//  project defect from surface to level
    GMG_PROFILE_BEGIN(GMG_Apply_CopyDefectFromSurface);
    try{
        for(int lev = m_baseLev; lev <= m_topLev; ++lev){
            const std::vector<SurfLevelMap>& vMap = m_vLevData[lev]->vSurfLevelMap;
            LevData& ld = *m_vLevData[lev];
            for(size_t i = 0; i < vMap.size(); ++i){
                (*ld.sd)[vMap[i].levIndex] = d[vMap[i].surfIndex];
            }
        }
#ifdef UG_PARALLEL
        for(int lev = m_baseLev; lev <= m_topLev; ++lev)
            m_vLevData[lev]->sd->set_storage_type(d.get_storage_mask());
#endif
    }
    UG_CATCH_THROW("GMG::apply: Project d Surf -> Level failed.");
    GMG_PROFILE_END();

//  Perform one multigrid cycle
    UG_DLOG(LIB_DISC_MULTIGRID, 4, "gmg-apply lmgc (on level " << m_topLev << ")... \n");
    try{
        GMG_PROFILE_BEGIN(GMG_Apply_ResetCorrection);
    //  reset surface correction
        c.set(0.0);

    //  reset corr on top level
        m_vLevData[m_topLev]->sc->set(0.0);
        GMG_PROFILE_END();

    //  start mg-cycle
        GMG_PROFILE_BEGIN(GMG_Apply_lmgc);
        lmgc(m_topLev, m_cycleType);
        GMG_PROFILE_END();

    //  project top lev to surface
        GMG_PROFILE_BEGIN(GMG_Apply_AddCorrectionToSurface);
        const std::vector<SurfLevelMap>& vMap = m_vLevData[m_topLev]->vSurfLevelMap;
        LevData& ld = *m_vLevData[m_topLev];
        for(size_t i = 0; i < vMap.size(); ++i){
            c[vMap[i].surfIndex] += (*ld.sc)[vMap[i].levIndex];
        }
        #ifdef UG_PARALLEL
        c.set_storage_type(PST_CONSISTENT);
        #endif
        GMG_PROFILE_END();
    }
    UG_CATCH_THROW("GMG: lmgc failed.");

//  apply scaling
    GMG_PROFILE_BEGIN(GMG_Apply_Scaling);
    try{
        const number kappa = this->damping()->damping(c, d, m_spSurfaceMat.template cast_dynamic<ILinearOperator<vector_type> >());
        if(kappa != 1.0) c *= kappa;
    }
    UG_CATCH_THROW("GMG: Damping failed.")
    GMG_PROFILE_END();

//  debug output
    write_debug(c, "Correction_Out");

//  increase dbg counter
    if(m_spDebugWriter.valid()) m_dbgIterCnt++;

    } UG_CATCH_THROW("GMG::apply: Application failed.");

    UG_DLOG(LIB_DISC_MULTIGRID, 4, "gmg-apply done. \n");
    return true;
}

template <typename TDomain, typename TAlgebra>
bool AssembledMultiGridCycle<TDomain, TAlgebra>::
apply_update_defect(vector_type& c, vector_type& rD)
{
    GMG_PROFILE_FUNC();

//  NOTE: This is the implementation of a multiplicative multigrid. Thus, the
//        defect is kept up to date when traversing the grid. At the end of
//        the iteration the updated defect is stored in the level defects and
//        could be projected back to the surface in order to get an updated
//        surface defect. This is, however, not done. For these reasons:
//        a) A matrix-vector operation is not very expensive
//        b) In a 2d adaptive case, the update is difficult, but can be
//           performed. But if the implementation is incorrect, this will
//           hardly be detected.
//        c) In a 3d adaptive case, it is impossible to ensure that assembled
//           level matrices and the surface matrix have the same couplings
//           (not even to inner points). This is due to the fact that e.g.
//           finite volume geometries are computed using different
//           integration points. (Hanging fv uses triangles as scvf in 3d,
//           while normal fv uses quads). Therefore, the updated defect is
//           only approximately the correct defect. In order to return the
//           correct defect, we must recompute the defect in the
//           adaptive case.
//        d) If scaling of the correction is performed, the defect must be
//           recomputed anyway. Thus, only scale=1.0 allows optimizing.
//        e) Updated defects are only needed in LinearIterativeSolvers. In
//           Krylov methods (CG, BiCGStab, ...) only the correction is
//           needed. We optimize for that case.

//  compute correction
    if(!apply(c, rD)) return false;

//  update defect: d = d - A*c
    m_spSurfaceMat->matmul_minus(rD, c);

//  write for debugging
    const GF* pD = dynamic_cast<const GF*>(&rD);
    if(!pD) UG_THROW("GMG::apply: Expect Defect to be grid based.")
    const GF& d = *pD;
    write_debug(d, "Defect_Out");

    return true;
}
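
//  Illustration (added sketch, not from the original file): in algebraic terms,
//  apply_update_defect() realizes
//
//      c := B(d)        // apply(): one multigrid cycle acting as preconditioner B
//      d := d - A*c     // matmul_minus() on the surface matrix
//
//  which is why Krylov methods that only need the correction c can call apply()
//  directly and skip the extra matrix-vector product (reason e) above).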

template <typename TDomain, typename TAlgebra>
bool AssembledMultiGridCycle<TDomain, TAlgebra>::
init(SmartPtr<ILinearOperator<vector_type> > J, const vector_type& u)
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - init(J, u)\n");

//  try to extract assembling routine
    SmartPtr<AssembledLinearOperator<TAlgebra> > spALO =
        J.template cast_dynamic<AssembledLinearOperator<TAlgebra> >();
    if(spALO.valid()){
        m_spAss = spALO->discretization();
    }

//  Store Surface Matrix
    m_spSurfaceMat = J.template cast_dynamic<matrix_type>();

//  Store Surface Solution
    m_pSurfaceSol = &u;

//  call init
    init();

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - init(J, u)\n");
    return true;
}

template <typename TDomain, typename TAlgebra>
bool AssembledMultiGridCycle<TDomain, TAlgebra>::
init(SmartPtr<ILinearOperator<vector_type> > L)
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - init(L)\n");

//  try to extract assembling routine
    SmartPtr<AssembledLinearOperator<TAlgebra> > spALO =
        L.template cast_dynamic<AssembledLinearOperator<TAlgebra> >();
    if(spALO.valid()){
        m_spAss = spALO->discretization();
    }

//  Store Surface Matrix
    m_spSurfaceMat = L.template cast_dynamic<matrix_type>();

//  Store Surface Solution
    m_pSurfaceSol = NULL;

//  call init
    init();

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - init(L)\n");
    return true;
}


template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init()
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_common\n");

    try{

//  Cast Operator
    if(m_spSurfaceMat.invalid())
        UG_THROW("GMG:init: Can not cast Operator to Matrix.");

//  Check Approx Space
    if(m_spApproxSpace.invalid())
        UG_THROW("GMG::init: Approximation Space not set.");

//  check that grid given
    if(m_spApproxSpace->num_levels() == 0)
        UG_THROW("GMG:init: No grid levels");

    if(m_spAss.invalid())
        UG_THROW("GMG::init: Discretization not set.");

    if(m_spBaseSolver.invalid())
        UG_THROW("GMG::init: Base Solver not set.");

    if(m_spPreSmootherPrototype.invalid())
        UG_THROW("GMG::init: PreSmoother not set.");

    if(m_spPostSmootherPrototype.invalid())
        UG_THROW("GMG::init: PostSmoother not set.");

    if(m_spProlongationPrototype.invalid())
        UG_THROW("GMG::init: Prolongation not set.");

    if(m_spRestrictionPrototype.invalid())
        UG_THROW("GMG::init: Restriction not set.");

//  get current toplevel
    const GF* pSol = dynamic_cast<const GF*>(m_pSurfaceSol);
    if(pSol){
        m_surfaceLev = pSol->dof_distribution()->grid_level().level();
    }

    int topLev = 0;
    if(m_surfaceLev != GridLevel::TOP) topLev = m_surfaceLev;
    else topLev = m_spApproxSpace->num_levels() - 1;

    if(m_baseLev > topLev)
        UG_THROW("GMG::init: Base Level greater than Surface level.");

    if(m_ApproxSpaceRevision != m_spApproxSpace->revision()
        || topLev != m_topLev)
    {
    //  remember new top level
        m_topLev = topLev;

    //  Allocate memory for given top level
        GMG_PROFILE_BEGIN(GMG_Init_LevelMemory);
        try{
            init_level_memory(m_baseLev, m_topLev);
        }
        UG_CATCH_THROW("GMG::init: Cannot allocate memory.");
        GMG_PROFILE_END();

    //  init mapping from surface level to top level in case of full refinement
        GMG_PROFILE_BEGIN(GMG_Init_IndexMappings);
        try{
            init_index_mappings();
        }
        UG_CATCH_THROW("GMG: Cannot create SurfaceToLevelMap.")
        GMG_PROFILE_END();

    //  Create Interpolation
        GMG_PROFILE_BEGIN(GMG_Init_Transfers);
        try{
            init_transfer();
        }
        UG_CATCH_THROW("GMG:init: Cannot init Transfer (Prolongation/Restriction).");
        GMG_PROFILE_END();

    //  remember revision counter of approx space
        m_ApproxSpaceRevision = m_spApproxSpace->revision();
    }

//  Assemble coarse grid operators
    GMG_PROFILE_BEGIN(GMG_Init_CreateLevelMatrices);
    try{
        if(m_bUseRAP){
            init_rap_operator();
        } else {
            assemble_level_operator();
        }
    }
    UG_CATCH_THROW("GMG: Initialization of Level Operator failed.");
    GMG_PROFILE_END();

//  Init smoother for coarse grid operators
    GMG_PROFILE_BEGIN(GMG_Init_Smoother);
    try{
        init_smoother();
    }
    UG_CATCH_THROW("GMG:init: Cannot init Smoother.");
    GMG_PROFILE_END();

//  Init base solver
    if(!ignore_init_for_base_solver()){
        GMG_PROFILE_BEGIN(GMG_Init_BaseSolver);
        try{
            init_base_solver();
        }
        UG_CATCH_THROW("GMG:init: Cannot init Base Solver.");
        GMG_PROFILE_END();
    }
    } UG_CATCH_THROW("GMG: Init failure for init(u)");

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_common\n");
}


template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
set_ignore_init_for_base_solver(bool ignore)
{
    #ifdef UG_PARALLEL
    UG_COND_THROW(ignore && (pcl::NumProcs() > 1),
                  "ignore_init_for_base_solver currently only works for serial runs.")
    #endif
    m_ignoreInitForBaseSolver = ignore;
}

template <typename TDomain, typename TAlgebra>
bool AssembledMultiGridCycle<TDomain, TAlgebra>::
ignore_init_for_base_solver() const
{
    return m_ignoreInitForBaseSolver;
}


template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
force_reinit()
{
    m_ApproxSpaceRevision.invalidate();
}


template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
assemble_level_operator()
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start assemble_level_operator\n");

    if(m_GridLevelType == GridLevel::SURFACE)
        UG_THROW("GMG: emulate_full_refined_grid currently only implemented "
                 "for set_rap(true) - since assembling on a surface-level with "
                 "lev < topLev is currently not supported by constraints and "
                 "elem-disc loop (only top-lev or level-view possible). It is "
                 "necessary to rework that part of the assembling procedure.")

//  Create Projection
    try{
        if(m_pSurfaceSol) {
            UG_DLOG(LIB_DISC_MULTIGRID, 4, "  start assemble_level_operator: project sol\n");
            GMG_PROFILE_BEGIN(GMG_ProjectSolution_CopyFromSurface);
            #ifdef UG_PARALLEL
            if(!m_pSurfaceSol->has_storage_type(PST_CONSISTENT))
                UG_THROW("GMG::init: Can only project "
                         "a consistent solution. Make sure to pass a consistent one.");
            #endif

            init_projection();

            for(int lev = m_baseLev; lev <= m_topLev; ++lev){
                const std::vector<SurfLevelMap>& vMap = m_vLevData[lev]->vSurfLevelMap;
                LevData& ld = *m_vLevData[lev];
                for(size_t i = 0; i < vMap.size(); ++i){
                    (*ld.st)[vMap[i].levIndex] = (*m_pSurfaceSol)[vMap[i].surfIndex];
                }
            }
            GMG_PROFILE_END();
            UG_DLOG(LIB_DISC_MULTIGRID, 4, "  end assemble_level_operator: project sol\n");
        }
    }
    UG_CATCH_THROW("GMG::init: Projection of Surface Solution failed.");

//  Create coarse level operators
    GMG_PROFILE_BEGIN(GMG_AssembleLevelMat_AllLevelMat);
    for(int lev = m_topLev; lev >= m_baseLev; --lev)
    {
        LevData& ld = *m_vLevData[lev];

    //  send solution to v-master
        SmartPtr<GF> spT = ld.st;
        #ifdef UG_PARALLEL
        ComPol_VecCopy<vector_type> cpVecCopy(ld.t.get());
        if(m_pSurfaceSol && lev > m_baseLev)
        {
            if( !ld.t->layouts()->vertical_slave().empty() ||
                !ld.t->layouts()->vertical_master().empty())
            {
                UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - send sol to v-master\n");

                // todo: if no ghosts are present on a proc, one could use ld.st directly
                GMG_PROFILE_BEGIN(GMG_ProjectSolution_CopyNoghostToGhost);
                copy_noghost_to_ghost(ld.t, ld.st, ld.vMapPatchToGlobal);
                GMG_PROFILE_END();

                GMG_PROFILE_BEGIN(GMG_ProjectSolution_CollectAndSend);
                m_Com.receive_data(ld.t->layouts()->vertical_master(), cpVecCopy);
                m_Com.send_data(ld.t->layouts()->vertical_slave(), cpVecCopy);
                m_Com.communicate_and_resume();
                GMG_PROFILE_END();
                if(!m_bCommCompOverlap){
                    GMG_PROFILE_BEGIN(GMG_ProjectSolution_RevieveAndExtract_NoOverlap);
                    m_Com.wait();
                    GMG_PROFILE_END();
                }

                spT = ld.t;
                UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - send sol to v-master\n");
            }
        }
        #endif

    //  In the Full-Ref case we can copy the matrix from the surface
        bool bCpyFromSurface = ((lev == m_topLev) && (lev <= m_LocalFullRefLevel));
        if(!bCpyFromSurface)
        {
            UG_DLOG(LIB_DISC_MULTIGRID, 4, "  start assemble_level_operator: assemble on lev "<<lev<<"\n");
            GMG_PROFILE_BEGIN(GMG_AssembleLevelMat_AssembleOnLevel);
            try{
                if(m_GridLevelType == GridLevel::LEVEL)
                    m_spAss->ass_tuner()->set_force_regular_grid(true);
                m_spAss->assemble_jacobian(*ld.A, *ld.st, GridLevel(lev, m_GridLevelType, false));
                m_spAss->ass_tuner()->set_force_regular_grid(false);
            }
            UG_CATCH_THROW("GMG:init: Cannot init operator for level "<<lev);
            GMG_PROFILE_END();
            UG_DLOG(LIB_DISC_MULTIGRID, 4, "  end assemble_level_operator: assemble on lev "<<lev<<"\n");
        }
        else
        {
        //  in case of full refinement we simply copy the matrix (with correct numbering)
            UG_DLOG(LIB_DISC_MULTIGRID, 4, "  start assemble_level_operator: copy mat on lev "<<lev<<"\n");
            GMG_PROFILE_BEGIN(GMG_AssembleLevelMat_CopyFromTopSurface);

        //  loop all mapped indices
            UG_ASSERT(m_spSurfaceMat->num_rows() == m_vSurfToLevelMap.size(),
                      "Surface Matrix rows != Surf Level Indices")

            if (m_bMatrixStructureIsConst)
                ld.A->clear_retain_structure();
            else
                ld.A->resize_and_clear(m_spSurfaceMat->num_rows(), m_spSurfaceMat->num_cols());
            for(size_t srfRow = 0; srfRow < m_vSurfToLevelMap.size(); ++srfRow)
            {
            //  get mapped level index
                UG_ASSERT(m_vSurfToLevelMap[srfRow].level == m_topLev,
                          "All surface Indices must be on top level for full-ref.")
                const size_t lvlRow = m_vSurfToLevelMap[srfRow].index;

            //  loop all connections of the surface dof to other surface dofs
            //  and copy the matrix coupling into the level matrix
                typedef typename matrix_type::const_row_iterator const_row_iterator;
                const_row_iterator conn = m_spSurfaceMat->begin_row(srfRow);
                const_row_iterator connEnd = m_spSurfaceMat->end_row(srfRow);
                for( ;conn != connEnd; ++conn){
                //  get corresponding level connection index
                    UG_ASSERT(m_vSurfToLevelMap[conn.index()].level == m_topLev,
                              "All surface Indices must be on top level for full-ref.")
                    const size_t lvlCol = m_vSurfToLevelMap[conn.index()].index;

                //  copy connection to level matrix
                    (*ld.A)(lvlRow, lvlCol) = conn.value();
                }
            }

            #ifdef UG_PARALLEL
            ld.A->set_storage_type(m_spSurfaceMat->get_storage_mask());
            ld.A->set_layouts(ld.st->layouts());
            #endif
            GMG_PROFILE_END();
            UG_DLOG(LIB_DISC_MULTIGRID, 4, "  end assemble_level_operator: copy mat on lev "<<lev<<"\n");
        }

        if(m_pSurfaceSol && lev > m_baseLev)
        {
            #ifdef UG_PARALLEL
            if(m_bCommCompOverlap){
                UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - receive sol at v-master\n");
                GMG_PROFILE_BEGIN(GMG_ProjectSolution_RecieveAndExtract_WithOverlap);
                m_Com.wait();
                GMG_PROFILE_END();
                UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - receive sol at v-master\n");
            }
            #endif

            GMG_PROFILE_BEGIN(GMG_ProjectSolution_Transfer);
            LevData& lc = *m_vLevData[lev-1];
            try{
                ld.Projection->do_restrict(*lc.st, *spT);
                #ifdef UG_PARALLEL
                lc.st->set_storage_type(m_pSurfaceSol->get_storage_mask());
                #endif
            } UG_CATCH_THROW("GMG::init: Cannot project solution to coarse grid "
                             "function of level "<<lev-1);
            GMG_PROFILE_END();
        }
    }
    GMG_PROFILE_END();

//  write computed level matrices for debug purpose
    for(int lev = m_baseLev; lev <= m_topLev; ++lev){
        LevData& ld = *m_vLevData[lev];
        write_debug(*ld.A, "LevelMatrix", *ld.st, *ld.st);
    }

//  If no ghosts are present we can simply use the whole grid. If the base
//  solver is carried out in serial (gathering to some processes), we have
//  to assemble the coarse grid matrix on the whole grid as well.
    if(m_bGatheredBaseUsed)
    {
        UG_DLOG(LIB_DISC_MULTIGRID, 4, "  start assemble_level_operator: ass gathered on lev "<<m_baseLev<<"\n");
        LevData& ld = *m_vLevData[m_baseLev];

        if(m_pSurfaceSol){

            #ifdef UG_PARALLEL
            if( !ld.t->layouts()->vertical_slave().empty() ||
                !ld.t->layouts()->vertical_master().empty())
            {
                UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - copy sol to gathered master\n");

                GMG_PROFILE_BEGIN(GMG_ProjectSolution_CopyToGatheredMaster);
                copy_noghost_to_ghost(ld.t, ld.st, ld.vMapPatchToGlobal);

                ComPol_VecCopy<vector_type> cpVecCopy(ld.t.get());
                m_Com.receive_data(ld.t->layouts()->vertical_master(), cpVecCopy);
                m_Com.send_data(ld.t->layouts()->vertical_slave(), cpVecCopy);
                m_Com.communicate();
                GMG_PROFILE_END();

                UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - copy sol to gathered master\n");
            }
            #endif
        }

    //  only assemble on the gathering proc
        if(gathered_base_master())
        {
            GMG_PROFILE_BEGIN(GMG_AssembleLevelMat_GatheredBase);
            try{
                if(m_GridLevelType == GridLevel::LEVEL)
                    m_spAss->ass_tuner()->set_force_regular_grid(true);
                m_spAss->assemble_jacobian(*spGatheredBaseMat, *ld.t, GridLevel(m_baseLev, m_GridLevelType, true));
                m_spAss->ass_tuner()->set_force_regular_grid(false);
            }
            UG_CATCH_THROW("GMG:init: Cannot init base level operator");
            GMG_PROFILE_END();
        }
        UG_DLOG(LIB_DISC_MULTIGRID, 4, "  end assemble_level_operator: ass gathered on lev "<<m_baseLev<<"\n");
    }

//  assemble missing coarse grid matrix contribution (only in adaptive case)
    try{
        assemble_rim_cpl(m_pSurfaceSol);
    }
    UG_CATCH_THROW("GMG:init: Cannot init missing coarse grid coupling.");

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop assemble_level_operator\n");
}

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
assemble_rim_cpl(const vector_type* u)
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - assemble_rim_cpl " << "\n");

//  clear matrices
    for(int lev = m_baseLev; lev <= m_topLev; ++lev){
        LevData& ld = *m_vLevData[lev];
        ld.RimCpl_Fine_Coarse.resize_and_clear(0,0);
        ld.RimCpl_Coarse_Fine.resize_and_clear(0,0);
    }

//  loop all levels to compute the missing contribution
    for(int lev = m_topLev; lev > m_LocalFullRefLevel; --lev)
    {
        LevData& lc = *m_vLevData[lev-1];
        LevData& lf = *m_vLevData[lev];

        lc.RimCpl_Fine_Coarse.resize_and_clear(lf.sd->size(), lc.sc->size());
        if(m_bSmoothOnSurfaceRim)
            lc.RimCpl_Coarse_Fine.resize_and_clear(lc.sd->size(), lf.sc->size());

        for(size_t i = 0; i < lf.vShadowing.size(); ++i)
        {
            const size_t lvlRow = lf.vShadowing[i];
            const size_t srfRow = lf.vSurfShadowing[i];

            SetRow((*lf.A), lvlRow, 0.0);

            typedef typename matrix_type::const_row_iterator row_iterator;
            row_iterator conn = m_spSurfaceMat->begin_row(srfRow);
            row_iterator connEnd = m_spSurfaceMat->end_row(srfRow);
            for( ;conn != connEnd; ++conn)
            {
                const size_t srfCol = conn.index();

                if(m_vSurfToLevelMap[srfCol].level == lev) {
                    const size_t lvlCol = m_vSurfToLevelMap[srfCol].index;
                    (*lf.A)(lvlRow, lvlCol) = conn.value();
                }
                if(m_vSurfToLevelMap[srfCol].level == lev+1) {
                    if(m_vSurfToLevelMap[srfCol].levelLower == lev) {
                        const size_t lvlCol = m_vSurfToLevelMap[srfCol].indexLower;
                        (*lf.A)(lvlRow, lvlCol) = conn.value();
                    }
                }
                if(m_vSurfToLevelMap[srfCol].level == lev-1){
                    const size_t lvlCol = m_vSurfToLevelMap[srfCol].index;
                    (lc.RimCpl_Fine_Coarse)(lvlRow, lvlCol) = conn.value();

                    if(m_bSmoothOnSurfaceRim){
                        lc.RimCpl_Coarse_Fine(lvlCol, lvlRow) = (*m_spSurfaceMat)(srfCol, srfRow);
                    }
                }

            }
        }

#ifdef UG_PARALLEL
        lc.RimCpl_Fine_Coarse.set_storage_type(m_spSurfaceMat->get_storage_mask());
        if(m_bSmoothOnSurfaceRim)
            lc.RimCpl_Coarse_Fine.set_storage_type(m_spSurfaceMat->get_storage_mask());
#endif
        write_debug(*lf.A, "LevelMatrix", *lf.st, *lf.st);
        write_debug(lc.RimCpl_Fine_Coarse, "RimCpl", *lf.sd, *lc.sc);
        if(m_bSmoothOnSurfaceRim)
            write_debug(lc.RimCpl_Coarse_Fine, "RimCpl", *lc.sd, *lf.sc);
    }

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - assemble_rim_cpl " << "\n");
}
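
//  Note (illustrative sketch, not from the original file): the rim coupling
//  matrices assembled above enter the algorithm during smoothing on the finer
//  level, cf. the smoothing routine at the end of this file:
//
//      lc.RimCpl_Coarse_Fine.matmul_minus(*lc.sd, *lf.st);
//
//  i.e. they carry exactly those cross-level couplings of the surface matrix
//  that a pure level matrix cannot represent in the adaptive case.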

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_rap_operator()
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_rap_operator\n");

    GMG_PROFILE_BEGIN(GMG_BuildRAP_ResizeLevelMat);
    for(int lev = m_topLev; lev >= m_baseLev; --lev)
    {
        LevData& ld = *m_vLevData[lev];
        if (m_bMatrixStructureIsConst)
            ld.A->clear_retain_structure();
        else
            ld.A->resize_and_clear(ld.st->size(), ld.st->size());
        #ifdef UG_PARALLEL
        ld.A->set_storage_type(m_spSurfaceMat->get_storage_mask());
        ld.A->set_layouts(ld.st->layouts());
        #endif
    }
    GMG_PROFILE_END();

    UG_DLOG(LIB_DISC_MULTIGRID, 4, "  start init_rap_operator: copy from surface\n");
    GMG_PROFILE_BEGIN(GMG_BuildRAP_CopyFromSurfaceMat);
    for(size_t srfRow = 0; srfRow < m_vSurfToLevelMap.size(); ++srfRow)
    {
    //  loop all connections of the surface dof to other surface dofs
    //  and copy the matrix coupling into the level matrix
        typedef typename matrix_type::const_row_iterator const_row_iterator;
        const_row_iterator conn = m_spSurfaceMat->begin_row(srfRow);
        const_row_iterator connEnd = m_spSurfaceMat->end_row(srfRow);
        for( ;conn != connEnd; ++conn)
        {
        //  get corresponding surface connection index
            const size_t srfCol = conn.index();

        //  get level
            int rowLevel = m_vSurfToLevelMap[srfRow].level;
            int colLevel = m_vSurfToLevelMap[srfCol].level;

        //  get corresponding indices
            size_t lvlRow = m_vSurfToLevelMap[srfRow].index;
            size_t lvlCol = m_vSurfToLevelMap[srfCol].index;

        //  if connection between different levels -> adjust
            UG_ASSERT(colLevel >= 0, "Invalid colLevel: "<<colLevel)
            UG_ASSERT(rowLevel >= 0, "Invalid rowLevel: "<<rowLevel)
            if(colLevel > rowLevel){
                if(m_vSurfToLevelMap[srfCol].levelLower == rowLevel){
                    lvlCol = m_vSurfToLevelMap[srfCol].indexLower;
                    colLevel = rowLevel;
                } else {
                    continue;
                }
            } else if(colLevel < rowLevel){
                if(m_vSurfToLevelMap[srfRow].levelLower == colLevel){
                    lvlRow = m_vSurfToLevelMap[srfRow].indexLower;
                    rowLevel = colLevel;
                } else {
                    continue;
                }
            }

        //  copy connection to level matrix
            (*(m_vLevData[colLevel]->A))(lvlRow, lvlCol) = conn.value();
        }
    }
    GMG_PROFILE_END();
    UG_DLOG(LIB_DISC_MULTIGRID, 4, "  end init_rap_operator: copy from surface\n");

//  write computed level matrices for debug purpose
    for(int lev = m_baseLev; lev <= m_topLev; ++lev){
        LevData& ld = *m_vLevData[lev];
        write_debug(*ld.A, "LevelMatrixFromSurf", *ld.st, *ld.st);
    }

//  Create coarse level operators
    UG_DLOG(LIB_DISC_MULTIGRID, 4, "  start init_rap_operator: build rap\n");
    GMG_PROFILE_BEGIN(GMG_BuildRAP_AllLevelMat);
    for(int lev = m_topLev; lev > m_baseLev; --lev)
    {
        UG_DLOG(LIB_DISC_MULTIGRID, 4, "  start init_rap_operator: build rap on lev "<<lev<<"\n");
        LevData& lf = *m_vLevData[lev];
        LevData& lc = *m_vLevData[lev-1];

        SmartPtr<matrix_type> spA = lf.A;
        #ifdef UG_PARALLEL
    //  ghost-extended matrix and communication policy used below
    //  (declarations assumed; they were lost in the extracted listing)
        SmartPtr<matrix_type> spGhostA = make_sp(new matrix_type);
        ComPol_MatAddSetZero<matrix_type> cpMatAdd(spGhostA.get());
        if( !lf.t->layouts()->vertical_master().empty() ||
            !lf.t->layouts()->vertical_slave().empty())
        {
            GMG_PROFILE_BEGIN(GMG_BuildRAP_CopyNoghostToGhost);
            spGhostA->resize_and_clear(lf.t->size(), lf.t->size());
            spGhostA->set_layouts(lf.t->layouts());
            if(!lf.t->layouts()->vertical_master().empty()){
                copy_noghost_to_ghost(spGhostA, lf.A, lf.vMapPatchToGlobal);
            } else {
                *spGhostA = *lf.A;
            }
            GMG_PROFILE_END();

            GMG_PROFILE_BEGIN(GMG_BuildRAP_CollectAndSend);
            m_Com.send_data(spGhostA->layouts()->vertical_slave(), cpMatAdd);
            m_Com.receive_data(spGhostA->layouts()->vertical_master(), cpMatAdd);
            m_Com.communicate_and_resume();
            GMG_PROFILE_END();
            if(!m_bCommCompOverlap){
                GMG_PROFILE_BEGIN(GMG_BuildRAP_RecieveAndExtract_NoOverlap);
                m_Com.wait();
                GMG_PROFILE_END();
            }

            spA = spGhostA;
        }
        #endif

        GMG_PROFILE_BEGIN(GMG_BuildRAP_CreateRestrictionAndProlongation);
        SmartPtr<matrix_type> R = lf.Restriction->restriction(lc.st->grid_level(), lf.t->grid_level(), m_spApproxSpace);
        SmartPtr<matrix_type> P = lf.Prolongation->prolongation(lf.t->grid_level(), lc.st->grid_level(), m_spApproxSpace);
        GMG_PROFILE_END();

        #ifdef UG_PARALLEL
        if(m_bCommCompOverlap){
            GMG_PROFILE_BEGIN(GMG_BuildRAP_RecieveAndExtract_WithOverlap);
            m_Com.wait();
            GMG_PROFILE_END();
        }
        #endif

        GMG_PROFILE_BEGIN(GMG_BuildRAP_MultiplyRAP);
        AddMultiplyOf(*lc.A, *R, *spA, *P);
        GMG_PROFILE_END();
        UG_DLOG(LIB_DISC_MULTIGRID, 4, "  end init_rap_operator: build rap on lev "<<lev<<"\n");
    }
    GMG_PROFILE_END();
    UG_DLOG(LIB_DISC_MULTIGRID, 4, "  end init_rap_operator: build rap\n");

//  write computed level matrices for debug purpose
    for(int lev = m_baseLev; lev <= m_topLev; ++lev){
        LevData& ld = *m_vLevData[lev];
        write_debug(*ld.A, "LevelMatrix", *ld.st, *ld.st);
    }

    if(m_bGatheredBaseUsed)
    {
#ifdef UG_PARALLEL
        GMG_PROFILE_BEGIN(GMG_BuildRAP_CopyNoghostToGhost_GatheredBase);
        LevData& ld = *m_vLevData[m_baseLev];
        if(gathered_base_master()){

            if (m_bMatrixStructureIsConst)
                spGatheredBaseMat->clear_retain_structure();
            else
                spGatheredBaseMat->resize_and_clear(ld.t->size(), ld.t->size());
            copy_noghost_to_ghost(spGatheredBaseMat.template cast_dynamic<matrix_type>(), ld.A, ld.vMapPatchToGlobal);
        } else {
        //  non-masters use a temporary gathered matrix (allocation assumed)
            spGatheredBaseMat = make_sp(new MatrixOperator<matrix_type, vector_type>);
            *spGatheredBaseMat = *ld.A;
        }
        spGatheredBaseMat->set_storage_type(m_spSurfaceMat->get_storage_mask());
        spGatheredBaseMat->set_layouts(ld.t->layouts());
        GMG_PROFILE_END();

        GMG_PROFILE_BEGIN(GMG_BuildRAP_SendAndRevieve_GatheredBase);
    //  communication policy for the gathered matrix (declaration assumed)
        ComPol_MatAddSetZero<matrix_type> cpMatAdd(spGatheredBaseMat.get());
        m_Com.send_data(spGatheredBaseMat->layouts()->vertical_slave(), cpMatAdd);
        if(gathered_base_master())
            m_Com.receive_data(spGatheredBaseMat->layouts()->vertical_master(), cpMatAdd);
        m_Com.communicate();
        GMG_PROFILE_END();

        if(!gathered_base_master()){
            spGatheredBaseMat = SPNULL;
        }
#endif
    }

//  coarse grid contributions
    try{
        init_rap_rim_cpl();
    }
    UG_CATCH_THROW("GMG:init: Cannot init missing coarse grid coupling.");

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_rap_operator\n");
}

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_rap_rim_cpl()
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - init_rap_rim_cpl " << "\n");

//  clear matrices
    for(int lev = m_baseLev; lev <= m_topLev; ++lev){
        LevData& ld = *m_vLevData[lev];
        ld.RimCpl_Fine_Coarse.resize_and_clear(0,0);
        ld.RimCpl_Coarse_Fine.resize_and_clear(0,0);
    }

//  loop all levels to compute the missing contribution
    for(int lev = m_topLev; lev > m_LocalFullRefLevel; --lev)
    {
        LevData& lc = *m_vLevData[lev-1];
        LevData& lf = *m_vLevData[lev];

        lc.RimCpl_Fine_Coarse.resize_and_clear(lf.sd->size(), lc.sc->size());
        if(m_bSmoothOnSurfaceRim)
            lc.RimCpl_Coarse_Fine.resize_and_clear(lc.sd->size(), lf.sc->size());

        for(size_t i = 0; i < lf.vShadowing.size(); ++i)
        {
            const size_t lvlRow = lf.vShadowing[i];
            const size_t srfRow = lf.vSurfShadowing[i];

            typedef typename matrix_type::const_row_iterator const_row_iterator;
            const_row_iterator conn = m_spSurfaceMat->begin_row(srfRow);
            const_row_iterator connEnd = m_spSurfaceMat->end_row(srfRow);
            for( ;conn != connEnd; ++conn)
            {
                const size_t srfCol = conn.index();

                if(m_vSurfToLevelMap[srfCol].level == lev-1){
                    const size_t lvlCol = m_vSurfToLevelMap[srfCol].index;
                    (lc.RimCpl_Fine_Coarse)(lvlRow, lvlCol) = conn.value();

                    if(m_bSmoothOnSurfaceRim){
                        lc.RimCpl_Coarse_Fine(lvlCol, lvlRow) = (*m_spSurfaceMat)(srfCol, srfRow);
                    }
                }
            }
        }

#ifdef UG_PARALLEL
        lc.RimCpl_Fine_Coarse.set_storage_type(m_spSurfaceMat->get_storage_mask());
        if(m_bSmoothOnSurfaceRim)
            lc.RimCpl_Coarse_Fine.set_storage_type(m_spSurfaceMat->get_storage_mask());
#endif
        write_debug(lc.RimCpl_Fine_Coarse, "RimCpl", *lf.sd, *lc.sc);
        if(m_bSmoothOnSurfaceRim)
            write_debug(lc.RimCpl_Coarse_Fine, "RimCpl", *lc.sd, *lf.sc);
    }

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - init_rap_rim_cpl " << "\n");
}


template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_transfer()
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_transfer\n");

//  loop all levels
    for(int lev = m_baseLev+1; lev <= m_topLev; ++lev)
    {
        LevData& lf = *m_vLevData[lev];
        LevData& lc = *m_vLevData[lev-1];

    //  check if the same operator is used for prolongation and restriction
        bool bOneOperator = false;
        if(lf.Prolongation == lf.Restriction) bOneOperator = true;

        lf.Prolongation->set_levels(lc.st->grid_level(), lf.t->grid_level());
        lf.Prolongation->clear_constraints();
        for(size_t i = 0; i < m_spAss->num_constraints(); ++i){
            SmartPtr<IConstraint<TAlgebra> > pp = m_spAss->constraint(i);
            lf.Prolongation->add_constraint(pp);
        }
        lf.Prolongation->init();

        if(!bOneOperator){
            lf.Restriction->set_levels(lc.st->grid_level(), lf.t->grid_level());
            lf.Restriction->clear_constraints();
            for(size_t i = 0; i < m_spAss->num_constraints(); ++i){
                SmartPtr<IConstraint<TAlgebra> > pp = m_spAss->constraint(i);
                lf.Restriction->add_constraint(pp);
            }
            lf.Restriction->init();
        }
    }

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_transfer\n");
}

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_projection()
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_projection\n");

    for(int lev = m_baseLev+1; lev <= m_topLev; ++lev)
    {
        LevData& lf = *m_vLevData[lev];
        LevData& lc = *m_vLevData[lev-1];
        GridLevel gw_gl; enter_debug_writer_section(gw_gl, "ProjectionInit", lev);
        lf.Projection->set_levels(lc.st->grid_level(), lf.t->grid_level());
        lf.Projection->init();
        leave_debug_writer_section(gw_gl);
    }

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_projection\n");
}

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_smoother()
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_smoother\n");

//  Init smoother
    for(int lev = m_baseLev+1; lev <= m_topLev; ++lev)
    {
        LevData& ld = *m_vLevData[lev];

        UG_DLOG(LIB_DISC_MULTIGRID, 4, "  init_smoother: initializing pre-smoother on lev "<<lev<<"\n");
        bool success;
        GridLevel gw_gl; enter_debug_writer_section(gw_gl, "PreSmootherInit", lev);
        try {success = ld.PreSmoother->init(ld.A, *ld.sc);}
        UG_CATCH_THROW("GMG::init: Cannot init pre-smoother for level "<<lev);
        leave_debug_writer_section(gw_gl);
        if (!success)
            UG_THROW("GMG::init: Cannot init pre-smoother for level "<<lev);

        UG_DLOG(LIB_DISC_MULTIGRID, 4, "  init_smoother: initializing post-smoother on lev "<<lev<<"\n");
        if(ld.PreSmoother != ld.PostSmoother)
        {
            GridLevel gw_gl; enter_debug_writer_section(gw_gl, "PostSmootherInit", lev);
            try {success = ld.PostSmoother->init(ld.A, *ld.sc);}
            UG_CATCH_THROW("GMG::init: Cannot init post-smoother for level "<<lev);
            leave_debug_writer_section(gw_gl);
            if (!success)
                UG_THROW("GMG::init: Cannot init post-smoother for level "<<lev);
        }
    }

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_smoother\n");
}

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_base_solver()
{
    GMG_PROFILE_FUNC();
    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_base_solver\n");
    LevData& ld = *m_vLevData[m_baseLev];

//  check if a gathering base solver is required:
    if(m_bGatheredBaseUsed)
    {
    //  only init on the gathering proc
        if(!gathered_base_master()) return;

    //  create layout with only v-masters (v-slaves do not exist on the gathering proc)
    //  especially: remove h-layouts and set proc-comm to process-only
        #ifdef UG_PARALLEL
        SmartPtr<AlgebraLayouts> spOneProcLayout =
            SmartPtr<AlgebraLayouts>(new AlgebraLayouts);
        spOneProcLayout->clear();
        spOneProcLayout->vertical_master() = ld.t->layouts()->vertical_master();
        spOneProcLayout->proc_comm() = pcl::ProcessCommunicator(pcl::PCD_LOCAL);

        spGatheredBaseMat->set_layouts(spOneProcLayout);
        spGatheredBaseCorr->set_layouts(spOneProcLayout);

        ld.t->set_layouts(spOneProcLayout);
        #endif

    //  we init the base solver with the whole grid matrix
        if(!m_pSurfaceSol)
            *ld.t = 0;
        GridLevel gw_gl; enter_debug_writer_section(gw_gl, "GatheredBaseSolverInit", m_baseLev);
        if(!m_spBaseSolver->init(spGatheredBaseMat, *ld.t))
            UG_THROW("GMG::init: Cannot init base solver on baselevel "<< m_baseLev);
        leave_debug_writer_section(gw_gl);
    }
    else
    {
#ifdef UG_PARALLEL
        if(!ld.st->layouts()->master().empty() || !ld.st->layouts()->slave().empty())
            if(!m_spBaseSolver->supports_parallel())
                UG_THROW("GMG: Base level is distributed onto more than one process, "
                         "but the chosen base solver "<<m_spBaseSolver->name()<<
                         " does not support parallel solving. Choose a parallel"
                         " base solver or construct a situation where a single"
                         " process stores the whole base grid, to cure this issue.");
#endif

        if(!m_pSurfaceSol)
            *ld.st = 0;
        GridLevel gw_gl; enter_debug_writer_section(gw_gl, "BaseSolverInit", m_baseLev);
        if(!m_spBaseSolver->init(ld.A, *ld.st))
            UG_THROW("GMG::init: Cannot init base solver on baselevel "<< m_baseLev);
        leave_debug_writer_section(gw_gl);
    }

    UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_base_solver\n");
}

template <typename TDomain, typename TAlgebra>
template <typename TElem>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_index_mappings()
{
/* This method creates a cache for the transfer between surface and
 * level grid functions. Note that in the adaptive case the surface grid is
 * distributed over several levels (all elements that do not have children). But
 * even in the full refinement case the surface level and the top level may
 * not have the same index pattern, since a different sorting may be used (however,
 * the number of indices must be equal in the full-ref case).
 * In every case, each surface index has a corresponding index on some level
 * of the level grid functions. In this method this index is looked up and
 * stored in a vector (for fast access). I.e. for every surface index i, the
 * corresponding index in the level grid function is stored in m_vSurfToLevelMap[i]
 * together with the level of the grid function. Using this mapping, copying
 * between surface <-> levels is implemented. Note: When shadow-copies are present,
 * the dof manager usually assigns the same surface index to both the shadowing
 * element and its shadow-copy. Thus, there exists more than one corresponding
 * level index for such a surface index. In this case the shadowing index is used
 * in the mapping, since this index will have the correct value at the end of the
 * multigrid cycle, and at startup the projection to the level is necessary only
 * for the shadowing, because shadows will get their value by transfer between
 * the levels. In order to get the map this way, the surface loop below is only
 * performed on SURFACE_NONCOPY elements.
 */
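
/* Example (illustrative sketch, not from the original file): with the cached
 * map, copying between surface and level vectors reduces to plain indexed
 * assignments, cf. apply() above:
 *
 *     (*m_vLevData[lev]->sd)[vMap[i].levIndex] = d[vMap[i].surfIndex];
 *
 * using the per-level vSurfLevelMap entries built at the end of this method.
 */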

    PeriodicBoundaryManager* pbm = m_spApproxSpace->domain()->grid()->periodic_boundary_manager();
    ConstSmartPtr<SurfaceView> spSurfView = m_spApproxSpace->surface_view();

    std::vector<ConstSmartPtr<DoFDistribution> > vLevelDD(m_topLev+1);
    for(int lev = m_baseLev; lev <= m_topLev; ++lev)
        vLevelDD[lev] = m_spApproxSpace->dof_distribution(GridLevel(lev, m_GridLevelType, false));

    ConstSmartPtr<DoFDistribution> surfDD =
        m_spApproxSpace->dof_distribution(GridLevel(m_surfaceLev, GridLevel::SURFACE));

//  iterators for subset
    // \todo: The loop below should only be on SURFACE_NONCOPY, since the
    //        copy-shadows have the same indices as their shadowing. In such a
    //        case the child index (shadowing) must win. This could be realized by
    //        looping only non-copy elements. But it seems that on a process
    //        the shadow-copy may exist without its shadowing. In such a case
    //        the mapping is invalid. To fix this, the loop is extended temporarily
    //        below and doubling of appearance is checked.
    typedef typename DoFDistribution::traits<TElem>::const_iterator iter_type;
    iter_type iter = surfDD->begin<TElem>(SurfaceView::ALL);
    iter_type iterEnd = surfDD->end<TElem>(SurfaceView::ALL);

//  vector of indices
    std::vector<size_t> vSurfInd, vLevelInd;

//  loop all elements of type
    for( ; iter != iterEnd; ++iter)
    {
    //  get elem
        TElem* elem = *iter;

    //  ignore slaves (otherwise they would appear twice in the map)
        if (pbm && pbm->is_slave(elem)) continue;

    //  get elem level
        int level = m_spApproxSpace->domain()->grid()->get_level(elem);

        if (m_GridLevelType == GridLevel::SURFACE)
            level = m_topLev;

    //  check that the coarse grid covers the whole domain. If this is not the case,
    //  some surface indices are mapped to grid levels below baseLev. We
    //  do not allow that.
        if(level < m_baseLev)
            UG_THROW("GMG::init: Some index of the surface grid is located on "
                     "level "<<level<<", which is below the chosen baseLev "<<
                     m_baseLev<<". This is not allowed, since otherwise the "
                     "gmg correction would only affect parts of the domain. Use "
                     "gmg:set_base_level(lvl) to cure this issue.");

    //  remember minimal level that contains a surface index on this proc
        m_LocalFullRefLevel = std::min(m_LocalFullRefLevel, level);

    //  extract all algebra indices for the element on surface and level
        vLevelDD[level]->inner_algebra_indices(elem, vLevelInd);
        surfDD->inner_algebra_indices(elem, vSurfInd);
        UG_ASSERT(vSurfInd.size() == vLevelInd.size(), "Number of indices does not match.");

    //  extract shadows
        if(m_GridLevelType == GridLevel::LEVEL) {
            if(spSurfView->surface_state(elem).contains(SurfaceView::MG_SURFACE_RIM)){
                for(size_t i = 0; i < vSurfInd.size(); ++i){
                    m_vLevData[level]->vShadowing.push_back(vLevelInd[i]);
                    m_vLevData[level]->vSurfShadowing.push_back(vSurfInd[i]);
                }
            }
        }

    //  set mapping index
        for(size_t i = 0; i < vSurfInd.size(); ++i){
            if(spSurfView->surface_state(elem).contains(SurfaceView::MG_SHADOW_RIM_COPY)
                && (level != m_topLev)) {
                if(m_GridLevelType == GridLevel::LEVEL){
                    m_vSurfToLevelMap[vSurfInd[i]].indexLower = vLevelInd[i];
                    m_vSurfToLevelMap[vSurfInd[i]].levelLower = level;
                }
            } else {
                m_vSurfToLevelMap[vSurfInd[i]].index = vLevelInd[i];
                m_vSurfToLevelMap[vSurfInd[i]].level = level;
                m_vLevData[level]->vSurfLevelMap.push_back(SurfLevelMap(vLevelInd[i], vSurfInd[i]));
            }
        }
    }
}


template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_index_mappings()
{
    ConstSmartPtr<DoFDistribution> surfDD =
        m_spApproxSpace->dof_distribution(GridLevel(m_surfaceLev, GridLevel::SURFACE));

    m_vSurfToLevelMap.resize(0);
    m_vSurfToLevelMap.resize(surfDD->num_indices());
    for(int lev = m_baseLev; lev <= m_topLev; ++lev){
        m_vLevData[lev]->vShadowing.clear();
        m_vLevData[lev]->vSurfShadowing.clear();
        m_vLevData[lev]->vSurfLevelMap.clear();
    }
    m_LocalFullRefLevel = std::numeric_limits<int>::max();

    if(surfDD->max_dofs(VERTEX)) init_index_mappings<Vertex>();
    if(surfDD->max_dofs(EDGE))   init_index_mappings<Edge>();
    if(surfDD->max_dofs(FACE))   init_index_mappings<Face>();
    if(surfDD->max_dofs(VOLUME)) init_index_mappings<Volume>();

    if(m_baseLev > m_LocalFullRefLevel)
        UG_THROW("GMG: Base level "<<m_baseLev<<" does not cover whole grid. "
                 <<"Highest full-ref level is "<<m_LocalFullRefLevel);
}

template <typename TDomain, typename TAlgebra>
template <typename TElem>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_noghost_to_ghost_mapping( std::vector<size_t>& vNoGhostToGhostMap,
                               ConstSmartPtr<DoFDistribution> spNoGhostDD,
                               ConstSmartPtr<DoFDistribution> spGhostDD)
{
    typedef typename DoFDistribution::traits<TElem>::const_iterator iter_type;
    iter_type iter = spNoGhostDD->begin<TElem>();
    iter_type iterEnd = spNoGhostDD->end<TElem>();

//  vector of indices
    std::vector<size_t> vGhostInd, vNoGhostInd;

//  loop all elements of type
    for( ; iter != iterEnd; ++iter){
    //  get elem
        TElem* elem = *iter;

    //  extract all algebra indices for the element
        spGhostDD->inner_algebra_indices(elem, vGhostInd);
        spNoGhostDD->inner_algebra_indices(elem, vNoGhostInd);
        UG_ASSERT(vGhostInd.size() == vNoGhostInd.size(), "Number of indices does not match.");

    //  set mapping index
        for(size_t i = 0; i < vNoGhostInd.size(); ++i){
            vNoGhostToGhostMap[vNoGhostInd[i]] = vGhostInd[i];
        }
    }
}


template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_noghost_to_ghost_mapping(int lev)
{
    GMG_PROFILE_FUNC();

    LevData& ld = *m_vLevData[lev];
    std::vector<size_t>& vMapPatchToGlobal = ld.vMapPatchToGlobal;
    vMapPatchToGlobal.resize(0);

    ConstSmartPtr<DoFDistribution> spNoGhostDD = ld.st->dd();
    ConstSmartPtr<DoFDistribution> spGhostDD = ld.t->dd();

    if(spNoGhostDD == spGhostDD) return;

    vMapPatchToGlobal.resize(spNoGhostDD->num_indices());

    if(spNoGhostDD->max_dofs(VERTEX)) init_noghost_to_ghost_mapping<Vertex>(vMapPatchToGlobal, spNoGhostDD, spGhostDD);
    if(spNoGhostDD->max_dofs(EDGE))   init_noghost_to_ghost_mapping<Edge>(vMapPatchToGlobal, spNoGhostDD, spGhostDD);
    if(spNoGhostDD->max_dofs(FACE))   init_noghost_to_ghost_mapping<Face>(vMapPatchToGlobal, spNoGhostDD, spGhostDD);
    if(spNoGhostDD->max_dofs(VOLUME)) init_noghost_to_ghost_mapping<Volume>(vMapPatchToGlobal, spNoGhostDD, spGhostDD);
}


template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
copy_ghost_to_noghost(SmartPtr<GF> spVecTo,
                      ConstSmartPtr<GF> spVecFrom,
                      const std::vector<size_t>& vMapPatchToGlobal)
{
    GMG_PROFILE_FUNC();

    if(spVecTo == spVecFrom)
        UG_THROW("GMG::copy_ghost_to_noghost: Should not be called on equal GF.");

    if(spVecTo->dd() == spVecFrom->dd())
    {
        for(size_t i = 0; i < spVecTo->size(); ++i)
            (*spVecTo)[i] = (*spVecFrom)[i];
    }
    else
    {
        UG_ASSERT(vMapPatchToGlobal.size() == spVecTo->size(),
                  "Mapping range ("<<vMapPatchToGlobal.size()<<") != "
                  "To-Vec-Size ("<<spVecTo->size()<<")");

        for(size_t i = 0; i < vMapPatchToGlobal.size(); ++i)
            (*spVecTo)[i] = (*spVecFrom)[ vMapPatchToGlobal[i] ];
    }

    #ifdef UG_PARALLEL
    spVecTo->set_storage_type(spVecFrom->get_storage_mask());
    #endif
}

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
copy_noghost_to_ghost(SmartPtr<GF> spVecTo,
                      ConstSmartPtr<GF> spVecFrom,
                      const std::vector<size_t>& vMapPatchToGlobal)
{
    GMG_PROFILE_FUNC();

    if(spVecTo == spVecFrom)
        UG_THROW("GMG::copy_noghost_to_ghost: Should not be called on equal GF.");

    if(spVecTo->dd() == spVecFrom->dd())
    {
        for(size_t i = 0; i < spVecTo->size(); ++i)
            (*spVecTo)[i] = (*spVecFrom)[i];
    }
    else
    {
        UG_ASSERT(vMapPatchToGlobal.size() == spVecFrom->size(),
                  "Mapping domain ("<<vMapPatchToGlobal.size()<<") != "
                  "From-Vec-Size ("<<spVecFrom->size()<<")");

        for(size_t i = 0; i < vMapPatchToGlobal.size(); ++i)
            (*spVecTo)[ vMapPatchToGlobal[i] ] = (*spVecFrom)[i];
    }
    #ifdef UG_PARALLEL
    spVecTo->set_storage_type(spVecFrom->get_storage_mask());
    #endif
}
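
//  Note (illustrative sketch, not from the original file): the two vector copy
//  routines above are inverse gather/scatter operations over the same index map:
//
//      copy_noghost_to_ghost(ld.t,  ld.st, ld.vMapPatchToGlobal);  // scatter
//      copy_ghost_to_noghost(ld.st, ld.t,  ld.vMapPatchToGlobal);  // gather
//
//  keeping the no-ghost patch vector and the ghost-extended vector consistent.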

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
copy_noghost_to_ghost(SmartPtr<matrix_type> spMatTo,
                      ConstSmartPtr<matrix_type> spMatFrom,
                      const std::vector<size_t>& vMapPatchToGlobal)
{
    GMG_PROFILE_FUNC();
    UG_ASSERT(vMapPatchToGlobal.size() == spMatFrom->num_rows(),
              "Mapping domain ("<<vMapPatchToGlobal.size()<<") != "
              "From-Mat-Num-Rows ("<<spMatFrom->num_rows()<<")");
    UG_ASSERT(vMapPatchToGlobal.size() == spMatFrom->num_cols(),
              "Mapping domain ("<<vMapPatchToGlobal.size()<<") != "
              "From-Mat-Num-Cols ("<<spMatFrom->num_cols()<<")");

    for(size_t noghostFrom = 0; noghostFrom < vMapPatchToGlobal.size(); ++noghostFrom)
    {
        typedef typename matrix_type::const_row_iterator row_iter;
        row_iter conn = spMatFrom->begin_row(noghostFrom);
        row_iter connEnd = spMatFrom->end_row(noghostFrom);
        for(; conn != connEnd; ++conn)
        {
            const typename matrix_type::value_type& block = conn.value();
            size_t noghostTo = conn.index();

            (*spMatTo)(vMapPatchToGlobal[noghostFrom], vMapPatchToGlobal[noghostTo])
                = block;
        }
    }

    #ifdef UG_PARALLEL
    spMatTo->set_storage_type(spMatFrom->get_storage_mask());
    #endif
}


////////////////////////////////////////////////////////////////////////////////
// Init Level Data
////////////////////////////////////////////////////////////////////////////////

template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_level_memory(int baseLev, int topLev)
{
    GMG_PROFILE_FUNC();

    m_vLevData.resize(0);
    m_vLevData.resize(topLev+1);

    for(int lev = baseLev; lev <= topLev; ++lev)
    {
        m_vLevData[lev] = SmartPtr<LevData>(new LevData);
        LevData& ld = *m_vLevData[lev];

        GridLevel gl = GridLevel(lev, m_GridLevelType, false);
        ld.sc = SmartPtr<GF>(new GF(m_spApproxSpace, gl, false));
        ld.sd = SmartPtr<GF>(new GF(m_spApproxSpace, gl, false));
        ld.st = SmartPtr<GF>(new GF(m_spApproxSpace, gl, false));

        // TODO: all procs must call this, since an MPI group is created.
        //       Think about optimizing this
        #ifdef UG_PARALLEL
        GridLevel glGhosts = GridLevel(lev, m_GridLevelType, true);
        ld.t = SmartPtr<GF>(new GF(m_spApproxSpace, glGhosts, false));
        if( ld.t->layouts()->vertical_slave().empty() &&
            ld.t->layouts()->vertical_master().empty())
        #endif
        {
            ld.t = ld.st;
        }

    //  level matrix operator (allocation assumed; lost in the extracted listing)
        ld.A = SmartPtr<MatrixOperator<matrix_type, vector_type> >(
                new MatrixOperator<matrix_type, vector_type>);

        ld.PreSmoother = m_spPreSmootherPrototype->clone();
        if(m_spPreSmootherPrototype == m_spPostSmootherPrototype)
            ld.PostSmoother = ld.PreSmoother;
        else
            ld.PostSmoother = m_spPostSmootherPrototype->clone();

    //  let the smoothers know the grid level they are on if they need to know
    //  (cast target assumed: a level-aware preconditioner interface providing set_grid_level)
        SmartPtr<ILevelPreconditioner<TDomain, TAlgebra> > lpre =
            ld.PreSmoother.template cast_dynamic<ILevelPreconditioner<TDomain, TAlgebra> >();
        SmartPtr<ILevelPreconditioner<TDomain, TAlgebra> > lpost =
            ld.PostSmoother.template cast_dynamic<ILevelPreconditioner<TDomain, TAlgebra> >();
        if (lpre) lpre->set_grid_level(gl);
        if (lpost) lpost->set_grid_level(gl);

        ld.Projection = m_spProjectionPrototype->clone();

        ld.Prolongation = m_spProlongationPrototype->clone();
        if(m_spProlongationPrototype == m_spRestrictionPrototype)
            ld.Restriction = ld.Prolongation;
        else
            ld.Restriction = m_spRestrictionPrototype->clone();

        init_noghost_to_ghost_mapping(lev);
    }

    bool bHasVertMaster = false;
    bool bHasVertSlave = false;
    bool bHasHorrMaster = false;
    bool bHasHorrSlave = false;
    #ifdef UG_PARALLEL
    LevData& ld = *m_vLevData[baseLev];
    if( !ld.t->layouts()->vertical_master().empty()) bHasVertMaster = true;
    if( !ld.t->layouts()->vertical_slave().empty()) bHasVertSlave = true;
    if( !ld.t->layouts()->master().empty()) bHasHorrMaster = true;
    if( !ld.t->layouts()->slave().empty()) bHasHorrSlave = true;
    #endif
    bool bHasVertConn = bHasVertMaster || bHasVertSlave;
    bool bHasHorrConn = bHasHorrMaster || bHasHorrSlave;

    m_bGatheredBaseUsed = m_bGatheredBaseIfAmbiguous;

//  if no vert-interfaces are present (especially in serial or on a proc that
//  does not have parts of the base level), we cannot gather. In this case we
//  overrule the choice of the user.
//  Note: levels not containing any dof are skipped from computation anyway
    if(!bHasVertConn) m_bGatheredBaseUsed = false;

//  check if a parallel solver is available; if not, try to use gathered
    if(!m_bGatheredBaseUsed
        && bHasHorrConn
        && !m_spBaseSolver->supports_parallel())
    {
        if(!bHasVertConn)
            UG_THROW("GMG: base level distributed in parallel, without possibility"
                     " to gather on one process, but chosen base solver is not"
                     " parallel. Choose a parallel base solver.")

        m_bGatheredBaseUsed = true;
    }

//  check if a gathering base solver is possible: If some horizontal layouts are
//  given, we know that the grid is still distributed. But, if no
//  vertical layouts are present in addition, we cannot gather the vectors
//  on one proc.
    if(m_bGatheredBaseUsed){
        if(bHasVertMaster && bHasVertSlave)
            UG_THROW("GMG: Gathered base solver expects one proc to have all "
                     "v-masters and no v-slaves. Use gmg:set_gathered_base_solver_if_ambiguous(false)"
                     " to avoid this error.");

        if(!bHasVertConn && bHasHorrConn){
            UG_THROW("GMG: Base level distributed among processes and no possibility"
                     " of gathering (vert. interfaces) present, but a gathering"
                     " base solving is required. Use gmg:set_gathered_base_solver_if_ambiguous(false)"
                     " to avoid this error.");
        }

#ifdef UG_PARALLEL
        if(bHasVertSlave && (ld.t->layouts()->vertical_slave().num_interfaces() != 1))
            UG_THROW("GMG: Gathered base solver expects a v-slave containing "
                     "process to send all its values to exactly one master. But "
                     "more than one master detected. Use gmg:set_gathered_base_solver_if_ambiguous(false)"
                     " to avoid this error.");

        if(bHasVertSlave && (ld.t->layouts()->vertical_slave().num_interface_elements() != ld.t->size()))
            UG_THROW("GMG: Gathered base solver expects that all indices "
                     "are sent to exactly one master. But not all indices are "
                     "v-slaves. Use gmg:set_gathered_base_solver_if_ambiguous(false)"
                     " to avoid this error.");
#endif
    }

    if(m_bGatheredBaseUsed && gathered_base_master()){
        // note: we can safely call this here, since the dd has already been
        //       created in the level loop
        GridLevel glGhosts = GridLevel(baseLev, m_GridLevelType, true);
        spGatheredBaseCorr = SmartPtr<GF>(new GF(m_spApproxSpace, glGhosts, false));

    //  gathered base matrix operator (allocation assumed; lost in the extracted listing)
        spGatheredBaseMat = SmartPtr<MatrixOperator<matrix_type, vector_type> >(
                new MatrixOperator<matrix_type, vector_type>);
    } else {
        spGatheredBaseCorr = SPNULL;
        spGatheredBaseMat = SPNULL;
    }
}

template <typename TDomain, typename TAlgebra>
bool AssembledMultiGridCycle<TDomain, TAlgebra>::
gathered_base_master() const
{
    if(!m_bGatheredBaseUsed)
        UG_THROW("GMG: Should only be called when the gathered-base solver is used.")

    #ifdef UG_PARALLEL
    if(!m_vLevData[m_baseLev]->t->layouts()->vertical_master().empty())
        return true;
    #endif

    return false;
}

////////////////////////////////////////////////////////////////////////////////
// Cycle - Methods
////////////////////////////////////////////////////////////////////////////////

1685template <typename TDomain, typename TAlgebra>
1688{
1690 LevData& lf = *m_vLevData[lev];
1691 LevData& lc = *m_vLevData[lev-1];
1692
1693 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - presmooth on level "<<lev<<"\n");
1694 log_debug_data(lev, lf.n_restr_calls, "BeforePreSmooth");
1695 mg_stats_defect(*lf.sd, lev, mg_stats_type::BEFORE_PRE_SMOOTH);
1696
1697// PRESMOOTH
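// Each sweep computes t = B*d, updates the defect d := d - A*t and accumulates
// the correction c += t; the accumulation of the final sweep is deferred until
// after the v-interface communication (see "update last correction" below).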
1698 GMG_PROFILE_BEGIN(GMG_PreSmooth);
1699 try{
1700 // smooth several times
1701 for(int nu = 0; nu < m_numPreSmooth; ++nu)
1702 {
1703 // a) Compute t = B*d with some iterator B
1704 GridLevel gw_gl; enter_debug_writer_section(gw_gl, "PreSmoother", lev, lf.n_pre_calls, nu);
1705 if(!lf.PreSmoother->apply(*lf.st, *lf.sd))
1706 UG_THROW("GMG: Smoothing step "<<nu+1<<" on level "<<lev<<" failed.");
1707 leave_debug_writer_section(gw_gl);
1708
1709 // b) handle patch rim.
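 // (If rim smoothing is disabled, the correction at shadowing DoFs is
 // discarded; otherwise the coarse-level defect is updated with the rim
 // coupling so that both levels remain consistent.)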
1710 if(!m_bSmoothOnSurfaceRim){
1711 const std::vector<size_t>& vShadowing = lf.vShadowing;
1712 for(size_t i = 0; i < vShadowing.size(); ++i)
1713 (*lf.st)[ vShadowing[i] ] = 0.0;
1714 } else {
1715 if(lev > m_LocalFullRefLevel)
1716 lc.RimCpl_Coarse_Fine.matmul_minus(*lc.sd, *lf.st);
1717 // make sure each lc.sd has the same parallel storage type
1718 // (in case m_LocalFullRefLevel is not equal on every proc)
1719 #ifdef UG_PARALLEL
1720 else
1721 lc.sd->set_storage_type(PST_ADDITIVE);
1722 #endif
1723 }
1724
1725 // c) update the defect with this correction ...
1726 lf.A->apply_sub(*lf.sd, *lf.st);
1727
1728 // d) ... and add the correction to the overall correction
1729 if(nu < m_numPreSmooth-1)
1730 (*lf.sc) += (*lf.st);
1731 }
1732 }
1733 UG_CATCH_THROW("GMG: Pre-Smoothing on level "<<lev<<" failed.");
1734 lf.n_pre_calls++;
1735 GMG_PROFILE_END();
1736
1737 log_debug_data(lev, lf.n_restr_calls, "AfterPreSmooth_BeforeCom");
1738 mg_stats_defect(*lf.sd, lev, mg_stats_type::AFTER_PRE_SMOOTH);
1739 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - presmooth on level "<<lev<<"\n");
1740
1741// PARALLEL CASE:
1742 SmartPtr<GF> spD = lf.sd;
1743 #ifdef UG_PARALLEL
1744 ComPol_VecAddSetZero<vector_type> cpVecAdd(lf.t.get());
1745 if( !lf.t->layouts()->vertical_slave().empty() ||
1746 !lf.t->layouts()->vertical_master().empty())
1747 {
1748 // v-slaves indicate that parts of the parents are stored on a different proc.
1749 // The defect values must therefore be transferred to the v-masters, where
1750 // the parents reside and where the restriction is carried out. Thus we send
1751 // the defect (currently stored additive in the noghost-vectors) to the
1752 // v-masters and have to make sure that d is additive after this operation.
1753 // We do that by ensuring that it is additive-unique regarding v-masters and
1754 // v-slaves (v-slaves are set to 0).
1755 // We use a temporary vector including ghosts, such that the no-ghost defect
1756 // remains valid and can be used when the cycle comes back to this level.
1757
1758 GMG_PROFILE_BEGIN(GMG_Restrict_CopyNoghostToGhost);
1759 SetLayoutValues(&(*lf.t), lf.t->layouts()->vertical_master(), 0);
1760 copy_noghost_to_ghost(lf.t, lf.sd, lf.vMapPatchToGlobal);
1761 GMG_PROFILE_END();
1762
1763 GMG_PROFILE_BEGIN(GMG_Restrict_CollectAndSend);
1764 m_Com.send_data(lf.t->layouts()->vertical_slave(), cpVecAdd);
1765 m_Com.receive_data(lf.t->layouts()->vertical_master(), cpVecAdd);
1766 m_Com.communicate_and_resume();
1767 GMG_PROFILE_END();
1768 if(!m_bCommCompOverlap){
1769 GMG_PROFILE_BEGIN(GMG_Restrict_RecieveAndExtract_NoOverlap);
1770 m_Com.wait();
1771 GMG_PROFILE_END();
1772 }
1773
1774 spD = lf.t;
1775 }
1776 #endif
1777
1778 GMG_PROFILE_BEGIN(GMG_Restrict_OverlapedComputation);
1779// reset corr on coarse level
1780 lc.sc->set(0.0);
1781
1782// update last correction
1783 if(m_numPreSmooth > 0)
1784 (*lf.sc) += (*lf.st);
1785 GMG_PROFILE_END();
1786
1787 #ifdef UG_PARALLEL
1788 if(m_bCommCompOverlap){
1789 GMG_PROFILE_BEGIN(GMG_Restrict_RecieveAndExtract_WithOverlap);
1790 m_Com.wait();
1791 GMG_PROFILE_END();
1792 }
1793 #endif
1794
1795 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - restriction on level "<<lev<<"\n");
1796 log_debug_data(lev, lf.n_restr_calls, "BeforeRestrict");
1797
1798// RESTRICTION:
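// The restriction computes lc.sd = R * d, where d is the ghost-aware vector
// lf.t filled by the communication above whenever vertical interfaces exist.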
1799 GridLevel gw_gl; enter_debug_writer_section(gw_gl, "Restriction", lev, lf.n_restr_calls);
1800 GMG_PROFILE_BEGIN(GMG_Restrict_Transfer);
1801 try{
1802 lf.Restriction->do_restrict(*lc.sd, *spD);
1803 }
1804 UG_CATCH_THROW("GMG: Restriction from lev "<<lev<<" to "<<lev-1<<" failed.");
1805 GMG_PROFILE_END();
1806
1807// apply post processes
1808 GMG_PROFILE_BEGIN(GMG_Restrict_PostProcess);
1809 for(size_t i = 0; i < m_vspRestrictionPostProcess.size(); ++i)
1810 m_vspRestrictionPostProcess[i]->post_process(lc.sd);
1811 GMG_PROFILE_END();
1812 leave_debug_writer_section(gw_gl);
1813 lf.n_restr_calls++;
1814
1815 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - restriction on level "<<lev<<"\n");
1816}
1817
1818template <typename TDomain, typename TAlgebra>
1819void AssembledMultiGridCycle<TDomain, TAlgebra>::
1820prolongation_and_postsmooth(int lev)
1821{
1822 GMG_PROFILE_FUNC();
1823 LevData& lf = *m_vLevData[lev];
1824 LevData& lc = *m_vLevData[lev-1];
1825
1826 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - prolongation on level "<<lev<<"\n");
1827 log_debug_data(lev, lf.n_prolong_calls, "BeforeProlong");
1828
1829// ADAPTIVE CASE:
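// On levels above m_LocalFullRefLevel the level grid covers only a part of
// the surface grid, so coarse corrections at rim DoFs also belong to the
// surface solution and are added to it via the cached vSurfLevelMap.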
1830 if(lev > m_LocalFullRefLevel)
1831 {
1832 // write computed correction to surface
1833 GMG_PROFILE_BEGIN(GMG_AddCorrectionToSurface);
1834 try{
1835 const std::vector<SurfLevelMap>& vMap = lc.vSurfLevelMap;
1836 for(size_t i = 0; i < vMap.size(); ++i){
1837 (*m_pC)[vMap[i].surfIndex] += (*lc.sc)[vMap[i].levIndex];
1838 }
1839 }
1840 UG_CATCH_THROW("GMG::lmgc: Cannot add to surface.");
1841 GMG_PROFILE_END();
1842
1843 // in the adaptive case a small part of the coarse-grid coupling has
1844 // not been used to update the defect. In order to ensure that the
1845 // defect on this level still corresponds to the accumulated correction,
1846 // we add this contribution here.
1847 GMG_PROFILE_BEGIN(GMG_UpdateRimDefect);
1848 lc.RimCpl_Fine_Coarse.matmul_minus(*lf.sd, *lc.sc);
1849 GMG_PROFILE_END();
1850 }
1851 log_debug_data(lev, lf.n_prolong_calls, "AfterCoarseGridDefect");
1852
1853// PROLONGATE:
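// When vertical interfaces exist, the prolongation writes into the ghost-aware
// vector lf.t, so that the correction can afterwards be sent from the
// v-masters to the v-slaves.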
1854 GridLevel gw_gl; enter_debug_writer_section(gw_gl, "Prolongation", lev, lf.n_prolong_calls);
1855 SmartPtr<GF> spT = lf.st;
1856 #ifdef UG_PARALLEL
1857 if( !lf.t->layouts()->vertical_slave().empty() ||
1858 !lf.t->layouts()->vertical_master().empty())
1859 {
1860 spT = lf.t;
1861 }
1862 #endif
1863 GMG_PROFILE_BEGIN(GMG_Prolongate_Transfer);
1864 try{
1865 lf.Prolongation->prolongate(*spT, *lc.sc);
1866 }
1867 UG_CATCH_THROW("GMG: Prolongation from lev "<<lev-1<<" to "<<lev<<" failed.");
1868 GMG_PROFILE_END();
1869
1870// PARALLEL CASE:
1871#ifdef UG_PARALLEL
1872 if( !lf.t->layouts()->vertical_slave().empty() ||
1873 !lf.t->layouts()->vertical_master().empty())
1874 {
1875 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - copy_to_vertical_slaves\n");
1876
1877 // Receive values of correction for vertical slaves:
1878 // If there are vertical slaves/masters on this level, we now copy
1879 // the correction values from the v-master DoFs to the v-slave DoFs.
1880 GMG_PROFILE_BEGIN(GMG_Prolongate_SendAndRecieve);
1881 ComPol_VecCopy<vector_type> cpVecCopy(lf.t.get());
1882 m_Com.receive_data(lf.t->layouts()->vertical_slave(), cpVecCopy);
1883 m_Com.send_data(lf.t->layouts()->vertical_master(), cpVecCopy);
1884 m_Com.communicate();
1885 GMG_PROFILE_END();
1886
1887 GMG_PROFILE_BEGIN(GMG_Prolongate_GhostToNoghost);
1888 copy_ghost_to_noghost(lf.st, lf.t, lf.vMapPatchToGlobal);
1889 GMG_PROFILE_END();
1890
1891 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - copy_to_vertical_slaves\n");
1892 }
1893#endif
1894
1895// apply post processes
1896 GMG_PROFILE_BEGIN(GMG_Prolongate_PostProcess);
1897 for(size_t i = 0; i < m_vspProlongationPostProcess.size(); ++i)
1898 m_vspProlongationPostProcess[i]->post_process(lf.st);
1899 GMG_PROFILE_END();
1900
1901 leave_debug_writer_section(gw_gl);
1902
1903// add coarse grid correction: c := c + t
1904 GMG_PROFILE_BEGIN(GMG_AddCoarseGridCorrection);
1905 (*lf.sc) += (*lf.st);
1906 GMG_PROFILE_END();
1907
1908 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - prolongation on level "<<lev<<"\n");
1909 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - postsmooth on level "<<lev<<"\n");
1910 // log_debug_data(lev, lf.n_prolong_calls, "BeforePostSmooth");
1911
1912// POST-SMOOTH:
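// Each sweep first updates d := d - A*t for the previous correction t (on the
// first pass t is the coarse-grid correction added above), then computes a new
// t = B*d and accumulates c += t.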
1913 GMG_PROFILE_BEGIN(GMG_PostSmooth);
1914 try{
1915 // smooth several times
1916 for(int nu = 0; nu < m_numPostSmooth; ++nu)
1917 {
1918 // update defect
1919 lf.A->apply_sub(*lf.sd, *lf.st);
1920
1921 if(nu == 0){
1922 log_debug_data(lev, lf.n_prolong_calls, "BeforePostSmooth");
1923 mg_stats_defect(*lf.sd, lev, mg_stats_type::BEFORE_POST_SMOOTH);
1924 }
1925
1926 // a) Compute t = B*d with some iterator B
1927 GridLevel gw_gl; enter_debug_writer_section(gw_gl, "PostSmoother", lev, lf.n_post_calls, nu);
1928 if(!lf.PostSmoother->apply(*lf.st, *lf.sd))
1929 UG_THROW("GMG: Smoothing step "<<nu+1<<" on level "<<lev<<" failed.");
1930 leave_debug_writer_section(gw_gl);
1931
1932 // b) handle patch rim
1933 if(!m_bSmoothOnSurfaceRim){
1934 const std::vector<size_t>& vShadowing = lf.vShadowing;
1935 for(size_t i = 0; i < vShadowing.size(); ++i)
1936 (*lf.st)[ vShadowing[i] ] = 0.0;
1937 } else {
1938 if(lev > m_LocalFullRefLevel)
1939 lc.RimCpl_Coarse_Fine.matmul_minus(*lc.sd, *lf.st);
1940 }
1941
1942 // c) ... and add the correction to the overall correction
1943 (*lf.sc) += (*lf.st);
1944 }
1945 }
1946 UG_CATCH_THROW("GMG: Post-Smoothing on level "<<lev<<" failed.");
1947 GMG_PROFILE_END();
1948 lf.n_post_calls++;
1949
1950// update the defect if required. In full-ref case, the defect is not needed
1951// anymore, since it will be restricted anyway. For adaptive case, however,
1952// we must keep track of the defect on the surface.
1953// We also need it if we want to write stats or debug data
1954 if(lev >= m_LocalFullRefLevel || m_mgstats.valid() || m_spDebugWriter.valid()){
1955 GMG_PROFILE_BEGIN(GMG_UpdateDefectAfterPostSmooth);
1956 lf.A->apply_sub(*lf.sd, *lf.st);
1957 GMG_PROFILE_END();
1958 }
1959
1960 log_debug_data(lev, lf.n_prolong_calls, "AfterPostSmooth");
1961 mg_stats_defect(*lf.sd, lev, mg_stats_type::AFTER_POST_SMOOTH);
1962 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - postsmooth on level "<<lev<<"\n");
1963 lf.n_prolong_calls++;
1964}
1965
1966// performs the base solving
1967template <typename TDomain, typename TAlgebra>
1968void AssembledMultiGridCycle<TDomain, TAlgebra>::
1969base_solve(int lev)
1970{
1971 GMG_PROFILE_FUNC();
1972 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - base_solve on level "<<lev<<"\n");
1973
1974 try{
1975 LevData& ld = *m_vLevData[lev];
1976
1977 log_debug_data(lev, ld.n_base_calls, "BeforeBaseSolver");
1978
1979// SOLVE BASE PROBLEM
1980// Here we distinguish two possibilities:
1981// a) The coarse grid problem is solved in parallel, using a parallel solver
1982// b) First all vectors are gathered on one process, the problem is solved
1983// there, and the result is distributed again
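// For case b) the additive defect is summed onto the gathering master via the
// vertical interfaces, solved there serially, and the correction is then
// broadcast back to the v-slaves and marked PST_CONSISTENT on all processes.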
1984
1985// CASE a): We solve the problem in parallel (or normally for sequential code)
1986 if(!m_bGatheredBaseUsed)
1987 {
1988 UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: entering distributed basesolver branch.\n");
1989
1990 GMG_PROFILE_BEGIN(GMG_BaseSolver_Apply);
1991 GridLevel gw_gl; enter_debug_writer_section(gw_gl, "BaseSolver", lev, ld.n_base_calls);
1992 try{
1993 if(!m_spBaseSolver->apply(*ld.sc, *ld.sd))
1994 UG_THROW("GMG::lmgc: Base solver on base level "<<lev<<" failed.");
1995 }
1996 UG_CATCH_THROW("GMG: BaseSolver::apply failed. (case: a).")
1997 leave_debug_writer_section(gw_gl);
1998 GMG_PROFILE_END();
1999
2000 UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: exiting distributed basesolver branch.\n");
2001 }
2002
2003// CASE b): We gather the processes, solve on one proc and distribute again
2004 else
2005 {
2006 UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: entering gathered basesolver branch.\n");
2007
2008 // gather the defect
2009 #ifdef UG_PARALLEL
2010 if( !ld.t->layouts()->vertical_slave().empty() ||
2011 !ld.t->layouts()->vertical_master().empty())
2012 {
2013 GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Defect_CopyNoghostToGhost);
2014 SetLayoutValues(&(*ld.t), ld.t->layouts()->vertical_master(), 0);
2015 copy_noghost_to_ghost(ld.t, ld.sd, ld.vMapPatchToGlobal);
2016 GMG_PROFILE_END();
2017
2018 GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Defect_SendAndRecieve);
2019 ComPol_VecAddSetZero<vector_type> cpVecAdd(ld.t.get());
2020 m_Com.send_data(ld.t->layouts()->vertical_slave(), cpVecAdd);
2021 m_Com.receive_data(ld.t->layouts()->vertical_master(), cpVecAdd);
2022 m_Com.communicate();
2023 GMG_PROFILE_END();
2024 }
2025 #endif
2026
2027 // only solve on gathering master
2028 if(gathered_base_master())
2029 {
2030 UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: Start serial base solver.\n");
2031 GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Apply);
2032
2033 // Reset correction
2034 spGatheredBaseCorr->set(0.0);
2035
2036 // compute coarse correction
2037 GridLevel gw_gl; enter_debug_writer_section(gw_gl, "GatheredBaseSolver", lev, ld.n_base_calls);
2038 try{
2039 if(!m_spBaseSolver->apply(*spGatheredBaseCorr, *ld.t))
2040 UG_THROW("GMG::lmgc: Base solver on base level "<<lev<<" failed.");
2041 }
2042 UG_CATCH_THROW("GMG: BaseSolver::apply failed. (case: b).")
2043 leave_debug_writer_section(gw_gl);
2044
2045 GMG_PROFILE_END();
2046 UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG gathered base solver done.\n");
2047 }
2048
2049 // broadcast the correction
2050 #ifdef UG_PARALLEL
2051 SmartPtr<GF> spC = ld.sc;
2052 if(gathered_base_master()) spC = spGatheredBaseCorr;
2053
2054 GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Correction_SendAndRecieve);
2055 ComPol_VecCopy<vector_type> cpVecCopy(spC.get());
2056 m_Com.send_data(spC->layouts()->vertical_master(), cpVecCopy);
2057 m_Com.receive_data(spC->layouts()->vertical_slave(), cpVecCopy);
2058 m_Com.communicate();
2059 GMG_PROFILE_END();
2060 if(gathered_base_master()){
2061 GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Correction_CopyGhostToNoghost);
2062 spGatheredBaseCorr->set_storage_type(PST_CONSISTENT);
2063 copy_ghost_to_noghost(ld.sc, spGatheredBaseCorr, ld.vMapPatchToGlobal);
2064 GMG_PROFILE_END();
2065 } else {
2066 ld.sc->set_storage_type(PST_CONSISTENT);
2067 }
2068 #endif
2069 UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: exiting gathered basesolver branch.\n");
2070 }
2071
2072// update the defect if required. In full-ref case, the defect is not needed
2073// anymore, since it will be restricted anyway. For adaptive case, however,
2074// we must keep track of the defect on the surface
2075 if(lev >= m_LocalFullRefLevel){
2076 GMG_PROFILE_BEGIN(GMG_UpdateDefectAfterBaseSolver);
2077 ld.A->apply_sub(*ld.sd, *ld.sc);
2078 GMG_PROFILE_END();
2079 }
2080
2081 log_debug_data(lev, ld.n_base_calls, "AfterBaseSolver");
2082 ld.n_base_calls++;
2083 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - base_solve on level "<<lev<<"\n");
2084 }
2085 UG_CATCH_THROW("GMG: Base Solver failed.");
2086}
2087
2088// performs a multi grid cycle on the level
2089template <typename TDomain, typename TAlgebra>
2090void AssembledMultiGridCycle<TDomain, TAlgebra>::
2091lmgc(int lev, int cycleType)
2092{
2093 GMG_PROFILE_FUNC();
2094 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - lmgc on level "<<lev<<"\n");
2095
2096// if we are already on the base level, solve directly
2097 if(lev == m_baseLev) {
2098 base_solve(lev);
2099 return;
2100 } else if(lev < m_baseLev){
2101 UG_THROW("GMG::lmgc: call lmgc only for lev > baseLev.");
2102 }
2103
2104// presmooth and restrict
2105 try{
2106 presmooth_and_restriction(lev);
2107 }
2108 UG_CATCH_THROW("GMG::lmgc: presmooth-restriction failed on level "<<lev);
2109
2110 try{
2111// on base level, invert only once
2112 if(lev-1 == m_baseLev) {
2113 base_solve(lev-1);
2114 }
2115// F-cycle: one F-cycle on lev-1, followed by a V-cycle
2116 else if(cycleType == _F_){
2117 lmgc(lev-1, _F_);
2118 lmgc(lev-1, _V_);
2119 }
2120// V- or W- cycle, or gamma > 2
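// (cycleType is interpreted as the number gamma of recursive coarse-level
// visits; this loop relies on _V_ == 1 and _W_ == 2, while the F-cycle is
// handled separately above.)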
2121 else {
2122 for(int i = 0; i < cycleType; ++i)
2123 lmgc(lev-1, cycleType);
2124 }
2125 }
2126 UG_CATCH_THROW("GMG::lmgc: Linear multi-grid cycle on level "<<lev-1<<
2127 " failed. (BaseLev="<<m_baseLev<<", TopLev="<<m_topLev<<").");
2128
2129// prolongate, add coarse-grid correction and postsmooth
2130 try{
2131 prolongation_and_postsmooth(lev);
2132 }
2133 UG_CATCH_THROW("GMG::lmgc: prolongation-postsmooth failed on level "<<lev);
2134
2135 UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - lmgc on level "<<lev<<"\n");
2136}
2137
2138////////////////////////////////////////////////////////////////////////////////
2139// Debug Methods
2140////////////////////////////////////////////////////////////////////////////////
2141
2142template <typename TDomain, typename TAlgebra>
2143void AssembledMultiGridCycle<TDomain, TAlgebra>::
2144write_debug(ConstSmartPtr<GF> spGF, std::string name, int cycleNo)
2145{
2146 write_debug(*spGF, name, cycleNo);
2147}
2148
2149template <typename TDomain, typename TAlgebra>
2150void AssembledMultiGridCycle<TDomain, TAlgebra>::
2151write_debug(const GF& rGF, std::string name, int cycleNo)
2152{
2153 PROFILE_FUNC_GROUP("debug");
2154
2155 if(m_spDebugWriter.invalid()) return;
2156
2157// build name
2158 GridLevel gl = rGF.grid_level();
2159 std::stringstream ss;
2160 ss << "GMG_" << name << GridLevelAppendix(gl);
2161 ss << "_i" << std::setfill('0') << std::setw(3) << m_dbgIterCnt;
2162 if (cycleNo >= 0) ss << "_cycle" << std::setfill('0') << std::setw(3) << cycleNo;
2163 ss << ".vec";
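// resulting names look like "GMG_<name><levelAppendix>_i<iter>[_cycle<no>].vec"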
2164
2165// write
2166 GridLevel currGL = m_spDebugWriter->grid_level();
2167 m_spDebugWriter->set_grid_level(gl);
2168 m_spDebugWriter->write_vector(rGF, ss.str().c_str());
2169 m_spDebugWriter->set_grid_level(currGL);
2170}
2171
2172template <typename TDomain, typename TAlgebra>
2173void AssembledMultiGridCycle<TDomain, TAlgebra>::
2174write_debug(const matrix_type& mat, std::string name, const GF& rTo, const GF& rFrom)
2175{
2176 GridLevel glFrom = rFrom.grid_level();
2177 GridLevel glTo = rTo.grid_level();
2178 write_debug(mat, name, glTo, glFrom);
2179}
2180
2181template <typename TDomain, typename TAlgebra>
2182void AssembledMultiGridCycle<TDomain, TAlgebra>::
2183write_debug(const matrix_type& mat, std::string name, const GridLevel& glTo, const GridLevel& glFrom)
2184{
2185 PROFILE_FUNC_GROUP("debug");
2186
2187 if(m_spDebugWriter.invalid()) return;
2188
2189// build name
2190 std::stringstream ss;
2191 ss << "GMG_" << name << GridLevelAppendix(glTo);
2192 if(glFrom != glTo) ss << GridLevelAppendix(glFrom);
2193 ss << ".mat";
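// resulting names look like "GMG_<name><toLevelAppendix>[<fromLevelAppendix>].mat"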
2194
2195// write
2196 GridLevel currGL = m_spDebugWriter->grid_level();
2197 m_spDebugWriter->set_grid_levels(glFrom, glTo);
2198 m_spDebugWriter->write_matrix(mat, ss.str().c_str());
2199 m_spDebugWriter->set_grid_level(currGL);
2200}
2201
2202template <typename TDomain, typename TAlgebra>
2203void AssembledMultiGridCycle<TDomain, TAlgebra>::
2204enter_debug_writer_section(GridLevel& dw_orig_gl, const char * sec_name, int lev, int cycleNo, int callNo)
2205{
2206 PROFILE_FUNC_GROUP("debug");
2207
2208 if(m_spDebugWriter.invalid()) return;
2209
2210 dw_orig_gl = m_spDebugWriter->grid_level();
2211 m_spDebugWriter->set_grid_level(m_vLevData[lev]->sd->grid_level());
2212
2213 std::stringstream ss;
2214 ss << "GMG_" << sec_name;
2215 ss << "_i" << std::setfill('0') << std::setw(3) << m_dbgIterCnt;
2216 if (cycleNo >= 0) ss << "_cycle" << std::setfill('0') << std::setw(3) << cycleNo;
2217 ss << "_l" << lev;
2218 if (callNo >= 0) ss << "_call" << std::setfill('0') << std::setw(3) << callNo;
2219 m_spDebugWriter->enter_section(ss.str().c_str());
2220}
2221
2222template <typename TDomain, typename TAlgebra>
2223void AssembledMultiGridCycle<TDomain, TAlgebra>::
2224leave_debug_writer_section(GridLevel& dw_orig_gl)
2225{
2226 if(m_spDebugWriter.invalid()) return;
2227 m_spDebugWriter->leave_section();
2228 m_spDebugWriter->set_grid_level(dw_orig_gl);
2229}
2230
2231template <typename TDomain, typename TAlgebra>
2232void AssembledMultiGridCycle<TDomain, TAlgebra>::
2233log_debug_data(int lvl, int cycleNo, std::string name)
2234{
2235 if(m_spDebugWriter.valid()){
2236 std::string defName("Def_"); defName.append(name);
2237 std::string curName("Cor_"); curName.append(name);
2238 write_debug(m_vLevData[lvl]->sd, defName, cycleNo);
2239 write_debug(m_vLevData[lvl]->sc, curName, cycleNo);
2240 }
2241
2242 const bool bEnableSerialNorm = false;
2243 const bool bEnableParallelNorm = false;
2244
2245 if(!bEnableSerialNorm && !bEnableParallelNorm) return;
2246
2247 std::string prefix;
2248 if(lvl < (int)m_vLevData.size())
2249 prefix.assign(2 + 2 * (m_vLevData.size() - lvl), ' ');
2250 prefix.append(name).append(" on lev ").append(ToString(lvl)).append(": ");
2251
2252 LevData& ld = *m_vLevData[lvl];
2253 if(bEnableSerialNorm){
2254 UG_LOG(prefix << "local sd norm: " << sqrt(VecProd(*ld.sd, *ld.sd)) << std::endl);
2255 UG_LOG(prefix << "local sc norm: " << sqrt(VecProd(*ld.sc, *ld.sc)) << std::endl);
2256 }
2257 if(bEnableParallelNorm){
2258 #ifdef UG_PARALLEL
2259 uint oldStorageMask = ld.sd->get_storage_mask();
2260 number norm = ld.sd->norm();
2261 UG_LOG(prefix << "sd norm: " << norm << "\n");
2262 if(oldStorageMask & PST_ADDITIVE)
2263 ld.sd->change_storage_type(PST_ADDITIVE);
2264 else if(oldStorageMask & PST_CONSISTENT)
2265 ld.sd->change_storage_type(PST_CONSISTENT);
2266
2267 oldStorageMask = ld.sc->get_storage_mask();
2268 norm = ld.sc->norm();
2269 UG_LOG(prefix << "sc norm: " << norm << "\n");
2270 if(oldStorageMask & PST_ADDITIVE)
2271 ld.sc->change_storage_type(PST_ADDITIVE);
2272 else if(oldStorageMask & PST_CONSISTENT)
2273 ld.sc->change_storage_type(PST_CONSISTENT);
2274/*
2275 oldStorageMask = ld.st->get_storage_mask();
2276 norm = ld.st->norm();
2277 UG_LOG(prefix << "st norm: " << norm << "\n");
2278 if(oldStorageMask & PST_ADDITIVE)
2279 ld.st->change_storage_type(PST_ADDITIVE);
2280 else if(oldStorageMask & PST_CONSISTENT)
2281 ld.st->change_storage_type(PST_CONSISTENT);
2282
2283 oldStorageMask = ld.t->get_storage_mask();
2284 norm = ld.t->norm();
2285 UG_LOG(prefix << " t norm: " << norm << "\n");
2286 if(oldStorageMask & PST_ADDITIVE)
2287 ld.t->change_storage_type(PST_ADDITIVE);
2288 else if(oldStorageMask & PST_CONSISTENT)
2289 ld.t->change_storage_type(PST_CONSISTENT);
2290*/
2291 #endif
2292 }
2293}
2294
2295template <typename TDomain, typename TAlgebra>
2296void AssembledMultiGridCycle<TDomain, TAlgebra>::
2297mg_stats_defect(GF& gf, int lvl, typename mg_stats_type::Stage stage)
2298{
2299 if(m_mgstats.valid())
2300 m_mgstats->set_defect(gf, lvl, stage);
2301}
2302
2303template <typename TDomain, typename TAlgebra>
2304std::string
2305AssembledMultiGridCycle<TDomain, TAlgebra>::
2306config_string() const
2307{
2308 std::stringstream ss;
2309 ss << "GeometricMultigrid (";
2310 if(m_cycleType == _V_) ss << "V-Cycle";
2311 else if(m_cycleType == _W_) ss << "W-Cycle";
2312 else if(m_cycleType == _F_) ss << "F-Cycle";
2313 else ss << m_cycleType << "-Cycle";
2314 ss << ")\n";
2315
2316 if(m_spPreSmootherPrototype==m_spPostSmootherPrototype)
2317 ss << " Smoother (" << m_numPreSmooth << "x pre, " << m_numPostSmooth << "x post): "
2318 << ConfigShift(m_spPreSmootherPrototype->config_string());
2319 else
2320 {
2321 ss << " Presmoother (" << m_numPreSmooth << "x): " << ConfigShift(m_spPreSmootherPrototype->config_string());
2322 ss << " Postsmoother (" << m_numPostSmooth << "x): " << ConfigShift(m_spPostSmootherPrototype->config_string());
2323 }
2324 ss << "\n";
2325 ss << " Basesolver (Baselevel = " << m_baseLev << ", gathered base = " << (m_bGatheredBaseIfAmbiguous ? "true" : "false") << "): ";
2326 ss << ConfigShift(m_spBaseSolver->config_string());
2327 return ss.str();
2328
2329}
2330
2331} // namespace ug
2332
2333
2334#endif /* __H__UG__LIB_DISC__MULTI_GRID_SOLVER__MG_SOLVER_IMPL__ */