grid_function_impl.h
/*
 * Copyright (c) 2011-2015: G-CSC, Goethe University Frankfurt
 * Author: Andreas Vogel
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__UG__LIB_DISC__FUNCTION_SPACE__GRID_FUNCTION_IMPL__
#define __H__UG__LIB_DISC__FUNCTION_SPACE__GRID_FUNCTION_IMPL__

#include "grid_function.h"

// headers providing the classes used below (adaption surface grid function,
// attachment serialization)
#include "adaption_surface_grid_function.h"
#include "lib_grid/algorithms/serialization.h"

#ifdef UG_PARALLEL
	#include "pcl/pcl.h"
	#include "lib_grid/parallelization/util/compol_copy_attachment.h"
#endif

namespace ug{

////////////////////////////////////////////////////////////////////////////////
// GridFunction : init
////////////////////////////////////////////////////////////////////////////////

template <typename TDomain, typename TAlgebra>
GridFunction<TDomain, TAlgebra>::
GridFunction(SmartPtr<ApproximationSpace<TDomain> > spApproxSpace,
             SmartPtr<DoFDistribution> spDoFDistr, bool bManage)
{
	init(spApproxSpace, spDoFDistr, bManage);
};

template <typename TDomain, typename TAlgebra>
GridFunction<TDomain, TAlgebra>::
GridFunction(SmartPtr<ApproximationSpace<TDomain> > spApproxSpace, bool bManage)
{
	init(spApproxSpace, spApproxSpace->dof_distribution(GridLevel(GridLevel::TOP, GridLevel::SURFACE, false)), bManage);
};

template <typename TDomain, typename TAlgebra>
GridFunction<TDomain, TAlgebra>::
GridFunction(SmartPtr<ApproximationSpace<TDomain> > spApproxSpace, int level, bool bManage)
{
	init(spApproxSpace, spApproxSpace->dof_distribution(GridLevel(level, GridLevel::SURFACE, false)), bManage);
};

template <typename TDomain, typename TAlgebra>
GridFunction<TDomain, TAlgebra>::
GridFunction(SmartPtr<approximation_space_type> spApproxSpace, const GridLevel& gl, bool bManage)
{
	init(spApproxSpace, spApproxSpace->dof_distribution(gl), bManage);
};

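/*	A minimal usage sketch for the constructors above. It assumes a fully
 *	set-up approximation space held in a SmartPtr<ApproximationSpace<TDomain> >
 *	named 'approxSpace' (the name is illustrative only):
 *
 *		// grid function on the top surface level
 *		GridFunction<TDomain, TAlgebra> u(approxSpace);
 *
 *		// grid function on surface level 2 only
 *		GridFunction<TDomain, TAlgebra> uLvl(approxSpace, 2);
 *
 *		// grid function on an explicitly chosen grid level
 *		GridFunction<TDomain, TAlgebra> uGL(approxSpace, GridLevel(GridLevel::TOP, GridLevel::SURFACE, false));
 */
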
template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
init(SmartPtr<ApproximationSpace<TDomain> > spApproxSpace,
     SmartPtr<DoFDistribution> spDoFDistr, bool bManage)
{
	m_spApproxSpace = spApproxSpace;
	m_spDD = spDoFDistr;
	m_bManaged = bManage;
	m_bRedistribute = true;
	this->set_dof_distribution_info(m_spApproxSpace->dof_distribution_info());
	m_spAdaptGridFct = SPNULL;

//	check that valid arguments have been passed
	if(m_spDD.invalid()) UG_THROW("GridFunction: DoF Distribution is null.");
	if(m_spApproxSpace.invalid()) UG_THROW("GridFunction: ApproxSpace is null.");

//	check correct choice of compile-time algebra
	check_algebra();

//	resize the vector to the correct size
	resize_values(num_indices());

	if(bManage) {
	//	register as managed by the dof distribution
		m_spDD->manage_grid_function(*this);

	//	register to observe grid adaption
		register_at_adaption_msg_hub();
	}

#ifdef UG_PARALLEL
//	set layouts
	this->set_layouts(m_spDD->layouts());

//	set storage type
	this->set_storage_type(PST_UNDEFINED);
#endif
};

template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::check_algebra()
{
//	get blocksize of algebra
	const int blockSize = algebra_type::blockSize;

//	a) If the blocksize is fixed and > 1, grouped dofs are required.
	if(blockSize > 1 && !this->m_spDD->grouped())
	{
		UG_THROW("Fixed block algebra needs grouped dofs.");
	}
//	b) If the blocksize is variable, grouped dofs are required as well.
	else if (blockSize == AlgebraType::VariableBlockSize
			&& !this->m_spDD->grouped())
	{
		UG_THROW("Variable block algebra needs grouped dofs.");
	}
//	c) If the blocksize is 1, dofs must not be grouped. This allows us to
//	handle this case for any problem.
	else if (blockSize == 1 && this->m_spDD->grouped())
	{
		UG_THROW("block 1x1 algebra needs non-grouped dofs.");
	}
}

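/*	An illustration of the three cases above, assuming the standard CPU algebra
 *	types shipped with ug4: CPUAlgebra has blockSize == 1 and therefore needs a
 *	non-grouped DoF distribution (case c); a fixed block algebra such as
 *	CPUBlockAlgebra<3> has blockSize == 3 and needs grouped dofs (case a);
 *	CPUVariableBlockAlgebra reports AlgebraType::VariableBlockSize and needs
 *	grouped dofs as well (case b).
 */
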
template <typename TDomain, typename TAlgebra>
size_t
GridFunction<TDomain, TAlgebra>::
num_dofs(int fct, int si) const
{
	DoFCount dc = m_spDD->dof_count();
	dc.sum_values_over_procs();

	return dc.num(fct, si, DoFCount::UNIQUE_SS, DoFCount::UNIQUE_ES);
}

////////////////////////////////////////////////////////////////////////////////
// GridFunction : cloning
////////////////////////////////////////////////////////////////////////////////

template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
clone_pattern(const this_type& v)
{
//	init normally
	init(v.m_spApproxSpace, v.m_spDD, v.m_bManaged);
	enable_redistribution(v.redistribution_enabled());

#ifdef UG_PARALLEL
//	copy storage type and layouts
	this->set_storage_type(v.get_storage_mask());
	this->set_layouts(v.layouts());
#endif
};


template <typename TDomain, typename TAlgebra>
void GridFunction<TDomain, TAlgebra>::assign(const vector_type& v)
{
//	check size
	if(v.size() != vector_type::size())
		UG_THROW("GridFunction: Assigned vector has incorrect size.");

//	assign vector entries
	*(dynamic_cast<vector_type*>(this)) = v;
}

template <typename TDomain, typename TAlgebra>
void GridFunction<TDomain, TAlgebra>::assign(const this_type& v)
{
//	clone pattern
	clone_pattern(v);

//	copy values
	*(dynamic_cast<vector_type*>(this)) = *dynamic_cast<const vector_type*>(&v);
}

template <typename TDomain, typename TAlgebra>
GridFunction<TDomain, TAlgebra>*
GridFunction<TDomain, TAlgebra>::
virtual_clone_without_values() const
{
	GridFunction<TDomain, TAlgebra>* p =
		new GridFunction<TDomain, TAlgebra>(m_spApproxSpace, m_spDD, m_bManaged);
	p->enable_redistribution(redistribution_enabled());
	if(p->size() != this->size())
		p->resize(this->size());
#ifdef UG_PARALLEL
	p->set_layouts(this->layouts());
#endif

	return p;
}

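/*	Usage sketch for the cloning helpers above (variable names are illustrative
 *	only; 'u' denotes an existing, filled grid function):
 *
 *		GridFunction<TDomain, TAlgebra> v(approxSpace);
 *		v.clone_pattern(u);   // adopt u's ApproximationSpace/DoFDistribution, values untouched
 *		v.assign(u);          // clone the pattern and copy the values in one step
 */
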
////////////////////////////////////////////////////////////////////////////////
// GridFunction : dof distribution callbacks
////////////////////////////////////////////////////////////////////////////////

template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
resize_values(size_t s, number defaultValue)
{
//	remember old size
	const size_t oldSize = vector_type::size();

//	resize vector
	vector_type::resize_sloppy(s);

//	initialize the newly added entries with the default value
	for(size_t i = oldSize; i < s; ++i)
		this->operator[](i) = defaultValue;
}

template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
permute_values(const std::vector<size_t>& vIndNew)
{
//	check sizes
	if(vIndNew.size() != this->size())
		UG_THROW("GridFunction::permute_values: For a permutation the"
				 " index set must have same cardinality as vector.");

// \todo: avoid tmp vector, only copy values into new vector and use that one
//	create tmp vector
	vector_type vecTmp; vecTmp.resize(this->size());
#ifdef UG_PARALLEL
//	copy storage type and layouts
	vecTmp.set_storage_type(this->get_storage_mask());
	vecTmp.set_layouts(this->layouts());
#endif

//	loop indices and copy values
	for(size_t i = 0; i < vIndNew.size(); ++i)
		vecTmp[vIndNew[i]] = this->operator[](i);

//	copy tmp vector into this vector
	this->assign(vecTmp);
}
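
/*	Example for the permutation semantics above: vIndNew[i] is the new index of
 *	the value currently stored at index i. For vIndNew = {2, 0, 1}, the value
 *	at index 0 moves to index 2, the value at index 1 to index 0, and the value
 *	at index 2 to index 1.
 */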

template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
copy_values(const std::vector<std::pair<size_t, size_t> >& vIndexMap, bool bDisjunct)
{
//	disjunct case: copy directly
	if(bDisjunct)
		for(size_t i = 0; i < vIndexMap.size(); ++i)
			this->operator[](vIndexMap[i].second)
				= this->operator[](vIndexMap[i].first);
//	general case: buffer the source values first, then write the targets
	else {
		typedef typename vector_type::value_type value_type;
		std::vector<value_type> values;
		values.resize(vIndexMap[vIndexMap.size()-1].first);
		for(size_t i = 0; i < vIndexMap.size(); ++i){
			const size_t index = vIndexMap[i].first;
			if (index >= values.size()) values.resize(index+1);
			values[index] = this->operator[](index);
		}
		for(size_t i = 0; i < vIndexMap.size(); ++i)
			this->operator[](vIndexMap[i].second)
				= values[vIndexMap[i].first];
	}
}
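
/*	Example for copy_values: each pair (from, to) in vIndexMap copies the value
 *	stored at index 'from' to index 'to'. Passing bDisjunct == true performs
 *	the copy in place, which is only safe if no target index also occurs as a
 *	source index; otherwise the source values are buffered first, as in the
 *	else branch above.
 */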

////////////////////////////////////////////////////////////////////////////////
// GridFunction : grid adaption
////////////////////////////////////////////////////////////////////////////////

template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
register_at_adaption_msg_hub()
{
//	register function for grid adaption
	SPMessageHub msgHub = domain()->grid()->message_hub();
	m_spGridAdaptionCallbackID =
		msgHub->register_class_callback(this,
				&this_type::grid_changed_callback);

	m_spGridDistributionCallbackID =
		msgHub->register_class_callback(this,
				&this_type::grid_distribution_callback);
}

template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
grid_changed_callback(const GridMessage_Adaption& msg)
{
//	before adaption begins: copy values into grid attachments
	if(msg.adaption_begins()){
	//	prepare the adaption surface grid function
		m_spAdaptGridFct = SmartPtr<AdaptionSurfaceGridFunction<TDomain> >(
							new AdaptionSurfaceGridFunction<TDomain>(this->domain()));
		m_spAdaptGridFct->copy_from_surface(*this);
	}

//	before coarsening: restrict values
	if(msg.coarsening() && msg.step_begins()){
		#ifdef UG_PARALLEL
	//	Since ghosts may exist in a parallel environment and since those ghosts
	//	may be removed during coarsening, we have to make sure that the correct
	//	values are stored in those ghosts before restriction is performed.
		Grid& grid = *domain()->grid();
		typedef typename AdaptionSurfaceGridFunction<TDomain>::AValues AValues;
		if(m_spDDI->max_dofs(VERTEX)){
			ComPol_CopyAttachment<VertexLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
			pcl::InterfaceCommunicator<VertexLayout> com;
			com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
							  INT_V_SLAVE, INT_V_MASTER, compol);
			com.communicate();
		}
		if(m_spDDI->max_dofs(EDGE)){
			ComPol_CopyAttachment<EdgeLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
			pcl::InterfaceCommunicator<EdgeLayout> com;
			com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
							  INT_V_SLAVE, INT_V_MASTER, compol);
			com.communicate();
		}
		if(m_spDDI->max_dofs(FACE)){
			ComPol_CopyAttachment<FaceLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
			pcl::InterfaceCommunicator<FaceLayout> com;
			com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
							  INT_V_SLAVE, INT_V_MASTER, compol);
			com.communicate();
		}
		if(m_spDDI->max_dofs(VOLUME)){
			ComPol_CopyAttachment<VolumeLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
			pcl::InterfaceCommunicator<VolumeLayout> com;
			com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
							  INT_V_SLAVE, INT_V_MASTER, compol);
			com.communicate();
		}
		#endif
		m_spAdaptGridFct->do_restrict(msg);
	}

//	after refinement: prolongate values
	if(msg.refinement() && msg.step_ends()){
		m_spAdaptGridFct->prolongate(msg);
	}

//	at the end of adaption: copy values back into the algebra vector
	if(msg.adaption_ends())
	{
	//	all grid functions must resize to the current number of dofs
		resize_values(num_indices());

		#ifdef UG_PARALLEL
	//	set layouts
		this->set_layouts(m_spDD->layouts());
		#endif

		m_spAdaptGridFct->copy_to_surface(*this);
		m_spAdaptGridFct = SPNULL;
	}
}

template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
grid_distribution_callback(const GridMessage_Distribution& msg)
{
	PROFILE_FUNC();

	#ifdef UG_PARALLEL
	GridDataSerializationHandler& sh = msg.serialization_handler();

	switch(msg.msg()){
		case GMDT_DISTRIBUTION_STARTS: {
			if(redistribution_enabled()){
				m_preDistStorageType = this->get_storage_mask();
				if(!(this->has_storage_type(PST_CONSISTENT) || this->has_storage_type(PST_UNDEFINED))){
					this->change_storage_type(PST_CONSISTENT);
				}

				m_spAdaptGridFct = SmartPtr<AdaptionSurfaceGridFunction<TDomain> >(
									new AdaptionSurfaceGridFunction<TDomain>(this->domain(), false));
				m_spAdaptGridFct->copy_from_surface(*this);
				Grid& grid = *domain()->grid();

				typedef typename AdaptionSurfaceGridFunction<TDomain>::AValues AValues;

				if(m_spDDI->max_dofs(VERTEX)){
					ComPol_CopyAttachment<VertexLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
					pcl::InterfaceCommunicator<VertexLayout> com;
					com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
									  INT_V_SLAVE, INT_V_MASTER, compol);
					com.communicate();
					sh.add(GeomObjAttachmentSerializer<Vertex, AValues>::
									create(grid, m_spAdaptGridFct->value_attachment()));
				}
				if(m_spDDI->max_dofs(EDGE)){
					ComPol_CopyAttachment<EdgeLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
					pcl::InterfaceCommunicator<EdgeLayout> com;
					com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
									  INT_V_SLAVE, INT_V_MASTER, compol);
					com.communicate();
					sh.add(GeomObjAttachmentSerializer<Edge, AValues>::
									create(grid, m_spAdaptGridFct->value_attachment()));
				}
				if(m_spDDI->max_dofs(FACE)){
					ComPol_CopyAttachment<FaceLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
					pcl::InterfaceCommunicator<FaceLayout> com;
					com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
									  INT_V_SLAVE, INT_V_MASTER, compol);
					com.communicate();
					sh.add(GeomObjAttachmentSerializer<Face, AValues>::
									create(grid, m_spAdaptGridFct->value_attachment()));
				}
				if(m_spDDI->max_dofs(VOLUME)){
					ComPol_CopyAttachment<VolumeLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
					pcl::InterfaceCommunicator<VolumeLayout> com;
					com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
									  INT_V_SLAVE, INT_V_MASTER, compol);
					com.communicate();
					sh.add(GeomObjAttachmentSerializer<Volume, AValues>::
									create(grid, m_spAdaptGridFct->value_attachment()));
				}
			}
		}break;

		case GMDT_DISTRIBUTION_STOPS:
		{
			PROFILE_BEGIN(grid_func_distribution_stops)
		//	all grid functions must resize to the current number of dofs
			resize_values(num_indices());

		//	set layouts
			this->set_layouts(m_spDD->layouts());

			if(redistribution_enabled()){
				m_spAdaptGridFct->copy_to_surface(*this);
				m_spAdaptGridFct = SPNULL;

				if(m_preDistStorageType != this->get_storage_mask()){
					if((m_preDistStorageType & PST_ADDITIVE) == PST_ADDITIVE)
						this->change_storage_type(PST_ADDITIVE);
					else if((m_preDistStorageType & PST_UNIQUE) == PST_UNIQUE)
						this->change_storage_type(PST_UNIQUE);
					else{
						UG_THROW("Can't reestablish storage type!");
					}
				}
			}

			PROFILE_END();
		}break;

		default:
			break;
	}
	#endif
}

} // end namespace ug

#endif /* __H__UG__LIB_DISC__FUNCTION_SPACE__GRID_FUNCTION_IMPL__ */