ug4
ug::ParallelNodes Class Reference

#include <parallel_nodes.h>

Classes

struct  NewSlaveNotification
 
struct  OverlapType
 

Public Member Functions

void add_new_layouts_to (IndexLayout &newMasterLayout, IndexLayout &newSlaveLayout)
 
void append_nodes_without_comm (size_t n)
 Appends master nodes (without communication). More...
 
pcl::InterfaceCommunicator< IndexLayout > & comm () const
 
void create_node (const AlgebraID &globalID, int pid)
 
void create_node (const AlgebraID &globalID, size_t localIndex, int pid)
 
void create_node (size_t localIndex, int pid)
 
size_t create_slave_node (const AlgebraID &globalID, int distanceToMasterOrInner)
 
size_t distance_to_master_or_inner (size_t i) const
 
size_t get_local_index_if_available (const AlgebraID &globalIndex, bool &bHasIndex) const
 returns a local index, either an existing one or a previously created and saved one More...
 
size_t get_local_index_or_create_new (const AlgebraID &globalIndex, int distanceToMasterOrInner)
 
size_t get_local_index_or_create_new (const AlgebraID &globalIndex, int distanceToMasterOrInner, bool &bCreated)
 get_index_or_create_new: returns a local index by creating and saving a new one or returning an existing one More...
 
size_t get_original_size ()
 
IndexLayout & get_total_master_layout ()
 
IndexLayout & get_total_slave_layout ()
 
size_t global_to_local (const AlgebraID &globalIndex) const
 
void insert_into_interface_sorted (std::vector< size_t > &v, IndexLayout::Interface &interface)
 
void insert_into_layout_sorted (std::map< int, std::set< size_t > > &m, IndexLayout &layout)
 
void insert_into_layout_sorted (std::map< int, std::vector< size_t > > &m, IndexLayout &layout)
 
bool is_inner (size_t i) const
 
bool is_master (size_t i) const
 
bool is_master_or_inner (size_t i) const
 
bool is_slave (size_t i) const
 
void issue (pcl::InterfaceCommunicator< IndexLayout > &communicator)
 
size_t local_size () const
 
const AlgebraID & local_to_global (size_t i) const
 
const IndexLayout & master_layout () const
 
size_t operator[] (const AlgebraID &globalIndex) const
 
const AlgebraID & operator[] (size_t i) const
 
const OverlapType & overlap_type (size_t i)
 returns the overlap type (inner, master, slave or distanceToMasterOrInner=X) More...
 
 ParallelNodes ()
 
 ParallelNodes (const ParallelNodes &)
 
 ParallelNodes (ConstSmartPtr< AlgebraLayouts > layout, size_t s)
 
void print () const
 
const pcl::ProcessCommunicator & proc_comm () const
 
void process ()
 
const IndexLayout & slave_layout () const
 
void sort_by_global_id (std::vector< size_t > &v)
 
void sort_interface (IndexLayout::Interface &interface)
 
void sort_layout (IndexLayout &layout)
 

Private Types

typedef std::map< int, BinaryBuffer > BufferMap
 
typedef std::map< AlgebraID, size_t >::const_iterator const_iterator
 
typedef std::map< AlgebraID, size_t >::iterator iterator
 

Private Member Functions

void create_mark_map (const IndexLayout &masterLayout)
 

Private Attributes

std::map< AlgebraID, size_t > m_globalToLocal
 
ConstSmartPtr< AlgebraLayouts > m_layout
 
std::vector< AlgebraID > m_localToGlobal
 
std::vector< OverlapType > m_OLtype
 
size_t m_originalSize
 
std::set< int > masterPIDs
 
std::map< int, std::set< size_t > > newMasters
 
std::map< int, std::vector< NewSlaveNotification > > newSlaveNotifications
 
std::map< int, std::set< size_t > > newSlaves
 
BufferMap notificationBufferMap
 
std::map< int, std::set< size_t > > notified
 map for marking nodes More...
 
std::set< int > slavePIDs
 
IndexLayout totalMasterLayout
 
IndexLayout totalSlaveLayout
 

Detailed Description

ParallelNodes is a class that provides the functionality needed when nodes are added on other processes. It is used especially in the construction of matrix overlaps, where it can happen that process A sends a matrix row to process B which contains connections to nodes on process C. We need to make sure that everyone ends up with the correct parallel connections afterwards, without requiring all-to-all communication.
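A minimal usage sketch based on the member functions documented below; the surrounding setup (layouts, received global IDs) is assumed, and the communicate() call on the interface communicator is an assumption about how the posted sends and receives are exchanged:

#include <parallel_nodes.h>
#include <vector>

// Sketch: resolve global IDs received from other processes and export the
// resulting new interfaces. Everything except the ParallelNodes members is
// illustrative.
void resolve_received_ids(ConstSmartPtr<ug::AlgebraLayouts> layouts,
                          size_t numLocalIndices,
                          const std::vector<ug::AlgebraID> &receivedIDs)
{
	ug::ParallelNodes PN(layouts, numLocalIndices);

	// map each received global ID to a local index; unknown IDs get a new
	// local index with distance 1 to the nearest master or inner node
	for(size_t k = 0; k < receivedIDs.size(); ++k)
		PN.get_local_index_or_create_new(receivedIDs[k], 1);

	// notify the owners of the new slave copies, exchange the buffers and
	// process the received notifications
	PN.issue(PN.comm());
	PN.comm().communicate();   // assumption: exchanges the posted sends/receives
	PN.process();

	// export the newly created master/slave interfaces
	ug::IndexLayout newMasterLayout, newSlaveLayout;
	PN.add_new_layouts_to(newMasterLayout, newSlaveLayout);
}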

Member Typedef Documentation

◆ BufferMap

typedef std::map<int, BinaryBuffer> ug::ParallelNodes::BufferMap
private

◆ const_iterator

typedef std::map<AlgebraID,size_t>::const_iterator ug::ParallelNodes::const_iterator
private

◆ iterator

typedef std::map<AlgebraID,size_t>::iterator ug::ParallelNodes::iterator
private

Constructor & Destructor Documentation

◆ ParallelNodes() [1/3]

ug::ParallelNodes::ParallelNodes ( )

◆ ParallelNodes() [2/3]

ug::ParallelNodes::ParallelNodes ( const ParallelNodes & )

◆ ParallelNodes() [3/3]

ug::ParallelNodes::ParallelNodes ( ConstSmartPtr< AlgebraLayouts >  layout,
size_t  s 
)

Member Function Documentation

◆ add_new_layouts_to()

void ug::ParallelNodes::add_new_layouts_to ( IndexLayout &  newMasterLayout,
IndexLayout &  newSlaveLayout 
)

ParallelNodes has created some new master and slave nodes; this function adds them to another pair of layouts, i.e. we communicate the changes to the outside world.

Parameters
newMasterLayout
newSlaveLayout

Referenced by ug::GenerateOverlapClass< matrix_type >::communicate().

◆ append_nodes_without_comm()

void ug::ParallelNodes::append_nodes_without_comm ( size_t  n)

Appends master nodes (without communication).

References pcl::ProcRank(), and UG_ASSERT.

◆ comm()

pcl::InterfaceCommunicator<IndexLayout>& ug::ParallelNodes::comm ( ) const
inline

References m_layout.

◆ create_mark_map()

◆ create_node() [1/3]

void ug::ParallelNodes::create_node ( const AlgebraID &  globalID,
int  pid 
)
inline
Parameters
globalID
pid
See also
create_node

References create_node(), and global_to_local().

◆ create_node() [2/3]

void ug::ParallelNodes::create_node ( const AlgebraID &  globalID,
size_t  localIndex,
int  pid 
)

Creates a new node in this ParallelNodes structure on process pid. Note that the node referred to by globalID has an owner process on which it is master, namely globalID.master_proc(). If we now create the node on a process which is not globalID.master_proc(), we need to inform the process globalID.master_proc() that it is getting a new slave copy. The process pid will notice by itself that it needs to add globalID to its slave layout towards globalID.master_proc(). This is especially necessary when sending matrix rows, since a process A can send a row to process B with connections to master nodes on a third process C.

Parameters
globalID
localIndex
pid  where the node is created
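
For illustration, a small sketch of the scenario described above (the helper function is hypothetical; only the ParallelNodes members are taken from this documentation):

// Process B receives a matrix row from process A containing a connection to a
// node owned by process C. Registering the copy on this process makes
// ParallelNodes record that the owner gid.master_proc() has to be notified.
void register_received_connection(ug::ParallelNodes &PN, const ug::AlgebraID &gid)
{
	PN.create_node(gid, pcl::ProcRank());   // the copy now exists on this process

	// after creation the global ID can be resolved to a local index
	size_t localIndex = PN.global_to_local(gid);
	(void)localIndex;
}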

References ug::find(), ug::LIB_ALG_MATRIX, ug::AlgebraID::master_proc(), pcl::ProcRank(), and UG_DLOG.

Referenced by create_node(), and ug::RowSendingScheme< matrix_type >::issue_send().

◆ create_node() [3/3]

void ug::ParallelNodes::create_node ( size_t  localIndex,
int  pid 
)
inline
Parameters
localIndex
pid
See also
create_node

References create_node(), and local_to_global().

◆ create_slave_node()

size_t ug::ParallelNodes::create_slave_node ( const AlgebraID &  globalID,
int  distanceToMasterOrInner 
)

When receiving nodes that become slaves, we call this function to make sure there is a slave interface to the master process.

Parameters
globalID  global ID of the possibly new slave
distanceToMasterOrInner  distance to the nearest master node
Returns
new local index

References ug::LIB_ALG_MATRIX, ug::AlgebraID::master_proc(), and UG_DLOG.

Referenced by ug::RowSendingScheme< matrix_type >::process().

◆ distance_to_master_or_inner()

size_t ug::ParallelNodes::distance_to_master_or_inner ( size_t  i) const
inline

◆ get_local_index_if_available()

size_t ug::ParallelNodes::get_local_index_if_available ( const AlgebraID &  globalIndex,
bool &  bHasIndex 
) const

returns a local index, either an existing one or a previously created and saved one

References pcl::ProcRank().

Referenced by ug::RowSendingScheme< matrix_type >::process().

◆ get_local_index_or_create_new() [1/2]

size_t ug::ParallelNodes::get_local_index_or_create_new ( const AlgebraID &  globalIndex,
int  distanceToMasterOrInner 
)

◆ get_local_index_or_create_new() [2/2]

size_t ug::ParallelNodes::get_local_index_or_create_new ( const AlgebraID &  globalIndex,
int  distanceToMasterOrInner,
bool &  bCreated 
)

get_index_or_create_new: returns a local index by creating and saving a new one or returning an existing one
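
For illustration, a small sketch of how the bCreated flag might be used (hypothetical usage; PN and gid are assumed to be given):

// Resolve a global ID to a local index; bCreated tells us whether a new node
// was created, e.g. so that local data structures can be resized afterwards.
size_t resolve(ug::ParallelNodes &PN, const ug::AlgebraID &gid)
{
	bool bCreated;
	size_t localIndex = PN.get_local_index_or_create_new(gid, 1, bCreated);
	if(bCreated)
	{
		// a new slave/overlap node appeared; vectors and matrix rows
		// typically have to be resized to PN.local_size()
	}
	return localIndex;
}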

References ug::LIB_ALG_MATRIX, pcl::ProcRank(), and UG_DLOG.

Referenced by ug::NewLayoutCreator::create_slave_node(), and ug::ReceiveMatrix().

◆ get_original_size()

size_t ug::ParallelNodes::get_original_size ( )
inline

References m_originalSize.

◆ get_total_master_layout()

IndexLayout& ug::ParallelNodes::get_total_master_layout ( )
inline

References totalMasterLayout.

◆ get_total_slave_layout()

IndexLayout& ug::ParallelNodes::get_total_slave_layout ( )
inline

References totalSlaveLayout.

◆ global_to_local()

size_t ug::ParallelNodes::global_to_local ( const AlgebraID &  globalIndex) const

◆ insert_into_interface_sorted()

void ug::ParallelNodes::insert_into_interface_sorted ( std::vector< size_t > &  v,
IndexLayout::Interface &  interface 
)

◆ insert_into_layout_sorted() [1/2]

void ug::ParallelNodes::insert_into_layout_sorted ( std::map< int, std::set< size_t > > &  m,
IndexLayout &  layout 
)

◆ insert_into_layout_sorted() [2/2]

void ug::ParallelNodes::insert_into_layout_sorted ( std::map< int, std::vector< size_t > > &  m,
IndexLayout &  layout 
)

◆ is_inner()

bool ug::ParallelNodes::is_inner ( size_t  i) const
inline

References m_OLtype.

◆ is_master()

bool ug::ParallelNodes::is_master ( size_t  i) const
inline

References m_OLtype.

◆ is_master_or_inner()

bool ug::ParallelNodes::is_master_or_inner ( size_t  i) const
inline

References m_OLtype.

◆ is_slave()

bool ug::ParallelNodes::is_slave ( size_t  i) const
inline

References m_OLtype.

◆ issue()

void ug::ParallelNodes::issue ( pcl::InterfaceCommunicator< IndexLayout > &  communicator)

Writes all "I have a new slave for your process" notifications into the send buffers, then issues

  • the sending of the send buffers
  • the receiving of the notifications

Parameters
communicator

References ug::BinaryBuffer::buffer(), ug::LIB_ALG_MATRIX, pcl::InterfaceCommunicator< TLayout >::receive_raw(), pcl::InterfaceCommunicator< TLayout >::send_raw(), ug::Serialize(), UG_DLOG, and ug::BinaryBuffer::write_pos().

Referenced by ug::RowSendingScheme< matrix_type >::issue_send().

◆ local_size()

◆ local_to_global()

◆ master_layout()

const IndexLayout& ug::ParallelNodes::master_layout ( ) const
inline

References m_layout.

◆ operator[]() [1/2]

size_t ug::ParallelNodes::operator[] ( const AlgebraID &  globalIndex) const
inline

References global_to_local().

◆ operator[]() [2/2]

const AlgebraID& ug::ParallelNodes::operator[] ( size_t  i) const
inline

References local_to_global().

◆ overlap_type()

const OverlapType& ug::ParallelNodes::overlap_type ( size_t  i)
inline

returns the overlap type (inner, master, slave or distanceToMasterOrInner=X)

References m_OLtype.

◆ print()

void ug::ParallelNodes::print ( ) const
inline

◆ proc_comm()

const pcl::ProcessCommunicator& ug::ParallelNodes::proc_comm ( ) const
inline

References m_layout.

◆ process()

void ug::ParallelNodes::process ( )

Call this after communication has been carried out. This function processes the received data: here, the notifications that another process now has a slave to our process, so we need to add the corresponding node to the master layout.

References ug::Deserialize(), ug::BinaryBuffer::eof(), ug::ParallelNodes::NewSlaveNotification::id, ug::AlgebraID::index_on_master(), ug::LIB_ALG_MATRIX, ug::AlgebraID::master_proc(), ug::ParallelNodes::NewSlaveNotification::newSlaveOnPID, pcl::ProcRank(), UG_ASSERT, UG_DLOG, and ug::BinaryBuffer::write_pos().

Referenced by ug::RowSendingScheme< matrix_type >::process().

◆ slave_layout()

const IndexLayout& ug::ParallelNodes::slave_layout ( ) const
inline

References m_layout.

◆ sort_by_global_id()

void ug::ParallelNodes::sort_by_global_id ( std::vector< size_t > &  v)
inline

◆ sort_interface()

void ug::ParallelNodes::sort_interface ( IndexLayout::Interface &  interface)

◆ sort_layout()

Member Data Documentation

◆ m_globalToLocal

std::map<AlgebraID, size_t> ug::ParallelNodes::m_globalToLocal
private

◆ m_layout

ConstSmartPtr<AlgebraLayouts> ug::ParallelNodes::m_layout
private

◆ m_localToGlobal

std::vector<AlgebraID> ug::ParallelNodes::m_localToGlobal
private

◆ m_OLtype

std::vector<OverlapType> ug::ParallelNodes::m_OLtype
private

◆ m_originalSize

size_t ug::ParallelNodes::m_originalSize
private

Referenced by get_original_size().

◆ masterPIDs

std::set<int> ug::ParallelNodes::masterPIDs
private

◆ newMasters

std::map<int, std::set<size_t> > ug::ParallelNodes::newMasters
private

◆ newSlaveNotifications

std::map<int, std::vector<NewSlaveNotification> > ug::ParallelNodes::newSlaveNotifications
private

◆ newSlaves

std::map<int, std::set<size_t> > ug::ParallelNodes::newSlaves
private

◆ notificationBufferMap

BufferMap ug::ParallelNodes::notificationBufferMap
private

◆ notified

std::map<int, std::set<size_t> > ug::ParallelNodes::notified
private

map for marking nodes

For each process we need a list of which of our master nodes exist on that process. This is important because we will sometimes need to add them to interfaces.

The map serves two functions:

  • knowing which processes have copies of our own master nodes, so that correct interfaces can be constructed
  • knowing which notifications have already been sent

In detail:

  • if i is a master node (that is, PN.local_to_global(i).master_proc() == pcl::ProcRank()), then i in notified[pid] means: process pid already knows that i is a slave node on its process, and this process knows that it holds the associated master. In other words, i is in a master interface on this process and in a slave interface on pid.
  • if i is not a master node, then i in notified[pid] means: we have already sent a notification to the owner of i (process PN.local_to_global(i).master_proc()) that process pid has a copy of i.
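
A sketch of the bookkeeping described above (this mirrors the described semantics, not the actual implementation):

#include <map>
#include <set>

// Mark that process 'pid' holds a copy of local node 'i' and report whether
// this is new information (illustrative only).
bool mark_copy(const ug::ParallelNodes &PN,
               std::map<int, std::set<size_t> > &notified,
               size_t i, int pid)
{
	if(!notified[pid].insert(i).second)
		return false;   // pid already knew about i, nothing to do

	if(PN.local_to_global(i).master_proc() == pcl::ProcRank())
	{
		// we own i: i belongs into a master interface towards pid
	}
	else
	{
		// we do not own i: send a NewSlaveNotification to the owner
		// PN.local_to_global(i).master_proc() that pid now has a copy of i
	}
	return true;
}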

◆ slavePIDs

std::set<int> ug::ParallelNodes::slavePIDs
private

◆ totalMasterLayout

IndexLayout ug::ParallelNodes::totalMasterLayout
private

Referenced by get_total_master_layout().

◆ totalSlaveLayout

IndexLayout ug::ParallelNodes::totalSlaveLayout
private

Referenced by get_total_slave_layout().


The documentation for this class was generated from the following files: