pcl_methods.h
/*
 * Copyright (c) 2009-2015: G-CSC, Goethe University Frankfurt
 * Author: Sebastian Reiter
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__PCL_METHODS__
#define __H__PCL_METHODS__

#include <vector>
#include <list>
#include <iostream>
#include <cassert>

//	Don't rely on mpi.h being included here.
//	It is only included so that we can define some constants.
//	This include will most likely be removed in future versions.
#include <mpi.h>
#include "pcl_comm_world.h"
#include "pcl_base.h"
#include "common/types.h"
#include "pcl_datatype.h"

namespace pcl
{

/// \addtogroup pcl
/// \{

///	a process id (rank) within a communicator
typedef int ProcID;

//	ReduceOperation: thin aliases for the MPI reduce operations
#define PCL_RO_MAX MPI_MAX
#define PCL_RO_MIN MPI_MIN
#define PCL_RO_SUM MPI_SUM
#define PCL_RO_PROD MPI_PROD
#define PCL_RO_LAND MPI_LAND
#define PCL_RO_BAND MPI_BAND
#define PCL_RO_LOR MPI_LOR
#define PCL_RO_BOR MPI_BOR
#define PCL_RO_LXOR MPI_LXOR
#define PCL_RO_BXOR MPI_BXOR
#define PCL_RO_MAXLOC MPI_MAXLOC
#define PCL_RO_MINLOC MPI_MINLOC

typedef MPI_Op ReduceOperation;


///	returns the time in seconds
double Time();
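
//	A minimal usage sketch: measuring the wall-clock time of a code section.
//
//		double tStart = pcl::Time();
//		// ... work to be measured ...
//		double tElapsed = pcl::Time() - tStart;	// elapsed time in seconds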

///	sends data to another process. The data may be received using ReceiveData.
void SendData(ProcID destProc, void* pBuffer, int bufferSize, int tag);

///	receives the data that was sent with SendData.
void ReceiveData(void* pBuffOut, ProcID srcProc, int bufferSize, int tag);
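
//	A minimal sketch of a matching SendData/ReceiveData pair. Buffer sizes are
//	assumed to be byte counts (as the void* signature suggests); PCL_COMM_WORLD
//	comes from pcl_comm_world.h, and the tag value 42 is an arbitrary example:
//
//		int rank;	MPI_Comm_rank(PCL_COMM_WORLD, &rank);
//		int val = 7;
//		if(rank == 0)
//			pcl::SendData(1, &val, sizeof(int), 42);	// proc 0 sends to proc 1
//		else if(rank == 1)
//			pcl::ReceiveData(&val, 0, sizeof(int), 42);	// proc 1 receives from proc 0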

///	collects the data sent with SendData from the processes firstSendProc
///	to firstSendProc + numSendProcs, excluding the calling process thisProcID.
void CollectData(ProcID thisProcID, int firstSendProc, int numSendProcs,
				 void* pBuffer, int bufferSizePerProc, int tag);
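
//	A collector-side sketch: proc 0 gathers one int from each of the procs
//	1..4, which contribute through matching SendData calls. Byte-count sizes
//	and the tag value 0 are example assumptions:
//
//		const int numSendProcs = 4;
//		std::vector<int> buf(numSendProcs);	// one int per sending proc
//		pcl::CollectData(0, 1, numSendProcs, &buf[0], sizeof(int), 0);
//
//		// on each of the procs 1..4:
//		int myVal = 7;
//		pcl::SendData(0, &myVal, sizeof(int), 0);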

///	sends the data in the different sections of the buffer to the processes
///	firstRecProc to firstRecProc + numRecProcs; pBufferSegSizes holds the
///	size of each section.
void DistributeData(ProcID thisProcID, int firstRecProc, int numRecProcs,
					void* pBuffer, int* pBufferSegSizes, int tag);

///	sends the data in the different sections of the buffer to the processes
///	listed in pRecProcMap.
void DistributeData(ProcID thisProcID, int* pRecProcMap, int numRecProcs,
					void* pBuffer, int* pBufferSegSizes, int tag);
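
//	A sender-side sketch of the first DistributeData overload: proc 0 sends one
//	int to each of the procs 1..4. That each receiver picks up its segment
//	through a matching ReceiveData call is an assumption of this example:
//
//		const int numRecProcs = 4;
//		std::vector<int> buf(numRecProcs);	// one segment per receiving proc
//		std::vector<int> segSizes(numRecProcs, sizeof(int));
//		pcl::DistributeData(0, 1, numRecProcs, &buf[0], &segSizes[0], 0);
//
//		// assumed matching receive on each of the procs 1..4:
//		int myVal;
//		pcl::ReceiveData(&myVal, 0, sizeof(int), 0);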

///	reduces the data to a single buffer using the specified ReduceOperation
///	and distributes the result to all participating processes.
void AllReduce(void* sendBuf, void* recBuf, int count, DataType type,
			   ReduceOperation op);
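
//	A minimal sketch of a global sum over all processes. PCL_DT_INT is assumed
//	to be the int DataType alias from pcl_datatype.h:
//
//		int localCount = 1;		// per-process contribution
//		int globalCount = 0;
//		pcl::AllReduce(&localCount, &globalCount, 1, PCL_DT_INT, PCL_RO_SUM);
//		// globalCount now holds the sum over all processes, on every process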

//void StartWait(), StopWait();

///	profiled wrapper for ::MPI_Waitall (PROFILE_FUNC_GROUP comes from profiler.h)
inline void MPI_Waitall(int count, MPI_Request *array_of_requests, MPI_Status *array_of_statuses)
{
//	StartWait();
	PROFILE_FUNC_GROUP("mpi");
	::MPI_Waitall(count, array_of_requests, array_of_statuses);
//	StopWait();
}

///	waits for all given requests and stores the resulting statuses
inline void Waitall(std::vector<MPI_Request> &requests, std::vector<MPI_Status> &statuses)
{
//	StartWait();
	PROFILE_FUNC_GROUP("mpi");
	assert(requests.size() == statuses.size());
	if(requests.size() > 0)
		pcl::MPI_Waitall(requests.size(), &requests[0], &statuses[0]);
//	StopWait();
}

///	waits for all given requests, ignoring the resulting statuses
inline void Waitall(std::vector<MPI_Request> &requests)
{
//	StartWait();
	PROFILE_FUNC_GROUP("mpi");
	if(requests.size() > 0)
		pcl::MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
//	StopWait();
}

///	waits for all requests in both vectors, ignoring the resulting statuses
inline void Waitall(std::vector<MPI_Request> &requests, std::vector<MPI_Request> &requests2)
{
//	StartWait();
	PROFILE_FUNC_GROUP("mpi");
	if(requests.size() > 0) pcl::MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
	if(requests2.size() > 0) pcl::MPI_Waitall(requests2.size(), &requests2[0], MPI_STATUSES_IGNORE);
//	StopWait();
}

///	profiled wrapper for ::MPI_Wait
inline int MPI_Wait(MPI_Request *request, MPI_Status *status=MPI_STATUS_IGNORE)
{
//	StartWait();
	PROFILE_FUNC_GROUP("mpi");
	int i = ::MPI_Wait(request, status);
//	StopWait();
	return i;
}
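
//	A minimal sketch of using the wait wrappers with nonblocking MPI calls;
//	PCL_COMM_WORLD comes from pcl_comm_world.h, source rank 0 and tag 99 are
//	example values:
//
//		int buf[16];
//		std::vector<MPI_Request> requests(1);
//		MPI_Irecv(buf, 16, MPI_INT, 0, 99, PCL_COMM_WORLD, &requests[0]);
//		// ... post further nonblocking sends/receives ...
//		pcl::Waitall(requests);	// blocks (and profiles) until all requests completed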


/*inline int Irecv(void *buf, int count, MPI_Datatype datatype, int source,
				 int tag, MPI_Comm comm, MPI_Request *request)
{
	PROFILE_FUNC_GROUP("mpi");
	return ::MPI_Irecv(buf, count, datatype, source, tag, comm, request);
}

template<typename T>
inline int IRecv(T *buf, int count, int source, int tag, MPI_Comm comm, MPI_Request *request)
{
	PROFILE_FUNC_GROUP("mpi");
	return ::MPI_Irecv(buf, count, DataTypeTraits<T>::get_data_type(), source, tag, comm, request);
}*/

/// \}	// end group pcl

}// end of namespace

#endif