// Include guard for the GetFEM configuration header.
152 #ifndef GETFEM_CONFIG_H__
153 #define GETFEM_CONFIG_H__
// GETFEM_PARA_LEVEL selects the parallelisation mode; 0 (the default set
// here) is the purely sequential build.
// NOTE(review): the #endif matching this #if is not visible in this chunk.
163 #if !defined(GETFEM_PARA_LEVEL)
164 # define GETFEM_PARA_LEVEL 0
// Sequential defaults: "initialisation" only emits a trace message and
// finalisation is a no-op.  Both macros are redefined when GMM_USES_MPI
// is set (see the #if defined(GMM_USES_MPI) section of this header).
167 #define GETFEM_MPI_INIT(argc, argv) {GMM_TRACE1("Running sequential Getfem");}
168 #define GETFEM_MPI_FINALIZE {}
170 #if defined(GMM_USES_MPI)
// MPI build: override the sequential defaults.
//  - GMM_TRACE_MSG_MPI queries the rank of the emitting process so trace
//    output can be attributed to it (the remaining continuation lines of
//    this macro are not visible in this chunk).
//  - GETFEM_MPI_INIT calls MPI_Init and logs the parallelisation level
//    (its closing brace line is likewise not visible here).
//  - GETFEM_MPI_FINALIZE shuts MPI down via MPI_Finalize.
// No comments are interleaved below: the macro bodies use backslash
// continuations and must not be interrupted.
173 # undef GMM_TRACE_MSG_MPI
174 # define GMM_TRACE_MSG_MPI \
175 int mip_rk__; MPI_Comm_rank(MPI_COMM_WORLD, &mip_rk__); \
178 # undef GETFEM_MPI_INIT
179 # define GETFEM_MPI_INIT(argc, argv) { \
180 MPI_Init(&argc, &argv); \
181 GMM_TRACE1("Running parallelized Getfem level " << GETFEM_PARA_LEVEL); \
183 # undef GETFEM_MPI_FINALIZE
184 # define GETFEM_MPI_FINALIZE { MPI_Finalize(); }
196 using std::endl;
using std::cout;
using std::cerr;
197 using std::ends;
using std::cin;
// Parallelisation level above 1 requires a distributed sparse solver.
// Two identifiers are provided; MUMPS is the default unless the user
// pre-defines GETFEM_PARA_SOLVER.
// NOTE(review): the matching #endif / # endif directives are not visible
// in this chunk.
200 #if GETFEM_PARA_LEVEL > 1
205 # define MUMPS_PARA_SOLVER 1
206 # define SCHWARZADD_PARA_SOLVER 2
208 # ifndef GETFEM_PARA_SOLVER
209 # define GETFEM_PARA_SOLVER MUMPS_PARA_SOLVER
// MPI overload: returns the sum of `a` over all ranks of MPI_COMM_WORLD
// (MPI_Allreduce with MPI_SUM, element type deduced via gmm::mpi_type).
// NOTE(review): the `return b;` and closing brace of this function fall
// on source lines not visible in this chunk.
212 template <
typename T>
inline T MPI_SUM_SCALAR(T a) {
213 T b; MPI_Allreduce(&a,&b,1,gmm::mpi_type(a), MPI_SUM, MPI_COMM_WORLD);
// MPI overload: element-wise MPI_SUM reduction of a vector across all
// ranks.  The const_cast drops constness because the MPI C API requires a
// mutable send buffer; presumably W is copied back into V afterwards —
// the remaining lines of this function are not visible in this chunk.
217 template <
typename VECT>
inline void MPI_SUM_VECTOR(
const VECT &VV) {
218 VECT &V =
const_cast<VECT &
>(VV);
219 typedef typename gmm::linalg_traits<VECT>::value_type T;
// Temporary receive buffer for the reduced values.
220 std::vector<T> W(gmm::vect_size(V));
221 MPI_Allreduce((
void *)(&(V[0])), &(W[0]), gmm::vect_size(V),
222 gmm::mpi_type(T()), MPI_SUM, MPI_COMM_WORLD);
// MPI overload: element-wise MPI_MAX reduction of a vector across all
// ranks (same structure as MPI_SUM_VECTOR above, with MPI_MAX as the
// reduction operation).  Presumably W is copied back into V afterwards —
// the remaining lines of this function are not visible in this chunk.
226 template <
typename VECT>
inline void MPI_MAX_VECTOR(
const VECT &VV) {
227 VECT &V =
const_cast<VECT &
>(VV);
228 typedef typename gmm::linalg_traits<VECT>::value_type T;
// Temporary receive buffer for the reduced values.
229 std::vector<T> W(gmm::vect_size(V));
230 MPI_Allreduce((
void *)(&(V[0])), &(W[0]), gmm::vect_size(V),
231 gmm::mpi_type(T()), MPI_MAX, MPI_COMM_WORLD);
// MPI overload: broadcast scalar `a` from rank 0 to every rank of
// MPI_COMM_WORLD (in place; `a` is overwritten on non-root ranks).
// NOTE(review): the closing brace of this function is not visible in
// this chunk.
235 template <
typename T>
void MPI_BCAST0_SCALAR(T &a) {
236 MPI_Bcast((
void *)(&a), 1, gmm::mpi_type(a), 0, MPI_COMM_WORLD);
// MPI overload: broadcast a vector from rank 0 to every rank.  The
// const_cast allows the received data to be stored into the caller's
// (logically mutable) vector.
// NOTE(review): the tail of the MPI_Bcast argument list and the closing
// brace are not visible in this chunk.
239 template <
typename VECT>
inline void MPI_BCAST0_VECTOR(
const VECT &VV) {
240 VECT &V =
const_cast<VECT &
>(VV);
241 typedef typename gmm::linalg_traits<VECT>::value_type T;
242 MPI_Bcast((
void *)(&(V[0])), gmm::vect_size(V), gmm::mpi_type(T()), 0,
// MPI overload, two-argument form: MPI_SUM-reduce vector VV across all
// ranks, storing the reduced result into WW (source and destination are
// distinct, unlike the single-argument form above).  The const_casts
// exist because the MPI C API needs non-const buffer pointers.
// NOTE(review): the closing brace of this function is not visible in
// this chunk.
246 template <
typename VECT1,
typename VECT2>
247 inline void MPI_SUM_VECTOR(
const VECT1 &VV,
const VECT2 &WW) {
248 VECT1 &V =
const_cast<VECT1 &
>(VV);
249 VECT2 &W =
const_cast<VECT2 &
>(WW);
250 typedef typename gmm::linalg_traits<VECT1>::value_type T;
251 MPI_Allreduce((
void *)(&(V[0])), &(W[0]),
252 gmm::vect_size(V), gmm::mpi_type(T()),
253 MPI_SUM, MPI_COMM_WORLD);
256 inline bool MPI_IS_MASTER(
void)
257 {
int rk; MPI_Comm_rank(MPI_COMM_WORLD, &rk);
return !rk; }
// MPI overload for gmm::col_matrix< gmm::rsvector<T> >: sums a sparse
// matrix assembled in a distributed fashion so that afterwards every rank
// holds the complete (summed) matrix.  Column by column, the rank holding
// the most non-zero entries (the "column master") gathers the other
// ranks' contributions, then an owner is chosen per column and each
// column is broadcast from its owner.
// NOTE(review): several source lines of this function — including its
// signature line and some loop headers / closing braces — are not visible
// in this chunk, so parts of the commentary below are inferred; verify
// against the full file.
259 template <
typename T>
inline
261 typedef typename gmm::col_matrix< gmm::rsvector<T> > MAT;
// Drop constness: the summed result is stored back into the matrix.
262 MAT &MM =
const_cast<MAT &
>(M);
265 MPI_Comm_rank(MPI_COMM_WORLD, &rk);
266 MPI_Comm_size(MPI_COMM_WORLD, &nbp);
268 size_type nr = gmm::mat_nrows(MM), nc = gmm::mat_ncols(MM);
// all_nnz(i, j): number of non-zero entries of column i on rank j.
270 gmm::dense_matrix<int> all_nnz(nc, nbp);
271 std::vector<int> my_nnz(nc), owner(nc);
276 my_nnz[i] = gmm::nnz(gmm::mat_col(MM, i));
// Exchange the per-column non-zero counts between all ranks.
279 MPI_Allgather((
void *)(&(my_nnz[0])), nc, MPI_INT,
280 (
void *)(&(all_nnz(0,0))), nc, MPI_INT, MPI_COMM_WORLD);
283 std::vector<int> contributors(nc);
284 for (
int i = 0; i < nc; ++i) {
// The column master is the rank with the most entries in column i; every
// other rank is recorded as a contributor.
286 int column_master = -1, max_nnz = 0;
287 contributors.resize(0);
290 for (
int j = nbp-1; j >= 0; --j) {
292 if (rk != j) contributors.push_back(j);
293 if (column_master == -1 || all_nnz(i, j) > max_nnz)
294 { column_master = j; max_nnz = all_nnz(i, j); }
// The master receives the raw rsvector storage of each contributor...
298 if (column_master == rk) {
299 for (
int j = 0; j < int(contributors.size()); ++j) {
300 typename gmm::rsvector<T>::base_type_ &VV = V;
301 int si = all_nnz(i, contributors[j]);
303 MPI_Recv((
void *)(&(VV[0])),
304 si*
sizeof(gmm::elt_rsvector_<T>),
305 MPI_BYTE, contributors[j], 0,
306 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// ...while every non-master rank sends its column data to the master.
310 typename gmm::rsvector<T>::base_type_ &VV = MM.col(i);
311 MPI_Send((
void *)(&(VV[0])),
312 my_nnz[i]*
sizeof(gmm::elt_rsvector_<T>),
313 MPI_BYTE, column_master, 0,
// After the merge, recompute the counts; a column's owner is the rank
// that now holds data for it (0 when the column is empty locally).
322 my_nnz[i] =
gmm::nnz(gmm::mat_col(MM, i));
323 owner[i] = (my_nnz[i]) ? rk : 0;
325 MPI_SUM_VECTOR(my_nnz);
326 MPI_SUM_VECTOR(owner);
// Finally each owner broadcasts its columns to every rank; receivers
// resize their local column storage first.
333 typename gmm::rsvector<T>::base_type_ &VV = MM.col(i);
334 if (owner[i] != rk) VV.resize(my_nnz[i]);
335 MPI_Bcast((
void *)(&(VV[0])), my_nnz[i]*
sizeof(gmm::elt_rsvector_<T>),
336 MPI_BYTE, owner[i], MPI_COMM_WORLD);
// Generic MPI overload of MPI_SUM_SPARSE_MATRIX: copies the matrix into a
// col_matrix< rsvector<T> >, sums that copy across ranks via the
// specialised overload above, and presumably copies the result back into
// M — the copy lines and closing brace are not visible in this chunk.
// The GMM_WARNING2 alerts users to the extra copy cost.
341 template <
typename MAT>
inline void MPI_SUM_SPARSE_MATRIX(
const MAT &M) {
342 typedef typename gmm::linalg_traits<MAT>::value_type T;
343 int nbp; MPI_Comm_size(MPI_COMM_WORLD, &nbp);
345 size_type nr = gmm::mat_nrows(M), nc = gmm::mat_ncols(M);
346 gmm::col_matrix< gmm::rsvector<T> > MM(nr, nc);
347 GMM_WARNING2(
"MPI_SUM_SPARSE_MATRIX: A copy of the matrix is done. "
348 "To avoid it, adapt MPI_SUM_SPARSE_MATRIX to "
349 "your matrix type.");
351 MPI_SUM_SPARSE_MATRIX(MM);
/// Sequential fallback: with a single process the global sum of a scalar
/// is just the local value, which is returned unchanged.
template <typename T>
inline T MPI_SUM_SCALAR(T a) {
  return a;
}
/// Sequential fallback: an in-place cross-rank vector sum over a single
/// process leaves the vector untouched, so this is a deliberate no-op.
template <typename VECT>
inline void MPI_SUM_VECTOR(const VECT &) {}
/// Sequential fallback: the element-wise maximum over a single process is
/// the vector itself, so this is a deliberate no-op.
template <typename VECT>
inline void MPI_MAX_VECTOR(const VECT &) {}
/// Sequential fallback: broadcasting a scalar from rank 0 to itself is
/// the identity, so this is a deliberate no-op.
template <typename T>
void MPI_BCAST0_SCALAR(T &) {}
/// Sequential fallback: a rank-0 broadcast of a vector changes nothing on
/// a single process, so this is a deliberate no-op.
template <typename VECT>
inline void MPI_BCAST0_VECTOR(const VECT &) {}
/// Sequential fallback: no other rank holds a contribution to the sparse
/// matrix, so there is nothing to sum — deliberate no-op.
template <typename MAT>
inline void MPI_SUM_SPARSE_MATRIX(const MAT &) {}
// Sequential two-argument form of MPI_SUM_VECTOR: with one process the
// "reduction" of V into W presumably degenerates to a plain copy — only
// the const_cast of the destination is visible here; the opening brace,
// the copy statement and the closing brace fall on source lines not
// visible in this chunk.
361 template <
typename VECT1,
typename VECT2>
362 inline void MPI_SUM_VECTOR(
const VECT1 &V,
const VECT2 &WW)
364 VECT2 &W =
const_cast<VECT2 &
>(WW);
/// Sequential fallback: the single process is trivially the master.
inline bool MPI_IS_MASTER(void) {
  return true;
}
372 using bgeot::dim_type;
375 using bgeot::scalar_type;
376 using bgeot::complex_type;
377 using bgeot::long_scalar_type;
378 using bgeot::opt_long_scalar_type;
381 using bgeot::base_vector;
382 using bgeot::base_complex_vector;
383 using bgeot::base_matrix;
384 using bgeot::base_complex_matrix;
385 using bgeot::base_tensor;
386 using bgeot::base_complex_tensor;
390 #if defined(__GNUC__)
393 inline bool isnan(scalar_type x) {
return x != x; }
Definitions and typedefs for the namespace bgeot.
Multivariate polynomials.
tensor class, used in mat_elem computations.
sparse vector built upon std::vector.
size_type nnz(const L &l)
count the number of non-zero entries of a vector or matrix.
void copy(const L1 &l1, L2 &l2)
*/
void add(const L1 &l1, L2 &l2)
*/
gmm::uint16_type short_type
used as the common short integer type in the library
size_t size_type
used as the common size type in the library
GEneric Tool for Finite Element Methods.