// T is a single vector
template <class T>
void add_spec(const T &a, T &b,
              gmm::abstract_null_type, gmm::abstract_vector){
  gmm::add(a, b);
}
// T is a single matrix
template <class T>
void add_spec(const T &a, T &b,
              gmm::abstract_null_type, gmm::abstract_matrix){
  gmm::add(a, b);
}
// T is a vector of vectors or matrices
template <class T>
void add_list(const T &a, T &b){
  GMM_ASSERT2(a.size() == b.size(), "size mismatch");
  auto ita = begin(a);
  auto itb = begin(b);
  auto ita_end = end(a);
  for (; ita != ita_end; ++ita, ++itb) gmm::add(*ita, *itb);
}
// T is a vector of vectors
template <class T>
void add_spec(const T &a, T &b,
              gmm::abstract_vector, gmm::abstract_vector){
  add_list(a, b);
}
// T is a vector of matrices
template <class T>
void add_spec(const T &a, T &b,
              gmm::abstract_matrix, gmm::abstract_vector){
  add_list(a, b);
}
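The two trailing parameters are classic tag dispatch: the first tag describes the component type, the second the container. As a quick sketch of the tags gmm assigns (std::vector and gmm::dense_matrix are illustrative choices, not taken from this header):

#include <type_traits>
#include <vector>
#include "gmm/gmm.h"

// a std::vector of dense matrices carries (abstract_matrix, abstract_vector),
// so gen_add below routes it to the add_list overload
static_assert(std::is_same<gmm::linalg_traits<std::vector<double> >::linalg_type,
                           gmm::abstract_vector>::value, "vector tag");
static_assert(std::is_same<gmm::linalg_traits<gmm::dense_matrix<double> >::linalg_type,
                           gmm::abstract_matrix>::value, "matrix tag");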
// T is a single vector
template <class T>
void equal_resize_spec(T &a, const T &b,
                       gmm::abstract_null_type, gmm::abstract_vector){
  gmm::resize(a, gmm::vect_size(b));
}
// T is a single matrix
template <class T>
void equal_resize_spec(T &a, const T &b,
                       gmm::abstract_null_type, gmm::abstract_matrix){
  gmm::resize(a, gmm::mat_nrows(b), gmm::mat_ncols(b));
}
// T is a vector of vectors or matrices
template <class T>
void equal_resize_list(T &a, const T &b){
  GMM_ASSERT2(a.empty(), "the first list should still be empty");
  a.resize(b.size());
  auto ita = begin(a);
  auto itb = begin(b);
  auto ita_end = end(a);
  using Component = typename T::value_type;
  using AlgoC = typename gmm::linalg_traits<Component>::linalg_type;
  for (; ita != ita_end; ++ita, ++itb){
    equal_resize_spec(*ita, *itb, gmm::abstract_null_type{}, AlgoC{});
  }
}
// T is a vector of vectors
template <class T>
void equal_resize_spec(T &a, const T &b,
                       gmm::abstract_vector, gmm::abstract_vector){
  equal_resize_list(a, b);
}
// T is a vector of matrices
template <class T>
void equal_resize_spec(T &a, const T &b,
                       gmm::abstract_matrix, gmm::abstract_vector){
  equal_resize_list(a, b);
}
// Generic addition for gmm types as well as vectors of gmm types
template <class T>
void gen_add(const T &a, T &b){
  using AlgoT = typename gmm::linalg_traits<T>::linalg_type;
  using Component = typename T::value_type;
  using AlgoC = typename gmm::linalg_traits<Component>::linalg_type;
  detail::add_spec(a, b, AlgoC{}, AlgoT{});
}
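A usage sketch, assuming gen_add is visible in scope; the containers and sizes are illustrative:

#include <vector>
#include "gmm/gmm.h"

void gen_add_demo(){
  // two lists of 3x3 dense matrices: AlgoT = abstract_vector,
  // AlgoC = abstract_matrix, so the call goes through detail::add_list
  std::vector<gmm::dense_matrix<double> > a(4), b(4);
  for (auto &m : a) gmm::resize(m, 3, 3);
  for (auto &m : b) gmm::resize(m, 3, 3);
  gen_add(a, b);  // componentwise b[i] += a[i]
}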
// Resize 'a' to the same size as 'b'; 'a' can be a gmm vector or matrix,
// or a vector of gmm vectors or matrices
template <class T>
void equal_resize(T &a, const T &b){
  using AlgoT = typename gmm::linalg_traits<T>::linalg_type;
  using Component = typename T::value_type;
  using AlgoC = typename gmm::linalg_traits<Component>::linalg_type;
  detail::equal_resize_spec(a, b, AlgoC{}, AlgoT{});
}
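A sketch with illustrative sizes; the destination has to start out empty, since equal_resize_list asserts that:

#include <vector>

void equal_resize_demo(){
  std::vector<std::vector<double> > src(8, std::vector<double>(100));
  std::vector<std::vector<double> > dst;   // must be empty
  equal_resize(dst, src);  // dst now holds 8 vectors of size 100
}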
// Creates an empty copy of T on each thread; on destruction, the
// per-thread copies are accumulated back into the original object
template <class T>
class accumulated_distro {
  T& original;
  omp_distribute<T> distributed;  // one copy of T per thread

public:

  explicit accumulated_distro(T& l) : original{l}{
    if (distributed.num_threads() == 1) return;
    // thread 0 is skipped on purpose: it works on the original object
    for (size_type t = 1; t != distributed.num_threads(); ++t){
      equal_resize(distributed(t), original);
    }
  }

  operator T&(){
    if (distributed.num_threads() == 1 ||
        distributed.this_thread() == 0) return original;
    else return distributed;
  }

  T& operator=(const T &x){
    return distributed = x;
  }

  ~accumulated_distro(){
    if (distributed.num_threads() == 1) return;

    if (me_is_multithreaded_now()){
      // GMM_ASSERT1 is not thread-safe inside a parallel section
      cerr << "Accumulation distribution should not run in parallel";
      abort();
    }

    // collect pointers to the original and all per-thread copies
    auto to_add = vector<T*>{};
    to_add.push_back(&original);
    for (size_type t = 1; t != distributed.num_threads(); ++t){
      to_add.push_back(&distributed(t));
    }

    // pairwise tree reduction: on each pass, thread t adds copy 2t+1
    // into copy 2t, then the already-added copies are erased
    while (to_add.size() > 1){
      GETFEM_OMP_PARALLEL(
        auto i = distributed.this_thread() * 2;
        if (i + 1 < to_add.size()){
          auto &target = *to_add[i];
          auto &source = *to_add[i + 1];
          gen_add(source, target);
        }
      )
      // erase every second item, as it has already been accumulated
      for (auto it = begin(to_add);
           it != end(to_add) && next(it) != end(to_add);
           it = to_add.erase(next(it)));
    }
  }
};
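To see the shape of the reduction, here is the same pairwise scheme on plain ints, run serially as a sketch; in the destructor above, thread t handles index 2*t inside the parallel section:

#include <cstddef>
#include <iterator>
#include <vector>

void tree_reduce(std::vector<int*> &to_add){
  using std::begin; using std::end; using std::next;
  while (to_add.size() > 1){
    // the parallel step, done serially here
    for (std::size_t i = 0; i + 1 < to_add.size(); i += 2)
      *to_add[i] += *to_add[i + 1];   // stands in for gen_add(source, target)
    // drop every second pointer; its value is already accumulated
    for (auto it = begin(to_add);
         it != end(to_add) && next(it) != end(to_add);
         it = to_add.erase(next(it)));
  }
}

With four copies, the first pass computes 0 += 1 and 2 += 3 and keeps copies {0, 2}; the second pass computes 0 += 2, so the whole sum lands in the first element, which in the destructor is the original object.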
Summary of the entities referenced above:

accumulated_distro: takes a matrix or vector, or a vector of matrices or vectors, and creates an empty copy on each thread. Use this template class for any object you want to distribute to OpenMP threads.
GETFEM_OMP_PARALLEL(body): organizes a proper parallel omp section. Defined in getfem_omp.h, the tools for multithreaded, OpenMP and Boost based parallelization.
size_type (size_t): used as the common size type in the library. Defined in gmm_def.h, the basic definitions and tools of GMM, part of GetFEM, the GEneric Tool for Finite Element Methods.
void equal_resize(T &a, const T &b): resizes 'a' to the same size as 'b'.
void gen_add(const T &a, T &b): generic addition for gmm types as well as vectors of gmm types.
bool me_is_multithreaded_now(): whether the program is currently running in the parallel section.
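Putting the pieces together, a hedged usage sketch; the function, the vector name, and the loop body are illustrative, and the getfem namespace and header are assumptions rather than something shown in this listing:

#include <cstddef>
#include <vector>
#include "getfem/getfem_omp.h"  // GETFEM_OMP_PARALLEL; the header
                                // declaring accumulated_distro is assumed

void assemble_demo(std::vector<double> &residual){
  getfem::accumulated_distro<std::vector<double> > distro{residual};
  GETFEM_OMP_PARALLEL(
    // operator T& yields the original on thread 0 and the
    // per-thread copy on all other threads
    std::vector<double> &r = distro;
    for (std::size_t i = 0; i != r.size(); ++i) r[i] += 1.0;
  )
}  // leaving scope: ~accumulated_distro tree-reduces every copy into residual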