GetFEM  5.4.3
bgeot_tensor.h
Go to the documentation of this file.
1 /* -*- c++ -*- (enables emacs c++ mode) */
2 /*===========================================================================
3 
4  Copyright (C) 2000-2020 Yves Renard
5 
6  This file is a part of GetFEM
7 
8  GetFEM is free software; you can redistribute it and/or modify it
9  under the terms of the GNU Lesser General Public License as published
10  by the Free Software Foundation; either version 3 of the License, or
11  (at your option) any later version along with the GCC Runtime Library
12  Exception either version 3.1 or (at your option) any later version.
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
16  License and GCC Runtime Library Exception for more details.
17  You should have received a copy of the GNU Lesser General Public License
18  along with this program; if not, write to the Free Software Foundation,
19  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
20 
21  As a special exception, you may use this file as it is a part of a free
22  software library without restriction. Specifically, if other files
23  instantiate templates or use macros or inline functions from this file,
24  or you compile this file and link it with other files to produce an
25  executable, this file does not by itself cause the resulting executable
26  to be covered by the GNU Lesser General Public License. This exception
27  does not however invalidate any other reasons why the executable file
28  might be covered by the GNU Lesser General Public License.
29 
30 ===========================================================================*/
31 
32 /**@file bgeot_tensor.h
33  @author Yves Renard <Yves.Renard@insa-lyon.fr>
34  @date October 09, 2000.
35  @brief tensor class, used in mat_elem computations.
36 */
37 #ifndef BGEOT_TENSOR_H__
38 #define BGEOT_TENSOR_H__
39 
40 #include "bgeot_small_vector.h"
41 #include "getfem/getfem_omp.h"
42 
43 
44 namespace bgeot {
45 
46  /* ********************************************************************* */
47  /* Class tensor<T>. */
48  /* ********************************************************************* */
49 
50  typedef size_t size_type;
51  typedef gmm::uint16_type short_type;
52 
53  class multi_index : public std::vector<size_type> {
54  public :
55 
56  void incrementation(const multi_index &m) {
57  iterator it = begin(), ite = end();
58  const_iterator itm = m.begin();
59  if (it != ite) {
60  ++(*it);
61  while (*it >= *itm && it != (ite-1)) { *it = 0; ++it; ++itm; ++(*it); }
62  } else resize(1);
63  }
64 
65  void reset() { std::fill(begin(), end(), 0); }
66 
67  inline bool finished(const multi_index &m) {
68  if (m.size() == 0)
69  return (size() == 1);
70  else
71  return ((*this)[size()-1] >= m[size()-1]);
72  }
73 
74  multi_index(size_t n) : std::vector<size_type>(n)
75  { std::fill(begin(), end(), size_type(0)); }
76  multi_index(size_type i, size_type j)
77  : std::vector<size_type>(2)
78  { (*this)[0] = i; (*this)[1] = j; }
79  multi_index(size_type i, size_type j, size_type k)
80  : std::vector<size_type>(3)
81  { (*this)[0] = i; (*this)[1] = j; (*this)[2] = k; }
82  multi_index(size_type i, size_type j, size_type k, size_type l)
83  : std::vector<size_type>(4)
84  { (*this)[0] = i; (*this)[1] = j; (*this)[2] = k; (*this)[3] = l; }
85 
86  multi_index() {}
87 
88  bool is_equal(const multi_index &m) const {
89  if (this->size() != m.size()) return false;
90  for (size_type i = 0; i < m.size(); ++i)
91  if (m[i] != (*this)[i]) return false;
92  return true;
93  }
94 
95  size_type total_size() const {
96  size_type s = 1;
97  for (size_type k = 0; k < this->size(); ++k) s *= (*this)[k];
98  return s;
99  }
100 
101  size_type memsize() const {
102  return std::vector<size_type>::capacity()*sizeof(size_type) +
103  sizeof(multi_index);
104  }
105  };
106 
107  inline std::ostream &operator <<(std::ostream &o,
108  const multi_index& mi) { /* a compiler ...*/
109  multi_index::const_iterator it = mi.begin(), ite = mi.end();
110  bool f = true;
111  o << "(";
112  for ( ; it != ite; ++it)
113  { if (!f) o << ", "; o << *it; f = false; }
114  o << ")";
115  return o;
116  }
117 
118  template<class T> class tensor : public std::vector<T> {
119  protected:
120 
121  multi_index sizes_;
122  multi_index coeff_;
123 
124  public:
125 
126  typedef typename std::vector<T>::size_type size_type;
127  typedef typename std::vector<T>::iterator iterator;
128  typedef typename std::vector<T>::const_iterator const_iterator;
129 
130  template<class CONT> inline const T& operator ()(const CONT &c) const {
131  typename CONT::const_iterator it = c.begin();
132  multi_index::const_iterator q = coeff_.begin(), e = coeff_.end();
133  multi_index::const_iterator qv = sizes_.begin();
134  size_type d = 0;
135  for ( ; q != e; ++q, ++it) {
136  d += (*q) * (*it);
137  GMM_ASSERT2(*it < *qv++, "Index out of range.");
138  }
139  return *(this->begin() + d);
140  }
141 
142  inline T& operator ()(size_type i, size_type j, size_type k,
143  size_type l) {
144  GMM_ASSERT2(order() == 4, "Bad tensor order.");
145  size_type d = coeff_[0]*i + coeff_[1]*j + coeff_[2]*k + coeff_[3]*l;
146  GMM_ASSERT2(d < size(), "Index out of range.");
147  return *(this->begin() + d);
148  }
149 
150  inline T& operator ()(size_type i, size_type j, size_type k) {
151  GMM_ASSERT2(order() == 3, "Bad tensor order.");
152  size_type d = coeff_[0]*i + coeff_[1]*j + coeff_[2]*k;
153  GMM_ASSERT2(d < size(), "Index out of range.");
154  return *(this->begin() + d);
155  }
156 
157  inline T& operator ()(size_type i, size_type j) {
158  GMM_ASSERT2(order() == 2, "Bad tensor order");
159  size_type d = coeff_[0]*i + coeff_[1]*j;
160  GMM_ASSERT2(d < size(), "Index out of range.");
161  return *(this->begin() + d);
162  }
163 
164  inline const T& operator ()(size_type i, size_type j, size_type k,
165  size_type l) const {
166  GMM_ASSERT2(order() == 4, "Bad tensor order.");
167  size_type d = coeff_[0]*i + coeff_[1]*j + coeff_[2]*k + coeff_[3]*l;
168  GMM_ASSERT2(d < size(), "Index out of range.");
169  return *(this->begin() + d);
170  }
171 
172  inline const T& operator ()(size_type i, size_type j,
173  size_type k) const {
174  GMM_ASSERT2(order() == 3, "Bad tensor order.");
175  size_type d = coeff_[0]*i + coeff_[1]*j + coeff_[2]*k;
176  GMM_ASSERT2(d < size(), "Index out of range.");
177  return *(this->begin() + d);
178  }
179 
180  inline const T& operator ()(size_type i, size_type j) const {
181  GMM_ASSERT2(order() == 2, "Bad tensor order.");
182  size_type d = coeff_[0]*i + coeff_[1]*j;
183  GMM_ASSERT2(d < size(), "Index out of range.");
184  return *(this->begin() + d);
185  }
186 
187  template<class CONT> inline T& operator ()(const CONT &c) {
188  typename CONT::const_iterator it = c.begin();
189  multi_index::iterator q = coeff_.begin(), e = coeff_.end();
190  size_type d = 0;
191  for ( ; q != e; ++q, ++it) d += (*q) * (*it);
192  GMM_ASSERT2(d < size(), "Index out of range.");
193  return *(this->begin() + d);
194  }
195 
196  inline size_type size() const { return std::vector<T>::size(); }
197  inline size_type size(size_type i) const { return sizes_[i]; }
198  inline const multi_index &sizes() const { return sizes_; }
199  inline size_type order() const { return sizes_.size(); }
200 
201  void init(const multi_index &c) {
202  auto it = c.begin();
203  size_type d = 1;
204  sizes_ = c; coeff_.resize(c.size());
205  auto p = coeff_.begin(), pe = coeff_.end();
206  for ( ; p != pe; ++p, ++it) { *p = d; d *= *it; }
207  this->resize(d);
208  }
209 
210  inline void init() { sizes_.resize(0); coeff_.resize(0); this->resize(1); }
211 
212  inline void init(size_type i) {
213  sizes_.resize(1); sizes_[0] = i; coeff_.resize(1); coeff_[0] = 1;
214  this->resize(i);
215  }
216 
217  inline void init(size_type i, size_type j) {
218  sizes_.resize(2); sizes_[0] = i; sizes_[1] = j;
219  coeff_.resize(2); coeff_[0] = 1; coeff_[1] = i;
220  this->resize(i*j);
221  }
222 
223  inline void init(size_type i, size_type j, size_type k) {
224  sizes_.resize(3); sizes_[0] = i; sizes_[1] = j; sizes_[2] = k;
225  coeff_.resize(3); coeff_[0] = 1; coeff_[1] = i; coeff_[2] = i*j;
226  this->resize(i*j*k);
227  }
228 
229  inline void init(size_type i, size_type j, size_type k, size_type l) {
230  sizes_.resize(4);
231  sizes_[0] = i; sizes_[1] = j; sizes_[2] = k; sizes_[3] = k;
232  coeff_.resize(4);
233  coeff_[0] = 1; coeff_[1] = i; coeff_[2] = i*j; coeff_[3] = i*j*k;
234  this->resize(i*j*k*l);
235  }
236 
237  inline void adjust_sizes(const multi_index &mi) { init(mi); }
238  inline void adjust_sizes() { init(); }
239  inline void adjust_sizes(size_type i) { init(i); }
240  inline void adjust_sizes(size_type i, size_type j) { init(i, j); }
241  inline void adjust_sizes(size_type i, size_type j, size_type k)
242  { init(i, j, k); }
243  inline void adjust_sizes(size_type i, size_type j, size_type k, size_type l)
244  { init(i, j, k, l); }
245 
246  inline size_type adjust_sizes_changing_last(const tensor &t, size_type P) {
247  const multi_index &mi = t.sizes_; size_type d = mi.size();
248  sizes_.resize(d); coeff_.resize(d);
249  if (d) {
250  std::copy(mi.begin(), mi.end(), sizes_.begin());
251  std::copy(t.coeff_.begin(), t.coeff_.end(), coeff_.begin());
252  size_type e = coeff_.back();
253  sizes_.back() = P;
254  this->resize(e*P);
255  return e;
256  } else {
257  this->resize(1);
258  return 1;
259  }
260  }
261 
262  inline void remove_unit_dim() {
263  if (sizes_.size()) {
264  size_type i = 0, j = 0;
265  for (; i < sizes_.size(); ++i)
266  if (sizes_[i] != 1) { sizes_[j]=sizes_[i]; coeff_[j]=coeff_[i]; ++j; }
267  if (!j) ++j;
268  sizes_.resize(j);
269  coeff_.resize(j);
270  }
271  }
272 
273  /** reduction of tensor t with respect to index ni with matrix m:
274  * t(...,j,...) <-- t(...,i,..) m(i, j)
275  */
276  void mat_reduction(const tensor &t, const gmm::dense_matrix<T> &m, int ni);
277  void mat_transp_reduction(const tensor &t, const gmm::dense_matrix<T> &m,
278  int ni);
279  /** mm(i,j) = t(i,j,k,l) * m(k,l); For order four tensor. */
280  void mat_mult(const gmm::dense_matrix<T> &m, gmm::dense_matrix<T> &mm);
281 
282  /** tt = t(...) * t2(...) */
283  void product(const tensor &t2, tensor &tt);
284  /** tt = t(...,k) * t2(k,...) */
285  void dot_product(const tensor &t2, tensor &tt);
286  void dot_product(const gmm::dense_matrix<T> &m, tensor &tt);
287  /** tt = t(...,k,l) * t2(k,l,...) */
288  void double_dot_product(const tensor &t2, tensor &tt);
289  void double_dot_product(const gmm::dense_matrix<T> &m, tensor &tt);
290 
291  size_type memsize() const {
292  return sizeof(T) * this->size()
293  + sizeof(*this) + sizes_.memsize() + coeff_.memsize();
294  }
295 
296  std::vector<T> &as_vector() { return *this; }
297  const std::vector<T> &as_vector() const { return *this; }
298 
299 
300  tensor<T>& operator +=(const tensor<T>& w)
301  { gmm::add(w.as_vector(), this->as_vector()); return *this; }
302 
303  tensor<T>& operator -=(const tensor<T>& w) {
304  gmm::add(gmm::scaled(w.as_vector(), T(-1)), this->as_vector());
305  return *this;
306  }
307 
308  tensor<T>& operator *=(const scalar_type w)
309  { gmm::scale(this->as_vector(), w); return *this; }
310 
311  tensor<T>& operator /=(const scalar_type w)
312  { gmm::scale(this->as_vector(), scalar_type(1)/w); return *this; }
313 
314  tensor &operator =(const tensor &t) {
315  if (this->size() != t.size()) this->resize(t.size());
316  std::copy(t.begin(), t.end(), this->begin());
317  if (sizes_.size() != t.sizes_.size()) sizes_.resize(t.sizes_.size());
318  std::copy(t.sizes_.begin(), t.sizes_.end(), sizes_.begin());
319  if (coeff_.size() != t.coeff_.size()) coeff_.resize(t.coeff_.size());
320  std::copy(t.coeff_.begin(), t.coeff_.end(), coeff_.begin());
321  return *this;
322  }
323 
324  tensor(const tensor &t)
325  : std::vector<T>(t), sizes_(t.sizes_), coeff_(t.coeff_) { }
326  tensor(const multi_index &c) { init(c); }
327  tensor(size_type i) = delete; // { init(i); }
328  tensor(size_type i, size_type j) { init(i, j); }
329  tensor(size_type i, size_type j, size_type k) { init(i, j, k); }
330  tensor(size_type i, size_type j, size_type k, size_type l)
331  { init(i, j, k, l); }
332  tensor() {}
333  };
334 
  /* Contraction of tensor t along index ni with the transpose of matrix m:
     from the iteration below, (*this)(..., k, ...) = sum_l t(..., l, ...)
     * m(k, l), i.e. m is applied by rows. t must be distinct from *this. */
  template<class T> void tensor<T>::mat_transp_reduction
  (const tensor &t, const gmm::dense_matrix<T> &m, int ni) {
    /* contraction of tensor t by its index ni and the transpose of matrix m. */

    // Scratch buffers; THREAD_SAFE_STATIC gives one instance per thread.
    THREAD_SAFE_STATIC std::vector<T> tmp;
    THREAD_SAFE_STATIC multi_index mi;

    mi = t.sizes();
    size_type dimt = mi[ni], dim = m.nrows();

    GMM_ASSERT2(dimt, "Inconsistent dimension.");
    GMM_ASSERT2(dimt == m.ncols(), "Dimensions mismatch.");
    GMM_ASSERT2(&t != this, "Does not work when t and *this are the same.");

    mi[ni] = dim;                  // the contracted index changes extent
    if (tmp.size() < dimt) tmp.resize(dimt);
    adjust_sizes(mi);

    const_iterator pft = t.begin();
    iterator pf = this->begin();
    // dd/ddt: jump needed to skip a whole run of index ni (minus the single
    // increment done by the loop); co/cot: strides of index ni.
    size_type dd = coeff_[ni]*( sizes()[ni]-1)-1, co = coeff_[ni];
    size_type ddt = t.coeff_[ni]*(t.sizes()[ni]-1)-1, cot = t.coeff_[ni];
    std::fill(mi.begin(), mi.end(), 0);
    for (;!mi.finished(sizes()); mi.incrementation(sizes()), ++pf, ++pft) {
      if (mi[ni] != 0) {
        // This ni-fiber was fully handled when mi[ni] was 0: fast-forward
        // the multi-index and both iterators past it.
        for (size_type k = 0; k <= size_type(ni); ++k)
          mi[k] = size_type(sizes()[k] - 1);
        pf += dd; pft += ddt;
      } else {
        // Gather the ni-fiber of t into the contiguous buffer tmp.
        const_iterator pl = pft; iterator pt = tmp.begin();
        *pt++ = *pl;
        for(size_type k = 1; k < dimt; ++k, ++pt) { pl += cot; *pt = *pl;}

        // For each output index k, accumulate row k of m against tmp
        // (pl steps by dim = nrows, i.e. across columns of m).
        iterator pff = pf;
        for (size_type k = 0; k < dim; ++k) {
          if (k) pff += co;
          *pff = T(0); pt = tmp.begin(); pl = m.begin() + k;
          *pff += (*pl) * (*pt); ++pt;
          for (size_type l = 1; l < dimt; ++l, ++pt) {
            pl += dim;
            *pff += (*pl) * (*pt);
          }
        }
      }
    }
  }
381 
382  template<class T> void tensor<T>::mat_mult(const gmm::dense_matrix<T> &m,
383  gmm::dense_matrix<T> &mm) {
384  GMM_ASSERT2(order() == 4,
385  "This operation is for order four tensors only.");
386  GMM_ASSERT2(sizes_[2] == gmm::mat_nrows(m) &&
387  sizes_[3] == gmm::mat_ncols(m), "Dimensions mismatch.");
388  mm.base_resize(sizes_[0], sizes_[1]);
389  gmm::clear(mm);
390 
391  const_iterator pt = this->begin();
392  const_iterator pm = m.begin();
393  for (size_type l = 0; l < sizes_[3]; ++l)
394  for (size_type k = 0; k < sizes_[2]; ++k) {
395  iterator pmm = mm.begin();
396  for (size_type j = 0; j < sizes_[1]; ++j)
397  for (size_type i = 0; i < sizes_[0]; ++i)
398  *pmm++ += *pt++ * (*pm);
399  ++pm;
400  }
401  }
402 
  /* Contraction of tensor t along index ni with matrix m:
     from the iteration below, (*this)(..., k, ...) = sum_l t(..., l, ...)
     * m(l, k), i.e. m is applied by columns. t must be distinct from *this. */
  template<class T> void tensor<T>::mat_reduction
  (const tensor &t, const gmm::dense_matrix<T> &m, int ni) {
    /* contraction of tensor t by its index ni and the matrix m. */
    // Scratch buffers; THREAD_SAFE_STATIC gives one instance per thread.
    THREAD_SAFE_STATIC std::vector<T> tmp;
    THREAD_SAFE_STATIC multi_index mi;

    mi = t.sizes();
    size_type dimt = mi[ni], dim = m.ncols();
    GMM_ASSERT2(dimt, "Inconsistent dimension.");
    GMM_ASSERT2(dimt == m.nrows(), "Dimensions mismatch.");
    GMM_ASSERT2(&t != this, "Does not work when t and *this are the same.");

    mi[ni] = dim;                  // the contracted index changes extent
    if (tmp.size() < dimt) tmp.resize(dimt);
    adjust_sizes(mi);
    const_iterator pft = t.begin();
    iterator pf = this->begin();
    // dd/ddt: jump needed to skip a whole run of index ni (minus the single
    // increment done by the loop); co/cot: strides of index ni.
    size_type dd = coeff_[ni]*( sizes()[ni]-1)-1, co = coeff_[ni];
    size_type ddt = t.coeff_[ni]*(t.sizes()[ni]-1)-1, cot = t.coeff_[ni];
    std::fill(mi.begin(), mi.end(), 0);
    for (;!mi.finished(sizes()); mi.incrementation(sizes()), ++pf, ++pft) {
      if (mi[ni] != 0) {
        // This ni-fiber was fully handled when mi[ni] was 0: fast-forward
        // the multi-index and both iterators past it.
        for (size_type k = 0; k <= size_type(ni); ++k)
          mi[k] = size_type(sizes()[k] - 1);
        pf += dd; pft += ddt;
      }
      else {
        // Gather the ni-fiber of t into the contiguous buffer tmp.
        const_iterator pl = pft; iterator pt = tmp.begin();
        *pt++ = *pl;
        for(size_type k = 1; k < dimt; ++k, ++pt) { pl += cot; *pt = *pl; }

        // Accumulate column k of m against tmp; pl advances linearly
        // through m's column-major storage.
        iterator pff = pf; pl = m.begin();
        for (size_type k = 0; k < dim; ++k) {
          if (k) pff += co;
          *pff = T(0); pt = tmp.begin();
          for (size_type l = 0; l < dimt; ++l, ++pt, ++pl)
            *pff += (*pl) * (*pt);
        }
      }
    }
  }
444 
445 
446  template<class T> void tensor<T>::product(const tensor<T> &t2,
447  tensor<T> &tt) {
448  size_type res_order = order() + t2.order();
449  multi_index res_size(res_order);
450  for (size_type i = 0 ; i < this->order(); ++i) res_size[i] = this->size(i);
451  for (size_type i = 0 ; i < t2.order(); ++i) res_size[order() + i] = t2.size(i);
452  tt.adjust_sizes(res_size);
453  gmm::clear(tt.as_vector());
454 
455  size_type size1 = this->size();
456  size_type size2 = t2.size();
457  const_iterator pt2 = t2.begin();
458  iterator ptt = tt.begin();
459  for (size_type j = 0; j < size2; ++j, ++pt2) {
460  const_iterator pt1 = this->begin();
461  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
462  *ptt += *pt1 * (*pt2);
463  }
464  }
465 
466 
467  template<class T> void tensor<T>::dot_product(const tensor<T> &t2,
468  tensor<T> &tt) {
469  GMM_ASSERT2(size(order()-1) == t2.size(0),
470  "Dimensions mismatch between last dimension of first tensor "
471  "and first dimension of second tensor.");
472  size_type res_order = order() + t2.order() - 2;
473  multi_index res_size(res_order);
474  for (size_type i = 0 ; i < this->order() - 1; ++i) res_size[i] = this->size(i);
475  for (size_type i = 0 ; i < t2.order() - 1; ++i) res_size[order() - 1 + i] = t2.size(i);
476  tt.adjust_sizes(res_size);
477  gmm::clear(tt.as_vector());
478 
479  size_type size0 = t2.size(0);
480  size_type size1 = this->size()/size0;
481  size_type size2 = t2.size()/size0;
482  const_iterator pt2 = t2.begin();
483  iterator ptt = tt.begin();
484  for (size_type j = 0; j < size2; ++j) {
485  const_iterator pt1 = this->begin();
486  iterator ptt0 = ptt;
487  for (size_type q = 0; q < size0; ++q, ++pt2) {
488  ptt = ptt0;
489  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
490  *ptt += *pt1 * (*pt2);
491  }
492  }
493  }
494 
495  template<class T> void tensor<T>::dot_product(const gmm::dense_matrix<T> &m,
496  tensor<T> &tt) {
497  GMM_ASSERT2(size(order()-1) == gmm::mat_nrows(m),
498  "Dimensions mismatch between last dimensions of tensor "
499  "and rows of the matrix.");
500  tensor<T> t2(multi_index(gmm::mat_nrows(m),gmm::mat_ncols(m)));
501  gmm::copy(m.as_vector(), t2.as_vector());
502  dot_product(t2, tt);
503  }
504 
505 
506  template<class T> void tensor<T>::double_dot_product(const tensor<T> &t2,
507  tensor<T> &tt) {
508  GMM_ASSERT2(order() >= 2 && t2.order() >= 2,
509  "Tensors of wrong size. Tensors of order two or higher are required.");
510  GMM_ASSERT2(size(order()-2) == t2.size(0) && size(order()-1) == t2.size(1),
511  "Dimensions mismatch between last two dimensions of first tensor "
512  "and first two dimensions of second tensor.");
513  size_type res_order = order() + t2.order() - 4;
514  multi_index res_size(res_order);
515  for (size_type i = 0 ; i < this->order() - 2; ++i) res_size[i] = this->size(i);
516  for (size_type i = 0 ; i < t2.order() - 2; ++i) res_size[order() - 2 + i] = t2.size(i);
517  tt.adjust_sizes(res_size);
518  gmm::clear(tt.as_vector());
519 
520  size_type size0 = t2.size(0)*t2.size(1);
521  size_type size1 = this->size()/size0;
522  size_type size2 = t2.size()/size0;
523  const_iterator pt2 = t2.begin();
524  iterator ptt = tt.begin();
525  for (size_type j = 0; j < size2; ++j) {
526  const_iterator pt1 = this->begin();
527  iterator ptt0 = ptt;
528  for (size_type q = 0; q < size0; ++q, ++pt2) {
529  ptt = ptt0;
530  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
531  *ptt += *pt1 * (*pt2);
532  }
533  }
534  }
535 
536  template<class T> void tensor<T>::double_dot_product(const gmm::dense_matrix<T> &m,
537  tensor<T> &tt) {
538  GMM_ASSERT2(order() >= 2,
539  "Tensor of wrong size. Tensor of order two or higher is required.");
540  GMM_ASSERT2(size(order()-2) == gmm::mat_nrows(m) &&
541  size(order()-1) == gmm::mat_ncols(m),
542  "Dimensions mismatch between last two dimensions of tensor "
543  "and dimensions of the matrix.");
544  tensor<T> t2(multi_index(gmm::mat_nrows(m),gmm::mat_ncols(m)));
545  gmm::copy(m.as_vector(), t2.as_vector());
546  double_dot_product(t2, tt);
547  }
548 
549 
550  template<class T> std::ostream &operator <<
551  (std::ostream &o, const tensor<T>& t) {
552  o << "sizes " << t.sizes() << " " << vref(t.as_vector());
553  return o;
554  }
555 
556  typedef tensor<scalar_type> base_tensor;
557  typedef tensor<complex_type> base_complex_tensor;
558 
559 
560 } /* end of namespace bgeot. */
561 
562 
#endif /* BGEOT_TENSOR_H__ */
Small (dim < 8) vectors.
Tools for multithreaded, OpenMP and Boost based parallelization.
void copy(const L1 &l1, L2 &l2)
*‍/
Definition: gmm_blas.h:978
void clear(L &l)
clear (fill with zeros) a vector or matrix.
Definition: gmm_blas.h:59
void resize(V &v, size_type n)
*‍/
Definition: gmm_blas.h:210
void add(const L1 &l1, L2 &l2)
*‍/
Definition: gmm_blas.h:1277
Basic Geometric Tools.
gmm::uint16_type short_type
used as the common short type integer in the library
Definition: bgeot_config.h:73
std::ostream & operator<<(std::ostream &o, const convex_structure &cv)
Print the details of the convex structure cvs to the output stream o.
size_t size_type
used as the common size type in the library
Definition: bgeot_poly.h:49