#ifndef GETFEM_ASSEMBLING_TENSORS_H__
#define GETFEM_ASSEMBLING_TENSORS_H__

#include "gmm/gmm_kernel.h"        /* base gmm files                        */
#include "getfem_mesh_fem.h"       /* getfem::mesh_fem                      */
#include "getfem_mesh_im.h"        /* getfem::mesh_im                       */
#include "bgeot_sparse_tensors.h"  /* sparse tensors used during assembly   */
#include "getfem_mat_elem_type.h"  /* elementary tensor descriptors         */
#include "getfem_mat_elem.h"       /* elementary computations               */
#include <map>
#include <deque>
#include <memory>
#define ASM_THROW_PARSE_ERROR(x)                                        \
  GMM_ASSERT1(false, "parse error: " << x << endl << "found here:\n "   \
              << syntax_err_print());
#define ASM_THROW_TENSOR_ERROR(x)                                       \
  GMM_ASSERT1(false, "tensor error: " << x);
#define ASM_THROW_ERROR(x) GMM_ASSERT1(false, "error: " << x);
namespace getfem {

  using bgeot::stride_type;
  using bgeot::index_type;
  using bgeot::index_set;
  using bgeot::tensor_ranges;
  using bgeot::tensor_strides;
  using bgeot::tensor_mask;
  using bgeot::tensor_shape;
  using bgeot::tensor_ref;
  using bgeot::multi_tensor_iterator;
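
  /* The assembly expression is compiled into a tree of assembly nodes
     (ATN, presumably "Assembly Tree Node").  Every node remembers the
     convex/face pair it was last executed on, so exec() only triggers
     a recomputation when the current element actually changes.
     Illustrative contract:

       node.exec(cv, f);  // calls the virtual exec_(cv, f)
       node.exec(cv, f);  // same (cv, f): cached, no recomputation
  */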
  class ATN_tensor;

  class ATN {
    std::deque< ATN_tensor* > childs_;
    std::string name_;   /* part of the parsed string                */
    unsigned number_;    /* used for the ordering of the tree        */
  protected:
    size_type current_cv;
    dim_type current_face;
  public:
    ATN(const std::string& n=std::string("unnamed"))
      : name_(n), number_(unsigned(-1)), current_cv(size_type(-1)),
        current_face(dim_type(-1)) {}
    virtual ~ATN() {}
    void add_child(ATN_tensor& a) { childs_.push_back(&a); }
    ATN_tensor& child(size_type n) { return *childs_[n]; }
    size_type nchilds() { return childs_.size(); }
    /* reinit is called each time the object needs to reset itself
       (the fem has changed, the convex has changed, etc.) */
    void reinit() { if (!is_zero_size()) reinit_(); }
    /* do the computations for a given convex */
    void exec(size_type cv, dim_type face) {
      if (cv != current_cv || face != current_face) {
        exec_(cv, face);
        current_cv = cv;
        current_face = face;
      }
    }
    const std::string& name() { return name_; }
    void set_name(const std::string& n) { name_ = n; }
    virtual void update_childs_required_shape();
    virtual bool is_zero_size();
    /* assign a number to the node such that a child always has a
       lower number than its parent */
    void set_number(unsigned &gcnt);
    unsigned number() const { return number_; }
  private:
    virtual void reinit_() = 0;
    virtual void exec_(size_type, dim_type) {}
  };
  class ATN_tensors_sum_scaled;
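
  /* Node producing a tensor value.  Before execution, a shape
     negotiation pass runs over the tree: each parent merges the shape
     it needs into the req_shape of its children (via
     update_childs_required_shape / merge_required_shape below), so a
     node only has to compute the entries some parent actually reads. */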
  class ATN_tensor : public ATN {
  protected:
    tensor_ranges r_;
    bool shape_updated_;
    tensor_ref tr;
    tensor_shape req_shape;
    bool frozen_;
  public:
    ATN_tensor() { shape_updated_ = false; frozen_ = false; }
    bool is_shape_updated() const { return shape_updated_; }
    void freeze() { frozen_ = true; }
    bool is_frozen() const { return frozen_; }
    const tensor_ranges& ranges() const { return r_; }
    const tensor_shape& required_shape() const { return req_shape; }
    virtual void check_shape_update(size_type, dim_type) {}
    virtual void init_required_shape() { req_shape.set_empty(r_); }
    virtual void update_childs_required_shape() {
      for (dim_type i=0; i < nchilds(); ++i)
        child(i).merge_required_shape(req_shape);
    }
    tensor_ref& tensor() { return tr; }
    bool is_zero_size() { return r_.is_zero_size(); }
    void merge_required_shape(const tensor_shape& shape_from_parent) {
      req_shape.merge(shape_from_parent, false);
    }
    virtual ATN_tensors_sum_scaled* is_tensors_sum_scaled() { return 0; }
  };
  /* holds one dimension of a destination array: either a plain integer
     range or a reference to the dofs of a mesh_fem */
  struct vdim_specif {
    size_type dim;
    const mesh_fem *pmf;
    bool is_mf_ref() const { return (pmf != 0); }
    vdim_specif() { dim = size_type(-1); pmf = 0; }
    vdim_specif(size_type i) { dim = i; pmf = 0; }
    vdim_specif(const mesh_fem *pmf_) { dim = pmf_->nb_dof(); pmf = pmf_; }
  };
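
  /* Illustrative sketch: the dimensions of an output "V(#1)" would be
     described by one vdim_specif bound to a mesh_fem, while a plain
     array dimension of size N uses vdim_specif(N).  Assuming a
     mesh_fem mf:

       vdim_specif_list vl;
       vl.push_back(vdim_specif(&mf)); // dimension indexed by the dofs of mf
       vl.push_back(vdim_specif(3));   // plain dimension of size 3
  */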
  class vdim_specif_list : public std::vector< vdim_specif > {
  public:
    vdim_specif_list() { reserve(8); }
    void build_strides_for_cv(size_type cv, tensor_ranges& r,
                              std::vector<tensor_strides>& str) const;
  };
  /* final node for array output: the result is stored into a
     (possibly multi-dimensional) array, i.e. a vector VEC in fortran
     order */
  template< typename VEC > class ATN_array_output : public ATN {
    VEC& v;
    vdim_specif_list vdim;
    multi_tensor_iterator mti;
    tensor_strides strides;
    const mesh_fem *pmf;
  public:
    ATN_array_output(ATN_tensor& a, VEC& v_, vdim_specif_list &d)
      : v(v_), vdim(d) {
      add_child(a);
      strides.resize(vdim.size()+1);
      pmf = 0;
      strides[0] = 1;
      for (size_type i=0; i < vdim.size(); ++i) {
        if (vdim[i].pmf) pmf = vdim[i].pmf;
        strides[i+1] = strides[i]*int(vdim[i].dim);
      }
      if (gmm::vect_size(v) != size_type(strides[vdim.size()]))
        ASM_THROW_TENSOR_ERROR("wrong size for output vector: supplied "
                               "vector size is " << gmm::vect_size(v)
                               << " while it should be "
                               << strides[vdim.size()]);
    }
  private:
    void reinit_() {
      mti = multi_tensor_iterator(child(0).tensor(), true);
    }
    void exec_(size_type cv, dim_type) {
      tensor_ranges r;
      std::vector< tensor_strides > str;
      vdim.build_strides_for_cv(cv, r, str);
      if (child(0).ranges() != r) {
        ASM_THROW_TENSOR_ERROR("can't output a tensor of dimensions "
                               << child(0).ranges()
                               << " into an output array of size " << r);
      }
      mti.rewind();
      if (pmf && pmf->is_reduced()) {
        if (pmf->nb_dof() != 0) {
          do {
            size_type nb_dof = pmf->nb_dof();
            dim_type qqdim = dim_type(gmm::vect_size(v) / nb_dof);
            if (qqdim == 1) {
              size_type i = 0;
              for (dim_type j=0; j < mti.ndim(); ++j) i += str[j][mti.index(j)];
              gmm::add(gmm::scaled(gmm::mat_row(pmf->extension_matrix(), i),
                                   mti.p(0)), v);
            }
            else {
              GMM_ASSERT1(false, "To be verified ... ");
              size_type i = 0;
              for (dim_type j=0; j < mti.ndim(); ++j) i += str[j][mti.index(j)];
              gmm::add(gmm::scaled(gmm::mat_row(pmf->extension_matrix(), i/qqdim),
                                   mti.p(0)),
                       gmm::sub_vector(v, gmm::sub_slice(i%qqdim, nb_dof,
                                                         qqdim)));
            }
          } while (mti.qnext1());
        }
      }
      else {
        do {
          typename gmm::linalg_traits<VEC>::iterator it = gmm::vect_begin(v);
          for (dim_type j = 0; j < mti.ndim(); ++j) it += str[j][mti.index(j)];
          *it += mti.p(0);
        } while (mti.qnext1());
      }
    }
  };
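
  /* Note on the stride computation above: strides[i+1] = strides[i]*dim(i)
     yields fortran (column-major) ordering.  For a 3x4 output, entry
     (i,j) lands at position i + 3*j of the vector v, and the expected
     vector size is strides[2] = 12. */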
  template <typename MAT, typename ROW, typename COL>
  void asmrankoneupdate(const MAT &m_, const ROW &row, const COL &col,
                        scalar_type r) {
    MAT &m = const_cast<MAT &>(m_);
    typename gmm::linalg_traits<ROW>::const_iterator itr = row.begin();
    for (; itr != row.end(); ++itr) {
      typename gmm::linalg_traits<COL>::const_iterator itc = col.begin();
      for (; itc != col.end(); ++itc)
        m(itr.index(), itc.index()) += (*itr) * (*itc) * r;
    }
  }
  template <typename MAT, typename ROW>
  void asmrankoneupdate(const MAT &m_, const ROW &row, size_type j,
                        scalar_type r) {
    MAT &m = const_cast<MAT &>(m_);
    typename gmm::linalg_traits<ROW>::const_iterator itr = row.begin();
    for (; itr != row.end(); ++itr) m(itr.index(), j) += (*itr) * r;
  }
  template <typename MAT, typename COL>
  void asmrankoneupdate(const MAT &m_, size_type j, const COL &col,
                        scalar_type r) {
    MAT &m = const_cast<MAT &>(m_);
    typename gmm::linalg_traits<COL>::const_iterator itc = col.begin();
    for (; itc != col.end(); ++itc) m(j, itc.index()) += (*itc) * r;
  }
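
  /* The three overloads above accumulate the rank-one update
     m += r * row * col^T, where either factor may degenerate to a
     canonical basis vector e_j when a plain dof index is passed.
     Illustrative call, assuming gmm-compatible sparse arguments:

       asmrankoneupdate(m, row, col, r); // m(i,j) += r*row[i]*col[j]
  */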
  /* final node for sparse matrix output */
  template< typename MAT > class ATN_smatrix_output : public ATN {
    const mesh_fem &mf_r, &mf_c;
    MAT& m;
    multi_tensor_iterator mti;
    struct ijv { /* small cache for the mti output */
      scalar_type *p;
      unsigned i, j;
    };
    std::vector<ijv> it;
  public:
    ATN_smatrix_output(ATN_tensor& a, const mesh_fem& mf_r_,
                       const mesh_fem& mf_c_, MAT& m_)
      : mf_r(mf_r_), mf_c(mf_c_), m(m_) {
      add_child(a);
    }
  private:
    void reinit_() {
      mti = multi_tensor_iterator(child(0).tensor(), true);
      it.resize(0);
    }
    void exec_(size_type cv, dim_type) {
      size_type nb_r = mf_r.nb_basic_dof_of_element(cv);
      size_type nb_c = mf_c.nb_basic_dof_of_element(cv);
      if (child(0).tensor().ndim() != 2)
        ASM_THROW_TENSOR_ERROR("cannot write a "
                               << int(child(0).tensor().ndim())
                               << "D-tensor into a matrix!");
      if (child(0).tensor().dim(0) != nb_r ||
          child(0).tensor().dim(1) != nb_c) {
        ASM_THROW_TENSOR_ERROR("size mismatch for sparse matrix output:"
                               " tensor dimension is " << child(0).ranges()
                               << ", while the elementary matrix for convex "
                               << cv << " should have " << nb_r << "x"
                               << nb_c << " elements");
      }
      std::vector<size_type> cvdof_r(mf_r.ind_basic_dof_of_element(cv).begin(),
                                     mf_r.ind_basic_dof_of_element(cv).end());
      std::vector<size_type> cvdof_c(mf_c.ind_basic_dof_of_element(cv).begin(),
                                     mf_c.ind_basic_dof_of_element(cv).end());

      /* cache the positions of the tensor entries on the first pass */
      if (it.size() == 0) {
        mti.rewind();
        do {
          ijv v;
          v.p = &mti.p(0);
          v.i = unsigned(mti.index(0));
          v.j = unsigned(mti.index(1));
          it.push_back(v);
        } while (mti.qnext1());
      }
      bool valid_mf_r = mf_r.nb_dof() > 0;
      bool valid_mf_c = mf_c.nb_dof() > 0;

      if (mf_r.is_reduced()) {
        if (mf_c.is_reduced() && valid_mf_r && valid_mf_c) {
          for (unsigned i = 0; i < it.size(); ++i)
            if (*it[i].p)
              asmrankoneupdate(m, gmm::mat_row(mf_r.extension_matrix(),
                                               cvdof_r[it[i].i]),
                               gmm::mat_row(mf_c.extension_matrix(),
                                            cvdof_c[it[i].j]),
                               *it[i].p);
        }
        else if (valid_mf_r) {
          for (unsigned i = 0; i < it.size(); ++i)
            if (*it[i].p)
              asmrankoneupdate(m, gmm::mat_row(mf_r.extension_matrix(),
                                               cvdof_r[it[i].i]),
                               cvdof_c[it[i].j], *it[i].p);
        }
      }
      else {
        if (mf_c.is_reduced() && valid_mf_c) {
          for (unsigned i = 0; i < it.size(); ++i)
            if (*it[i].p)
              asmrankoneupdate(m, cvdof_r[it[i].i],
                               gmm::mat_row(mf_c.extension_matrix(),
                                            cvdof_c[it[i].j]),
                               *it[i].p);
        }
        else {
          for (unsigned i = 0; i < it.size(); ++i)
            m(cvdof_r[it[i].i], cvdof_c[it[i].j]) += *it[i].p;
        }
      }
    }
  };
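
  /* For a "reduced" mesh_fem, basic (element-level) dofs map to the
     real dofs through the extension matrix E, so instead of writing
     directly at (cvdof_r[i], cvdof_c[j]) the code above accumulates
     the rank-one term t(i,j) * E_r[row i] * E_c[row j]^T into m. */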
  /* wrappers around the data/vector/matrix types, used to avoid the
     full templatization of the generic_assembly class */
  class base_asm_data {
  public:
    virtual size_type vect_size() const = 0;
    virtual void copy_with_mti(const std::vector<tensor_strides> &,
                               multi_tensor_iterator &,
                               const mesh_fem *) const = 0;
    virtual ~base_asm_data() {}
  };
  template< typename VEC > class asm_data : public base_asm_data {
    const VEC &v;
  public:
    asm_data(const VEC *v_) : v(*v_) {}
    size_type vect_size() const { return gmm::vect_size(v); }
    /* used to fill the data tensors from the data arrays */
    void copy_with_mti(const std::vector<tensor_strides> &str,
                       multi_tensor_iterator &mti,
                       const mesh_fem *pmf) const {
      size_type ppos;
      if (pmf && pmf->is_reduced()) {
        do {
          ppos = 0;
          for (dim_type i = 0; i < mti.ndim(); ++i) ppos += str[i][mti.index(i)];
          mti.p(0)
            = gmm::vect_sp(gmm::mat_row(pmf->extension_matrix(), ppos), v);
        } while (mti.qnext1());
      }
      else {
        do {
          ppos = 0;
          for (dim_type i = 0; i < mti.ndim(); ++i) ppos += str[i][mti.index(i)];
          mti.p(0) = v[ppos];
        } while (mti.qnext1());
      }
    }
  };
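
  /* copy_with_mti walks the data tensor with a multi_tensor_iterator:
     for each entry the linear position is the sum of the per-dimension
     strides, and the value is read from the user array v (or, for a
     reduced mesh_fem, obtained as the dot product of the matching
     extension-matrix row with v). */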
  class base_asm_vec {
  public:
    virtual std::unique_ptr<ATN> build_output_tensor(ATN_tensor &a,
                                                     vdim_specif_list& vdim) = 0;
    virtual ~base_asm_vec() {}
  };
  template< typename VEC > class asm_vec : public base_asm_vec {
    std::shared_ptr<VEC> v;
  public:
    asm_vec(const std::shared_ptr<VEC> &v_) : v(v_) {}
    asm_vec(VEC *v_) : v(std::shared_ptr<VEC>(), v_) {} /* non-owning alias */
    virtual std::unique_ptr<ATN> build_output_tensor(ATN_tensor &a,
                                                     vdim_specif_list& vdim) {
      return std::make_unique<ATN_array_output<VEC>>(a, *v, vdim);
    }
    VEC *vec() { return v.get(); }
  };
  /* factory for output vectors (used by the getfem interface, where
     the outputs are not known at compile time) */
  class base_vec_factory {
  public:
    virtual base_asm_vec* create_vec(const tensor_ranges& r) = 0;
    virtual ~base_vec_factory() {}
  };
  template< typename VEC > class vec_factory
    : public base_vec_factory, private std::deque<asm_vec<VEC> > {
  public:
    base_asm_vec* create_vec(const tensor_ranges& r) {
      size_type sz = 1;
      for (size_type i=0; i < r.size(); ++i) sz *= r[i];
      if (sz == 0)
        ASM_THROW_TENSOR_ERROR("can't create a vector of size " << r);
      this->push_back(asm_vec<VEC>(std::make_shared<VEC>(sz)));
      return &this->back();
    }
  };
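
  /* Design note: the factory inherits privately from std::deque so the
     asm_vec objects it returns pointers to keep a stable address
     (std::deque never relocates existing elements on push_back, unlike
     std::vector). */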
  class base_asm_mat {
  public:
    virtual std::unique_ptr<ATN>
    build_output_tensor(ATN_tensor& a, const mesh_fem& mf1,
                        const mesh_fem& mf2) = 0;
    virtual ~base_asm_mat() {}
  };
  template< typename MAT > class asm_mat : public base_asm_mat {
    std::shared_ptr<MAT> m;
  public:
    asm_mat(const std::shared_ptr<MAT> &m_) : m(m_) {}
    asm_mat(MAT *m_) : m(std::shared_ptr<MAT>(), m_) {} /* non-owning alias */
    std::unique_ptr<ATN>
    build_output_tensor(ATN_tensor& a, const mesh_fem& mf1,
                        const mesh_fem& mf2) {
      return std::make_unique<ATN_smatrix_output<MAT>>(a, mf1, mf2, *m);
    }
    MAT *mat() { return m.get(); }
  };
  class base_mat_factory {
  public:
    virtual base_asm_mat* create_mat(size_type m, size_type n) = 0;
    virtual ~base_mat_factory() {};
  };

  template< typename MAT > class mat_factory
    : public base_mat_factory, private std::deque<asm_mat<MAT> > {
  public:
    base_asm_mat* create_mat(size_type m, size_type n) {
      this->push_back(asm_mat<MAT>(std::make_shared<MAT>(m, n)));
      return &this->back();
    }
  };
  class tnode {
  public:
    typedef enum { TNCONST, TNTENSOR, TNNONE } node_type;
  private:
    node_type type_;
    scalar_type x;
    ATN_tensor *t;
  public:
    tnode() : type_(TNNONE), x(1e300), t(NULL) {}
    tnode(scalar_type x_) { assign(x_); }
    tnode(ATN_tensor *t_) { assign(t_); }
    void assign(scalar_type x_) { type_ = TNCONST; t = NULL; x = x_; }
    void assign(ATN_tensor *t_) { type_ = TNTENSOR; t = t_; x = 1e300; }
    ATN_tensor* tensor() { assert(type_ == TNTENSOR); return t; }
    scalar_type xval() { assert(type_ == TNCONST); return x; }
    node_type type() { return type_; }
    void check0() { if (xval() == 0) ASM_THROW_ERROR("division by zero"); }
  };
  /* the tokenizer for the assembly strings */
  class asm_tokenizer {
  public:
    typedef enum { OPEN_PAR='(', CLOSE_PAR=')', COMMA=',',
                   SEMICOLON=';', COLON=':', EQUAL='=', MFREF='#', IMREF='%',
                   PLUS='+', MINUS='-', PRODUCT='.', MULTIPLY='*',
                   DIVIDE='/', ARGNUM_SELECTOR='$',
                   OPEN_BRACE='{', CLOSE_BRACE='}',
                   END=0, IDENT=1, NUMBER=2 } tok_type_enum;
  private:
    std::string str;
    size_type tok_pos, tok_len;
    tok_type_enum curr_tok_type;
    std::string curr_tok;
    int curr_tok_ival;
    double curr_tok_dval;
    size_type err_msg_mark;
    std::deque<size_type> marks;
  public:
    void set_str(const std::string& s_) {
      str = s_; tok_pos = 0; tok_len = size_type(-1); curr_tok_type = END;
      err_msg_mark = 0; get_tok();
    }
    std::string tok() const { return curr_tok; }
    tok_type_enum tok_type() const { return curr_tok_type; }
    std::string tok_substr(size_type i1, size_type i2) const
    { return str.substr(i1, i2-i1); }
    void err_set_mark() { err_msg_mark = tok_pos; }
    void push_mark() { marks.push_back(tok_pos); }
    void pop_mark() { assert(marks.size()); marks.pop_back(); }
    std::string mark_txt() {
      assert(marks.size());
      return tok_substr(marks.back(), tok_pos);
    }
    std::string syntax_err_print();
    void accept(tok_type_enum t, const char *msg_="syntax error") {
      if (tok_type() != t) ASM_THROW_PARSE_ERROR(msg_); advance();
    }
    void accept(tok_type_enum t, tok_type_enum t2,
                const char *msg_="syntax error") {
      if (tok_type() != t && tok_type() != t2)
        ASM_THROW_PARSE_ERROR(msg_);
      advance();
    }
    bool advance_if(tok_type_enum t) {
      if (tok_type() == t) { advance(); return true; }
      else return false;
    }
    void advance() { tok_pos += tok_len; get_tok(); }
    void get_tok();
    double tok_number_dval()
    { assert(tok_type()==NUMBER); return curr_tok_dval; }
    int tok_number_ival(int maxval=10000000) {
      int n=int(tok_number_dval());
      if (n != curr_tok_dval) ASM_THROW_PARSE_ERROR("not an integer");
      if (n > maxval) ASM_THROW_PARSE_ERROR("out of bound integer");
      return n;
    }
    size_type tok_mfref_num()
    { assert(tok_type()==MFREF); return curr_tok_ival; }
    size_type tok_imref_num()
    { assert(tok_type()==IMREF); return curr_tok_ival; }
    size_type tok_argnum()
    { assert(tok_type()==ARGNUM_SELECTOR); return curr_tok_ival; }
  };
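
  /* Illustrative token stream: for the assembly string
     "M(#1,#1)+=comp(Base(#1).Base(#1))" the tokenizer would emit
     IDENT "M", OPEN_PAR, MFREF(1), COMMA, MFREF(1), CLOSE_PAR, PLUS,
     EQUAL, IDENT "comp", OPEN_PAR, ... where each '#k' becomes a MFREF
     token whose number is read with tok_mfref_num(). */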
  /** Generic assembly of vectors, matrices. */
  class generic_assembly : public asm_tokenizer {
    std::vector<const mesh_fem *> mftab;        /* list of the mesh_fem used */
    std::vector<const mesh_im *> imtab;         /* list of the mesh_im used  */
    std::vector<pnonlinear_elem_term> innonlin; /* non-linear terms          */
    std::vector<std::unique_ptr<base_asm_data>> indata;  /* data sources     */
    std::vector<std::shared_ptr<base_asm_vec>> outvec;  /* output vectors    */
    std::vector<std::shared_ptr<base_asm_mat>> outmat;  /* output matrices   */
    base_vec_factory *vec_fact; /* if non null, used to create outvec entries */
    base_mat_factory *mat_fact; /* if non null, used to create outmat entries */
    std::vector<std::unique_ptr<ATN>> outvars;  /* the "final" output nodes  */
    std::map<std::string, ATN_tensor *> vars;   /* the user variables        */
    std::vector<std::unique_ptr<ATN_tensor>> atn_tensors; /* all tensor nodes,
                                                             for deallocation */
    bool parse_done;
  public:
    generic_assembly()
      : vec_fact(0), mat_fact(0), parse_done(false) {}
    void set(const std::string& s_) { set_str(s_); }
    const std::vector<const mesh_fem*>& mf() const { return mftab; }
    const std::vector<const mesh_im*>& im() const { return imtab; }
    const std::vector<pnonlinear_elem_term>& nonlin() const
    { return innonlin; }
    const std::vector<std::unique_ptr<base_asm_data>>& data() const
    { return indata; }
    const std::vector<std::shared_ptr<base_asm_vec>>& vec() const
    { return outvec; }
    const std::vector<std::shared_ptr<base_asm_mat>>& mat() const
    { return outmat; }
    /// Add a new mesh_fem.
    void push_mf(const mesh_fem& mf_) { mftab.push_back(&mf_); }
    /// Add a new mesh_im.
    void push_im(const mesh_im& im_) { imtab.push_back(&im_); }
    /// Add a new non-linear term.
    void push_nonlinear_term(pnonlinear_elem_term net) {
      innonlin.push_back(net);
    }
    /// Add a new data (dense array).
    template< typename VEC > void push_data(const VEC& d) {
      indata.push_back(std::make_unique<asm_data<VEC>>(&d));
    }
    /// Add a new output vector.
    template< typename VEC > void push_vec(VEC& v) {
      outvec.push_back(std::make_shared<asm_vec<VEC>>(&(gmm::linalg_cast(v))));
    }
    /// Add a new output vector (fake const version..)
    template< typename VEC > void push_vec(const VEC& v) {
      outvec.push_back(std::make_shared<asm_vec<VEC>>(&(gmm::linalg_cast(v))));
    }
    /// Add a new output matrix (fake const version..)
    template< typename MAT > void push_mat(const MAT& m) {
      outmat.push_back(std::make_shared<asm_mat<MAT>>(&(gmm::linalg_cast(m))));
    }
    /// Add a new output matrix.
    template< typename MAT > void push_mat(MAT& m) {
      outmat.push_back(std::make_shared<asm_mat<MAT>>(&(gmm::linalg_cast(m))));
    }
    template <typename T> void push_mat_or_vec(T &v) {
      push_mat_or_vec(v, typename gmm::linalg_traits<T>::linalg_type());
    }
    /// used by the getfem_interface..
    void set_vec_factory(base_vec_factory *fact) { vec_fact = fact; }
    void set_mat_factory(base_mat_factory *fact) { mat_fact = fact; }
  private:
    ATN_tensor* record(std::unique_ptr<ATN_tensor> &&t) {
      t->set_name(mark_txt());
      atn_tensors.push_back(std::move(t));
      return atn_tensors.back().get();
    }
    ATN* record_out(std::unique_ptr<ATN> t) {
      t->set_name(mark_txt());
      outvars.push_back(std::move(t));
      return outvars.back().get();
    }
    const mesh_fem& do_mf_arg_basic();
    const mesh_fem& do_mf_arg(std::vector<const mesh_fem*> *multimf = 0);
    void do_dim_spec(vdim_specif_list& lst);
    std::string do_comp_red_ops();
    ATN_tensor* do_comp();
    ATN_tensor* do_data();
    std::pair<ATN_tensor*, std::string> do_red_ops(ATN_tensor* t);
    tnode do_prod_trans();
    void consistency_check();
    template <typename T> void push_mat_or_vec(T &v, gmm::abstract_vector)
    { push_vec(v); }
    template <typename T> void push_mat_or_vec(T &v, gmm::abstract_matrix)
    { push_mat(v); }
  public:
    /** do the assembly on the specified region (boundary or set of
        convexes) */
    void assembly(const mesh_region &region = mesh_region::all_convexes());
  };
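
  /* Typical use (sketch; mim, mf and M are assumed to be a suitable
     getfem::mesh_im, getfem::mesh_fem and writable sparse matrix):

       getfem::generic_assembly assem;
       assem.push_im(mim);
       assem.push_mf(mf);
       assem.push_mat(M);
       assem.set("M(#1,#1)+=comp(Base(#1).Base(#1))");
       assem.assembly();

     builds the mass matrix of mf into M. */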
}  /* end of namespace getfem */

#endif /* GETFEM_ASSEMBLING_TENSORS_H__ */