static double time_[30];

template <class T> class GenTensor;

// number of vectors used to represent a tensor of the given TensorType (2 for TT_2D)
static unsigned int compute_nvec(const TensorType& tt) {
    if (tt==TT_2D) return 2;
    print("unknown TensorType",tt);

typedef typename Tensor<T>::scalar_type scalar_type;

static double& time(int i) {return time_[i];}

// for each configuration the weight; length should be r
Tensor< typename Tensor<T>::scalar_type > weights_;

// slices covering the non-zero configurations (see c0())
std::vector<Slice> s_;

// return the maximal index i such that the discarded weights w(i+1..rank-1) stay below thresh
static int max_sigma(const double& thresh, const int& rank, const Tensor<double>& w) {
    for (i=rank-1; i>=0; i--) {
        if (residual>thresh*thresh) break;
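The loop above is the truncation rule behind max_sigma(): the weights are assumed ordered largest first, the squared tail is accumulated from the smallest weight upwards, and the index of the last weight that must be kept is returned. A minimal stand-alone sketch of that rule, with std::vector<double> standing in for Tensor<double> (illustration only, not the MADNESS API):

#include <vector>

// returns the last index that must be kept so that the discarded tail
// w[i+1..rank-1] has 2-norm <= thresh; -1 means everything may be discarded
int max_sigma_sketch(double thresh, int rank, const std::vector<double>& w) {
    double residual = 0.0;
    int i = rank - 1;
    for (; i >= 0; --i) {
        residual += w[i] * w[i];                // squared norm of the tail discarded so far
        if (residual > thresh * thresh) break;  // discarding w[i] as well would exceed thresh
    }
    return i;
}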
// ctor with dimensions for a vector configuration
const long nvec=compute_nvec(tt);
MADNESS_ASSERT(dim%nvec==0);
weights_=Tensor<double>(int(0));
vector_=std::vector<Tensor<T> > (nvec);
vector_[0]=tensorT(std::vector<long>(dim,k));
for (unsigned int idim=0; idim<nvec; idim++) vector_[idim]=Tensor<T>(0,kVec());

// ctor with provided weights and effective vectors; shallow copy
SRConf(const Tensor<double>& weights, const std::vector<Tensor<T> >& vectors,
        const unsigned int& dim, const unsigned int maxk, const TensorType& tt)
    MADNESS_ASSERT(vectors.size()>0);
    MADNESS_ASSERT(weights.ndim()==1 and weights.dim(0)==vectors[0].dim(0));
    unsigned int nvec=compute_nvec(tt);
    MADNESS_ASSERT(vectors.size()==nvec);
    MADNESS_ASSERT(dim%nvec==0);
    rank_=weights.dim(0);
    vector_=std::vector<Tensor<T> > (long(vectors.size()));
    for (unsigned int idim=0; idim<vectors.size(); idim++) {
        vector_[idim]=vectors[idim];

// ctor with one vector (aka full representation), shallow
    : dim_(vector1.ndim())
    , weights_(Tensor<double>())
    , maxk_(vector1.dim(0))

// explicit ctor with two vectors (aka SVD), shallow
SRConf(const Tensor<double>& weights, const tensorT& vector1, const tensorT& vector2,
        const unsigned int& dim, const unsigned int maxk)
    , tensortype_(TT_2D) {
    MADNESS_ASSERT(weights.ndim()==1);
    MADNESS_ASSERT(vector1.ndim()==2);
    MADNESS_ASSERT(vector2.ndim()==2);
    MADNESS_ASSERT(weights.dim(0)==vector1.dim(0));
    MADNESS_ASSERT(vector2.dim(0)==vector1.dim(0));
    rank_=weights.dim(0);
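This two-vector ctor stores the SVD-like TT_2D form: a weight vector of length rank plus two factor tensors of shape (rank, kVec), so that the flattened tensor is reconstructed as A(I,J) ~ sum_r w(r)*vector1(r,I)*vector2(r,J). A minimal sketch of that layout with plain std::vector (names are illustrative, not the MADNESS API):

#include <cstddef>
#include <vector>

// SVD-style separated representation: A(I,J) ~ sum_r w[r]*u[r][I]*v[r][J]
struct SVDConfigSketch {
    std::vector<double> w;                   // rank weights (singular values)
    std::vector<std::vector<double>> u, v;   // rank rows, each of length kvec

    // one flattened element of the represented tensor
    double element(std::size_t I, std::size_t J) const {
        double a = 0.0;
        for (std::size_t r = 0; r < w.size(); ++r) a += w[r] * u[r][I] * v[r][J];
        return a;
    }
};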
// assignment operator, shallow copy of the vectors
if (&rhs==this) return *this;
if (rhs.has_no_data()) {
    weights_=Tensor<double>(0);
    vector_=std::vector<Tensor<T> > (rhs.dim_eff());
    for (unsigned int idim=0; idim<dim_eff(); idim++) vector_[idim]=Tensor<T>(0,long(this->kVec()));
weights_=Tensor<double>();
vector_.resize(rhs.vector_.size());
for (unsigned int i=0; i<rhs.vector_.size(); i++) {
for (unsigned int idim=0; idim<dim_eff(); idim++) {
    MADNESS_ASSERT(weights_.dim(0)==vector_[idim].dim(0));

// get_configs: return a contiguous range [start,end] of the configurations
MADNESS_ASSERT((start>=0) and (end<=rank()));
MADNESS_ASSERT(s_.size()>1);
const long nvec=dim_eff();
const long dim_pv_eff=s_.size()-1;
std::vector<tensorT> v(nvec);
for (long i=0; i<nvec; i++) v[i]=ref_vector(i)(s,_);
} else if (dim_pv_eff==2) {
    for (long i=0; i<nvec; i++) v[i]=ref_vector(i)(s,_,_);
} else if (dim_pv_eff==3) {
    for (long i=0; i<nvec; i++) v[i]=ref_vector(i)(s,_,_,_);

// serialize: the TensorType enum is stored as a plain int
template <typename Archive>
int i=int(tensortype_);
ar & dim_ & weights_ & vector_ & rank_ & maxk_ & i;
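serialize() pushes the TensorType enum through an int so that any archive that handles integers can store it; on load the int is presumably converted back to the enum. A tiny illustration of that round-trip with a hypothetical enum (not the MADNESS archive API):

// enum <-> int round-trip as used for tensortype_ in serialize()
enum TensorTypeSketch { TT_FULL_SKETCH, TT_2D_SKETCH };
inline int to_int(TensorTypeSketch t) { return static_cast<int>(t); }
inline TensorTypeSketch from_int(int i) { return static_cast<TensorTypeSketch>(i); }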
// does this have any data?
if (tensortype_==TT_FULL) return (vector_.size()>0 and vector_[0].has_data());

bool has_no_data() const {return !has_data();}

// reserve: enlarge the buffers to hold (at least) r configurations, keeping the existing ones
void reserve(long r) {
    MADNESS_ASSERT(r>=this->rank());
    MADNESS_ASSERT(has_data() or vector_.size()>0);
    if (this->vector_[0].dim(0)>=r) return;
    const long rank=this->rank();
    const long kvec=this->kVec();
    if (had_structure) this->undo_structure();
    Tensor<scalar_type> newWeights(r);
    if (rank>0) newWeights(Slice(0,rank-1))=weights_(Slice(0,rank-1));
    for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
        tensorT newVector(r,kvec);
        if (rank>0) newVector(this->c0())=vector_[idim](this->c0());
    MADNESS_ASSERT(weights_.dim(0)==vector_[0].dim(0));
    if (had_structure) this->make_structure(true);
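reserve() follows a grow-and-copy pattern: allocate larger weight and factor buffers of capacity r, copy the first rank() entries or rows into them, and swap them in (flattening the factors first and restoring the structure afterwards). A stand-alone sketch of that pattern on flat buffers, with illustrative names only:

#include <algorithm>
#include <cstddef>
#include <vector>

// grow weights (length >= rank) and one flat factor (rank x kvec, row-major) to capacity r
void reserve_sketch(std::vector<double>& weights, std::vector<double>& factor,
                    std::size_t rank, std::size_t kvec, std::size_t r) {
    if (weights.size() >= r) return;                 // already large enough
    std::vector<double> new_w(r, 0.0), new_f(r * kvec, 0.0);
    std::copy(weights.begin(), weights.begin() + rank, new_w.begin());
    std::copy(factor.begin(), factor.begin() + rank * kvec, new_f.begin());
    weights.swap(new_w);
    factor.swap(new_f);                              // old buffers are released here
}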
// c0(): the slices covering the non-zero configurations of the vectors
const std::vector<Slice>& c0() const {
    MADNESS_ASSERT(s_.size()>0);

// divide_and_conquer_reduce: reduce the rank using a divide-and-conquer approach
void divide_and_conquer_reduce(const double& thresh) {
    if (has_no_data()) return;
    const long chunksize=8;
    if (rank()>chunksize) {
        SRConf<T> chunk2=this->get_configs(rank()/2+1,rank()-1);
        chunk1.divide_and_conquer_reduce(thresh*0.5);
        chunk2.divide_and_conquer_reduce(thresh*0.5);
        this->add_SVD(chunk2,thresh);
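The recursion splits the configurations into two halves (the construction of chunk1 from the lower half is elided in this listing), reduces each half with a tightened threshold, and merges the two reduced halves with add_SVD. A self-contained toy of that control flow, where a "configuration" is reduced to just its weight list, reduce means discarding the smallest weights within the threshold, and merge means concatenation; this mirrors the recursion and threshold splitting, not the actual SVD arithmetic of add_SVD():

#include <algorithm>
#include <cstddef>
#include <functional>
#include <utility>
#include <vector>

using Weights = std::vector<double>;

// discard the smallest weights whose combined 2-norm stays below thresh
Weights truncate(Weights w, double thresh) {
    std::sort(w.begin(), w.end(), std::greater<double>());
    double residual = 0.0;
    while (!w.empty()) {
        residual += w.back() * w.back();
        if (residual > thresh * thresh) break;
        w.pop_back();
    }
    return w;
}

Weights reduce_sketch(Weights w, double thresh) {
    const std::size_t chunksize = 8;
    if (w.size() <= chunksize) return truncate(std::move(w), thresh);
    Weights lo(w.begin(), w.begin() + w.size() / 2);
    Weights hi(w.begin() + w.size() / 2, w.end());
    lo = reduce_sketch(std::move(lo), thresh * 0.5);   // half the error budget per half,
    hi = reduce_sketch(std::move(hi), thresh * 0.5);   // so the combined error stays within thresh
    lo.insert(lo.end(), hi.begin(), hi.end());
    return truncate(std::move(lo), thresh);            // final merge plus truncation
}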
// orthonormalize this, using ortho3 on the flattened vectors
if (has_no_data()) return;
tensorT v0=flat_vector(0);
tensorT v1=flat_vector(1);
ortho3(v0,v1,weights_,thresh);
rank_=weights_.size();
MADNESS_ASSERT(rank_>=0);
this->make_structure();

// append rhs to this, with rhs's weights scaled by fac (no compression)
void append(const SRConf<T>& rhs, const double fac=1.0) {
    if (rhs.has_no_data()) return;
    if (this->has_no_data()) {
    const long newRank=this->rank()+rhs.rank();
    const long lhsRank=this->rank();
    const long rhsRank=rhs.rank();
    this->weights_(Slice(lhsRank,newRank-1))=rhs.weights_(Slice(0,rhsRank-1))*fac;
    s[0]=Slice(lhsRank,newRank-1);
    for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
        vector_[idim](s)=rhs.vector_[idim](rhs.c0());
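append() simply concatenates the two configurations: the new rank is lhsRank+rhsRank, the rhs weights enter scaled by fac, and the rhs factor rows are copied below the existing rows in every dimension group. A minimal sketch of that bookkeeping on flat buffers (illustrative types, not the MADNESS API):

#include <cstddef>
#include <vector>

// weights: length rank; factors: one flat (rank x kvec) buffer per dimension group
struct FlatConfigSketch {
    std::vector<double> weights;
    std::vector<std::vector<double>> factors;
};

void append_sketch(FlatConfigSketch& lhs, const FlatConfigSketch& rhs, double fac) {
    for (double w : rhs.weights) lhs.weights.push_back(fac * w);     // weights_(Slice(lhsRank,newRank-1)) = fac*rhs
    for (std::size_t idim = 0; idim < lhs.factors.size(); ++idim)    // stack the rhs rows below the lhs rows
        lhs.factors[idim].insert(lhs.factors[idim].end(),
                                 rhs.factors[idim].begin(), rhs.factors[idim].end());
}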
// add_SVD: add rhs to this; the combined representation is re-orthogonalized (see ortho5 below)
void add_SVD(const SRConf<T>& rhs, const double& thresh) {
    if (rhs.has_no_data()) return;
    if (check_orthonormality) check_right_orthonormality();
    if (check_orthonormality) rhs.check_right_orthonormality();
    this->undo_structure();
    // (the first half of the ortho5(..) call is elided in this listing)
            rhs.flat_vector(0),rhs.flat_vector(1),rhs.weights_,thresh);
    rank_=weights_.size();

// inplace_add: alpha*this(lhs_s) += beta*rhs2(rhs_s), implemented by appending rhs2's scaled configurations
void inplace_add(const SRConf<T>& rhs2, std::vector<Slice> lhs_s,
        std::vector<Slice> rhs_s, const double alpha, const double beta) {
    if (rhs2.has_no_data()) return;
    vector_[0](lhs_s)+=rhs2.vector_[0](rhs_s);
    SRConf<T>& lhs=*this;
    const SRConf<T>& rhs=rhs2;
    if (lhs.has_no_data()) lhs.make_structure(true);
    MADNESS_ASSERT(lhs.has_structure() or (lhs.has_no_data()));
    MADNESS_ASSERT(rhs.has_structure());
    MADNESS_ASSERT(alpha==1.0);
    const long lhsRank=lhs.rank();
    const long rhsRank=rhs.rank();
    const long newRank=lhs.rank()+rhs.rank();
    const long rhs_k=rhs.get_k();
    const long lhs_k=lhs.get_k();
    const long dim_pv=lhs.dim_per_vector();
    // make negative slice ends positive and check that the patches match
    for (unsigned int idim=0; idim<lhs.dim(); idim++) {
        if (lhs_s[idim].end<0) lhs_s[idim].end+=lhs_k;
        if (rhs_s[idim].end<0) rhs_s[idim].end+=rhs_k;
        MADNESS_ASSERT((lhs_s[idim].end-lhs_s[idim].start) == (rhs_s[idim].end-rhs_s[idim].start));
        MADNESS_ASSERT(lhs_k>=(rhs_s[idim].end-rhs_s[idim].start+1));
    lhs.reserve(newRank);
    if (alpha!=1.0) lhs.scale(alpha);
    lhs.weights_(Slice(lhsRank,newRank-1))=rhs.weights_(Slice(0,rhsRank-1))*beta;
    for (unsigned int idim=0; idim<lhs.dim_eff(); idim++) {
        lhs.ref_vector(idim)(Slice(lhsRank,newRank-1),lhs_s[idim])=
                rhs.ref_vector(idim)(Slice(0,rhsRank-1),rhs_s[idim]);
        } else if (dim_pv==2) {
            lhs.ref_vector(idim)(Slice(lhsRank,newRank-1),lhs_s[2*idim],lhs_s[2*idim+1])=
                    rhs.ref_vector(idim)(Slice(0,rhsRank-1),rhs_s[2*idim],rhs_s[2*idim+1]);
        } else if (dim_pv==3) {
            lhs.ref_vector(idim)(Slice(lhsRank,newRank-1),lhs_s[3*idim],lhs_s[3*idim+1],lhs_s[3*idim+2])=
                    rhs.ref_vector(idim)(Slice(0,rhsRank-1),rhs_s[3*idim],rhs_s[3*idim+1],rhs_s[3*idim+2]);

// deep copy of rhs, shrunk to its current rank
if (rhs.has_no_data()) return SRConf<T>(rhs.dim(),rhs.get_k(),rhs.type());
std::vector<tensorT> vector(rhs.dim_eff());
for (unsigned int idim=0; idim<rhs.dim_eff(); idim++)

// rebuild the slices s_ (s_[0] spans the current rank)
if (this->has_no_data()) {
    s_.resize(vector_[0].ndim());
    s_[0]=Slice(0,this->rank()-1);
    for (int i=1; i<vector_[0].ndim(); i++) {

// make_structure: reshape the flat vectors to (rank, k, .., k)
void make_structure(bool force=false) {
    if ((not force) and this->has_no_data()) return;
    MADNESS_ASSERT(dim_pv>0 and dim_pv<=3);
    int rr=weights_.dim(0);
    if (weights_.size()==0) rr=0;
    const int k=this->get_k();
    for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
        if (dim_pv==2) this->vector_[idim]=vector_[idim].reshape(rr,k,k);
        if (dim_pv==3) this->vector_[idim]=vector_[idim].reshape(rr,k,k,k);

// undo_structure: flatten the vectors back to (rank, kVec)
void undo_structure(bool force=false) {
    if ((not force) and this->has_no_data()) return;
    MADNESS_ASSERT(dim_pv>0 and dim_pv<=3);
    int rr=weights_.dim(0);
    if (weights_.size()==0) rr=0;
    const int kvec=this->kVec();
    for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
        this->vector_[idim]=this->vector_[idim].reshape(rr,kvec);
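make_structure() and undo_structure() only reshape each factor between the structured (rank, k, .., k) view and the flat (rank, kVec) view with kVec = k^dim_per_vector; the coefficient values are unchanged, only the way indices map into the contiguous buffer differs. A small sketch of the equivalent index arithmetic for dim_per_vector==2 (illustrative helpers, not part of the class):

#include <cstddef>

// offset of element (r,i,j) in the structured (rank, k, k) view ...
inline std::size_t structured_offset(std::size_t r, std::size_t i, std::size_t j, std::size_t k) {
    return (r * k + i) * k + j;
}

// ... equals the offset of element (r, i*k+j) in the flattened (rank, k*k) view
inline std::size_t flat_offset(std::size_t r, std::size_t ij, std::size_t k) {
    return r * (k * k) + ij;
}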
// return reference to one of the vectors F
Tensor<T>& ref_vector(const unsigned int& idim) {
    return vector_[idim];

const Tensor<T>& ref_vector(const unsigned int& idim) const {
    return vector_[idim];

// return one of the vectors F, flattened to (rank, kVec)
const Tensor<T> flat_vector(const unsigned int& idim) const {
    MADNESS_ASSERT(rank()>0);
    return vector_[idim](c0()).reshape(rank(),kVec());

Tensor<T> flat_vector(const unsigned int& idim) {
    MADNESS_ASSERT(rank()>0);
    return vector_[idim](c0()).reshape(rank(),kVec());

// fill this with random weights and vectors
void fillWithRandom(const long& rank=1) {
    weights_=Tensor<double>(rank);
    for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
        vector_[idim]=Tensor<T>(rank_,this->kVec());
        vector_[idim].fillrandom();
    for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
        vector_[idim].scale(madness::RandomValue<T>()*scalar_type(10.0));
    weights_(Slice(0,this->rank()-1)).fillrandom().scale(10.0);

// normalize each configuration's vectors to unit Frobenius norm
if (rank()==0) return;
const unsigned int rank=this->rank();
for (unsigned int r=0; r<rank; r++) {
    for (unsigned int idim=0; idim<dim_eff(); idim++) {
        const double norm=this->vector_[idim](s).normf();
        const double fac=norm;
        double oofac=1.0/fac;
        if (fac<1.e-13) oofac=0.0;
        vector_[idim](s).scale(oofac);

// check that the rows of the (flattened) second vector are orthonormal
bool check_right_orthonormality() const {
    if (rank()==0) return true;
    const tensorT t1=ref_vector(1)(c0()).reshape(rank(),kVec());
    tensorT S=inner(t1,t1,1,1);
    for (int i=0; i<S.dim(0); i++) S(i,i)-=1.0;
    double norm=S.normf();
    double small=sqrt(norm*norm/S.size());
    return (small<1.e-13);
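check_right_orthonormality() forms the Gram matrix S = t1 t1^T of the flattened factor, subtracts the identity, and accepts if the root-mean-square entry of the difference is below 1e-13. A stand-alone version of the same test with plain loops on a row-major (rank x kvec) buffer (illustration only, not the MADNESS API):

#include <cmath>
#include <cstddef>
#include <vector>

bool rows_orthonormal_sketch(const std::vector<double>& t,
                             std::size_t rank, std::size_t kvec, double tol = 1.e-13) {
    double norm2 = 0.0;
    for (std::size_t i = 0; i < rank; ++i) {
        for (std::size_t j = 0; j < rank; ++j) {
            double s = 0.0;
            for (std::size_t c = 0; c < kvec; ++c) s += t[i*kvec + c] * t[j*kvec + c];
            if (i == j) s -= 1.0;                  // S(i,i) -= 1
            norm2 += s * s;                        // accumulate ||S - I||_F^2
        }
    }
    return std::sqrt(norm2 / double(rank * rank)) < tol;   // RMS deviation from the identity
}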
// return whether the vectors are currently flattened to (rank, kVec)
bool is_flat() const {
    return (vector_[0].ndim()==2);

// return if this has a tensor structure (has not been flattened)
return (type()==TT_FULL or has_no_data() or vector_[0].dim(1)==this->get_k());

// return the number of dimensions (the order of the tensor)
unsigned int dim() const {return dim_;}

// return the number of vectors (the effective dimension)
unsigned int dim_eff() const {return vector_.size();}

// return the rank of this
long rank() const {return rank_;};

// return k, the length of each physical dimension
unsigned int get_k() const {return maxk_;};

// kVec: flat length of one vector per configuration, k^dim_per_vector
for (int i=0; i<dimpv; ++i) kv*=this->get_k();

// return the number of physical dimensions per vector
const int nvec=vector_.size();
const int dim=this->dim();
MADNESS_ASSERT(dim%nvec==0);
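The dimension bookkeeping implied by kVec() and dim_per_vector(): a dim-dimensional tensor of order k is split over nvec = dim_eff() factor groups, each covering dim/nvec physical dimensions and therefore k^(dim/nvec) coefficients per rank entry. For example, dim=6, k=10 in TT_2D (nvec=2) gives dim_per_vector=3 and kVec=1000. A minimal sketch of those two helpers (illustrative free functions, not the class API):

// dim must be divisible by nvec (asserted in the real code)
inline int dim_per_vector_sketch(int dim, int nvec) { return dim / nvec; }

inline long kvec_sketch(int dim, int nvec, int k) {
    long kv = 1;
    for (int i = 0; i < dim / nvec; ++i) kv *= k;   // k^(dim/nvec)
    return kv;
}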
// number of coefficients stored in this configuration
unsigned int nCoeff() const {
    return this->dim_eff()*this->kVec()*this->rank();

// actual memory footprint in bytes
size_t real_size() const {
    for (size_t i=0; i<vector_.size(); ++i) {
        n+=vector_[i].size()*sizeof(T) + sizeof(tensorT);
    n+=weights_.size()*sizeof(double) + sizeof(Tensor<double>);
    n+=s_.size()*sizeof(Slice);

// overlap (trace) between two configurations
if ((lhs.has_no_data()) or (rhs.has_no_data())) return 0.0;
MADNESS_ASSERT(rhs.dim()==lhs.dim());
MADNESS_ASSERT(rhs.dim()>0);
return rhs.ref_vector(0).trace(lhs.ref_vector(0));   // full representation: plain trace
const unsigned int dim_eff=rhs.dim_eff();
Tensor<resultT> weightMatrix=outer(lhs.weights_(Slice(0,lhs.rank()-1)),
        rhs.weights_(Slice(0,rhs.rank()-1)));
for (unsigned int idim=0; idim<dim_eff; idim++) {
    const Tensor<T> lhs2=lhs.flat_vector(idim);
    const Tensor<Q> rhs2=rhs.flat_vector(idim);
    Tensor<resultT> ovlp(lhs.rank(),rhs.rank());
    weightMatrix.emul(ovlp);
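For two low-rank configurations the overlap above is assembled as the outer product of the weight vectors, multiplied element-wise with the per-dimension factor overlaps and summed over all rank pairs: <lhs,rhs> = sum_{r,r'} w_lhs(r) w_rhs(r') prod_idim <lhs_r^idim, rhs_r'^idim>. A stand-alone version with plain loops over row-major (rank x kvec) factors (real-valued, illustration only):

#include <cstddef>
#include <vector>

double overlap_sketch(const std::vector<double>& wl,
                      const std::vector<std::vector<double>>& fl,   // per idim: rank_l * kvec
                      const std::vector<double>& wr,
                      const std::vector<std::vector<double>>& fr,   // per idim: rank_r * kvec
                      std::size_t kvec) {
    double result = 0.0;
    for (std::size_t r = 0; r < wl.size(); ++r) {
        for (std::size_t q = 0; q < wr.size(); ++q) {
            double term = wl[r] * wr[q];                 // entry of outer(w_lhs, w_rhs)
            for (std::size_t idim = 0; idim < fl.size(); ++idim) {
                double ovlp = 0.0;
                for (std::size_t c = 0; c < kvec; ++c)
                    ovlp += fl[idim][r*kvec + c] * fr[idim][q*kvec + c];
                term *= ovlp;                            // emul with the per-dimension overlap
            }
            result += term;
        }
    }
    return result;
}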
// Frobenius norm in the SVD form: the 2-norm of the weights (valid for orthonormal vectors)
typename TensorTypeData<T>::float_scalar_type svd_normf() const {
    if (has_no_data()) return 0.0;
    return weights_(Slice(0,rank()-1)).normf();

// Frobenius norm, computed from the overlap of this with itself
typename TensorTypeData<T>::float_scalar_type normf() const {
    if (has_no_data()) return 0.0;
    MADNESS_ASSERT(dim()>0);
    MADNESS_ASSERT(not TensorTypeData<T>::iscomplex);
    for (unsigned int idim=0; idim<dim_eff(); idim++) {
        const Tensor<T> vec=flat_vector(idim);
        Tensor<T> ovlp(rank(),rank());
        weightMatrix.emul(ovlp);
    typedef typename TensorTypeData<T>::float_scalar_type resultT;
    const resultT overlap=std::abs(weightMatrix.sum());
    return sqrt(overlap);

void scale(const double& fac) {weights_.scale(fac);};

// transform: apply a transformation matrix to every dimension (TT_FULL handled separately)
if (this->has_no_data()) {
if (type()==TT_FULL) {
SRConf<T> result=copy(*this);
for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
    for (unsigned int jdim=1; jdim<this->ref_vector(idim).ndim(); jdim++) {
MADNESS_ASSERT(result.has_structure());

// general_transform: apply a distinct matrix c[i] to each dimension
if (this->has_no_data()) return SRConf<T>(copy(*this));
SRConf<T> result=copy(*this);
print("no structure!");
for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
    for (unsigned int jdim=1; jdim<this->ref_vector(idim).ndim(); jdim++) {
        result.ref_vector(idim)=madness::inner(result.ref_vector(idim),c[i],1,0);
MADNESS_ASSERT(result.has_structure());

// transform_dir: transform only the dimension given by axis
SRConf<T> transform_dir(const Tensor<T>& c, const int& axis) const {
    if (this->has_no_data()) {
        return SRConf<T>(copy(*this));
    SRConf<T> result=copy(*this);
    MADNESS_ASSERT(c.ndim()==2);
    MADNESS_ASSERT(result.has_structure());
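Each of these transforms boils down to contracting one k-index of a factor with a k x k matrix, which is what madness::inner(result.ref_vector(idim), c[i], 1, 0) effectively does above: the contracted index is replaced by the transformed one. A stand-alone sketch of that single contraction for a two-index (rank x k) factor, out(r,j) = sum_i in(r,i) c(i,j), with plain loops (illustration only):

#include <cstddef>
#include <vector>

std::vector<double> transform_axis_sketch(const std::vector<double>& in,   // rank x k, row-major
                                          const std::vector<double>& c,    // k x k, row-major
                                          std::size_t rank, std::size_t k) {
    std::vector<double> out(rank * k, 0.0);
    for (std::size_t r = 0; r < rank; ++r)
        for (std::size_t j = 0; j < k; ++j)
            for (std::size_t i = 0; i < k; ++i)
                out[r*k + j] += in[r*k + i] * c[i*k + j];   // contract the factor's k-index with c
    return out;
}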
// ortho3: sophisticated version of ortho2; re-orthogonalize the representation
// x, y, weights in place, reducing the rank where possible
template<typename T>
void ortho3(Tensor<T>& x, Tensor<T>& y, Tensor<double>& weights, const double& thresh) {
    const long rank=x.dim(0);
    const double w_max=weights.absmax()*rank;

    // overlap (Gram) matrices of the two vector sets
    tensorT S1=inner(x,x,1,1);
    tensorT S2=inner(y,y,1,1);

    // e1, e2: eigenvalues of S1 and S2 (the eigensolver call is elided in this listing)
    Tensor<double> e1, e2;
    const double e1_max=e1.absmax();
    const double e2_max=e2.absmax();

    // fast return if the whole contribution is negligible
    if ((e1_max*w_max<thresh) or (e2_max*w_max<thresh)) {

    Tensor<double> sqrt_e1(rank), sqrt_e2(rank);
    // count how many small eigenvalues can be discarded (lo1, lo2 declared in elided lines)
    for (unsigned int r=0; r<rank; r++) {
        if (e1(r)*w_max<thresh) lo1=r+1;
        if (e2(r)*w_max<thresh) lo2=r+1;
        sqrt_e1(r)=sqrt(std::abs(e1(r)));
        sqrt_e2(r)=sqrt(std::abs(e2(r)));

    sqrt_e1=sqrt_e1(Slice(lo1,-1));
    sqrt_e2=sqrt_e2(Slice(lo2,-1));
    unsigned int rank1=rank-lo1;
    unsigned int rank2=rank-lo2;
    MADNESS_ASSERT(sqrt_e1.size()==rank1);
    MADNESS_ASSERT(sqrt_e2.size()==rank2);

    // middle matrix M = diag(sqrt_e1) U1^T diag(weights) U2 diag(sqrt_e2)
    tensorT M(rank1,rank2);
    for (unsigned int i=0; i<rank1; i++) {
        for (unsigned int j=0; j<rank2; j++) {
            for (unsigned int r=0; r<rank; r++) {
                M(i,j)+=U1(r,i)*sqrt_e1(i)*weights(r)*U2(r,j) * sqrt_e2(j);

    // rescale by the inverse square roots of the eigenvalues (loop bodies elided)
    for (unsigned int r=0; r<rank1; r++) {
        double fac=1.0/sqrt_e1(r);
        for (unsigned int t=0; t<rank; t++) {
    for (unsigned int r=0; r<rank2; r++) {
        double fac=1.0/sqrt_e2(r);
        for (unsigned int t=0; t<rank; t++) {

    // back-transform the SVD factors of M with U1 and U2
    Up=inner(Up,U1,0,1);
    VTp=inner(VTp,U2,1,1);

    // keep only the leading singular values
    weights=Sp(Slice(0,i));
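Whatever the internal steps, the contract of ortho3() (and of ortho5() below) is that the represented matrix sum_r w(r) x(r,:) (x) y(r,:) is preserved to within thresh while the factors are re-orthogonalized and the rank is reduced where possible. A stand-alone helper that checks the preservation part of that contract by comparing two representations entry-wise (plain loops, illustration only):

#include <cmath>
#include <cstddef>
#include <vector>

// Frobenius-norm difference between A1(I,J) = sum_r w1[r]*x1[r][I]*y1[r][J]
// and the analogously defined A2; should be <= thresh after a reduction step.
double representation_difference(const std::vector<double>& w1,
                                 const std::vector<std::vector<double>>& x1,
                                 const std::vector<std::vector<double>>& y1,
                                 const std::vector<double>& w2,
                                 const std::vector<std::vector<double>>& x2,
                                 const std::vector<std::vector<double>>& y2,
                                 std::size_t kvec) {
    double diff2 = 0.0;
    for (std::size_t I = 0; I < kvec; ++I) {
        for (std::size_t J = 0; J < kvec; ++J) {
            double a = 0.0, b = 0.0;
            for (std::size_t r = 0; r < w1.size(); ++r) a += w1[r] * x1[r][I] * y1[r][J];
            for (std::size_t r = 0; r < w2.size(); ++r) b += w2[r] * x2[r][I] * y2[r][J];
            diff2 += (a - b) * (a - b);
        }
    }
    return std::sqrt(diff2);
}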
// ortho5: specialized version of ortho3; add the second configuration (x2,y2,w2)
// to the first (x1,y1,w1) and re-orthogonalize the combined representation
template<typename T>
void ortho5(Tensor<T>& x1, Tensor<T>& y1, Tensor<double>& w1,
        const Tensor<T>& x2, const Tensor<T>& y2, const Tensor<double>& w2,
        const double& thresh) {
    const long rank1=x1.dim(0);
    const long rank2=x2.dim(0);
    const long rank=rank1+rank2;
    const Slice s0(0,rank1-1), s1(rank1,rank-1);

    const double w_max=std::max(w1.absmax(),w2.absmax());
    const double norm_max=w_max*rank;

    // mixed overlap matrices between the two configurations
    tensorT Sx12=inner(x1,x2,1,1);
    tensorT Sy12=inner(y1,y2,1,1);

    // assemble the overlap matrices of the concatenated configurations (body elided)
    tensorT Sx(rank,rank);
    tensorT Sy(rank,rank);
    for (long i=0; i<rank; i++) {

    // e1, e2: eigenvalues of Sx and Sy (the eigensolver call is elided in this listing)
    Tensor<double> e1, e2;
    const double e1_max=e1.absmax();
    const double e2_max=e2.absmax();

    // fast return if the whole contribution is negligible
    if ((e1_max*norm_max<thresh) or (e2_max*norm_max<thresh)) {

    Tensor<double> sqrt_e1(rank), sqrt_e2(rank);
    // count how many small eigenvalues can be discarded (lo1, lo2 declared in elided lines)
    for (unsigned int r=0; r<rank; r++) {
        if (e1(r)<thresh/norm_max) lo1=r+1;
        else sqrt_e1(r)=sqrt(std::abs(e1(r)));
        if (e2(r)<thresh/norm_max) lo2=r+1;
        else sqrt_e2(r)=sqrt(std::abs(e2(r)));

    sqrt_e1=sqrt_e1(Slice(lo1,-1));
    sqrt_e2=sqrt_e2(Slice(lo2,-1));
    unsigned int rank_x=rank-lo1;
    unsigned int rank_y=rank-lo2;

    // weight the rows of U1 with the configuration weights
    tensorT UU1=copy(U1);
    for (unsigned int i=0; i<rank1; ++i) UU1(i,_)*=w1(i);
    for (unsigned int i=rank1; i<rank; ++i) UU1(i,_)*=w2(i-rank1);

    tensorT M=inner(UU1,U2,0,0);
    tensorT ee=outer(sqrt_e1,sqrt_e2);

    // rescale by the inverse square roots of the eigenvalues (loop bodies elided)
    for (unsigned int r=0; r<rank_x; r++) {
        double fac=1.0/sqrt_e1(r);
    for (unsigned int r=0; r<rank_y; r++) {
        double fac=1.0/sqrt_e2(r);

    // back-transform the SVD factors of M with U1 and U2
    Up=inner(Up,U1,0,1);
    VTp=inner(VTp,U2,1,1);

    // determine the surviving rank (same truncation rule as max_sigma)
    double residual=0.0;
    for (i=Sp.dim(0)-1; i>=0; i--) {
        residual+=Sp(i)*Sp(i);
        if (residual>thresh*thresh) break;

    // apply the truncated transformation matrices to the vectors
    x1=inner(Up1,x1,0,0);
    y1=inner(VTp1,y1,0,0);

// pretty print
template<typename T>
std::ostream& operator<<(std::ostream& s, const SRConf<T>& sr) {
    s << "dim_ " << sr.dim_ << "\n";
    s << "rank_ " << sr.rank_ << "\n";
    s << "maxk_ " << sr.maxk_ << "\n";
    s << "vector_.size() " << sr.vector_.size() << "\n";
    s << "has_data() " << sr.has_data() << "\n";
    s << "TensorType " << sr.type() << "\n\n";