#ifndef TENSORTRAIN_H_
#define TENSORTRAIN_H_

// headers from the MADNESS tree (paths may differ between versions):
// Tensor, the Tensor/LAPACK interface (svd, lq), and SRConf::max_sigma
#include <tensor/tensor.h>
#include <tensor/tensor_lapack.h>
#include <tensor/srconf.h>
namespace madness {

    /// a tensor train (TT) representation of a full-rank tensor
    template<typename T>
    class TensorTrain {

        /// the core tensors of the TT representation, with shapes
        /// (k,r1), (r1,k,r2), ..., (r[d-1],k)
        std::vector<Tensor<T> > core;

        /// true if all TT ranks are zero, i.e. this tensor is zero
        bool zero_rank;

    public:
        /// ctor for a TensorTrain, with the tolerance eps
        TensorTrain(const Tensor<T>& t, double eps)
            : core(), zero_rank(false) {

            MADNESS_ASSERT(t.size() != 0);
            MADNESS_ASSERT(t.ndim() != 0);

            std::vector<long> dims(t.ndim());
            for (int d=0; d<t.ndim(); ++d) dims[d]=t.dim(d);
            decompose(t,eps,dims);
        }
        /// ctor for a TensorTrain, with the tolerance eps and explicit dimensions

        /// @param[in] t    the full tensor to be decomposed
        /// @param[in] eps  the accuracy threshold
        /// @param[in] dims the tt dimensions
        TensorTrain(const Tensor<T>& t, double eps, const std::vector<long> dims)
            : core(), zero_rank(false) {

            MADNESS_ASSERT(t.size() != 0);
            MADNESS_ASSERT(t.ndim() != 0);
            decompose(t,eps,dims);
        }
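        // Usage sketch (illustrative; tensor sizes and tolerance are
        // assumptions, not from the original source):
        //
        //     Tensor<double> t(10,10,10,10);
        //     t.fillrandom();
        //     TensorTrain<double> tt(t,1.e-6);
        //     double err=(t-tt.reconstruct()).normf();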
        /// decompose the input tensor into a TT representation

        /// @param[in] t    the tensor to be decomposed
        /// @param[in] eps  the accuracy threshold
        /// @param[in] dims the tt dimensions
        void decompose(const Tensor<T>& t, double eps,
                const std::vector<long>& dims) {

            core.resize(dims.size());
            // distribute the error budget over the dims.size()-1 SVDs
            eps=eps/sqrt(dims.size()-1);
            // the maximum TT rank at each cut is bounded by the dimension
            // products accumulated from the left (rmax1) and from the right
            // (rmax2); at the same time size the work array for *gesvd
            int rmax1=1;
            int rmax2=1;
            long lwork=0;
            for (int i=0, j=dims.size()-1; i<=j; ++i, --j) {
                rmax1*=dims[i];
                rmax2*=dims[j];

                // *gesvd workspace for the unfoldings at cuts i and j
                long m1=t.size()/dims[i];
                long m2=t.size()/dims[j];
                lwork=std::max(lwork,std::max(3*std::min(long(rmax1),m1)
                        +std::max(long(rmax1),m1),5*std::min(long(rmax1),m1)));
                lwork=std::max(lwork,std::max(3*std::min(long(rmax2),m2)
                        +std::max(long(rmax2),m2),5*std::min(long(rmax2),m2)));
            }
            const int rmax=std::min(rmax1,rmax2);
            // scratch tensors with maximal dimensions, to avoid frequent
            // reallocation during the sweep
            Tensor<T> u(rmax1*rmax2);
            Tensor<T> vt(t.size());
            Tensor< typename Tensor<T>::scalar_type > s(rmax);

            // c holds the remainder tensor; it is destroyed in each SVD,
            // and Tensor assignment is shallow, hence the deep copy
            Tensor<T> c=copy(t);

            // work array for *gesvd
            Tensor<T> work(lwork);
            // r[d] keeps track of the TT ranks; the border ranks are 1
            std::vector<long> r(dims.size()+1,0l);
            r[0] = r[dims.size()] = 1;
            for (std::size_t d=1; d<dims.size(); ++d) {

                // the core tensors will have dimensions
                // (rank_left, k, rank_right), in short (r1,k,r2)
                const long k=dims[d-1];
                const long d1=r[d-1]*k;
                c=c.reshape(d1,c.size()/d1);
                const long rmax=std::min(c.dim(0),c.dim(1));

                // deep copy of the current unfolding for the consistency
                // check below, since the SVD destroys its input
                Tensor<T> aa=copy(c);

                // SVD of the unfolding, c = U * diag(s) * VT, reusing the
                // preallocated buffers u, s, vt and the work array
                svd_result(c,u,s,vt,work);

                // number of singular values kept for the threshold eps:
                // this is the right rank r[d] of the current core
                r[d]=SRConf<T>::max_sigma(eps,rmax,s)+1;
                const long rank=r[d];

                // the left singular vectors, cut out of the flat buffer
                Tensor<T> uu=(u(Slice(0,c.dim(0)*rmax-1))).reshape(c.dim(0),rmax);
                // the right singular vectors, cut out of the flat buffer
                Tensor<T> vtt=(vt(Slice(0,rmax*c.dim(1)-1))).reshape(rmax,c.dim(1));

                // consistency check (debug output): rebuild the unfolding
                // from all rmax singular triples and compare to the original
                const long vtdim=c.dim(1);
                Tensor<T> b(d1,vtdim);
                for (long i=0; i<c.dim(0); ++i)
                    for (long j=0; j<c.dim(1); ++j)
                        for (long k=0; k<rmax; ++k)
                            b(i,j) += uu(i,k) * T(s(k)) * vtt(k,j);
                b-=aa;
                print("b.conforms c",b.conforms(c));
                print("early error:",b.absmax());
                if (rank>0) {

                    // the truncated left factor becomes the current core
                    // tensor, with shape (r[d-1], k, r[d])
                    core[d-1]=copy((u(Slice(0,c.dim(0)*rmax-1)))
                            .reshape(c.dim(0),rmax)(_,Slice(0,rank-1)));
                    core[d-1]=core[d-1].reshape(r[d-1],k,r[d]);
                    // the remainder diag(s)*VT, truncated to the first
                    // rank rows, is passed on to the next iteration
                    c=Tensor<T>(rank,vtt.dim(1));
                    for (int i=0; i<rank; ++i) {
                        for (int j=0; j<c.dim(1); ++j) {
                            c(i,j)=T(s(i))*vtt(i,j);
                        }
                    }

                    // the remainder of the last split is the last core
                    if (d == dims.size()-1) core[d]=c;
                }
                else {
                    // all singular values fall below the threshold:
                    // this tensor is numerically zero
                    zero_rank=true;
                    core[d-1] = Tensor<T>(r[d-1],k,long(0));

                    // fast-forward through the remaining dimensions
                    // with zero-rank cores
                    for(++d; d<dims.size(); ++d) {
                        const long k=dims[d-1];
                        core[d-1] = Tensor<T>(long(0),k,long(0));
                    }
                    core[dims.size()-1] = Tensor<T>(long(0),dims[dims.size()-1]);
                }
            }
            // fuse the dummy left rank of the first core: (1,k,r1) -> (k,r1)
            core[0]=core[0].fusedim(0);
        }
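        // Shape bookkeeping for a concrete case (illustrative): for
        // dims={4,5,6} the first split works on the (4 x 30) unfolding and
        // yields core[0]=(1,4,r1), fused to (4,r1) above; the second split
        // works on the (r1*5 x 6) unfolding and yields core[1]=(r1,5,r2);
        // the remaining factor diag(s)*VT becomes core[2]=(r2,6).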
        /// inplace addition of two TensorTrains; will increase the ranks of this
        TensorTrain<T>& operator+=(const TensorTrain<T>& rhs) {

            // make sure dimensions conform
            MADNESS_ASSERT(this->ndim()==rhs.ndim());

            if (this->zero_rank) {
                // this is zero: the sum is simply rhs
                *this=rhs;
            } else if (rhs.zero_rank) {
                ;   // rhs is zero: nothing to do
            } else {
                // first core: (k, r1_this) and (k, r1_rhs)
                // -> (k, r1_this + r1_rhs)
                {
                    long k=core[0].dim(0);
                    long r1_this=core[0].dim(1);
                    long r1_rhs=rhs.core[0].dim(1);
                    Tensor<T> core_new(k,r1_this+r1_rhs);
                    core_new(_,Slice(0,r1_this-1))=core[0];
                    core_new(_,Slice(r1_this,r1_this+r1_rhs-1))=rhs.core[0];
                    core[0]=core_new;
                }
                // middle cores: this and rhs occupy the diagonal blocks
                // of the new core (r0_this+r0_rhs, k, r1_this+r1_rhs)
                for (std::size_t i=1; i<core.size()-1; ++i) {
                    MADNESS_ASSERT(core[i].ndim()==3 or i==core.size()-1);
                    long r0_this=core[i].dim(0);
                    long r0_rhs=rhs.core[i].dim(0);
                    long k=core[i].dim(1);
                    long r1_this=core[i].dim(2);
                    long r1_rhs=rhs.core[i].dim(2);
                    Tensor<T> core_new(r0_this+r0_rhs,k,r1_this+r1_rhs);
                    core_new(Slice(0,r0_this-1),_,Slice(0,r1_this-1))=core[i];
                    core_new(Slice(r0_this,r0_this+r0_rhs-1),_,
                            Slice(r1_this,r1_this+r1_rhs-1))=rhs.core[i];
                    core[i]=core_new;
                }
                // last core: (r0_this, k) and (r0_rhs, k)
                // -> (r0_this + r0_rhs, k)
                {
                    std::size_t d=core.size()-1;
                    long r0_this=core[d].dim(0);
                    long r0_rhs=rhs.core[d].dim(0);
                    long k=core[d].dim(1);
                    Tensor<T> core_new(r0_this+r0_rhs,k);
                    core_new(Slice(0,r0_this-1),_)=core[d];
                    core_new(Slice(r0_this,r0_this+r0_rhs-1),_)=rhs.core[d];
                    core[d]=core_new;
                }
            }
            return *this;
        }
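        // Rank arithmetic of the addition (illustrative): summands with TT
        // ranks (3,4,3) and (2,2,2) give a sum with ranks (5,6,5); call
        // truncate(eps) afterwards to recompress to near-optimal ranks.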
        /// merge two dimensions into one

        /// merge dimension i and i+1 into new dimension i
        /// @param[in] i the first of the two dimensions to be merged
        void fusedim(const long i) {

            // core_new = left * right:
            // (r0, k1*k2, r2) = sum_r1 (r0, k1, r1) * (r1, k2, r2)

            // index of the dimension of core[i] to be fused with the next
            const int index=core[i].ndim()-2;

            if (not zero_rank) core[i]=inner(core[i],core[i+1]);
            core[i]=core[i].fusedim(index);

            // shift all subsequent cores to the left and drop the last one
            for (std::size_t d=i+1; d<core.size()-1; ++d) core[d]=core[d+1];
            core.pop_back();
        }
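        // Example (illustrative): for cores (4,r1), (r1,5,r2), (r2,6) a
        // call fusedim(0) contracts the first two cores over r1 and fuses
        // the modes 4 and 5, leaving the cores (20,r2) and (r2,6).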
        /// reconstruct this to a full representation

        /// @param[in] flat return this in flat representation
        /// @return this in full rank representation
        Tensor<T> reconstruct(const bool flat=false) const {

            if (zero_rank) {
                // return an all-zero tensor of the correct dimensions
                if (flat) {
                    long size=core[0].dim(0);
                    for (int i=1; i<this->ndim(); ++i) size*=core[i].dim(1);
                    return Tensor<T>(size);
                }
                std::vector<long> d(this->ndim());
                d[0]=core[0].dim(0);    // the first core has shape (k,r1)
                for (int i=1; i<this->ndim(); ++i) d[i]=core[i].dim(1);
                return Tensor<T>(d);
            }
            Tensor<T> result=core.front();
            typename std::vector<Tensor<T> >::const_iterator it;

            // contract the cores one after the other; fuse the emerging
            // dimensions immediately if a flat tensor is requested
            for (it=++core.begin(); it!=core.end(); ++it) {
                result=inner(result,*it);
                if (flat) result=result.fusedim(0);
            }
            return result;
        }
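        // Note: the chain of contractions grows the intermediate one mode
        // at a time, (k1,r1)*(r1,k2,r2) -> (k1,k2,r2) and so on, so the
        // final steps already operate on full-tensor-sized data;
        // reconstruct() is therefore only affordable for modest dimensions.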
        /// construct a two-mode representation (aka unnormalized SVD)

        /// @param[out] U  the left factor, with the modes of the first half
        /// @param[out] VT the right factor, with the modes of the second half
        /// @param[out] s  vector holding 1's, of length rank
        void two_mode_representation(Tensor<T>& U, Tensor<T>& VT,
                Tensor< typename Tensor<T>::scalar_type >& s) {

            // number of dimensions needs to be even
            MADNESS_ASSERT(ndim()%2==0);

            if (not zero_rank) {
                // contract the first half of the cores into U and the
                // second half into VT
                typename std::vector<Tensor<T> >::const_iterator it1, it2;
                U=core.front();
                VT=core.back();
                for (it1=++core.begin(), it2=--(--core.end()); it1<it2; ++it1, --it2) {
                    U=inner(U,*it1);
                    VT=inner(*it2,VT);
                }
                s=Tensor< typename Tensor<T>::scalar_type >(VT.dim(0));
                s=1.0;
            } else {
                // zero rank: return empty factors of the correct dimensions
                long dim1 = core.front().dim(0);
                long dim2 = core.back().dim(1);
                for (int d1=1, d2=core.size()-2; d1<d2; ++d1, --d2) {
                    dim1 *= core[d1].dim(1);
                    dim2 *= core[d2].dim(1);
                }
                U = Tensor<T>(dim1,long(0));
                VT = Tensor<T>(long(0),dim2);
                s = Tensor< typename Tensor<T>::scalar_type >(VT.dim(0));
            }
        }
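        // Example (illustrative): for a 4-mode train with cores (k1,r1),
        // (r1,k2,r2), (r2,k3,r3), (r3,k4) this yields U of shape (k1,k2,r2),
        // VT of shape (r2,k3,k4), and s=1 of length r2, such that the full
        // tensor is recovered as inner(U,VT), the singular values being
        // absorbed in the unnormalized factors.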
        /// recompress and truncate this TT representation

        /// @param[in] eps the truncation threshold
        void truncate(double eps) {

            if (zero_rank) return;

            // distribute the error budget over the core.size()-1 SVDs
            eps=eps/sqrt(this->ndim()-1);

            // right-to-left orthogonalization sweep using LQ decompositions
            Tensor<T> R;
            long dims[TENSOR_MAXDIM];
            for (std::size_t d=core.size()-1; d>0; --d) {

                // save the original tensor structure
                const long ndim=core[d].ndim();
                for (int i=0; i<ndim; ++i) dims[i]=core[d].dim(i);

                // unfold the core to the matrix (r0, k*r1)
                const long r0=core[d].dim(0);
                core[d]=core[d].reshape(r0,core[d].size()/r0);

                // A = L Q; core[d] is overwritten with the orthonormal Q
                lq(core[d],R);
                dims[0]=core[d].dim(0);
                core[d]=core[d].reshape(ndim,dims);

                // multiply the lower-triangular factor into the next
                // core to the left
                core[d-1]=inner(core[d-1],R);
            }
            // left-to-right truncation sweep using SVDs
            for (std::size_t d=0; d<core.size()-1; ++d) {

                // save the original tensor structure
                const long ndim=core[d].ndim();
                for (int i=0; i<ndim; ++i) dims[i]=core[d].dim(i);

                // unfold the core to the matrix (r0*k, r1)
                long r1=core[d].dim(core[d].ndim()-1);
                core[d]=core[d].reshape(core[d].size()/r1,r1);

                Tensor<T> U,VT;
                const long rmax=std::min(core[d].dim(0),core[d].dim(1));
                Tensor< typename Tensor<T>::scalar_type > s(rmax);

                // decompose the core and determine the truncated rank
                svd(core[d],U,s,VT);
                const long r_truncate=SRConf<T>::max_sigma(eps,rmax,s)+1;
                U=copy(U(_,Slice(0,r_truncate-1)));
                VT=copy(VT(Slice(0,r_truncate-1),_));

                dims[ndim-1]=r_truncate;
                core[d]=U.reshape(ndim,dims);

                // scale the rows of VT with the singular values and push
                // the remainder into the next core to the right
                for (int i=0; i<VT.dim(0); ++i) {
                    for (int j=0; j<VT.dim(1); ++j) {
                        VT(i,j)*=s(i);
                    }
                }
                core[d+1]=inner(VT,core[d+1]);
            }
        }
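        // Usage sketch (illustrative): recompress after rank-increasing
        // arithmetic.
        //
        //     TensorTrain<double> tt(t,thresh);
        //     tt+=tt;               // ranks double
        //     tt.truncate(thresh);  // recompress to near-optimal ranks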
        /// return the number of dimensions
        long ndim() const {return core.size();}
        /// return the number of coefficients in all core tensors
        long size() const {
            if (zero_rank) return 0;
            long n=0;
            typename std::vector<Tensor<T> >::const_iterator it;
            for (it=core.begin(); it!=core.end(); ++it) n+=it->size();
            return n;
        }
        /// return the size of this instance, including static memory for vectors and such
        long real_size() const {
            long n=this->size()*sizeof(T);
            n+=core.size() * sizeof(Tensor<T>);
            n+=sizeof(*this);
            return n;
        }
        /// return the number of entries in dimension i
        long dim(const int i) const {
            if (i==0) return core[0].dim(0);
            return core[i].dim(1);
        }
        /// return true if the TT rank is zero
        bool is_zero_rank() const {return zero_rank;}

        /// return the TT ranks
        std::vector<long> ranks() const {
            // a zero tensor has all core.size()-1 ranks equal to zero
            if (zero_rank) return std::vector<long>(core.size()-1,0l);
            std::vector<long> r(core.size()-1);
            for (std::size_t i=0; i<r.size(); ++i) r[i]=core[i+1].dim(0);
            return r;
        }
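        // Accessor sketch (illustrative):
        //
        //     TensorTrain<double> tt(t,thresh);
        //     print("TT ranks",tt.ranks());
        //     print("compressed size",tt.size(),"full size",t.size());

    };

} // namespace madness

#endif // TENSORTRAIN_H_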