35 #ifndef MADNESS_TENSOR_TENSOR_H__INCLUDED
36 #define MADNESS_TENSOR_TENSOR_H__INCLUDED
67 #define HAVE_GENTENSOR 1
69 #define HAVE_GENTENSOR 0
217 #ifndef HAVE_STD_ABS_LONG
218 #ifndef HAVE_STD_LABS
220 static long abs(long a) {
221 return a>=0 ? a : -a;
226 static long abs(long a) {
235 #define IS_ODD(n) ((n)&0x1)
236 #define IS_UNALIGNED(p) (((unsigned long)(p))&0x7)
240 template <typename Q, bool iscomplex>
242 static Q op(const Q& coeff) {
248 template <typename Q>
250 static Q op(const Q& coeff) {
256 template <typename Q>
266 template <typename T> T mynorm(std::complex<T> t) {
271 template <class T> class SliceTensor;
281 if (tt==TT_FULL) str="full rank tensor";
282 if (tt==TT_2D) str="low rank tensor 2-way";
283 if (tt==TT_NONE) str="no tensor type specified";
293 template <class T> class Tensor : public BaseTensor {
294 template <class U> friend class SliceTensor;
301 void allocate(long nd, const long d[], bool dozero) {
302 _id = TensorTypeData<T>::id;
313 for (int i=0; i<nd; ++i) {
314 TENSOR_ASSERT(d[i]>=0 && d[i]<268435456, "invalid dimension size in new tensor",d[i],0);
318 TENSOR_ASSERT(_size>=0 && _size<268435456, "invalid size in new tensor",_size,0);
321 #define TENSOR_ALIGNMENT 16
323 #define TENSOR_ALIGNMENT 32
325 #define TENSOR_ALIGNMENT 16
328 #ifdef WORLD_GATHER_MEM_STATS
333 _shptr.reset(_p, &::madness::detail::checked_free<T>);
337 std::printf("new failed nd=%ld type=%ld size=%ld\n", nd, id(), _size);
338 std::printf(" %ld %ld %ld %ld %ld %ld\n",
339 d[0], d[1], d[2], d[3], d[4], d[5]);
347 memset((void *) _p, 0, _size*sizeof(T));
349 aligned_zero(_size, _p);
372 typedef typename TensorTypeData<T>::scalar_type scalar_type;
375 typedef typename TensorTypeData<T>::float_scalar_type float_scalar_type;
379 _id = TensorTypeData<T>::id;
391 Tensor(const Tensor<T>& t) {
392 _id = TensorTypeData<T>::id;
406 Tensor<T>& operator=(const Tensor<T>& t) {
422 template <class Q> operator Tensor<Q>() const {
423 Tensor<Q> result = Tensor<Q>(this->_ndim,this->_dim,false);
432 explicit Tensor(long d0) : _p(0) {
434 allocate(1, _dim, true);
441 explicit Tensor(long d0, long d1) : _p(0) {
442 _dim[0] = d0; _dim[1] = d1;
443 allocate(2, _dim, true);
451 explicit Tensor(long d0, long d1, long d2) : _p(0) {
452 _dim[0] = d0; _dim[1] = d1; _dim[2] = d2;
453 allocate(3, _dim, true);
462 explicit Tensor(long d0, long d1, long d2, long d3) : _p(0) {
463 _dim[0] = d0; _dim[1] = d1; _dim[2] = d2; _dim[3] = d3;
464 allocate(4, _dim, true);
474 explicit Tensor(long d0, long d1, long d2, long d3, long d4) : _p(0) {
475 _dim[0] = d0; _dim[1] = d1; _dim[2] = d2; _dim[3] = d3; _dim[4] = d4;
476 allocate(5, _dim, true);
487 explicit Tensor(long d0, long d1, long d2, long d3, long d4, long d5) {
488 _dim[0] = d0; _dim[1] = d1; _dim[2] = d2; _dim[3] = d3; _dim[4] = d4; _dim[5] = d5;
489 allocate(6, _dim, true);
496 explicit Tensor(const std::vector<long>& d, bool dozero=true) : _p(0) {
497 allocate(d.size(), d.size() ? &(d[0]) : 0, dozero);
505 explicit Tensor(long nd, const long d[], bool dozero=true) : _p(0) {
506 allocate(nd,d,dozero);
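// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// Constructing tensors with the constructors declared above. The include
// path and namespace usage are assumptions about how the installed headers
// are laid out; adjust to your build.
//
//   #include <madness/tensor/tensor.h>
//   using madness::Tensor;
//
//   Tensor<double> a(3, 4);                        // 3x4, zero-initialized
//   Tensor<double> b(std::vector<long>{2, 2, 2});  // 2x2x2 from a vector of dims
//   long dims[2] = {5, 5};
//   Tensor<double> c(2, dims, false);              // 5x5, memory left uninitialized
// ---------------------------------------------------------------------------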
513 Tensor<T>& operator=(T x) {
522 Tensor<T>& fill(T x) {
531 template <typename Q>
532 Tensor<T>& operator+=(const Tensor<Q>& t) {
541 template <typename Q>
542 Tensor<T>& operator-=(const Tensor<Q>& t) {
551 template <typename Q>
552 Tensor< TENSOR_RESULT_TYPE(T,Q) > operator+(const Tensor<Q>& t) const {
554 Tensor<resultT> result(_ndim,_dim,false);
563 template <typename Q>
566 Tensor<resultT> result(_ndim,_dim,false);
575 template <typename Q>
577 operator*(const Q& x) const {
579 Tensor<resultT> result(_ndim,_dim,false);
588 template <typename Q>
590 operator/(const Q& x) const {
592 Tensor<resultT> result(_ndim,_dim);
601 template <typename Q>
603 operator+(const Q& x) const {
605 Tensor<resultT> result(_ndim,_dim);
614 template <typename Q>
616 operator-(const Q& x) const {
617 return (*this) + (-x);
624 Tensor<T> result = Tensor<T>(_ndim,_dim,false);
633 template <typename Q>
634 typename IsSupported<TensorTypeData<Q>,Tensor<T>&>::type
635 operator*=(const Q& x) {
644 template <typename Q>
645 typename IsSupported<TensorTypeData<Q>,Tensor<T>&>::type
654 template <typename Q>
655 typename IsSupported<TensorTypeData<Q>,Tensor<T>&>::type
656 operator+=(const Q& x) {
665 template <typename Q>
666 typename IsSupported<TensorTypeData<Q>,Tensor<T>&>::type
667 operator-=(const Q& x) {
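// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// Element-wise arithmetic with the operators declared above; a and b are
// assumed to be conforming Tensor<double>s.
//
//   Tensor<double> a(4, 4), b(4, 4);
//   a.fill(2.0);                 // every element set to 2
//   b.fillindex();               // b[i] = i in row-major order
//   Tensor<double> c = a + b;    // new tensor, element-wise sum
//   c *= 0.5;                    // in-place scaling by a scalar
//   c -= 1.0;                    // in-place shift of every element
// ---------------------------------------------------------------------------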
684 Tensor<T>& fillrandom() {
686 madness::RandomVector<T>(size(), ptr());
708 Tensor<T>& fillindex() {
718 Tensor<T>& screen(double x) {
728 static bool bounds_checking() {
729 #ifdef TENSOR_BOUNDS_CHECKING
740 T& operator[](long i) {
741 #ifdef TENSOR_BOUNDS_CHECKING
742 TENSOR_ASSERT(i>=0 && i<_dim[0],"1d bounds check failed dim=0",i,this);
751 const T& operator[](long i) const {
752 #ifdef TENSOR_BOUNDS_CHECKING
753 TENSOR_ASSERT(i>=0 && i<_dim[0],"1d bounds check failed dim=0",i,this);
762 T& operator()(long i) {
763 #ifdef TENSOR_BOUNDS_CHECKING
764 TENSOR_ASSERT(i>=0 && i<_dim[0],"1d bounds check failed dim=0",i,this);
773 const T& operator()(long i) const {
774 #ifdef TENSOR_BOUNDS_CHECKING
775 TENSOR_ASSERT(i>=0 && i<_dim[0],"1d bounds check failed dim=0",i,this);
785 T& operator()(long i, long j) {
786 #ifdef TENSOR_BOUNDS_CHECKING
787 TENSOR_ASSERT(i>=0 && i<_dim[0],"2d bounds check failed dim=0",i,this);
788 TENSOR_ASSERT(j>=0 && j<_dim[1],"2d bounds check failed dim=1",j,this);
798 const T& operator()(long i, long j) const {
799 #ifdef TENSOR_BOUNDS_CHECKING
800 TENSOR_ASSERT(i>=0 && i<_dim[0],"2d bounds check failed dim=0",i,this);
801 TENSOR_ASSERT(j>=0 && j<_dim[1],"2d bounds check failed dim=1",j,this);
812 T& operator()(long i, long j, long k) {
813 #ifdef TENSOR_BOUNDS_CHECKING
814 TENSOR_ASSERT(i>=0 && i<_dim[0],"3d bounds check failed dim=0",i,this);
815 TENSOR_ASSERT(j>=0 && j<_dim[1],"3d bounds check failed dim=1",j,this);
816 TENSOR_ASSERT(k>=0 && k<_dim[2],"3d bounds check failed dim=2",k,this);
827 const T& operator()(long i, long j, long k) const {
828 #ifdef TENSOR_BOUNDS_CHECKING
829 TENSOR_ASSERT(i>=0 && i<_dim[0],"3d bounds check failed dim=0",i,this);
830 TENSOR_ASSERT(j>=0 && j<_dim[1],"3d bounds check failed dim=1",j,this);
831 TENSOR_ASSERT(k>=0 && k<_dim[2],"3d bounds check failed dim=2",k,this);
843 T& operator()(long i, long j, long k, long l) {
844 #ifdef TENSOR_BOUNDS_CHECKING
845 TENSOR_ASSERT(i>=0 && i<_dim[0],"4d bounds check failed dim=0",i,this);
846 TENSOR_ASSERT(j>=0 && j<_dim[1],"4d bounds check failed dim=1",j,this);
847 TENSOR_ASSERT(k>=0 && k<_dim[2],"4d bounds check failed dim=2",k,this);
848 TENSOR_ASSERT(l>=0 && l<_dim[3],"4d bounds check failed dim=3",l,this);
861 const T& operator()(long i, long j, long k, long l) const {
862 #ifdef TENSOR_BOUNDS_CHECKING
863 TENSOR_ASSERT(i>=0 && i<_dim[0],"4d bounds check failed dim=0",i,this);
864 TENSOR_ASSERT(j>=0 && j<_dim[1],"4d bounds check failed dim=1",j,this);
865 TENSOR_ASSERT(k>=0 && k<_dim[2],"4d bounds check failed dim=2",k,this);
866 TENSOR_ASSERT(l>=0 && l<_dim[3],"4d bounds check failed dim=3",l,this);
880 T& operator()(long i, long j, long k, long l, long m) {
881 #ifdef TENSOR_BOUNDS_CHECKING
882 TENSOR_ASSERT(i>=0 && i<_dim[0],"5d bounds check failed dim=0",i,this);
883 TENSOR_ASSERT(j>=0 && j<_dim[1],"5d bounds check failed dim=1",j,this);
884 TENSOR_ASSERT(k>=0 && k<_dim[2],"5d bounds check failed dim=2",k,this);
885 TENSOR_ASSERT(l>=0 && l<_dim[3],"5d bounds check failed dim=3",l,this);
886 TENSOR_ASSERT(m>=0 && m<_dim[4],"5d bounds check failed dim=4",m,this);
900 const T& operator()(long i, long j, long k, long l, long m) const {
901 #ifdef TENSOR_BOUNDS_CHECKING
902 TENSOR_ASSERT(i>=0 && i<_dim[0],"5d bounds check failed dim=0",i,this);
903 TENSOR_ASSERT(j>=0 && j<_dim[1],"5d bounds check failed dim=1",j,this);
904 TENSOR_ASSERT(k>=0 && k<_dim[2],"5d bounds check failed dim=2",k,this);
905 TENSOR_ASSERT(l>=0 && l<_dim[3],"5d bounds check failed dim=3",l,this);
906 TENSOR_ASSERT(m>=0 && m<_dim[4],"5d bounds check failed dim=4",m,this);
921 T& operator()(long i, long j, long k, long l, long m, long n) {
922 #ifdef TENSOR_BOUNDS_CHECKING
923 TENSOR_ASSERT(i>=0 && i<_dim[0],"6d bounds check failed dim=0",i,this);
924 TENSOR_ASSERT(j>=0 && j<_dim[1],"6d bounds check failed dim=1",j,this);
925 TENSOR_ASSERT(k>=0 && k<_dim[2],"6d bounds check failed dim=2",k,this);
926 TENSOR_ASSERT(l>=0 && l<_dim[3],"6d bounds check failed dim=3",l,this);
927 TENSOR_ASSERT(m>=0 && m<_dim[4],"6d bounds check failed dim=4",m,this);
928 TENSOR_ASSERT(n>=0 && n<_dim[5],"6d bounds check failed dim=5",n,this);
943 const T& operator()(long i, long j, long k, long l, long m, long n) const {
944 #ifdef TENSOR_BOUNDS_CHECKING
945 TENSOR_ASSERT(i>=0 && i<_dim[0],"6d bounds check failed dim=0",i,this);
946 TENSOR_ASSERT(j>=0 && j<_dim[1],"6d bounds check failed dim=1",j,this);
947 TENSOR_ASSERT(k>=0 && k<_dim[2],"6d bounds check failed dim=2",k,this);
948 TENSOR_ASSERT(l>=0 && l<_dim[3],"6d bounds check failed dim=3",l,this);
949 TENSOR_ASSERT(m>=0 && m<_dim[4],"6d bounds check failed dim=4",m,this);
950 TENSOR_ASSERT(n>=0 && n<_dim[5],"6d bounds check failed dim=5",n,this);
960 T& operator()(const long ind[]) {
962 for (int d=0; d<_ndim; ++d) {
964 #ifdef TENSOR_BOUNDS_CHECKING
965 TENSOR_ASSERT(i>=0 && i<_dim[0],"non-PC general indexing bounds check failed dim=",d,this);
976 const T& operator()(const long ind[]) const {
978 for (int d=0; d<_ndim; ++d) {
980 #ifdef TENSOR_BOUNDS_CHECKING
981 TENSOR_ASSERT(i>=0 && i<_dim[0],"non-PC general indexing bounds check failed dim=",d,this);
992 T& operator()(const std::vector<long> ind) {
993 TENSOR_ASSERT(ind.size()>=(unsigned int) _ndim,"invalid number of dimensions",ind.size(),this);
995 for (long d=0; d<_ndim; ++d) {
996 TENSOR_ASSERT(ind[d]>=0 && ind[d]<_dim[d],"out-of-bounds access",ind[d],this);
1006 const T& operator()(const std::vector<long> ind) const {
1007 TENSOR_ASSERT(ind.size()>=(unsigned int) _ndim,"invalid number of dimensions",ind.size(),this);
1009 for (long d=0; d<_ndim; ++d) {
1010 TENSOR_ASSERT(ind[d]>=0 && ind[d]<_dim[d],"out-of-bounds access",ind[d],this);
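// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// Element access through the indexing operators above. Bounds checking is
// only active when the header is compiled with TENSOR_BOUNDS_CHECKING
// defined; otherwise the index arguments are used unchecked.
//
//   Tensor<double> t(3, 3, 3);
//   t(0, 1, 2) = 7.0;                          // fixed-arity indexing
//   std::vector<long> ind = {0, 1, 2};
//   double x = t(ind);                         // general indexing by vector
//   Tensor<double> v(5);
//   v[4] = 1.0;                                // operator[] is 1-d only
// ---------------------------------------------------------------------------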
1020 SliceTensor<T> operator()(const std::vector<Slice>& s) {
1021 TENSOR_ASSERT(s.size()>=(unsigned)(this->ndim()),"invalid number of dimensions",
1023 return SliceTensor<T>(*this,&(s[0]));
1030 const Tensor<T> operator()(const std::vector<Slice>& s) const {
1031 TENSOR_ASSERT(s.size()>=(unsigned)(this->ndim()),"invalid number of dimensions",
1033 return SliceTensor<T>(*this,&(s[0]));
1039 SliceTensor<T> operator()(const Slice& s0) {
1043 return SliceTensor<T>(*this,s);
1049 const Tensor<T> operator()(const Slice& s0) const {
1053 return SliceTensor<T>(*this,s);
1059 SliceTensor<T> operator()(long i, const Slice& s1) {
1062 Slice s[2] = {Slice(i,i,0),s1};
1063 return SliceTensor<T>(*this,s);
1069 const Tensor<T> operator()(long i, const Slice& s1) const {
1072 Slice s[2] = {Slice(i,i,0),s1};
1073 return SliceTensor<T>(*this,s);
1079 SliceTensor<T> operator()(const Slice& s0, long j) {
1082 Slice s[2] = {s0,Slice(j,j,0)};
1083 return SliceTensor<T>(*this,s);
1089 const Tensor<T> operator()(const Slice& s0, long j) const {
1092 Slice s[2] = {s0,Slice(j,j,0)};
1093 return SliceTensor<T>(*this,s);
1099 SliceTensor<T> operator()(const Slice& s0, const Slice& s1) {
1102 Slice s[2] = {s0,s1};
1103 return SliceTensor<T>(*this,s);
1109 const Tensor<T> operator()(const Slice& s0, const Slice& s1) const {
1112 Slice s[2] = {s0,s1};
1113 return SliceTensor<T>(*this,s);
1119 SliceTensor<T> operator()(const Slice& s0, const Slice& s1, const Slice& s2) {
1122 Slice s[3] = {s0,s1,s2};
1123 return SliceTensor<T>(*this,s);
1129 const Tensor<T> operator()(const Slice& s0, const Slice& s1, const Slice& s2) const {
1132 Slice s[3] = {s0,s1,s2};
1133 return SliceTensor<T>(*this,s);
1139 SliceTensor<T> operator()(long i, const Slice& s1, const Slice& s2) {
1142 Slice s[3] = {Slice(i,i,0),s1,s2};
1143 return SliceTensor<T>(*this,s);
1149 const Tensor<T> operator()(long i, const Slice& s1, const Slice& s2) const {
1152 Slice s[3] = {Slice(i,i,0),s1,s2};
1153 return SliceTensor<T>(*this,s);
1159 SliceTensor<T> operator()(const Slice& s0, long j, const Slice& s2) {
1162 Slice s[3] = {s0,Slice(j,j,0),s2};
1163 return SliceTensor<T>(*this,s);
1169 const Tensor<T> operator()(const Slice& s0, long j, const Slice& s2) const {
1172 Slice s[3] = {s0,Slice(j,j,0),s2};
1173 return SliceTensor<T>(*this,s);
1179 SliceTensor<T> operator()(const Slice& s0, const Slice& s1, long k) {
1182 Slice s[3] = {s0,s1,Slice(k,k,0)};
1183 return SliceTensor<T>(*this,s);
1189 const Tensor<T> operator()(const Slice& s0, const Slice& s1, long k) const {
1192 Slice s[3] = {s0,s1,Slice(k,k,0)};
1193 return SliceTensor<T>(*this,s);
1199 SliceTensor<T> operator()(long i, long j, const Slice& s2) {
1202 Slice s[3] = {Slice(i,i,0),Slice(j,j,0),s2};
1203 return SliceTensor<T>(*this,s);
1209 const Tensor<T> operator()(long i, long j, const Slice& s2) const {
1212 Slice s[3] = {Slice(i,i,0),Slice(j,j,0),s2};
1213 return SliceTensor<T>(*this,s);
1219 SliceTensor<T> operator()(long i, const Slice& s1, long k) {
1222 Slice s[3] = {Slice(i,i,0),s1,Slice(k,k,0)};
1223 return SliceTensor<T>(*this,s);
1229 const Tensor<T> operator()(long i, const Slice& s1, long k) const {
1232 Slice s[3] = {Slice(i,i,0),s1,Slice(k,k,0)};
1233 return SliceTensor<T>(*this,s);
1239 SliceTensor<T> operator()(const Slice& s0, long j, long k) {
1242 Slice s[3] = {s0,Slice(j,j,0),Slice(k,k,0)};
1243 return SliceTensor<T>(*this,s);
1249 const Tensor<T> operator()(const Slice& s0, long j, long k) const {
1252 Slice s[3] = {s0,Slice(j,j,0),Slice(k,k,0)};
1253 return SliceTensor<T>(*this,s);
1259 SliceTensor<T> operator()(const Slice& s0, const Slice& s1, const Slice& s2,
1263 Slice s[4] = {s0,s1,s2,s3};
1264 return SliceTensor<T>(*this,s);
1270 const Tensor<T> operator()(const Slice& s0, const Slice& s1, const Slice& s2,
1271 const Slice& s3) const {
1274 Slice s[4] = {s0,s1,s2,s3};
1275 return SliceTensor<T>(*this,s);
1281 SliceTensor<T> operator()(const Slice& s0, const Slice& s1, const Slice& s2,
1282 const Slice& s3, const Slice& s4) {
1285 Slice s[5] = {s0,s1,s2,s3,s4};
1286 return SliceTensor<T>(*this,s);
1292 const Tensor<T> operator()(const Slice& s0, const Slice& s1, const Slice& s2,
1293 const Slice& s3, const Slice& s4) const {
1296 Slice s[5] = {s0,s1,s2,s3,s4};
1297 return SliceTensor<T>(*this,s);
1303 SliceTensor<T> operator()(const Slice& s0, const Slice& s1, const Slice& s2,
1304 const Slice& s3, const Slice& s4, const Slice& s5) {
1307 Slice s[6] = {s0,s1,s2,s3,s4,s5};
1308 return SliceTensor<T>(*this,s);
1315 const Tensor<T> operator()(const Slice& s0, const Slice& s1, const Slice& s2,
1316 const Slice& s3, const Slice& s4, const Slice& s5) const {
1319 Slice s[6] = {s0,s1,s2,s3,s4,s5};
1320 return SliceTensor<T>(*this,s);
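// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// Slicing returns a SliceTensor view that shares the underlying data, so
// assigning to a slice writes into the original tensor. Slice(start,end,step)
// is declared in slice.h; the inclusive end and the Slice(i,i,0) idiom for
// freezing a dimension follow the code above.
//
//   Tensor<double> m(4, 4);
//   m(Slice(0,1), Slice(0,1)) = 1.0;       // fill the top-left 2x2 block
//   Tensor<double> row = m(1, Slice(0,3)); // view of row 1 (shares data)
//   Tensor<double> rowcopy = copy(row);    // detach with an explicit deep copy
// ---------------------------------------------------------------------------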
1328 Tensor<T> reshape(int ndimnew, const long* d) {
1329 Tensor<T> result(*this);
1330 result.reshape_inplace(ndimnew,d);
1339 const Tensor<T> reshape(int ndimnew, const long* d) const {
1340 Tensor<T> result(*const_cast<Tensor<T>*>(this));
1341 result.reshape_inplace(ndimnew,d);
1349 Tensor<T> reshape(const std::vector<long>& d) {
1350 return reshape(d.size(), d.size() ? &d[0] : 0);
1357 const Tensor<T> reshape(const std::vector<long>& d) const {
1358 return reshape(d.size(), d.size() ? &d[0] : 0);
1365 Tensor<T> reshape(long dim0) {
1367 return reshape(1,d);
1373 const Tensor<T> reshape(long dim0) const {
1375 return reshape(1,d);
1383 Tensor<T> reshape(long dim0, long dim1) {
1384 long d[2] = {dim0,dim1};
1385 return reshape(2,d);
1393 const Tensor<T> reshape(long dim0, long dim1) const {
1394 long d[2] = {dim0,dim1};
1395 return reshape(2,d);
1404 Tensor<T> reshape(long dim0, long dim1, long dim2) {
1405 long d[3] = {dim0,dim1,dim2};
1406 return reshape(3,d);
1415 const Tensor<T> reshape(long dim0, long dim1, long dim2) const {
1416 long d[3] = {dim0,dim1,dim2};
1417 return reshape(3,d);
1427 Tensor<T> reshape(long dim0, long dim1, long dim2, long dim3) {
1428 long d[4] = {dim0,dim1,dim2,dim3};
1429 return reshape(4,d);
1439 const Tensor<T> reshape(long dim0, long dim1, long dim2, long dim3) const {
1440 long d[4] = {dim0,dim1,dim2,dim3};
1441 return reshape(4,d);
1452 Tensor<T> reshape(long dim0, long dim1, long dim2, long dim3, long dim4) {
1453 long d[5] = {dim0,dim1,dim2,dim3,dim4};
1454 return reshape(5,d);
1465 const Tensor<T> reshape(long dim0, long dim1, long dim2, long dim3, long dim4) const {
1466 long d[5] = {dim0,dim1,dim2,dim3,dim4};
1467 return reshape(5,d);
1479 Tensor<T> reshape(long dim0, long dim1, long dim2, long dim3, long dim4, long dim5) {
1480 long d[6] = {dim0,dim1,dim2,dim3,dim4,dim5};
1481 return reshape(6,d);
1493 const Tensor<T> reshape(long dim0, long dim1, long dim2, long dim3, long dim4, long dim5) const {
1494 long d[6] = {dim0,dim1,dim2,dim3,dim4,dim5};
1495 return reshape(6,d);
1500 long d[1] = {_size};
1501 return reshape(1,d);
1505 const Tensor<T> flat() const {
1506 long d[1] = {_size};
1507 return reshape(1,d);
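// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// reshape() and flat() return tensors with new dimensions over the same
// number of elements; for a contiguous tensor no data are copied.
//
//   Tensor<double> a(2, 6);
//   Tensor<double> b = a.reshape(3, 4);   // same 12 elements, viewed as 3x4
//   Tensor<double> v = a.flat();          // 1-d view of all 12 elements
// ---------------------------------------------------------------------------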
1513 Tensor<T> splitdim(long i, long dimi0, long dimi1) {
1514 Tensor<T> result(*this);
1515 result.splitdim_inplace(i, dimi0, dimi1);
1522 const Tensor<T> splitdim(long i, long dimi0, long dimi1) const {
1523 Tensor<T> result(*const_cast<Tensor<T>*>(this));
1524 result.splitdim_inplace(i, dimi0, dimi1);
1531 Tensor<T> fusedim(long i) {
1532 Tensor<T> result(*this);
1533 result.fusedim_inplace(i);
1540 const Tensor<T> fusedim(long i) const {
1541 Tensor<T> result(*const_cast<Tensor<T>*>(this));
1542 result.fusedim_inplace(i);
1549 Tensor<T> swapdim(long idim, long jdim) {
1550 Tensor<T> result(*this);
1551 result.swapdim_inplace(idim, jdim);
1558 const Tensor<T> swapdim(long idim, long jdim) const {
1559 Tensor<T> result(*const_cast<Tensor<T>*>(this));
1560 result.swapdim_inplace(idim, jdim);
1568 Tensor<T> mapdim(const std::vector<long>& map) {
1569 Tensor<T> result(*this);
1570 result.mapdim_inplace(map);
1577 const Tensor<T> mapdim(const std::vector<long>& map) const {
1578 Tensor<T> result(*const_cast<Tensor<T>*>(this));
1579 result.mapdim_inplace(map);
1585 Tensor<T> cycledim(long nshift, long start, long end) {
1586 Tensor<T> result(*this);
1587 result.cycledim_inplace(nshift, start, end);
1593 const Tensor<T> cycledim(long nshift, long start, long end) const {
1594 Tensor<T> result(*const_cast<Tensor<T>*>(this));
1595 result.cycledim_inplace(nshift, start, end);
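// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// The dimension-manipulation members above return views that share data
// with the original tensor; only the dims/strides bookkeeping changes.
//
//   Tensor<double> a(2, 3, 4);
//   Tensor<double> b = a.swapdim(0, 2);      // shape 4x3x2, no data movement
//   Tensor<double> c = a.fusedim(0);         // fuse dims 0 and 1 -> 6x4
//   Tensor<double> d = a.splitdim(2, 2, 2);  // split last dim -> 2x3x2x2
//   Tensor<double> e = a.cycledim(1, 0, -1); // cyclic shift of all dims
// ---------------------------------------------------------------------------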
1601 template <class Q> bool conforms(const Tensor<Q>& t) const {
1627 T min(long* ind=0) const {
1628 T result = *(this->_p);
1630 for (long i=0; i<_ndim; ++i) ind[i]=0;
1633 if (result > *_p0) {
1635 for (long i=0; i<nd; ++i) ind[i]=iter.ind[i];
1647 T max(long* ind=0) const {
1648 T result = *(this->_p);
1650 for (long i=0; i<_ndim; ++i) ind[i]=0;
1653 if (result < *_p0) {
1655 for (long i=0; i<nd; ++i) ind[i]=iter.ind[i];
1670 float_scalar_type normf() const {
1671 float_scalar_type result = 0;
1673 return (float_scalar_type) std::sqrt(result);
1677 scalar_type absmin(long *ind = 0) const {
1678 scalar_type result = std::abs(*(this->_p));
1680 for (long i=0; i<_ndim; ++i) ind[i]=0;
1683 scalar_type absval = std::abs(*_p0);
1684 if (result > absval) {
1686 for (long i=0; i<nd; ++i) ind[i]=iter.ind[i];
1698 scalar_type absmax(long *ind = 0) const {
1699 scalar_type result = std::abs(*(this->_p));
1701 for (long i=0; i<_ndim; ++i) ind[i]=0;
1704 scalar_type absval = std::abs(*_p0);
1705 if (result < absval) {
1707 for (long i=0; i<nd; ++i) ind[i]=iter.ind[i];
1720 T trace(const Tensor<T>& t) const {
1735 template <typename opT>
1736 Tensor<T>& unaryop(opT& op) {
1742 Tensor<T>& emul(const Tensor<T>& t) {
1748 Tensor<T>& gaxpy(T alpha, const Tensor<T>& t, T beta) {
1752 if (alpha == T(1.0)) {
1753 for (long i=0; i<_size; ++i) a[i] += b[i]*beta;
1756 for (long i=0; i<_size; ++i) a[i] = a[i]*alpha + b[i]*beta;
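// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// Reductions and in-place linear-algebra helpers from the members above.
//
//   Tensor<double> a(3, 3), b(3, 3);
//   a.fillindex();  b.fill(1.0);
//   double n  = a.normf();          // Frobenius norm
//   double mx = a.absmax();         // largest |element|
//   double tr = a.trace(b);         // sum of a[i]*b[i] over all elements
//   a.gaxpy(2.0, b, -1.0);          // a <- 2*a - b, element-wise, in place
//   a.emul(b);                      // a <- a*b, element-wise, in place
// ---------------------------------------------------------------------------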
1773 const T* ptr() const {
1788 TensorIterator<T> unary_iterator(long iterlevel=0,
1791 long jdim=default_jdim) const {
1792 return TensorIterator<T>(this,(const Tensor<T>*) 0, (const Tensor<T>*) 0,
1793 iterlevel, optimize, fusedim, jdim);
1798 TensorIterator<T,Q> binary_iterator(const Tensor<Q>& q,
1802 long jdim=default_jdim) const {
1803 return TensorIterator<T,Q>(this,&q,(const Tensor<T>*) 0,
1804 iterlevel, optimize, fusedim, jdim);
1808 template <class Q, class R>
1809 TensorIterator<T,Q,R> ternary_iterator(const Tensor<Q>& q,
1814 long jdim=default_jdim) const {
1815 return TensorIterator<T,Q,R>(this,&q,&r,
1816 iterlevel, optimize, fusedim, jdim);
1820 const TensorIterator<T>& end() const {
1821 static TensorIterator<T> theend(0,0,0,0,0,0);
1825 virtual ~Tensor() {}
1828 void clear() {deallocate();}
1830 bool has_data() const {return size()!=0;};
1835 std::ostream& operator << (std::ostream& out, const Tensor<T>& t);
1840 template <class Archive, typename T>
1842 static void store(const Archive& s, const Tensor<T>& t) {
1843 if (t.iscontiguous()) {
1844 s & t.size() & t.id();
1855 template <class Archive, typename T>
1857 static void load(const Archive& s, Tensor<T>& t) {
1858 long sz = 0l, id = 0l;
1860 if (id != t.id()) throw "type mismatch deserializing a tensor";
1864 t = Tensor<T>(_ndim, _dim, false);
1865 if (sz != t.size()) throw "size mismatch deserializing a tensor";
1866 s & wrap(t.ptr(), t.size());
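// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// The store/load specializations above let a Tensor be written to and read
// from the MADNESS serialization archives with the usual operator&. The
// archive classes named below come from the separate world serialization
// headers and are only an illustration.
//
//   Tensor<double> a(3, 3), b;
//   a.fillrandom();
//   {
//       madness::archive::BinaryFstreamOutputArchive oar("tensor.dat");
//       oar & a;                            // store
//   }
//   {
//       madness::archive::BinaryFstreamInputArchive iar("tensor.dat");
//       iar & b;                            // load back into b
//   }
// ---------------------------------------------------------------------------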
1879 template <typename T, typename Q>
1880 typename IsSupported < TensorTypeData<Q>, Tensor<T> >::type
1888 template <typename T, typename Q>
1889 typename IsSupported < TensorTypeData<Q>, Tensor<T> >::type
1897 template <typename T, typename Q>
1898 typename IsSupported < TensorTypeData<Q>, Tensor<T> >::type
1907 template <class T> Tensor<T> copy(const Tensor<T>& t) {
1909 Tensor<T> result = Tensor<T>(t.ndim(),t.dims(),false);
1928 template <class T, class Q>
1929 Tensor<TENSOR_RESULT_TYPE(T,Q)> transform_dir(const Tensor<T>& t, const Tensor<Q>& c, int axis) {
1931 return inner(c,t,0,axis);
1933 else if (axis == t.ndim()-1) {
1934 return inner(t,c,axis,0);
1937 return copy(inner(t,c,axis,0).cycledim(1,axis, -1));
1946 TENSOR_ASSERT(t.ndim() == 2, "transpose requires a matrix", t.ndim(), &t);
1947 return copy(t.swapdim(0,1));
1955 TENSOR_ASSERT(t.ndim() == 2, "conj_transpose requires a matrix", t.ndim(), &t);
1956 return conj(t.swapdim(0,1));
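// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// Tensor assignment and the copy constructor share data; copy() makes the
// deep copy, and transpose()/conj_transpose() return new contiguous matrices.
//
//   Tensor<double> a(2, 3);
//   a.fillindex();
//   Tensor<double> shallow = a;          // shares a's elements
//   Tensor<double> deep = copy(a);       // independent storage
//   Tensor<double> at = transpose(a);    // 3x2 deep copy of a^T
// ---------------------------------------------------------------------------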
1968 template <class T> class SliceTensor : public Tensor<T> {
1973 SliceTensor(const Tensor<T>& t, const Slice s[])
1974 : Tensor<T>(const_cast<Tensor<T>&>(t))
1979 long nd = 0, size=1;
1980 for (long i=0; i<t._ndim; ++i) {
1981 long start=s[i].start, end=s[i].end, step=s[i].step;
1984 if (start < 0) start += this->_dim[i];
1985 if (end < 0) end += this->_dim[i];
1986 long len = end-start+1;
1987 if (step) len /= step;
1991 end = start + (len-1)*step;
1996 TENSOR_ASSERT(start>=0 && start<this->_dim[i],"slice start invalid",start,this);
1997 TENSOR_ASSERT(end>=0 && end<this->_dim[i],"slice end invalid",end,this);
1998 TENSOR_ASSERT(len>0,"slice length must be non-zero",len,this);
2000 this->_p += start * t._stride[i];
2004 this->_dim[nd] = len;
2005 this->_stride[nd] = step * t._stride[i];
2013 this->_stride[i] = 0;
2020 SliceTensor<T>& operator=(const SliceTensor<T>& t) {
2026 SliceTensor<T>& operator=(const SliceTensor<Q>& t) {
2031 SliceTensor<T>& operator=(const Tensor<T>& t) {
2037 SliceTensor<T>& operator=(const Tensor<Q>& t) {
2042 SliceTensor<T>& operator=(const T& t) {
2047 virtual ~SliceTensor() {};
2063 std::ostream& operator << (std::ostream& s, const Tensor<T>& t) {
2064 if (t.size() == 0) {
2065 s << "[empty tensor]\n";
2070 long index_width = 0;
2071 for (int i = 0; i<(t.ndim()-1); ++i) {
2072 if (maxdim < t.dim(i)) maxdim = t.dim(i);
2076 else if (maxdim < 100)
2078 else if (maxdim < 1000)
2080 else if (maxdim < 10000)
2085 std::ios::fmtflags oldflags = s.setf(std::ios::scientific);
2086 long oldprec = s.precision();
2087 long oldwidth = s.width();
2090 for (TensorIterator<T> iter=t.unary_iterator(1,false,false); iter!=t.end(); ++iter) {
2091 const T* p = iter._p0;
2092 long inc = iter._s0;
2093 long dimj = iter.dimj;
2094 s.unsetf(std::ios::scientific);
2096 for (long i=0; i<iter.ndim; ++i) {
2097 s.width(index_width);
2099 if (i != iter.ndim) s << ",";
2103 s.setf(std::ios::fixed);
2104 for (long j=0; j<dimj; ++j, p+=inc) {
2110 s.unsetf(std::ios::scientific);
2113 s.setf(oldflags,std::ios::floatfield);
2114 s.precision(oldprec);
2125 Tensor<T> outer(const Tensor<T>& left, const Tensor<T>& right) {
2126 long nd = left.ndim() + right.ndim();
2130 for (long i=0; i<left.ndim(); ++i) d[i] = left.dim(i);
2131 for (long i=0; i<right.ndim(); ++i) d[i+left.ndim()] = right.dim(i);
2132 Tensor<T> result(nd,d,false);
2133 T* ptr = result.ptr();
2139 for (iter.reset(); iter._p0; ++iter) {
2140 long dimj = iter.dimj;
2142 long Tstride = iter._s0;
2143 for (long _j=0; _j<dimj; ++_j, _p0+=Tstride) {
2144 *ptr++ = val1 * (*_p0);
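// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// outer() forms the tensor product: the result has the dimensions of the
// left operand followed by those of the right operand.
//
//   Tensor<double> u(3), v(4);
//   u.fillindex();  v.fill(2.0);
//   Tensor<double> uv = outer(u, v);     // shape 3x4, uv(i,j) = u(i)*v(j)
// ---------------------------------------------------------------------------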
2161 template <class T, class Q>
2162 Tensor<TENSOR_RESULT_TYPE(T,Q)> inner(const Tensor<T>& left, const Tensor<Q>& right,
2163 long k0=-1, long k1=0) {
2164 if (k0 < 0) k0 += left.ndim();
2165 if (k1 < 0) k1 += right.ndim();
2166 long nd = left.ndim() + right.ndim() - 2;
2167 TENSOR_ASSERT(nd!=0, "result is a scalar but cannot return one ... use dot",
2169 TENSOR_ASSERT(left.dim(k0) == right.dim(k1),"common index must be same length",
2170 right.dim(k1), &left);
2173 "invalid number of dimensions in the result", nd,0);
2178 for (long i=0; i<k0; ++i) d[i] = left.dim(i);
2179 for (long i=k0+1; i<left.ndim(); ++i) d[i-1] = left.dim(i);
2180 base = left.ndim()-1;
2181 for (long i=0; i<k1; ++i) d[i+base] = right.dim(i);
2183 for (long i=k1+1; i<right.ndim(); ++i) d[i+base] = right.dim(i);
2185 Tensor<TENSOR_RESULT_TYPE(T,Q)> result(nd,d);
2203 template <class T, class Q>
2211 resultT* ptr = result.ptr();
2213 if (k0 < 0) k0 += left.ndim();
2214 if (k1 < 0) k1 += right.ndim();
2216 if (left.iscontiguous() && right.iscontiguous()) {
2217 if (k0==0 && k1==0) {
2219 long dimk = left.dim(k0);
2220 long dimj = right.stride(0);
2221 long dimi = left.stride(0);
2222 ::mTxm(dimi,dimj,dimk,ptr,left.ptr(),right.ptr());
2225 else if (k0==(left.ndim()-1) && k1==(right.ndim()-1)) {
2227 long dimk = left.dim(k0);
2228 long dimi = left.size()/dimk;
2229 long dimj = right.size()/dimk;
2230 ::mxmT(dimi,dimj,dimk,ptr,left.ptr(),right.ptr());
2233 else if (k0==0 && k1==(right.ndim()-1)) {
2235 long dimk = left.dim(k0);
2236 long dimi = left.stride(0);
2237 long dimj = right.size()/dimk;
2238 ::mTxmT(dimi,dimj,dimk,ptr,left.ptr(),right.ptr());
2241 else if (k0==(left.ndim()-1) && k1==0) {
2243 long dimk = left.dim(k0);
2244 long dimi = left.size()/dimk;
2245 long dimj = right.stride(0);
2246 ::mxm(dimi,dimj,dimk,ptr,left.ptr(),right.ptr());
2251 long dimj = left.dim(k0);
2255 iter0._p0; ++iter0) {
2257 long s0 = iter0._s0;
2258 for (iter1.reset(); iter1._p0; ++iter1) {
2261 long s1 = iter1._s0;
2263 for (long j=0; j<dimj; ++j,p0+=s0,p1+=s1) {
2264 sum += (*p0) * (*p1);
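// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// inner(left, right, k0, k1) contracts dimension k0 of left with dimension
// k1 of right; the defaults contract the last dimension of left with the
// first dimension of right, i.e. ordinary matrix multiplication for 2-d
// tensors.
//
//   Tensor<double> A(3, 4), B(4, 5), x(4);
//   A.fillrandom();  B.fillrandom();  x.fillrandom();
//   Tensor<double> C = inner(A, B);        // 3x5 matrix product
//   Tensor<double> y = inner(A, x);        // 3-vector, y = A*x
// ---------------------------------------------------------------------------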
2285 template <class T, class Q>
2286 Tensor<TENSOR_RESULT_TYPE(T,Q)> transform(const Tensor<T>& t, const Tensor<Q>& c) {
2288 TENSOR_ASSERT(c.ndim() == 2,"second argument must be a matrix",c.ndim(),&c);
2289 if (c.dim(0)==c.dim(1) && t.iscontiguous() && c.iscontiguous()) {
2290 Tensor<resultT> result(t.ndim(),t.dims(),false);
2291 Tensor<resultT> work(t.ndim(),t.dims(),false);
2295 Tensor<resultT> result = t;
2296 for (long i=0; i<t.ndim(); ++i) {
2297 result = inner(result,c,0,0);
2313 template <class T, class Q>
2316 Tensor<resultT> result = t;
2317 for (long i=0; i<t.ndim(); ++i) {
2318 result = inner(result,c[i],0,0);
2350 template <class T, class Q>
2354 const Q *pc=c.ptr();
2355 resultT *t0=workspace.ptr(), *t1=result.ptr();
2358 t1 = workspace.ptr();
2361 long dimj = c.dim(1);
2363 for (int n=1; n<t.ndim(); ++n) dimi *= dimj;
2365 #ifdef AVX_MTXMQ_TEST
2367 mTxmq(dimi, dimj, dimj, t0, t.ptr(), pc);
2368 for (int n=1; n<t.ndim(); ++n) {
2369 mTxmq(dimi, dimj, dimj, t1, t0, pc);
2373 long nij = dimi*dimj;
2375 for (long i=0; i<nij; ++i) t0[i] = 0.0;
2376 mTxm(dimi, dimj, dimj, t0, t.ptr(), pc);
2377 for (int n=1; n<t.ndim(); ++n) {
2378 for (long i=0; i<nij; ++i) t1[i] = 0.0;
2379 mTxm(dimi, dimj, dimj, t1, t0, pc);
2385 for (int n=1; n<t.ndim(); ++n) {
2391 long nij = dimi*dimj;
2394 for (long i=0; i<nij; ++i) t0[i] = 0.0;
2395 mTxm(dimi, dimj, dimj, t0, t.ptr(), pc);
2396 for (int n=1; n<t.ndim(); ++n) {
2397 for (long i=0; i<nij; ++i) t1[i] = 0.0;
2398 mTxm(dimi, dimj, dimj, t1, t0, pc);
2403 mTxmq(dimi, dimj, dimj, t0, t.ptr(), pc);
2404 for (int n=1; n<t.ndim(); ++n) {
2405 mTxmq(dimi, dimj, dimj, t1, t0, pc);
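// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// transform(t, c) applies the same matrix c to every dimension of t
// (result(i,j,...) = sum over i',j',... of t(i',j',...) c(i',i) c(j',j) ...),
// i.e. the separable transform; transform_dir() applies c along one axis.
//
//   Tensor<double> t(5, 5, 5), c(5, 5);
//   t.fillrandom();  c.fillrandom();
//   Tensor<double> u = transform(t, c);          // all three dimensions
//   Tensor<double> w = transform_dir(t, c, 1);   // only dimension 1
// ---------------------------------------------------------------------------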
2418 Tensor< typename Tensor<T>::scalar_type > abs(const Tensor<T>& t) {
2419 typedef typename Tensor<T>::scalar_type scalar_type;
2420 Tensor<scalar_type> result(t.ndim(),t.dims(),false);
2429 Tensor< typename Tensor<T>::scalar_type > arg(const Tensor<T>& t) {
2430 typedef typename Tensor<T>::scalar_type scalar_type;
2431 Tensor<scalar_type> result(t.ndim(),t.dims(),false);
2440 Tensor< typename Tensor<T>::scalar_type > real(const Tensor<T>& t) {
2441 typedef typename Tensor<T>::scalar_type scalar_type;
2442 Tensor<scalar_type> result(t.ndim(),t.dims(),false);
2451 Tensor< typename Tensor<T>::scalar_type > imag(const Tensor<T>& t) {
2452 typedef typename Tensor<T>::scalar_type scalar_type;
2453 Tensor<scalar_type> result(t.ndim(),t.dims(),false);
2462 Tensor<T> conj(const Tensor<T>& t) {
2463 Tensor<T> result(t.ndim(),t.dims(),false);
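// --- Usage sketch (illustrative, not part of tensor.h) --------------------
// The element-wise helpers above are intended for complex tensors: real(),
// imag(), abs() and arg() return real tensors (scalar_type), while conj()
// returns a tensor of the same complex type.
//
//   Tensor<std::complex<double> > z(4, 4);
//   z.fill(std::complex<double>(1.0, -2.0));
//   Tensor<double> re = real(z);
//   Tensor<double> mag = abs(z);
//   Tensor<std::complex<double> > zc = conj(z);
// ---------------------------------------------------------------------------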
2469 #endif // MADNESS_TENSOR_TENSOR_H__INCLUDED