gTensor Utilities¶
Overview of Utilities¶
- Initialize a tensor
- Permute — ndtensor/permute.hpp (Completed)
- Reshape — ndtensor/reshape.hpp and generic/reshape.hpp (Completed)
- Diag — generic/diag.hpp (Completed)
- Trace — generic/trace.hpp (Completed)
- Rscale — generic/rscale.hpp (Test function to be implemented)
- Contract — gtensor_blas_lapack/contract.hpp (Completed)
- Mult — ndtensor/mult.hpp (Completed)
- Deparallelise — ndtensor/deparallelise.hpp (Test function to be implemented)
- Direct Sum — ndtensor/directsum.hpp (Completed)
- QR Decomposition — ndtensor/qrcompress.hpp (Completed)
- SVD Compression — ndtensor/svdcompress.hpp (Completed)
- Fusion — ndtensor/fusion.hpp (Completed)
- Matrix exp — ndtensor/expm4.hpp (Completed)
Usage¶
Initialize a tensor¶
gc::TensorNd<data_type, rank, order> TensorName(TensorExtent)
- data_type could be double or complex<double>
- rank: rank of your tensor
- order: data storage order, either row major or column major. Available inputs are CblasRowMajor and CblasColMajor
- TensorName: name of your tensor
- TensorExtent: dimensions of your tensor, with rank entries as defined above
- Example
- Initialize a rank 3 empty tensor EmptyT: gc::TensorNd<double, 3, CblasRowMajor> EmptyT;
- Initialize a rank 3 empty tensor EmptyT with extent 2,3,4: gc::TensorNd<double, 3, CblasRowMajor> EmptyT(2,3,4);
- Fill the tensor with 1.: EmptyT.fill(1.)
- Fill the tensor with random numbers: auto g = drand48; EmptyT.fill(g);
- Fill the tensor with your own array by iterator (see the test function below):
/* test_construct.cpp
* Test function for tensor initialization
*/
#include <iostream>
#include <gtensor/ndtensor.hpp>
#include <gtensor/tensorio.hpp>
using namespace gc;
// Initialize a tensor by copying values element-by-element from a std::vector
// through the tensor's iterator interface.
void t_construct() {
    gc::TensorNd<double, 3, CblasRowMajor> CTensor(2,2,2);
    std::vector<double> a = {1, 2, 3, 4, 5, 6, 7, 8};
    auto src = a.begin();
    for (auto it = CTensor.begin(); it != CTensor.end(); ++it) {
        *it = *src;
        ++src;
    }
    std::cout << CTensor << std::endl;
}
// Initalize a tensor using fill
void t_fill() {
gc::TensorNd<double, 3, CblasRowMajor> CTensor(2,3,4);
CTensor.fill(1.0);
std::cout << CTensor << std::endl;
}
// Initialize a 10x10 identity matrix using eye.
void t_eye() {
    auto identity = gc::eye(10);
    std::cout << identity << std::endl;
}
/* !!!!!!!!!!!!!Warning!!!!!!!!!!!!!!!!!!!
 * `ones` and `zeros` take template arguments, including the
 * data_type (double/complex double) and array major (row major/column major).
 * The default major for `ones` and `zeros` is column major.
 * Mixed usage of column major and row major would give errors.
 */
// Initialize a tensor filled with 1, using the default (column) major.
void t_ones() {
    std::array<size_t, 3> extent = {2, 2, 2};
    auto t_one = gc::ones(extent); // default template arguments
    std::cout << t_one << std::endl;
}
// Initialize a tensor filled with 0, using the default (column) major.
void t_zeros() {
    std::array<size_t, 3> extent = {2, 3, 4};
    std::cout << gc::zeros(extent) << std::endl;
}
// Initialize a tensor filled with 1, explicitly requesting row major storage.
void t_ones_row() {
    auto t_one = gc::ones<double, 2, CblasRowMajor>({4,4});
    // FIX: previously this streamed `t_ones_row` — the address of this very
    // function — instead of the tensor that was just created.
    std::cout << t_one << std::endl;
}
// Driver: only the row-major `ones` example is enabled; uncomment the
// other calls to run the remaining initialization examples.
int main() {
    // t_construct();
    // t_fill();
    // t_eye();
    // t_ones();
    // t_zeros();
    t_ones_row();
    return 0;
}
Permute¶
We can permute a tensor in a similar fashion to Matlab
permute(tensor,order)
- tensor is the target tensor to be permuted;
- order is an array that defines the new ordering of the indices (which in turn determines the new tensor extent).
- Output: tensor of the same rank with new extent.
Example:
/* test_permute.cpp
* Test function for tensor permutation
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a 2x3x4 row-major tensor whose entries are 0,1,2,... in storage order.
gc::TensorNd<double, 3, CblasRowMajor> callTensor() {
    gc::TensorNd<double, 3, CblasRowMajor> tensor(2, 3, 4);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Permute the tensor's axes into the order {2, 1, 0}, Matlab-style.
void t_permute() {
    auto PTensor = callTensor();
    // Equivalent explicit form:
    //   const std::array<size_t, 3> order = {2, 1, 0};
    //   std::cout << gc::permute(PTensor, order) << std::endl;
    std::cout << gc::permute(PTensor, {2, 1, 0}) << std::endl;
}
// Driver for the permute example.
int main() {
    t_permute();
    return 0;
}
Reshape¶
Reshape is slightly different from the permute function: permute is a standalone function, whereas reshape is a member function of the tensor class.
tensor.reshape(shape)
- tensor is the target tensor
- shape is the new shape, defined by an array
Example:
/* test_reshape.cpp
* Test function for tensor reshape
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a 2x3x4 row-major tensor whose entries are 0,1,2,... in storage order.
gc::TensorNd<double, 3, CblasRowMajor> callTensor() {
    gc::TensorNd<double, 3, CblasRowMajor> tensor(2, 3, 4);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Reshape the rank-3 (2,3,4) tensor into a rank-4 (2,2,2,3) tensor.
// The template argument <4> specifies the new rank.
void t_reshape() {
    auto RTensor = callTensor();
    std::cout << "Before reshaping" << std::endl
              << RTensor << std::endl
              << "After reshaping" << std::endl
              << RTensor.reshape<4>({2, 2, 2, 3}) << std::endl;
}
// Driver for the reshape example.
int main() {
    t_reshape();
    return 0;
}
Diag¶
Similar to Matlab, diag takes the diagonal elements for rank 2 tensors (matrices)
diag(tensor)
- tensor is the target tensor
Example:
/* test_diag.cpp
* Test function for diag (taking diagonal elements)
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a 4x4 row-major matrix whose entries are 0,1,2,... in storage order.
gc::TensorNd<double, 2, CblasRowMajor> callMat() {
    gc::TensorNd<double, 2, CblasRowMajor> tensor(4, 4);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Print a matrix followed by its diagonal elements.
void t_diag() {
    auto mat = callMat();
    std::cout << "The matrix" << std::endl << mat << std::endl
              << "Diagonal elements are" << std::endl
              << diag(mat) << std::endl;
}
// Driver for the diag example.
int main() {
    t_diag();
    return 0;
}
Trace¶
Trace applies to rank 2 tensor (matrix)
trace(tensor)
- tensor: target tensor
- returns a double or complex<double> number
Example:
/* test_trace.cpp
* Test function for trace (taking the sum of diagonal elements)
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a 4x4 row-major matrix whose entries are 0,1,2,... in storage order.
gc::TensorNd<double, 2, CblasRowMajor> callMat() {
    gc::TensorNd<double, 2, CblasRowMajor> tensor(4,4);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Print a matrix followed by its trace (sum of diagonal elements).
void t_trace() {
    auto mat = callMat();
    std::cout << "The matrix" << std::endl << mat << std::endl
              << "The trace is" << std::endl
              << trace(mat) << std::endl;
}
// Driver for the trace example.
int main() {
    t_trace();
    return 0;
}
Rscale¶
Rscale is implemented to reduce the computational cost of multiplication between a diagonal matrix and a dense matrix. It is an auxiliary function for SVD compress: $B=AB$ Rscale (double check) and $B=BA$ Cscale (double check).
(standalone test function to be updated).
Contract¶
Tensor contraction. The usage is similar to that in Verstraete's demo code.
$$T_{ijlm} = \sum_k A_{ijk} B_{klm}$$
Example:
/* test_contract.cpp
* Test function for tensor contraction
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct an (_i, _j, _k) row-major tensor whose entries are 0,1,2,...
// in storage order.
gc::TensorNd<double, 3, CblasRowMajor> callTensor(size_t _i,
                                                  size_t _j, size_t _k) {
    gc::TensorNd<double, 3, CblasRowMajor> tensor(_i,_j,_k);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Contract axis 1 of the first tensor (dimension 3) against axis 2 of the
// second tensor (also dimension 3).
void t_contract() {
    auto FTensor1 = callTensor(2, 3, 4);
    auto FTensor2 = callTensor(2, 2, 3);
    std::array<size_t, 1> ax1 = {1}, ax2 = {2};
    auto result = gc::contract(FTensor1, ax1, FTensor2, ax2);
    std::cout << "Tensor 1" << std::endl << FTensor1 << std::endl
              << "Tensor 2" << std::endl << FTensor2 << std::endl
              << "Contracted on indices 1 and 2 (0, 1, 2)" << std::endl
              << result << std::endl;
}
// Driver for the contraction example.
int main() {
    t_contract();
    return 0;
}
Multiplication¶
Multiplication is defined for matrices (rank 2 tensors); the * operator is overloaded to realize it.
Note that the major convention cannot be mixed.
Example:
/* test_mult.cpp
* Test function for mult (matrix)
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a 4x4 row-major matrix whose entries are 0,1,2,... in storage order.
gc::TensorNd<double, 2, CblasRowMajor> callMat() {
    gc::TensorNd<double, 2, CblasRowMajor> tensor(4,4);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Multiply the matrix by itself via the overloaded * operator.
// FIX: the comment above this function was a copy-paste leftover from the
// diag example ("Take the diagonal elements"), and the output label
// "The product is for mat*mat is" was garbled.
void t_mult() {
    auto mat = callMat();
    std::cout << "The matrix" << std::endl
              << mat << std::endl
              << "The product for mat*mat is" << std::endl
              << mat*mat << std::endl;
}
// Driver for the multiplication example.
int main() {
    t_mult();
    return 0;
}
Deparallelisation¶
Deparallelisation is used for MPS/MPO compression. Note that here compression does not refer to SVD compression: deparallelisation compresses without introducing any error. For example, we have the matrix
$$\begin{pmatrix} 1 & 2 & 1 \\ 3 & 6 & 5 \\ 2 & 4 & 2 \end{pmatrix}$$
where the first two columns differ by a factor of 2 (i.e., they are linearly dependent). Such a matrix can be decomposed into two matrices with dimensions (3,2) and (2,3). As such, the dimension can be decreased.
(standalone test function to be updated)
Direct Sum¶
Direct sum for tensor is an important function. It is necessary for MPS/MPO addition.
For example,
- Case 1: the direct sum of a matrix A(2,3) and matrix B(3,3). We add up its axis(dimension) 0.
$$\begin{pmatrix}1 & 2 & 3 \\ 4 & 5 & 6\end{pmatrix} \oplus_{(0)} \begin{pmatrix}7 & 8 & 9 \\ 10 & 11 & 12 \\ 13 & 14 & 15 \end{pmatrix} = \begin{pmatrix}1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \\ 10 & 11 & 12 \\ 13 & 14 & 15 \end{pmatrix} $$
- Case 2: the direct sum of a matrix A(2,3) and matrix B(3,3). We add up its axis(dimension) 1. However, here the fixed axis does not have same size and the direct sum is not defined
- Case 3: the direct sum of a matrix A(2,2) and B(2,2) for both axes. The sum matrix is C(4,4). $$\begin{pmatrix}a & b \\ c & d\end{pmatrix} \oplus_{(0,1)} \begin{pmatrix}e & f \\ g & h\end{pmatrix} = \begin{pmatrix}a & b & 0 & 0 \\ c & d & 0 & 0 \\ 0 & 0 & e & f \\ 0 & 0 & g & h \end{pmatrix} $$
- Case 4: the direct sum of a matrix A(2,3) and B(4,5) for both axes. The sum matrix is C(6,8). $$ \mathbf{A}+\mathbf{B} = \begin{pmatrix} \mathbf{A} & 0 \\ 0 & \mathbf{B}\end{pmatrix} $$
Example:
/* test_directSum.cpp
* Test function for directSum (taking directSum)
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct an (n, m) row-major matrix whose entries are 0,1,2,... in
// storage order.
gc::TensorNd<double, 2, CblasRowMajor> callMat(size_t n, size_t m) {
    gc::TensorNd<double, 2, CblasRowMajor> tensor(n,m);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Case 1: direct sum along a single axis (axis 0) — matrices are stacked
// vertically; the fixed axis (axis 1) has matching size 3.
void t_directSum1() {
    std::cout << "mat1(2,3) +(0) mat2(3,3)" << std::endl;
    auto mat1 = callMat(2,3);
    auto mat2 = callMat(3,3);
    std::array<size_t,1> ax = {0};
    std::cout << "Matrix 1" << std::endl << mat1 << std::endl
              << "Matrix 2" << std::endl << mat2 << std::endl
              << "The direct sum is" << std::endl
              << directSum(mat1, mat2, ax) << std::endl;
}
// Case 2: direct sum along axis 1 only.
// This cannot run: the fixed axis (axis 0) has sizes 2 and 3, and the
// dimensions of the fixed index must be the same.
void t_directSum2() {
    std::cout << "mat1(2,3) +(1) mat2(3,3)" << std::endl;
    auto mat1 = callMat(2,3);
    auto mat2 = callMat(3,3);
    std::array<size_t,1> ax = {1};
    std::cout << "Matrix 1" << std::endl << mat1 << std::endl
              << "Matrix 2" << std::endl << mat2 << std::endl
              << "The direct sum is" << std::endl
              << directSum(mat1, mat2, ax) << std::endl;
}
// Case 3: direct sum over both axes of two (2,2) matrices, producing a
// (4,4) block-diagonal result.
void t_directSum3() {
    std::cout << "mat1(2,2) +(0,1) mat2(2,2)" << std::endl;
    auto mat1 = callMat(2,2);
    auto mat2 = callMat(2,2);
    std::array<size_t,2> ax = {0, 1};
    std::cout << "Matrix 1" << std::endl << mat1 << std::endl
              << "Matrix 2" << std::endl << mat2 << std::endl
              << "The direct sum is" << std::endl
              << directSum(mat1, mat2, ax) << std::endl;
}
// Case 4: direct sum over both axes of a (2,3) and a (4,5) matrix,
// producing a (6,8) block-diagonal result.
void t_directSum4() {
    std::cout << "mat1(2,3) +(0,1) mat2(4,5)" << std::endl;
    auto mat1 = callMat(2,3);
    auto mat2 = callMat(4,5);
    std::array<size_t,2> ax = {0, 1};
    std::cout << "Matrix 1" << std::endl << mat1 << std::endl
              << "Matrix 2" << std::endl << mat2 << std::endl
              << "The direct sum is" << std::endl
              << directSum(mat1, mat2, ax) << std::endl;
}
// Driver: case 2 stays disabled because its fixed-axis sizes mismatch.
int main() {
    t_directSum1();
    // t_directSum2();
    t_directSum3();
    t_directSum4();
    return 0;
}
Matrix Exponential¶
gc::expm(matrix)
- Pade approximation to 6/7th order
Example:
/* test_expm.cpp
* Test function for matrix exponential
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a 4x4 row-major matrix whose entries are 0,1,2,... in storage order.
gc::TensorNd<double, 2, CblasRowMajor> callMat() {
    gc::TensorNd<double, 2, CblasRowMajor> tensor(4,4);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Compute the matrix exponential of a 4x4 matrix via gc::expm.
void t_expm() {
    auto mat = callMat();
    std::cout << "The matrix" << std::endl
              << mat << std::endl;
    // NOTE(review): any return value of gc::expm is discarded here, so this
    // assumes expm updates `mat` in place — confirm against expm4.hpp.
    gc::expm(mat);
    std::cout << "The matrix exp is" << std::endl
              << mat << std::endl;
}
// Driver for the matrix exponential example.
int main() {
    t_expm();
    return 0;
}
QR Decomposition¶
The index convention for QR decomposition is defined in the following way:
- Given a rank 4 tensor qrtensor(a,b,c,d), with a,b,c,d denoting the dimensions of the respective indices:
- If the axis is defined as 3, i.e., the 4th index with dimension d, qrtensor would be reshaped as a matrix with dimension (abc,d)
- The outcome matrices would be Q=(abc,d), R=(d,d)
- If the axes are defined as 1,2, i.e., the 2nd and 3rd indices with dimensions b, c respectively, qrtensor would be reshaped as a matrix with dimension (ad,bc)
- The outcome matrices would be Q=(ad,bc), R=(bc,bc) if ad > bc, or Q=(ad,ad), R=(ad,bc) if ad < bc.
/* test_qrcompress.cpp
* Test function for qr decomposition
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a (2,3,4,2) row-major tensor whose entries are 0,1,2,... in
// storage order.
gc::TensorNd<double, 4, CblasRowMajor> callTensor() {
    gc::TensorNd<double, 4, CblasRowMajor> tensor(2,3,4,2);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// QR decomposition splitting off a single axis (axis 3, the last index).
void t_qr_oneindex() {
    auto QRTensor = callTensor();
    std::array<size_t, 1> ax = {3};
    auto result = gc::qrCompress(QRTensor,ax);
    auto& Q = result.first;
    auto& R = result.second;
    std::cout << "The tensor" << std::endl << QRTensor << std::endl
              << "Q Tensor" << std::endl << Q << std::endl
              << "R Tensor" << std::endl << R << std::endl;
}
// QR decomposition splitting off two axes (axes 1 and 2).
void t_qr_twoindices() {
    auto QRTensor = callTensor();
    std::array<size_t, 2> ax = {1, 2};
    auto result = gc::qrCompress(QRTensor,ax);
    auto& Q = result.first;
    auto& R = result.second;
    std::cout << "The tensor" << std::endl << QRTensor << std::endl
              << "Q Tensor" << std::endl << Q << std::endl
              << "R Tensor" << std::endl << R << std::endl;
}
// Driver: run both the one-index and two-index QR examples.
int main() {
    t_qr_oneindex();
    t_qr_twoindices();
    return 0;
}
SVD Decomposition¶
SVD Compress a single tensor depending on the max bond dimension and svdcutoff. The max bond dimension is first checked. No truncation would be done if the bond dimension is within the max bond dimension.
gc::svdCompress(tensor,ax,maxbonddimension,svdcutoff,verbose)
- tensor: tensor to be compressed
- ax: the axes on the tensor to be decomposed
- maxbonddimension: the upper bound for the bond dimension (number of elements to be kept)
- svdcutoff: the cutoff error
- verbose: 0, 1, or 2 for runtime output
The index convention follows the same spirit as QR compression/decomposition.
/* test_svdcompress.cpp
* Test function for SVD decomposition
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a (2,3,4,2) row-major tensor whose entries are 0,1,2,... in
// storage order.
gc::TensorNd<double, 4, CblasRowMajor> callTensor() {
    gc::TensorNd<double, 4, CblasRowMajor> tensor(2,3,4,2);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// SVD compression splitting off a single axis (axis 3).
// Arguments: maxbonddimension = -1 (no bound), svdcutoff = 0, verbose = 1.
void t_svd_oneindex() {
    auto SVDTensor = callTensor();
    std::array<size_t, 1> ax = {3};
    auto result = gc::svdCompress(SVDTensor, ax, -1, 0, 1);
    std::cout << "The tensor" << std::endl << SVDTensor << std::endl
              << "S Tensor" << std::endl << std::get<0>(result) << std::endl
              << "V Tensor" << std::endl << std::get<1>(result) << std::endl
              << "D Tensor" << std::endl << std::get<2>(result) << std::endl;
}
// SVD compression splitting off two axes (axes 2 and 3).
// Arguments: maxbonddimension = 10, svdcutoff = 0, verbose = 1.
void t_svd_twoindices() {
    auto SVDTensor = callTensor();
    std::array<size_t, 2> ax = {2,3};
    auto result = gc::svdCompress(SVDTensor, ax, 10, 0, 1);
    std::cout << "The tensor" << std::endl << SVDTensor << std::endl
              << "S Tensor" << std::endl << std::get<0>(result) << std::endl
              << "V Tensor" << std::endl << std::get<1>(result) << std::endl
              << "D Tensor" << std::endl << std::get<2>(result) << std::endl;
}
// Driver: run both the one-index and two-index SVD examples.
int main() {
    t_svd_oneindex();
    t_svd_twoindices();
    return 0;
}
Index fusion¶
Index fusion is a procedure for tensor contraction. Hence it is implemented for a pair of tensors instead of one. To use it for a single tensor, the second tensor input can be left empty.
Alternatively, the contraction on the index could be replaced by using permute and reshape functions.
/* test_fusion.cpp
* Test function for tensor index fusion
*/
#include <iostream>
#include <gtensor_operation/ndtensor_ext.hpp>
#include <gtensor_operation/ndtensor_operation.hpp>
using namespace gc;
// Construct a (2,3,4,2) row-major tensor whose entries are 0,1,2,... in
// storage order.
gc::TensorNd<double, 4, CblasRowMajor> callTensor() {
    gc::TensorNd<double, 4, CblasRowMajor> tensor(2,3,4,2);
    double value = 0.0;
    for (auto it = tensor.begin(); it != tensor.end(); ++it) {
        *it = value;
        value += 1.0;
    }
    return tensor;
}
// Tensor index fusion: fuse axes {1,2} of a tensor with the matching axes of
// a second tensor (here the same tensor), then repeat on axes {0,1} of two
// fresh tensors. FIX: corrected the output-label typo "happend" -> "happened"
// and dropped the unused local `ax3`.
void t_fusion() {
    auto FTensor = callTensor();
    auto FTensor1 = callTensor();
    auto FTensor2 = callTensor();
    std::array<size_t, 2> ax = {1,2};
    auto result = gc::fusion(FTensor, ax, FTensor, ax);
    std::cout << "The tensor" << std::endl
              << FTensor << std::endl
              << "Fusion 1" << std::endl
              << result.first << std::endl
              << "Fusion 2" << std::endl
              << result.second << std::endl
              << "What happened to FTensor" << std::endl
              << FTensor << std::endl;
    std::array<size_t, 2> ax1 = {0, 1}, ax2 = {1, 2};
    std::cout << "The tensor" << std::endl;
    // gc::fusion(FTensor, ax1, FTensor2, ax2) would be an error: after
    // fusion, the left tail dimension and the right leading dimension
    // have to be the same.
    auto result1 = gc::fusion(FTensor1,ax1,FTensor2, ax1); // Passed
    std::cout << "Fusion 1" << std::endl
              << result1.first << std::endl
              << "Fusion 2" << std::endl
              << result1.second << std::endl;
}
// Driver for the index-fusion example.
int main() {
    t_fusion();
    return 0;
}