Import the numpy library and create a random matrix
import numpy as np
n = 4
A = np.random.random((n,n))
A
Create another matrix and multiply the two
B = np.random.random((n,n))
np.dot(A,B)
Import the Cyclops Tensor Framework (CTF) library and convert the numpy matrices to CTF tensors
import ctf
tA = ctf.astensor(A)
tB = ctf.astensor(B)
ctf.dot(tA,tB)
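As a quick sanity check, the CTF result can be converted back to a numpy array and compared against np.dot (a minimal verification sketch):
#verify that the CTF product matches the numpy product
assert np.allclose(ctf.dot(tA,tB).to_nparray(), np.dot(A,B))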
Use CTF's index-based notation to perform the multiplication
tC = ctf.zeros((n,n))
tC.i("ij") << tA.i("ik")*tB.i("kj")
tC
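Since tC was initialized to zeros, the contraction written with << should equal the plain matrix product; a quick check:
#the index-notation product should equal np.dot(A,B)
assert np.allclose(tC.to_nparray(), np.dot(A,B))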
The particular characters 'i','j','k' don't matter; we can replace them with 'z','?','+'
tC = ctf.zeros((n,n))
tC.i("z?") << tA.i("z+")*tB.i("+?")
tC
numpy actually has similar functionality via einsum, and ctf provides a matching einsum interface
np.einsum("ik,kj->ij",A,B)
ctf.einsum("ik,kj->ij",tA,tB)
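The index string is not limited to matrix multiplication; as a small illustration (assuming ctf.einsum accepts the same strings as np.einsum here), it also expresses traces and transposes:
#trace of A via einsum
print(np.einsum("ii->", A))
#transpose of a matrix via einsum, in numpy and in CTF
print(np.einsum("ij->ji", A))
print(ctf.einsum("ij->ji", tA))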
This notation can be used to contract tensor networks, for instance the tensor train (MPS)
k = 2 # rank
W = ctf.tensor([k,n,k])
V = ctf.tensor([k,n])
W.fill_random(-1.0,1.0)
V.fill_random(-1.0,1.0)
Z = ctf.tensor([n,n,n,n,n,n])
Z.i("ijklmn") << V.i("ai")*W.i("ajb")*W.i("bkc")*W.i("cld")*W.i("dme")*V.i("en")
print(Z[1:3,0,2,1,0:3,1].reshape((3,2)))
Using np.einsum, the contraction looks as follows
V2 = V.to_nparray()
W2 = W.to_nparray()
Z2 = np.einsum("ai,ajb,bkc,cld,dme,en->ijklmn",V2,W2,W2,W2,W2,V2)
print(Z2[1:3,0,2,1,0:3,1].reshape((3,2)))
#the same contraction is possible with ctf.einsum
Z = ctf.einsum("ai,ajb,bkc,cld,dme,en->ijklmn",V,W,W,W,W,V)
print(Z[1:3,0,2,1,0:3,1].reshape((3,2)))
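Forming the order-6 tensor in a single expression is convenient, but the train can also be contracted pairwise, carrying one open bond index at each step; a sketch of the same contraction done sequentially (the intermediate T is our own naming):
#contract the train left to right, keeping one bond index open
T = ctf.einsum("ai,ajb->ijb", V, W)
T = ctf.einsum("ijb,bkc->ijkc", T, W)
T = ctf.einsum("ijkc,cld->ijkld", T, W)
T = ctf.einsum("ijkld,dme->ijklme", T, W)
Z_seq = ctf.einsum("ijklme,en->ijklmn", T, V)
print(Z_seq[1:3,0,2,1,0:3,1].reshape((3,2)))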
To contract together a CP decomposition, we need Hadamard (elementwise) products, which arise because the rank index appears in more than two operands
U = ctf.tensor([k,n])
U.fill_random(-1.0,1.0)
Z.set_zero()
#note that the `a` index appears in multiple operands
Z.i("ijklmn") << U.i("ai")*U.i("aj")*U.i("ak")*U.i("al")*U.i("am")*U.i("an")
print(Z[1:3,0,2,1,0:3,1].reshape((3,2)))
U2 = U.to_nparray()
Z2 = np.einsum("ai,aj,ak,al,am,an->ijklmn",U2,U2,U2,U2,U2,U2)
print(Z2[1:3,0,2,1,0:3,1].reshape((3,2)))
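Equivalently, the CP contraction is a sum over the rank index of six-way outer products of the rows of U, which we can verify directly in numpy (a verification sketch; Z3 is our own temporary):
#rebuild the CP tensor as an explicit sum of outer products
Z3 = np.zeros([n]*6)
for a in range(k):
    u = U2[a,:]
    Z3 += np.einsum("i,j,k,l,m,n->ijklmn",u,u,u,u,u,u)
print(np.allclose(Z2, Z3))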
Let's test the performance of the MPS contraction for different ranks
import time
for k in np.arange(4)*2+2:
    t = time.time()
    W = ctf.tensor([k,n,k])
    V = ctf.tensor([k,n])
    W.fill_random(-1.0,1.0)
    V.fill_random(-1.0,1.0)
    Z = ctf.tensor([n,n,n,n,n,n])
    Z.i("ijklmn") << V.i("ai")*W.i("ajb")*W.i("bkc")*W.i("cld")*W.i("dme")*V.i("en")
    V2 = V.to_nparray()
    W2 = W.to_nparray()
    #time CTF, including all initialization and conversions
    print("ctf k =",k,"took",time.time()-t,"seconds.")
    t2 = time.time()
    Z2 = np.einsum("ai,ajb,bkc,cld,dme,en->ijklmn",V2,W2,W2,W2,W2,V2)
    print("numpy k =",k,"took",time.time()-t2,"seconds.")
Let's create an order-12 sparse tensor with n^12 (about 16.8 million) total entries
Z = ctf.tensor([n,n,n,n,n,n,n,n,n,n,n,n],sp=1)
Z.fill_sp_random(-1.,1.,.00001)
Z.read_local_nnz()
#create a random vector in a sparse representation
v = ctf.tensor([n],sp=1)
v.fill_sp_random(0.,1.,1.)
#create an order 12 sparse tensor
Z = ctf.tensor([n,n,n,n,n,n,n,n,n,n,n,n],sp=1)
#fill tensor so that .001% of entries are nonzero
Z.fill_sp_random(0.,1.,.00001)
#add 1. to each diagonal entry (<< accumulates into the tensor)
Z.i("iiiiiiiiiiii") << 1.
str12 = "1234567890ab"
for i in range(1,12)[::-1]:
    #normalize the tensor so that its entries sum to one
    Z.i(str12[0:i+1]).scl(1./ctf.sum(Z))
    #create a tensor with one fewer dimension
    Z_new = ctf.tensor([n for j in range(i)],sp=1)
    #contract the tensor over its last mode with the vector v
    Z_new.i(str12[0:i]) << Z.i(str12[0:i+1])*v.i(str12[i])
    #replace the old tensor with the lower-dimensional one
    Z = Z_new
#read the nonzeros from Z stored on this processor
inds, vals = Z.read_local_nnz()
print(Z.ndim)
print(inds,vals)
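After the loop, Z is a sparse vector of length n; contracting it with v over the remaining mode yields a scalar, computed here via numpy conversion for simplicity (a closing sketch):
#contract the last remaining mode to obtain a scalar
print(np.dot(Z.to_nparray(), v.to_nparray()))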