def createMPSA(self, tensorLikeFlag=False):
    tensor1L = Tensor(shape=(3, 3), labels=['o', 'internal'], tensorLikeFlag=tensorLikeFlag)
    tensor11 = Tensor(shape=(3, 5, 4), labels=['itl', 'oo', 'itr'], tensorLikeFlag=tensorLikeFlag)
    tensor12 = Tensor(shape=(4, 2, 4), labels=['itl', 'oo', 'itr'], tensorLikeFlag=tensorLikeFlag)
    tensor13 = Tensor(shape=(4, 3, 2), labels=['itl', 'oo', 'itr'], tensorLikeFlag=tensorLikeFlag)
    tensor1R = Tensor(shape=(2, 5), labels=['internal', 'o'], tensorLikeFlag=tensorLikeFlag)

    makeLink('internal', 'itl', tensor1L, tensor11)
    makeLink('itr', 'itl', tensor11, tensor12)
    makeLink('itr', 'itl', tensor12, tensor13)
    makeLink('itr', 'internal', tensor13, tensor1R)

    tensorsA = [tensor1L, tensor11, tensor12, tensor13, tensor1R]
    mpsA = FreeBoundaryMPS(tensorList=tensorsA, chi=16)
    return mpsA
def example():
    shapeA = (300, 400, 50)
    shapeB = (300, 600)
    shapeC = (400, 600, 50)
    # create tensors with labels
    a = Tensor(labels=['a3', 'b4', 'c5'], data=np.ones(shapeA))
    b = Tensor(labels=['a3', 'd6'], data=np.ones(shapeB))
    c = Tensor(labels=['e4', 'd6', 'c5'], data=np.ones(shapeC))

    # make links via labels
    # note that links can also be made between "Leg" objects to avoid reused leg names;
    # here we use leg names for simplicity
    makeLink('a3', 'a3', a, b)
    makeLink('c5', 'c5', a, c)
    makeLink('d6', 'd6', b, c)

    # now that we have a tensor network, we can generate the optimal contraction sequence of this tensor list
    optimalSeq = generateOptimalSequence([a, b, c])
    print('optimal contraction sequence = {}'.format(optimalSeq))

    # if we do not have any prior knowledge, we can contract the tensor list like this
    res = contractTensorList([a, b, c])
    print(res)

    # if you already have a good sequence to use
    res, cost = contractAndCostWithSequence([a, b, c], seq=optimalSeq)
    print(res)
    print('contraction cost = {}'.format(cost))

    # if you want to save time / space by contracting in place
    # (note that after this you cannot contract the tensors again, since the bonds between them have been broken)
    res = contractWithSequence([a, b, c], seq=optimalSeq, inplace=True)
    print(res)
    print('')
def test_TensorLabels(self):
    tensor = Tensor(data=np.zeros((3, 4, 5), dtype=np.float64), labels=['abc', 'def', 'abc'])
    self.assertEqual(tensor.indexOfLabel('abc'), 0)
    self.assertEqual(tensor.indexOfLabel('abd'), -1)
    self.assertEqual(tensor.indexOfLabel('abc', backward=True), 2)
    self.assertEqual(tensor.shapeOfLabels(['abc', 'def']), (3, 4))
def test_createMPS(self):
    tensor = Tensor(shape=(3, 4, 5))
    mps = createMPSFromTensor(tensor=tensor, chi=16)
    self.assertEqual(mps.n, 3)
    # print(mps)

    with self.assertWarns(RuntimeWarning) as cm:
        tensor = Tensor(shape=(3, ))
        mps = createMPSFromTensor(tensor=tensor, chi=16)
    self.assertEqual(mps.n, 1)
    # print(mps)
    self.assertIn('MPS.py', cm.filename)
    message = cm.warning.__str__()
    self.assertIn('creating MPS for 1-D tensor', message)

    def zeroDimensionMPSFunc():
        tensor = Tensor(shape=())
        _ = createMPSFromTensor(tensor)

    self.assertRaises(AssertionError, zeroDimensionMPSFunc)

    tensor = Tensor(shape=(3, 4, 5, 3, 3, 2))
    mps = createMPSFromTensor(tensor=tensor, chi=16)
    self.assertEqual(mps.n, 6)
def test_TensorRenameWarning(self):
    tensor = Tensor(data=np.zeros((3, 4, 5), dtype=np.float64), labels=['abc', 'def', 'abc'])
    with self.assertWarns(RuntimeWarning) as cm:
        tensor.renameLabel('ade', 'bcf')
    self.assertIn('tensor.py', cm.filename)
    message = cm.warning.__str__()
    self.assertIn('leg name ade does not exist', message)
    self.assertIn('no rename happened', message)
def test_TensorCopy(self):
    a = Tensor(shape=(2, 2, 3), data=np.zeros(12))
    self.assertEqual(a.a[(0, 0, 0)], 0.0)
    b = a.copy()
    b.a[(0, 0, 0)] = 1.0
    # the copy owns its own data: modifying b must not change a
    self.assertEqual(b.a[(0, 0, 0)], 1.0)
    self.assertEqual(a.a[(0, 0, 0)], 0.0)
def twoTensorsContraction():
    shapeA = (3, 4, 5)
    shapeB = (5, 4)
    a = Tensor(labels=['a3', 'a4', 'a5'], data=np.ones(shapeA))
    b = Tensor(labels=['b5', 'b4'], data=np.ones(shapeB))
    makeLink('a4', 'b4', a, b)
    makeLink('a5', 'b5', a, b)
    c = a @ b
    print(c)
def test_FreeBondFTN(self):
    shapeA = (300, 4, 5)
    shapeB = (300, 6)
    shapeC = (4, 6, 5)
    a = Tensor(shape=shapeA, labels=['a300', 'b4', 'c5'], data=np.ones(shapeA))
    b = Tensor(shape=shapeB, labels=['a300', 'd6'], data=np.ones(shapeB))
    c = Tensor(shape=shapeC, labels=['e4', 'd6', 'c5'], data=np.ones(shapeC))

    tensorDict = TensorDict()
    tensorDict.setTensor('a', a)
    tensorDict.setTensor('b', b)
    tensorDict.setTensor('c', c)

    FTN = FiniteTensorNetwork(['a', 'b'], realCost=True)
    FTN.addTensor('c')
    FTN.addLink('a', 'a300', 'b', 'a300')
    FTN.addLink('a', 'c5', 'c', 'c5')
    FTN.addLink('b', 'd6', 'c', 'd6')

    result = FTN.contract(tensorDict, removeTensorTag=False)
    self.assertTrue(funcs.compareLists(result.labels, ['a-b4', 'c-e4']))
    self.assertEqual(int(result.a[0][1]), 9000)

    result = FTN.contract(tensorDict, removeTensorTag=True)
    self.assertTrue(funcs.compareLists(result.labels, ['b4', 'e4']))
    self.assertEqual(int(result.a[0][1]), 9000)

    FTN.unlock()
    FTN.addPostNameChange('c', 'e4', 'e4FromC')
    FTN.addPreNameChange('a', 'b4', 'b4FromA')
    FTN.addPreNameChange('a', 'a300', 'a3')
    FTN.removePreNameChange('a', 'a300', 'a3')
    FTN.addPostNameChange('a', 'd6', 'foo')
    FTN.removePostNameChange('a', 'd6', 'foo')

    result = FTN.contract(tensorDict, removeTensorTag=True)
    # print(result.labels)
    self.assertTrue(funcs.compareLists(result.labels, ['b4FromA', 'e4FromC']))
    self.assertEqual(int(result.a[0][1]), 9000)

    FTN.unlock()
    FTN.removePostNameChange('c', 'e4', 'e4FromC')
    FTN.addPreNameChange('c', 'e4', 'e4FromC')
    FTN.addPostOutProduct([('a', 'b4FromA'), ('c', 'e4FromC')], 'out')
    result = FTN.contract(tensorDict, removeTensorTag=True)
    self.assertListEqual(result.labels, ['out'])
    self.assertEqual(result.shape[0], 16)
def test_FTN(self):
    shapeA = (300, 4, 5)
    shapeB = (300, 6)
    shapeC = (4, 6, 5)
    a = Tensor(shape=shapeA, labels=['a300', 'b4', 'c5'], data=np.ones(shapeA))
    b = Tensor(shape=shapeB, labels=['a300', 'd6'], data=np.ones(shapeB))
    c = Tensor(shape=shapeC, labels=['b4', 'd6', 'c5'], data=np.ones(shapeC))

    tensorDict = TensorDict()
    tensorDict.setTensor('a', a)
    tensorDict.setTensor('b', b)
    tensorDict.setTensor('c', c)

    FTN = FiniteTensorNetwork(['a', 'b'], realCost=True)
    FTN.addTensor('c')
    FTN.addLink('a', 'a300', 'b', 'a300')
    FTN.addLink('a', 'b4', 'c', 'b4')
    FTN.addLink('a', 'c5', 'c', 'c5')
    FTN.addLink('b', 'd6', 'c', 'd6')

    result = FTN.contract(tensorDict)
    self.assertEqual(int(result.a), 36000)
    self.assertListEqual(FTN.optimalSeq, [(0, 1), (2, 0)])

    result2 = FTN.contract(tensorDict)
    self.assertEqual(int(result2.a), 36000)
    self.assertListEqual(FTN.optimalSeq, [(0, 1), (2, 0)])

    newShapeA = (3, 4, 5)
    newShapeB = (3, 6)
    newA = Tensor(shape=newShapeA, labels=['a300', 'b4', 'c5'], data=np.ones(newShapeA))
    newB = Tensor(shape=newShapeB, labels=['a300', 'd6'], data=np.ones(newShapeB))
    tensorDict.setTensor('a', newA)
    tensorDict.setTensor('b', newB)

    result3 = FTN.contract(tensorDict)
    self.assertEqual(int(result3.a), 360)
    self.assertListEqual(FTN.optimalSeq, [(0, 2), (1, 0)])
    self.assertEqual(FTN.bondDims['a-a300'], 3)
    self.assertEqual(FTN.tensorCount, 3)
def createSpecialTN(self):
    a = Tensor(shape=(3, 5, 7), labels=['a3', 'a5', 'a7'])
    b = Tensor(shape=(2, 4, 5), labels=['b2', 'b4', 'b5'])
    c = Tensor(shape=(2, 7, 7, 7), labels=['c2', 'c71', 'c72', 'c73'])
    d = Tensor(shape=(7, 7, 3, 4), labels=['d71', 'd72', 'd3', 'd4'])

    makeLink('a3', 'd3', a, d)
    makeLink('a5', 'b5', a, b)
    makeLink('a7', 'c72', a, c)
    makeLink('b2', 'c2', b, c)
    makeLink('b4', 'd4', b, d)
    makeLink('c71', 'd72', c, d)
    makeLink('c73', 'd71', c, d)
    return [a, b, d, c]
def iterate(self):
    # make an SVD decomposition on the square tensors
    # works both when a and b are different and when they are the same
    if (self.iterateFTN is None):
        self.iterateFTN = squareContractOutFTN()
    dof = self.a.degreeOfFreedom
    # makeLink(self.a.getLeg('a-1'), self.b.getLeg('b-1'))
    # iTensor = contractTwoTensors(self.a, self.b)
    # a2Dim, b2Dim, a3Dim, b3Dim = iTensor.shapeOfLabels(['a-2', 'b-2', 'a-3', 'b-3'])
    # aMat = iTensor.toMatrix(rows = ['a-2', 'b-2'], cols = ['a-3', 'b-3'])

    # a: split at ul and dr
    vDim, hDim = self.a.shapeOfLabels(['u', 'l'])
    aMat = self.a.toMatrix(rows=['u', 'l'], cols=['d', 'r'])
    bMat = self.b.toMatrix(rows=['u', 'r'], cols=['d', 'l'])
    uA, vA, errorA = SVDDecomposition(aMat, self.chi)
    uB, vB, errorB = SVDDecomposition(bMat, self.chi)

    # uA: legs u, l and output leg o, placed at dr
    # vA: legs d, r and output leg o, placed at ul
    dr = Tensor(shape=(vDim, hDim, uA.shape[1]), labels=['u', 'l', 'o'], data=uA)
    ul = Tensor(shape=(vDim, hDim, vA.shape[1]), labels=['d', 'r', 'o'], data=vA)
    dl = Tensor(shape=(vDim, hDim, uB.shape[1]), labels=['u', 'r', 'o'], data=uB)
    ur = Tensor(shape=(vDim, hDim, vB.shape[1]), labels=['d', 'l', 'o'], data=vB)

    self.a = self.iterateFTN.contract(TensorDict({'ul': ul, 'ur': ur, 'dl': dl, 'dr': dr}))
    self.a.degreeOfFreedom = dof * 2
    self.b = self.a
    self.appendToArchive()
    self.errors.append((errorA, errorB))
def IsingSiteTensor(betaJ, dim=4, labels=None):
    """
    The site tensor that can be connected to form an Ising tensor network.

    Parameters
    ----------
    betaJ : float or list of float
        The interaction combined with the inverse temperature, namely J / T.
        When a list is given, its length should be dim, one value for each edge.
    dim : int, default 4
        The degree of the site (the number of edges it is linked to).
        The default value 4 corresponds to the square lattice.
    labels : list of str, optional
        The labels of the resulting tensor, one per leg. If betaJ is a number,
        all legs are equivalent, so the order of labels does not matter.
        Otherwise, please be careful about the order of labels, since it
        corresponds to the order of betaJ.

    Returns
    -------
    Tensor
        A tensor of dim legs, labelled with labels, representing the local
        interaction around a site (a diagonal site tensor contracted with one
        edge tensor per leg).
    """
    assert (funcs.isRealNumber(betaJ) or (len(betaJ) == dim)), funcs.errorMessage(
        "betaJ {} does not have required dim {}.".format(betaJ, dim))
    assert ((labels is None) or (len(labels) == dim)), funcs.errorMessage(
        "labels {} do not have required dim {}.".format(labels, dim))

    a = xplib.xp.array([1.0, 1.0])
    a = funcs.diagonalNDTensor(a, dim=dim)
    if (funcs.isRealNumber(betaJ)):
        betaJ = [betaJ] * dim
    # absorb one edge matrix into each leg of the diagonal core
    for i in range(dim):
        edgeMat = IsingEdgeMatrix(betaJ[i])
        a = xplib.xp.tensordot(a, edgeMat, (0, 0))
    # print(a)
    return Tensor(data=a, labels=labels)
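def exampleIsingSiteTensor():
    # minimal usage sketch for IsingSiteTensor; the label names and betaJ
    # values here are arbitrary choices, not fixed by the library
    # uniform coupling on a degree-4 (square-lattice) site:
    site = IsingSiteTensor(betaJ=0.4, dim=4, labels=['u', 'l', 'd', 'r'])
    print(site.labels)  # ['u', 'l', 'd', 'r']

    # anisotropic couplings: one betaJ per edge, ordered as the labels
    site2 = IsingSiteTensor(betaJ=[0.4, 0.2, 0.4, 0.2], dim=4, labels=['u', 'l', 'd', 'r'])
    print(site2.shape)  # (2, 2, 2, 2)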
def makeSquareOutTensor(data, loc):
    """
    Make an out-going tensor on one corner of a square.

    Parameters
    ----------
    data : 3-D ndarray of float
        The data for the out-going tensor.
    loc : {"ul", "ur", "dl", "dr"}
        One of the four locations on a square.

    Returns
    -------
    Tensor
        A 3-D tensor whose legs are named according to its location (loc);
        the outer leg is named "o".
    """
    labels = []
    assert (loc in ['ul', 'ur', 'dl', 'dr']), \
        "Error: loc of makeSquareOutTensor must be one of {}, but {} gotten.".format(['ul', 'ur', 'dl', 'dr'], loc)
    if (loc == 'ul'):
        labels = ['d', 'r', 'o']
    if (loc == 'ur'):
        labels = ['d', 'l', 'o']
    if (loc == 'dl'):
        labels = ['u', 'r', 'o']
    if (loc == 'dr'):
        labels = ['u', 'l', 'o']
    return Tensor(data=data, labels=labels)
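def exampleMakeSquareOutTensor():
    # minimal usage sketch for makeSquareOutTensor; the shape (3, 3, 4) is an
    # arbitrary choice: two inner legs of dim 3 and an outer leg of dim 4
    data = np.random.random_sample((3, 3, 4))
    ulTensor = makeSquareOutTensor(data, loc='ul')
    # legs toward the square's interior, plus the outer leg 'o'
    print(ulTensor.labels)  # ['d', 'r', 'o']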
def test_qEnv(self):
    try:
        from ncon import ncon
    except ImportError:
        print("ncon is not installed. Skip the qEnv test.")
        return
    print('checking finite tensor network with qEnv network from Evenbly TNR')
    chiHI = 5
    chiVI = 6
    tensorArray = []
    for _ in range(8):
        tensorArray.append(np.random.rand(chiHI, chiVI, chiHI, chiVI))

    # reference contraction by Glen Evenbly (c) for www.tensors.net, (v1.1) - last modified 29/1/2019
    qEnv = ncon(tensorArray,
                [[-1, -2, 11, 12], [7, 8, 11, 9], [5, 12, 1, 2], [5, 9, 3, 4],
                 [-3, -4, 13, 14], [7, 8, 13, 10], [6, 14, 1, 2],
                 [6, 10, 3, 4]]).reshape(chiHI * chiVI, chiHI * chiVI)

    FTN = EvenblyTNRQEnvFTN()
    tensorDict = TensorDict()
    tensorNames = ['uul', 'uur', 'udl', 'udr', 'ddl', 'ddr', 'dul', 'dur']
    for tensor, name in zip(tensorArray, tensorNames):
        t = Tensor(data=tensor, labels=['l', 'u', 'r', 'd'])
        tensorDict.setTensor(name, t)

    res = FTN.contract(tensorDict)
    resMat = res.toMatrix(rows=['1'], cols=['2'])
    error = np.linalg.norm(resMat - qEnv) / np.linalg.norm(resMat)
    print('qenv error = {}'.format(error))
    self.assertTrue(error < 1e-10)
def directedIterate(self, d, prjTensor, inputTensor1=None, inputTensor2=None):
    # print(inputTensor1, inputTensor2)
    # print(prjTensor.shape)
    funcs.assertInSet(d, ['u', 'd', 'l', 'r'], 'direction')
    # use the real projector for iteration
    if (inputTensor1 is None):
        inputTensor1 = self.a
    if (inputTensor2 is None):
        inputTensor2 = self.a
    chiH = inputTensor1.shapeOfLabel('l')
    chiV = inputTensor1.shapeOfLabel('u')
    if (d == 'l') or (d == 'r'):
        # given U_L: the prjTensor
        lTensor = Tensor(data=prjTensor,
                         shape=(chiH, chiH, prjTensor.shape[1]),
                         labels=['u', 'd', 'o'])
        rTensor = Tensor(data=funcs.transposeConjugate(prjTensor),
                         shape=(prjTensor.shape[1], chiH, chiH),
                         labels=['o', 'u', 'd'])
        if (d == 'r'):
            lTensor, rTensor = rTensor, lTensor
        return self.horizontalIterateFTN.contract({
            'u': inputTensor1,
            'd': inputTensor2,
            'l': lTensor,
            'r': rTensor
        })
    else:
        uTensor = Tensor(data=prjTensor,
                         shape=(chiV, chiV, prjTensor.shape[1]),
                         labels=['l', 'r', 'o'])
        dTensor = Tensor(data=funcs.transposeConjugate(prjTensor),
                         shape=(prjTensor.shape[1], chiV, chiV),
                         labels=['o', 'l', 'r'])
        if (d == 'd'):
            uTensor, dTensor = dTensor, uTensor
        return self.verticalIterateFTN.contract({
            'u': uTensor,
            'd': dTensor,
            'l': inputTensor1,
            'r': inputTensor2
        })
def test_singleTensorMPS(self):
    tensor = Tensor(data=np.random.random_sample(3), labels=['oo'])
    mps = FreeBoundaryMPS(tensorList=[tensor], chi=16)
    self.assertEqual(mps.n, 1)
    mps.canonicalize(0)
    self.assertTrue(mps.checkCanonical(excepIdx=0))
    self.assertEqual(mps.getTensor(0).legs[0].name, 'o')
def createSpecialTN2(self):
    a = Tensor(shape=(3, 5, 7), labels=['a3', 'a5', 'a7'])
    b = Tensor(shape=(2, 4, 5), labels=['b2', 'b4', 'b5'])
    c = Tensor(shape=(2, 7, 7, 7), labels=['c2', 'c71', 'c72', 'c73'])
    d = Tensor(shape=(7, 7, 3, 4), labels=['d71', 'd72', 'd3', 'd4'])
    e = Tensor(shape=(3, 3, 5), labels=['e31', 'e32', 'e5'])
    f = Tensor(shape=(2, 2, 5), labels=['f21', 'f22', 'f5'])
    g = Tensor(shape=(4, 4, 3, 3), labels=['g41', 'g42', 'g31', 'g32'])

    makeLink('a3', 'e31', a, e)
    makeLink('a5', 'b5', a, b)
    makeLink('a7', 'c72', a, c)
    makeLink('b2', 'f21', b, f)
    makeLink('b4', 'g41', b, g)
    makeLink('c2', 'f22', c, f)
    makeLink('c71', 'd72', c, d)
    makeLink('c73', 'd71', c, d)
    makeLink('d3', 'g31', d, g)
    makeLink('d4', 'g42', d, g)
    makeLink('e5', 'f5', e, f)
    makeLink('e32', 'g32', e, g)
    return [a, b, d, c, g, f, e]
def contractHandmadeTN():
    print('contractHandmadeTN():')
    a = Tensor(shape=(3, 5, 7), labels=['a3', 'a5', 'a7'])
    b = Tensor(shape=(2, 4, 5), labels=['b2', 'b4', 'b5'])
    c = Tensor(shape=(2, 7, 7, 7), labels=['c2', 'c71', 'c72', 'c73'])
    d = Tensor(shape=(7, 7, 3, 4), labels=['d71', 'd72', 'd3', 'd4'])
    e = Tensor(shape=(3, 3, 5), labels=['e31', 'e32', 'e5'])
    f = Tensor(shape=(2, 2, 5), labels=['f21', 'f22', 'f5'])
    g = Tensor(shape=(4, 4, 3, 3), labels=['g41', 'g42', 'g31', 'g32'])

    makeLink('a3', 'e31', a, e)
    makeLink('a5', 'b5', a, b)
    makeLink('a7', 'c72', a, c)
    makeLink('b2', 'f21', b, f)
    makeLink('b4', 'g41', b, g)
    makeLink('c2', 'f22', c, f)
    makeLink('c71', 'd72', c, d)
    makeLink('c73', 'd71', c, d)
    makeLink('d3', 'g31', d, g)
    makeLink('d4', 'g42', d, g)
    makeLink('e5', 'f5', e, f)
    makeLink('e32', 'g32', e, g)

    tensors = [a, b, d, c, g, f, e]
    res, _ = contractAndCostWithSequence(tensors)
    print('res from direct contraction = {}'.format(res.single()))

    mpsRes = contractWithMPS(tensors, chi=32)
    print('res from mps = {}'.format(mpsRes.single()))
    print('')
def createMPSFromDim(self, dims, itbRange=(3, 10), tensorLikeFlag=False, chi=16):
    # internal bond dimensions will be chosen automatically (randomly within itbRange)
    lastDim = -1
    tensors = []
    n = len(dims)
    if (n == 1):
        tensors.append(Tensor(shape=(dims[0], ), labels=['o'], tensorLikeFlag=tensorLikeFlag))
        return FreeBoundaryMPS(tensorList=tensors, chi=chi)

    itbLow, itbHigh = itbRange
    bondDim = np.random.randint(low=itbLow, high=itbHigh)
    tensor = Tensor(shape=(dims[0], bondDim), labels=['o', 'r'], tensorLikeFlag=tensorLikeFlag)
    tensors.append(tensor)
    lastDim = bondDim

    for i in range(1, n - 1):
        bondDim = np.random.randint(low=itbLow, high=itbHigh)
        newTensor = Tensor(shape=(lastDim, dims[i], bondDim),
                           labels=['l', 'o', 'r'],
                           tensorLikeFlag=tensorLikeFlag)
        tensors.append(newTensor)
        makeLink('r', 'l', tensor, newTensor)
        lastDim = bondDim
        tensor = newTensor

    newTensor = Tensor(shape=(lastDim, dims[-1]), labels=['l', 'o'], tensorLikeFlag=tensorLikeFlag)
    tensors.append(newTensor)
    makeLink('r', 'l', tensor, newTensor)
    return FreeBoundaryMPS(tensorList=tensors, chi=chi)
def createMPSB(self, tensorLikeFlag=False):
    tensor2L = Tensor(shape=(3, 3), labels=['o', 'internal'], tensorLikeFlag=tensorLikeFlag)
    tensor21 = Tensor(shape=(3, 5, 4), labels=['itl', 'oo', 'itr'], tensorLikeFlag=tensorLikeFlag)
    tensor22 = Tensor(shape=(4, 2, 4), labels=['itl', 'oo', 'itr'], tensorLikeFlag=tensorLikeFlag)
    tensor2R = Tensor(shape=(4, 5), labels=['internal', 'o'], tensorLikeFlag=tensorLikeFlag)

    makeLink('internal', 'itl', tensor2L, tensor21)
    makeLink('itr', 'itl', tensor21, tensor22)
    makeLink('itr', 'internal', tensor22, tensor2R)

    tensorsB = [tensor2L, tensor21, tensor22, tensor2R]
    mpsB = FreeBoundaryMPS(tensorList=tensorsB, chi=12)
    return mpsB
def simplestExample():
    shapeA = (300, 4, 5)
    shapeB = (300, 6)
    shapeC = (4, 6, 5)
    # create tensors with labels
    a = Tensor(labels=['a300', 'b4', 'c5'], data=xplib.xp.ones(shapeA))
    b = Tensor(labels=['a300', 'd6'], data=xplib.xp.ones(shapeB))
    c = Tensor(labels=['e4', 'd6', 'c5'], data=xplib.xp.ones(shapeC))

    # make links via labels
    # note that links can also be made between "Leg" objects to avoid reused leg names;
    # here we use leg names for simplicity
    makeLink('a300', 'a300', a, b)
    makeLink('c5', 'c5', a, c)
    makeLink('d6', 'd6', b, c)

    # now that we have a tensor network, we can generate the optimal contraction sequence of this tensor list
    optimalSeq = generateOptimalSequence([a, b, c])
    print('optimal contraction sequence = {}'.format(optimalSeq))

    # if we do not have any prior knowledge, we can contract the tensor list like this
    res = contractTensorList([a, b, c])
    print(res)

    # if you already have a good sequence to use
    res = contractWithSequence([a, b, c], seq=optimalSeq)
    print(res)

    # if you want to save time / space by contracting in place
    # (note that after this you cannot contract the tensors again, since the bonds between them have been broken)
    res = contractWithSequence([a, b, c], seq=optimalSeq, inplace=True)
    print(res)
    print('')

    # for reusable in-place contraction (which is our goal), refer to the use of
    # CTL.tensornetwork.tensornetwork.FiniteTensorNetwork
    return res
def squareIsingTensor(beta, obs=None, symmetryBroken=0.0):
    """
    Tensor for the square lattice Ising model, based on square decomposition.

    Parameters
    ----------
    beta : float
        The inverse temperature (assuming J = 1).
    obs : str, {'M', 'E'}, optional
        The observable to be measured. If None, then Z is measured.
    symmetryBroken : float, default 0.0
        A small symmetry-breaking field, used for calculating the magnetization.

    Returns
    -------
    Tensor
        A tensor with four legs corresponding to four spins, containing the
        local weight or measurement (depending on obs).
    """
    # tensor for the square-lattice Ising model
    # built in the simplest way: on plaquettes, linked with domain walls
    data = xplib.xp.zeros((2, 2, 2, 2), dtype=xplib.xp.float64)
    for s in range(16):
        idx = funcs.intToBitTuple(s, 4)
        localE = 0.0
        # each pair of neighboring spins contributes -1 to the local energy
        # if aligned, +1 otherwise
        for i in range(4):
            if (idx[i] == idx[(i + 1) % 4]):
                localE -= 1.0
            else:
                localE += 1.0
        data[idx] = squareTensorMeasure(idx, obs) * xplib.xp.exp(-beta * localE)
        data[idx] *= xplib.xp.exp(symmetryBroken * squareTensorMeasure(idx, 'M'))
    # print(data)
    return Tensor(labels=['u', 'l', 'd', 'r'], data=data, degreeOfFreedom=2)
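def exampleSquareIsingTensor():
    # minimal usage sketch for squareIsingTensor; beta = 0.44 is an arbitrary
    # value near the critical inverse temperature of the square lattice
    T = squareIsingTensor(beta=0.44)  # pure weight tensor (measures Z)
    TM = squareIsingTensor(beta=0.44, obs='M', symmetryBroken=1e-6)  # magnetization impurity
    print(T.labels)  # ['u', 'l', 'd', 'r']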
def plaquetteIsingTensor(weight, diamondForm=False):
    """
    A local tensor of the Ising model, based on a plaquette, including the
    interactions on its four edges.

    Parameters
    ----------
    weight : float or length-2 tuple of float
        The weights (J) of the Ising model. If a length-2 tuple, the two
        values represent (J_vertical, J_horizontal).
    diamondForm : bool, default False
        If True, the tensor is rotated 45 degrees clockwise, so the usual
        ['ru', 'rd', 'ld', 'lu'] labels become ['r', 'd', 'l', 'u'], and the
        vertical weight becomes the weight from bottom-left to top-right.

    Returns
    -------
    Tensor
        Plaquette tensor with labels ['ru', 'rd', 'ld', 'lu'] or
        ['r', 'd', 'l', 'u'].
    """
    funcName = 'CTL.models.Ising.plaquetteIsingTensor'
    if isinstance(weight, float):
        weight = [weight, weight]
    else:
        assert len(weight) == 2, funcs.errorMessage(
            'Only float or (float, float) is accepted by {}, {} obtained.'.format(funcName, weight),
            location=funcName)
        weight = list(weight)

    data = xplib.xp.zeros((2, 2, 2, 2), dtype=xplib.xp.float64)
    # leg order: ru, rd, ld, lu
    for s in range(16):
        idx = funcs.intToBitTuple(s, 4)
        localE = 0.0
        for i in range(4):
            if (idx[i] == idx[(i + 1) % 4]):
                # e.g. ru & rd share a bond of weight[0] (J_vertical)
                localE -= weight[i % 2]
            else:
                localE += weight[i % 2]
        data[idx] = xplib.xp.exp(-localE)

    labels = ['ru', 'rd', 'ld', 'lu']
    if diamondForm:
        labels = ['r', 'd', 'l', 'u']
    return Tensor(labels=labels, data=data, degreeOfFreedom=2)
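def examplePlaquetteIsingTensor():
    # minimal usage sketch for plaquetteIsingTensor; the weights are
    # arbitrary example values
    Tp = plaquetteIsingTensor(weight=(0.5, 0.3))  # (J_vertical, J_horizontal)
    print(Tp.labels)  # ['ru', 'rd', 'ld', 'lu']
    Td = plaquetteIsingTensor(weight=0.4, diamondForm=True)
    print(Td.labels)  # ['r', 'd', 'l', 'u']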
def iterate(self):
    if (not self.initialized):
        self.initialize()

    # iterate the impurities to a new set of impurity tensors
    newImpurities = []
    norm = self.RG.getNorm(self.iterateIdx + 1)
    partN = self.RG.impurityParts
    newDOF = partN * self.impurities[0].degreeOfFreedom
    for i in range(self.highestOrder):
        weight = 0.5**i
        totalWeight = 0.0
        resA = None
        resLabels = None
        for parts in funcs.divideIntoKParts(i, partN):
            existFlag = True
            for j in parts:
                if (self.impurities[j] is None):
                    existFlag = False
            if (not existFlag):
                continue
            partWeight = funcs.calculateDivisionTimes(parts) * weight
            totalWeight += partWeight
            tensors = []
            for j in parts:
                tensors.append(self.impurities[j])
            res = self.RG.impurityIterate(tensors, self.iterateIdx)
            if (resLabels is None):
                resLabels = res.labels
                resA = res.a * partWeight
            else:
                res.reArrange(resLabels)
                resA += res.a * partWeight
        resA /= totalWeight
        resTensor = Tensor(labels=resLabels, data=resA, degreeOfFreedom=newDOF)
        newImpurities.append(resTensor)

    self.iterateIdx += 1
    self.impurities = newImpurities
    self.appendToArchive(norm)
def createCompleteGraph(self, n, dimRange=(2, 3)):
    low, high = dimRange
    dims = np.random.randint(low=low, high=high, size=(n, n))
    # symmetrize so that dims[i][j] == dims[j][i] for the shared bond
    for i in range(n):
        for j in range(i, n):
            dims[j][i] = dims[i][j]

    tensors = []
    for i in range(n):
        shape = tuple([dims[i][j] for j in range(n) if (j != i)])
        labels = [str(j) for j in range(n) if (j != i)]
        tensor = Tensor(shape=shape, labels=labels)
        tensors.append(tensor)

    for i in range(n):
        for j in range(i + 1, n):
            makeLink(str(j), str(i), tensors[i], tensors[j])
    return tensors
def makeTriangleTensor(data, labels=['1', '2', '3']):
    """
    Make a tensor of 3 legs.

    Parameters
    ----------
    data : 3-D ndarray of float
        Three-dimensional array that will be used as the data for the tensor.
    labels : list of str, default ['1', '2', '3']
        Labels for the three legs.

    Returns
    -------
    Tensor
        A tensor of 3 legs.
    """
    assert (len(data.shape) == 3), \
        "Error: makeTriangleTensor can only accept tensor with 3 dimensions, but shape {} obtained.".format(data.shape)
    return Tensor(data=data, labels=labels)
def makeSquareTensor(data, labels=['u', 'l', 'd', 'r']):
    """
    Make a tensor of 4 legs.

    Parameters
    ----------
    data : 4-D ndarray of float
        Four-dimensional array that will be used as the data for the tensor.
    labels : list of str, default ['u', 'l', 'd', 'r']
        Labels for the four legs.

    Returns
    -------
    Tensor
        A tensor of 4 legs.
    """
    assert (len(data.shape) == 4), \
        "Error: makeSquareTensor can only accept tensor with 4 dimensions, but shape {} obtained.".format(data.shape)
    return Tensor(data=data, labels=labels)
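def exampleMakeLocalTensors():
    # minimal usage sketch for makeTriangleTensor and makeSquareTensor;
    # the shapes are arbitrary example choices
    triangle = makeTriangleTensor(np.ones((2, 2, 2)))
    print(triangle.labels)  # ['1', '2', '3']
    square = makeSquareTensor(np.ones((2, 3, 2, 3)))
    print(square.labels)  # ['u', 'l', 'd', 'r']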
def test_toTensorLike(self):
    a = Tensor(shape=(5, 3, 4), labels=['c', 'b', 'a'], tensorLikeFlag=False)
    aLike = a.toTensorLike()
    self.assertIsNone(aLike.a)
    self.assertTrue(aLike.tensorLikeFlag)
    self.assertListEqual(aLike.labels, ['c', 'b', 'a'])
    self.assertTupleEqual(aLike.shape, (5, 3, 4))

    a = Tensor(shape=(5, 3, 4), labels=['c', 'b', 'a'], tensorLikeFlag=True)
    # print('test_toTensorLike.a = {}'.format(a))
    aLike = a.toTensorLike()
    self.assertIsNone(aLike.a)
    self.assertTrue(aLike.tensorLikeFlag)
    self.assertListEqual(aLike.labels, ['c', 'b', 'a'])
    self.assertTupleEqual(aLike.shape, (5, 3, 4))
def contractTwoTensors(ta, tb, bonds=None, outProductWarning=True):
    """
    Calculate the result of the contraction of two tensors.

    Parameters
    ----------
    ta, tb : Tensor
    bonds : list of Bond, optional
        If given, contract only over the given bonds instead of all shared bonds.
    outProductWarning : bool, default True
        Whether to generate a warning message on outer products; used for debugging.

    Returns
    -------
    Tensor
        The contraction result of ta and tb.
    """
    # contract over the given bonds (if not given, find all shared bonds)
    # note that contraction works on the links, which are made in place:
    # after preparing the tensors, we must make links to define what to contract
    if (not ta.diagonalFlag) and (tb.diagonalFlag):
        # make sure the diagonal tensor, if any, comes first
        return contractTwoTensors(tb, ta, bonds=bonds, outProductWarning=outProductWarning)
    if (bonds is None):
        bonds = shareBonds(ta, tb)

    if (ta.tensorLikeFlag != tb.tensorLikeFlag):
        raise TypeError(funcs.errorMessage(
            "ta and tb must be the same type (tensor/tensorlike): {} and {} gotten.".format(ta.typeName, tb.typeName),
            location='CTL.tensor.contract.contractTwoTensors'))
    tensorLikeContract = ta.tensorLikeFlag

    if (len(bonds) == 0):
        # no shared bonds: fall back to an outer product
        if (outProductWarning):
            warnings.warn('{} and {} do not share same label, do out product'.format(ta, tb), RuntimeWarning)
        labels = ta.labels + tb.labels
        shape = ta.shape + tb.shape
        legs = ta.legs + tb.legs
        if (ta.diagonalFlag and tb.diagonalFlag):
            if (tensorLikeContract):
                return DiagonalTensor(labels=labels, data=None, shape=shape, legs=legs, tensorLikeFlag=True)
            else:
                return DiagonalTensor(labels=labels, data=ta.a * tb.a, shape=shape, legs=legs)
        elif (ta.diagonalFlag):
            if (tensorLikeContract):
                return Tensor(labels=labels, data=None, shape=shape, legs=legs, tensorLikeFlag=True)
            else:
                # scatter the outer product onto the diagonal of the first ta.dim axes
                data = xplib.xp.zeros(shape, dtype=ta.a.dtype)
                einsumStr = ('j' * ta.dim) + '...->j...'
                outerData = xplib.xp.multiply.outer(ta.a, tb.a)
                xplib.xp.einsum(einsumStr, data)[...] = outerData
                return Tensor(labels=labels, data=data, shape=shape, legs=legs)
        else:
            if (tensorLikeContract):
                return Tensor(labels=labels, data=None, shape=shape, legs=legs, tensorLikeFlag=True)
            else:
                return Tensor(labels=labels, data=xplib.xp.multiply.outer(ta.a, tb.a), shape=shape, legs=legs)

    contractALegs = [bond.legs[0] for bond in bonds]
    contractBLegs = [bond.legs[1] for bond in bonds]
    taRemainLegs = ta.complementLegs(contractALegs)
    tbRemainLegs = tb.complementLegs(contractBLegs)
    newLegs = taRemainLegs + tbRemainLegs
    newShape = tuple([leg.dim for leg in newLegs])

    if (ta.diagonalFlag) and (tb.diagonalFlag):
        # return a diagonal tensor
        if (tensorLikeContract):
            return DiagonalTensor(shape=newShape, data=None, legs=newLegs, tensorLikeFlag=True)
        if (len(newLegs) != 0):
            return DiagonalTensor(shape=newShape, data=ta.a * tb.a, legs=newLegs)
        else:
            return DiagonalTensor(data=xplib.xp.array(xplib.xp.sum(ta.a * tb.a)))

    if (ta.diagonalFlag):
        if (tensorLikeContract):
            return Tensor(shape=newShape, data=None, legs=newLegs, tensorLikeFlag=True)
        # tb is not a diagonal tensor:
        # 1. calculate the core with broadcasting
        # 2. calculate the full tensor with an outer product
        # broadcasting works from the last dimension, so move the contracted
        # legs of tb to the end, then take the diagonal over those dimensions
        tb.moveLegsToFront(tbRemainLegs)
        dim = len(contractBLegs)
        remADim = len(taRemainLegs)
        einsumStr = '...' + ('j' * dim) + '->...j'
        data = xplib.xp.einsum(einsumStr, tb.a) * ta.a
        if (remADim == 0):
            newData = xplib.xp.sum(data, axis=-1)
        else:
            # scatter the core onto the diagonal of the remaining ta dimensions
            newData = xplib.xp.zeros(data.shape + (data.shape[-1], ) * (remADim - 1), dtype=data.dtype)
            einsumDiagStr = '...' + ('j' * remADim) + '->...j'
            xplib.xp.einsum(einsumDiagStr, newData)[...] = data
        newLegs = tbRemainLegs + taRemainLegs
        newShape = tuple([leg.dim for leg in newLegs])
        return Tensor(shape=newShape, data=newData, legs=newLegs)

    if (tensorLikeContract):
        return Tensor(shape=newShape, data=None, legs=newLegs, tensorLikeFlag=True)

    dataA = ta.toMatrix(rows=None, cols=contractALegs)
    dataB = tb.toMatrix(rows=contractBLegs, cols=None)
    newData = xplib.xp.matmul(dataA, dataB)
    newData = xplib.xp.reshape(newData, newShape)
    return Tensor(shape=newShape, data=newData, legs=newLegs)
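def exampleContractTwoTensors():
    # minimal usage sketch for contractTwoTensors: link the shared bond
    # first, then contract; the shapes and data values are arbitrary
    a = Tensor(labels=['i', 'j'], data=np.ones((3, 4)))
    b = Tensor(labels=['j', 'k'], data=np.ones((4, 5)))
    makeLink('j', 'j', a, b)
    c = contractTwoTensors(a, b)
    print(c.labels, c.shape)  # ['i', 'k'] (3, 5), every entry 4.0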
def merge(ta, tb, chi=None, bondName=None, renameWarning=True):
    """
    Merge the shared bonds of two tensors. If the tensors are not connected,
    make a warning and do nothing.

    Parameters
    ----------
    ta, tb : Tensor
    chi : int, optional
        The upper bound of the bond dimension of the merged bond. If None,
        then no truncation.
    bondName : str, optional
        The name of the bond after merging. If None, then for a list of bonds
        [name1, name2, ..., nameN], the merged name will be
        "{name1}|{name2}|...|{nameN}".
    renameWarning : bool, default True
        If only one bond is shared, merging reduces to a rename; when this
        flag is True, a RuntimeWarning is raised in that case.

    Returns
    -------
    ta, tb : Tensor
        The two tensors after merging all the common bonds into one bond.
    """
    funcName = "CTL.tensor.contract.contract.merge"
    assert (ta.tensorLikeFlag == tb.tensorLikeFlag), funcs.errorMessage(
        'two tensors to be merged must be either Tensor or TensorLike simultaneously, {} and {} obtained.'.format(ta, tb),
        location=funcName)
    tensorLikeFlag = ta.tensorLikeFlag

    ta, tb = mergeLink(ta, tb, bondName=bondName, renameWarning=renameWarning)
    if (chi is None):
        # no need for truncation
        return ta, tb

    sb = shareBonds(ta, tb)
    if (len(sb) == 0):
        if (renameWarning):
            warnings.warn(funcs.warningMessage(
                warn='mergeLink cannot merge links between two tensors {} and {} not sharing any bond'.format(ta, tb),
                location=funcName), RuntimeWarning)
        return ta, tb

    assert (len(sb) == 1), funcs.errorMessage(
        "There should only be one common leg between ta and tb after mergeLink, {} obtained.".format(sb),
        location=funcName)

    legA = [bond.sideLeg(ta) for bond in sb]
    legB = [bond.sideLeg(tb) for bond in sb]
    bondNameA = legA[0].name
    bondNameB = legB[0].name
    remainLegA = ta.complementLegs(legA)
    remainLegB = tb.complementLegs(legB)

    if (not tensorLikeFlag):
        # truncate the merged bond with an SVD of the joint matrix
        matA = ta.toMatrix(rows=None, cols=legA)
        matB = tb.toMatrix(rows=legB, cols=None)
        mat = matA @ matB
        u, s, vh = xplib.xp.linalg.svd(mat)
        chi = min([chi, funcs.nonZeroElementN(s), matA.shape[0], matB.shape[1]])
        u = u[:, :chi]
        s = s[:chi]
        vh = vh[:chi]
        uOutLeg = Leg(tensor=None, dim=chi, name=bondNameA)
        vOutLeg = Leg(tensor=None, dim=chi, name=bondNameB)
        # split sqrt(s) evenly between the two sides
        sqrtS = xplib.xp.sqrt(s)
        uS = funcs.rightDiagonalProduct(u, sqrtS)
        vS = funcs.leftDiagonalProduct(vh, sqrtS)
        uTensor = Tensor(data=uS, legs=remainLegA + [uOutLeg])
        vTensor = Tensor(data=vS, legs=[vOutLeg] + remainLegB)
    else:
        # TensorLike: only track the (truncated) bond dimension
        chi = min([chi, legA[0].dim, ta.totalSize // legA[0].dim, tb.totalSize // legB[0].dim])
        uOutLeg = Leg(tensor=None, dim=chi, name=bondNameA)
        vOutLeg = Leg(tensor=None, dim=chi, name=bondNameB)
        uTensor = Tensor(tensorLikeFlag=True, legs=remainLegA + [uOutLeg])
        vTensor = Tensor(tensorLikeFlag=True, legs=[vOutLeg] + remainLegB)

    makeLink(uOutLeg, vOutLeg)
    return uTensor, vTensor
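def exampleMerge():
    # minimal usage sketch for merge: compress the two bonds shared by a and
    # b into one bond of dimension at most chi; shapes are arbitrary
    a = Tensor(labels=['x', 's1', 's2'], data=np.random.random_sample((4, 3, 3)))
    b = Tensor(labels=['s1', 's2', 'y'], data=np.random.random_sample((3, 3, 5)))
    makeLink('s1', 's1', a, b)
    makeLink('s2', 's2', a, b)  # two shared bonds, joint dimension 9
    ta, tb = merge(a, b, chi=4)  # one merged bond, truncated to dim <= 4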