Code Example #1
    def canonicalize(self, idx):
        '''
        Canonicalize the MPS, so that the only non-isometry is located at 0 <= idx < n.
        After this, activeIdx will be set to idx,
        and the canonicalization can be checked with self.checkCanonical:
        with excepIdx left as None (or set to idx), it returns True until the MPS is modified again.
        '''
        assert (isinstance(idx, int) and (idx >= 0)
                and (idx < self.n)), funcs.errorMessage(
                    'index must be in [0, n), {} obtained.'.format(idx),
                    location="FreeBoundaryMPS.canonicalize")
        for i in range(idx):
            # print('canonicalizing {} to {}'.format(i, i + 1))
            u, s, v = SchimdtDecomposition(self._tensors[i],
                                           self._tensors[i + 1], self.chi)
            sv = contractTwoTensors(s, v)
            self._tensors[i] = u
            self._tensors[i + 1] = sv
        for i in range(self.n - 1, idx, -1):
            # print('canonicalizing {} to {}'.format(i, i - 1))
            u, s, v = SchimdtDecomposition(self._tensors[i],
                                           self._tensors[i - 1], self.chi)
            # print('isometry = {}'.format(isIsometry(u, ['o'])))
            sv = contractTwoTensors(s, v)
            self._tensors[i] = u
            self._tensors[i - 1] = sv

        self.activeIdx = idx
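
A minimal usage sketch for canonicalize (not part of the excerpt above): the import paths, the 'l'/'r'/'o' leg convention, and the assumption that FreeBoundaryMPS accepts a list of pre-linked tensors are guesses; only canonicalize(idx=...), checkCanonical, and the FreeBoundaryMPS(tensors, chi=...) call pattern appear elsewhere on this page.

from CTL.examples.MPS import FreeBoundaryMPS          # assumed import path
from CTL.tensor.tensor import Tensor                  # assumed import path
from CTL.tensor.contract.link import makeLink         # assumed import path

# build a 3-site MPS by hand: link neighbouring 'r'/'l' legs before wrapping
t0 = Tensor(shape=(2, 3), labels=['o', 'r'])
t1 = Tensor(shape=(3, 2, 3), labels=['l', 'o', 'r'])
t2 = Tensor(shape=(3, 2), labels=['l', 'o'])
makeLink('r', 'l', t0, t1)
makeLink('r', 'l', t1, t2)

mps = FreeBoundaryMPS([t0, t1, t2], chi=16)
mps.canonicalize(idx=1)        # the only non-isometry now sits at site 1
print(mps.checkCanonical())    # True until the MPS is modified again (per the docstring)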
Code Example #2
def doubleMerge(mpsA, mpsB, idxA, idxB):

    funcName = 'CTL.examples.MPS.doubleMerge'
    tensorA = contractTwoTensors(mpsA.getTensor(idxA),
                                 mpsA.getTensor(idxA + 1))
    tensorB = contractTwoTensors(mpsB.getTensor(idxB),
                                 mpsB.getTensor(idxB + 1))

    tensorA, tensorB = merge(tensorA, tensorB, chi=None, bondName='o')
    mpsA.mergeTensor(idxA, tensorA)
    mpsB.mergeTensor(idxB, tensorB)

    mpsA.canonicalize(idx=idxA)
    mpsB.canonicalize(idx=idxB)

    tensorA, tensorB = mpsA.getTensor(idxA), mpsB.getTensor(idxB)
    tensorA, tensorB = merge(tensorA,
                             tensorB,
                             chi=min(mpsA.chi, mpsB.chi),
                             bondName='o',
                             renameWarning=False)
    mpsA.setTensor(idxA, tensorA)
    mpsB.setTensor(idxB, tensorB)

    sb = shareBonds(tensorA, tensorB)
    assert (len(sb) == 1), funcs.errorMessage(
        "{} and {} do not share exactly one bond.".format(tensorA, tensorB),
        location=funcName)
    return sb[0]
Code Example #3
File: TRG.py  Project: CaoRX/CTL
    def iterate(self):
        # print('iterate:')
        if (self.iterateFTN is None):
            self.iterateFTN = triangleContractFTN()

        self.a.addTensorTag('a')
        self.b.addTensorTag('b')

        dof = self.a.degreeOfFreedom
        makeLink(self.a.getLeg('a-1'), self.b.getLeg('b-1'))
        iTensor = contractTwoTensors(self.a, self.b)

        a2Dim, b2Dim, a3Dim, b3Dim = iTensor.shapeOfLabels(
            ['a-2', 'b-2', 'a-3', 'b-3'])

        iMat = iTensor.toMatrix(rows=['a-2', 'b-2'], cols=['a-3', 'b-3'])
        u, v, error = SVDDecomposition(iMat, self.chi)
        self.errors.append(error)
        # print(u.shape, v.shape)
        # print(iTensor.shape)

        u = xplib.xp.reshape(u, (a2Dim, b2Dim, u.shape[1]))
        v = xplib.xp.reshape(v, (a3Dim, b3Dim, v.shape[1]))

        uTensor = makeTriangleTensor(u, labels=['2', '3', '1'])
        vTensor = makeTriangleTensor(v, labels=['2', '3', '1'])

        self.a = self.iterateFTN.contract(makeTriangleTensorDict(uTensor))
        self.b = self.iterateFTN.contract(makeTriangleTensorDict(vTensor))

        self.a.degreeOfFreedom = dof * 3
        self.b.degreeOfFreedom = dof * 3

        # self.normalizeTensors()
        self.appendToArchive()
Code Example #4
def triangleTensorTrace(a, b):
    """
    Take the trace of two triangle tensors, usually used at the end of the RG on the triangular lattice.

    Parameters
    ----------
    a, b : Tensor
        Two tensors of triangular shape (namely, 3 legs ['1', '2', '3']).
    
    Returns
    -------
    Tensor
        The trace tensor of the two tensors. If a and b only have legs ['1', '2', '3'], the result is shapeless (so the value can be obtained as res.single()); otherwise it is a tensor over the remaining legs.
    """
    tensorA = a.copy()
    tensorB = b.copy()

    bonds = []

    for label in ['1', '2', '3']:
        bonds += makeLink(label, label, tensorA=tensorA, tensorB=tensorB)

    res = contractTwoTensors(tensorA, tensorB, bonds=bonds)
    return res
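
A small usage sketch (the import paths are assumptions; the Tensor and makeLink call patterns, and res.single(), come from the other examples and the docstring above):

import numpy as np
from CTL.examples.TRG import triangleTensorTrace      # assumed import path
from CTL.tensor.tensor import Tensor                  # assumed import path

dataA = np.random.random_sample((2, 2, 2))
dataB = np.random.random_sample((2, 2, 2))
a = Tensor(shape=(2, 2, 2), labels=['1', '2', '3'], data=dataA)
b = Tensor(shape=(2, 2, 2), labels=['1', '2', '3'], data=dataB)

res = triangleTensorTrace(a, b)    # contracts legs '1', '2', '3' pairwise
value = res.single()               # shapeless result, so a single scalar
print(value, np.sum(dataA * dataB))    # the two numbers should agree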
Code Example #5
def contractMPS(mpsA, mpsB):
    '''
    Solution 0:
    step 0. find all the connections between mpsA and mpsB (the 'o' legs they share)
    step 1. make them contiguous on both MPSes and merge them (merge pairs, canonicalize, ...?)
    step 2. use swaps to move the tensors to one end (tail of mpsA, head of mpsB)
    step 3. merge the two tensors, and eliminate the 2-way tensor (to one side)
    Problem: what about canonicalization?
    Only one bond should exist!

    Solution 1: save the MPS information in the tensors
    by extending Tensor as MPSTensor
    problem: Schimdt will return a plain Tensor?
    contractTwoTensors would need to maintain MPS information?
    possible fix: write wrappers around these functions that maintain the MPS information of a Tensor

    Solution 2: "merge" all pairs after one contraction (used here)
    after the contraction, merge the new MPS (mergeMPS) with all existing MPSes
    this may increase the cost of finding merges,
    but it keeps tensors from having to carry MPS information, so they remain usable elsewhere
    so in this function: there is no need to consider this task
    '''
    funcName = 'CTL.examples.MPS.contractMPS'
    indexA, indexB = commonLegs(mpsA, mpsB)
    # print('indexA = {}, indexB = {}'.format(indexA, indexB))
    # print('mpsA = {}, mpsB = {}'.format(mpsA, mpsB))
    assert (len(indexA) == 1), funcs.errorMessage(
        "contractMPS can only work on two MPSes sharing one bond, {} obtained."
        .format((indexA, indexB)),
        location=funcName)
    if (mpsA.chi != mpsB.chi):
        warnings.warn(
            funcs.warningMessage(
                warn=
                "chi for two MPSes are not equal: {} and {}, choose minimum for new chi."
                .format(mpsA.chi, mpsB.chi),
                location=funcName))
    indexA = indexA[0]
    indexB = indexB[0]

    mpsA.moveTensor(indexA, mpsA.n - 1, warningFlag=False)
    mpsB.moveTensor(indexB, 0, warningFlag=False)
    # print('mpsA after swap = {}'.format(mpsA))
    # print('mpsB after swap = {}'.format(mpsB))

    tensorA = mpsA.getTensor(mpsA.n - 1)
    tensorB = mpsB.getTensor(0)

    newTensor = contractTwoTensors(tensorA, tensorB)
    if (newTensor.dim == 0):
        return newTensor

    # otherwise, there must be tensors remaining in A or B
    if (mpsA.n > 1):
        newTensor = contractTwoTensors(mpsA.getTensor(-2), newTensor)
        newTensorList = mpsA._tensors[:(-2)] + [newTensor] + mpsB._tensors[1:]
    else:
        newTensor = contractTwoTensors(newTensor, mpsB.getTensor(1))
        newTensorList = mpsA._tensors[:(-1)] + [newTensor] + mpsB._tensors[2:]

    return FreeBoundaryMPS(newTensorList, chi=min(mpsA.chi, mpsB.chi))
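
A hedged end-to-end sketch of contractMPS (the leg conventions, import paths, and the order of makeLink versus FreeBoundaryMPS construction are assumptions; the requirement of exactly one shared bond comes from the assert above):

from CTL.examples.MPS import FreeBoundaryMPS, contractMPS   # assumed import path
from CTL.tensor.tensor import Tensor                        # assumed import path
from CTL.tensor.contract.link import makeLink               # assumed import path

# two 2-site MPSes
a0 = Tensor(shape=(2, 3), labels=['o', 'r'])
a1 = Tensor(shape=(3, 2), labels=['l', 'o'])
makeLink('r', 'l', a0, a1)

b0 = Tensor(shape=(2, 3), labels=['o', 'r'])
b1 = Tensor(shape=(3, 2), labels=['l', 'o'])
makeLink('r', 'l', b0, b1)

# exactly one shared bond between the two MPSes, as contractMPS requires
makeLink('o', 'o', a1, b0)

mpsA = FreeBoundaryMPS([a0, a1], chi=16)
mpsB = FreeBoundaryMPS([b0, b1], chi=16)
result = contractMPS(mpsA, mpsB)    # a new FreeBoundaryMPS over the remaining 'o' legs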
Code Example #6
    def test_outerProduct(self):
        a = Tensor(shape=(2, ), labels=['a'])
        b = Tensor(shape=(2, ), labels=['b'])

        op = contractTwoTensors(a, b, outProductWarning=False)
        self.assertTrue(funcs.compareLists(op.labels, ['a', 'b']))

        a = Tensor(shape=(2, 2, 2), labels=['a', 'b', 'c'])
        b = Tensor(shape=(2, ), labels=['x'])
        c = Tensor(shape=(2, ), labels=['y'])

        makeLink('a', 'x', a, b)
        makeLink('b', 'y', a, c)
        prod = contractTensorList([a, b, c], outProductWarning=False)
        self.assertTrue(funcs.compareLists(prod.labels, ['c']))

        dataA = np.random.random_sample((2, 2))
        dataB = np.random.random_sample((3, 3))
        a = Tensor(shape=(2, 2), labels=['a1', 'a2'], data=dataA)
        b = Tensor(shape=(3, 3), labels=['b1', 'b2'], data=dataB)

        prod = contractTensorList([a, b], outProductWarning=False)
        prod.reArrange(['a1', 'a2', 'b1', 'b2'])

        res = np.zeros((2, 2, 3, 3))
        for i in range(2):
            for j in range(2):
                for k in range(3):
                    for l in range(3):
                        res[(i, j, k, l)] = dataA[(i, j)] * dataB[(k, l)]
        # print(res, prod.a)
        self.assertTrue(funcs.floatArrayEqual(res, prod.a))

        a = Tensor(shape=(2, 2), labels=['a1', 'a2'], data=dataA)
        b = DiagonalTensor(shape=(3, 3),
                           labels=['b1', 'b2'],
                           data=np.diag(dataB))
        prod = contractTensorList([a, b], outProductWarning=False)
        prodData = prod.toTensor(['a1', 'a2', 'b1', 'b2'])
        # prod.reArrange(['a1', 'a2', 'b1', 'b2'])

        res = np.zeros((2, 2, 3, 3))
        for i in range(2):
            for j in range(2):
                for k in range(3):
                    # for l in range(3):
                    res[(i, j, k, k)] = dataA[(i, j)] * dataB[(k, k)]
        # print(res, prod.a)
        self.assertTrue(funcs.floatArrayEqual(res, prodData))

        dataA = np.random.random_sample((2, 2))
        dataB = np.random.random_sample(3)

        a = Tensor(shape=(2, 2), labels=['a1', 'a2'], data=dataA)
        b = DiagonalTensor(shape=(3, 3, 3),
                           labels=['b1', 'b2', 'b3'],
                           data=dataB)
        prod = contractTensorList([a, b], outProductWarning=False)
        prodData = prod.toTensor(['a1', 'a2', 'b1', 'b2', 'b3'])
        # prod.reArrange(['a1', 'a2', 'b1', 'b2'])

        res = np.zeros((2, 2, 3, 3, 3))
        for i in range(2):
            for j in range(2):
                for k in range(3):
                    # for l in range(3):
                    res[(i, j, k, k, k)] = dataA[(i, j)] * dataB[k]
        # print(res, prod.a)
        self.assertTrue(funcs.floatArrayEqual(res, prodData))
Code Example #7
File: tensor.py  Project: CaoRX/CTL
    def __matmul__(self, b):
        return contractTwoTensors(ta=self, tb=b)
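
Since __matmul__ simply forwards to contractTwoTensors, two linked tensors can be contracted with the @ operator. A short sketch (import paths assumed):

from CTL.tensor.tensor import Tensor               # assumed import path
from CTL.tensor.contract.link import makeLink      # assumed import path

a = Tensor(shape=(2, 3), labels=['i', 'j'])
b = Tensor(shape=(3, 4), labels=['j', 'k'])
makeLink('j', 'j', a, b)

c = a @ b           # same as contractTwoTensors(ta=a, tb=b)
print(c.labels)     # the uncontracted legs 'i' and 'k'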
Code Example #8
def contractAndCostWithSequence(tensorList,
                                seq=None,
                                bf=False,
                                typicalDim=10,
                                inplace=False,
                                outProductWarning=True,
                                greedy=False):
    """
    The main function for contracting a list of tensors.

    Given a list of tensors and some options, perform the contraction.

    Supports both a user-supplied contraction sequence and an auto-generated one.

    Parameters
    ----------
    tensorList : list of Tensor
        The tensors to be contracted.
    seq : None or list of length-2 tuples of int
        The sequence to be used for contraction, generated with generateGreedySequence or generateOptimalSequence. If None, it is auto-generated here.
    bf : bool
        Whether to generate the sequence with a brute-force algorithm.
    typicalDim : int
        If an int, then in the cost calculation every bond is assumed to have dimension typicalDim.
        If None, the real bond dimensions are used.
    inplace : bool, default False
        Whether to contract in place (so the original tensors will be destroyed). True is more efficient, while False keeps the original data.
    outProductWarning : bool, default True
        Whether to raise a warning when an outer product is found during contraction.
        In some cases, taking an outer product first can give a better order.
        However, an outer product may also appear when contracting two sets of tensors with no bonds between them, which usually comes from a mistake. So if you do not intend to form outer products, leaving the flag True can help debugging.
    greedy : bool, default False
        Whether to generate the sequence with a greedy algorithm. If True, the sequence may not be optimal. This is intended for large tensor networks where finding the optimal sequence is very time consuming.

    Returns
    -------
    Tensor
        The result tensor of the contraction.
    totalCost : float
        The exact cost of this contraction process.
    """
    if (seq is None):
        # print('{} tensors'.format(len(tensorList)))
        if (greedy):
            seq = generateGreedySequence(tensorList)
        else:
            seq = generateOptimalSequence(tensorList,
                                          bf=bf,
                                          typicalDim=typicalDim)
        # print(seq)
    totalCost = 0.0
    totalLevel = 0

    if (not inplace):
        tensorList = copyTensorList(tensorList)

    for s, t in seq:
        # print(tensorList[s], tensorList[t])
        cost, costLevel = contractCost(tensorList[s], tensorList[t])
        totalCost += cost
        totalLevel = max(totalLevel, costLevel)
        tensorList[min(s, t)] = contractTwoTensors(
            tensorList[s], tensorList[t], outProductWarning=outProductWarning)

    return tensorList[0], totalCost
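
A usage sketch for contractAndCostWithSequence (the import path of the function itself is an assumption; Tensor and makeLink usage follows the other examples on this page):

from CTL.tensor.tensor import Tensor                                          # assumed import path
from CTL.tensor.contract.link import makeLink                                 # assumed import path
from CTL.tensor.contract.optimalContract import contractAndCostWithSequence   # assumed import path

a = Tensor(shape=(2, 3), labels=['i', 'j'])
b = Tensor(shape=(3, 4), labels=['j', 'k'])
c = Tensor(shape=(4, 2), labels=['k', 'l'])
makeLink('j', 'j', a, b)
makeLink('k', 'k', b, c)

# seq is None, so the sequence is auto-generated (optimal by default, greedy=True for large networks);
# typicalDim=None makes the cost estimate use the real bond dimensions
res, cost = contractAndCostWithSequence([a, b, c], typicalDim=None)
print(res.labels, cost)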
Code Example #9
def SchimdtDecomposition(ta,
                         tb,
                         chi,
                         squareRootSeparation=False,
                         swapLabels=([], []),
                         singularValueEps=1e-10):
    '''
    Schmidt decomposition between tensors ta and tb;
    returns ta, s, tb.
    ta should be in canonical form, that is, a a^dagger = I.
    To do this, first contract ta and tb while keeping track of which legs come from a and which from b,
    then SVD the resulting matrix and keep the required chi singular values:
    take the first chi singular vectors for a and for b, and create a diagonal tensor for the singular values.

    If squareRootSeparation is True: divide s into two square-root diagonal tensors,
    contract one into ta and the other into tb, and return ta, None, tb.

    If swapLabels is not ([], []): swap the two sets of labels in the output, so the locations of the two tensors on the MPS are swapped.
    E.g. t[i], t[i + 1] = SchimdtDecomposition(t[i], t[i + 1], chi = chi, squareRootSeparation = True, swapLabels = (['o'], ['o']))
    swaps the two tensors t[i] & t[i + 1], both of which have an "o" leg connected to the outside,
    while the other legs (e.g. internal legs in the MPS, usually 'l' and 'r') are not affected.
    '''

    funcName = 'CTL.examples.Schimdt.SchimdtDecomposition'
    sb = shareBonds(ta, tb)
    assert (len(sb) > 0), funcs.errorMessage(
        "Schimdt Decomposition cannot accept two tensors without common bonds, {} and {} gotten."
        .format(ta, tb),
        location=funcName)
    assert (ta.tensorLikeFlag == tb.tensorLikeFlag), funcs.errorMessage(
        "Schimdt Decomposition must havge two objects being either Tensor or TensorLike simultaneously, but {} and {} obtained."
        .format(ta, tb),
        location=funcName)

    TLFlag = ta.tensorLikeFlag
    sbDim = funcs.tupleProduct(tuple([bond.legs[0].dim for bond in sb]))

    sharedLabelA = sb[0].sideLeg(ta).name
    sharedLabelB = sb[0].sideLeg(tb).name
    # if (sharedLabelA.startswith('a-')):
    #     raise ValueError(funcs.errorMessage(err = "shared label {} of tensor A starts with 'a-'.".format(sharedLabelA), location = funcName))
    # if (sharedLabelB.startswith('b-')):
    #     raise ValueError(funcs.errorMessage(err = "shared label {} of tensor B starts with 'b-'.".format(sharedLabelB), location = funcName))

    # assert (ta.xp == tb.xp), funcs.errorMessage("Schimdt Decomposition cannot accept two tensors with different xp: {} and {} gotten.".format(ta.xp, tb.xp), location = funcName)

    assert (len(swapLabels[0]) == len(swapLabels[1])), funcs.errorMessage(
        err="invalid swap labels {}.".format(swapLabels), location=funcName)
    assert ta.labelsInTensor(swapLabels[0]), funcs.errorMessage(
        err="{} not in tensor {}.".format(swapLabels[0], ta),
        location=funcName)
    assert tb.labelsInTensor(swapLabels[1]), funcs.errorMessage(
        err="{} not in tensor {}.".format(swapLabels[1], tb),
        location=funcName)

    ta.addTensorTag('a')
    tb.addTensorTag('b')
    for swapLabel in swapLabels[0]:
        ta.renameLabel('a-' + swapLabel, 'b-' + swapLabel)
    for swapLabel in swapLabels[1]:
        tb.renameLabel('b-' + swapLabel, 'a-' + swapLabel)

    tot = contractTwoTensors(ta, tb)

    legA = [leg for leg in tot.legs if leg.name.startswith('a-')]
    legB = [leg for leg in tot.legs if leg.name.startswith('b-')]

    labelA = [leg.name for leg in legA]
    labelB = [leg.name for leg in legB]
    # not remove a- and b- here, since we need to add an internal leg, and we need to distinguish it from others

    shapeA = tuple([leg.dim for leg in legA])
    shapeB = tuple([leg.dim for leg in legB])

    totShapeA = funcs.tupleProduct(shapeA)
    totShapeB = funcs.tupleProduct(shapeB)

    if (TLFlag):
        u = None
        vh = None
        s = None
        chi = min([chi, totShapeA, totShapeB, sbDim])
    else:
        mat = tot.toMatrix(rows=labelA, cols=labelB)

        # np = ta.xp # default numpy

        u, s, vh = xplib.xp.linalg.svd(mat)

        chi = min([
            chi, totShapeA, totShapeB,
            funcs.nonZeroElementN(s, singularValueEps)
        ])
        u = u[:, :chi]
        s = s[:chi]
        vh = vh[:chi]

    if (squareRootSeparation):
        if (TLFlag):
            uS = None
            vS = None
        else:
            sqrtS = xplib.xp.sqrt(s)
            uS = funcs.rightDiagonalProduct(u, sqrtS)
            vS = funcs.leftDiagonalProduct(vh, sqrtS)

        outLegForU = Leg(None, chi, name=sharedLabelA)
        # inLegForU = Leg(None, chi, name = sharedLabelB)
        # internalLegForS1 = Leg(None, chi, name = 'o')
        # internalLegForS2 = Leg(None, chi, name = 'o')
        # inLegForV = Leg(None, chi, name = sharedLabelA)
        outLegForV = Leg(None, chi, name=sharedLabelB)

        uTensor = Tensor(data=uS,
                         legs=legA + [outLegForU],
                         shape=shapeA + (chi, ),
                         tensorLikeFlag=TLFlag)
        # s1Tensor = DiagonalTensor(data = xplib.xp.sqrt(s), legs = [inLegForU, internalLegForS1], shape = (chi, chi))
        # s2Tensor = DiagonalTensor(data = xplib.xp.sqrt(s), legs = [internalLegForS2, inLegForV], shape = (chi, chi))
        vTensor = Tensor(data=vS,
                         legs=[outLegForV] + legB,
                         shape=(chi, ) + shapeB,
                         tensorLikeFlag=TLFlag)

        # legs should be automatically set by Tensor / DiagonalTensor, so no need for setTensor

        # outLegForU.setTensor(uTensor)
        # outLegForV.setTensor(vTensor)

        # inLegForU.setTensor(sTensor)
        # inLegForV.setTensor(sTensor)

        # remove a- and b-
        for leg in legA:
            if (leg.name.startswith('a-')):
                leg.name = leg.name[2:]

        for leg in legB:
            if (leg.name.startswith('b-')):
                leg.name = leg.name[2:]

        makeLink(outLegForU, outLegForV)

        # makeLink(outLegForU, inLegForU)
        # makeLink(outLegForV, inLegForV)
        # makeLink(internalLegForS1, internalLegForS2)
        # uTensor = contractTwoTensors(uTensor, s1Tensor)
        # vTensor = contractTwoTensors(vTensor, s2Tensor)
        return uTensor, None, vTensor

    outLegForU = Leg(None, chi, name=sharedLabelA)
    inLegForU = Leg(None, chi, name=sharedLabelB)
    inLegForV = Leg(None, chi, name=sharedLabelA)
    outLegForV = Leg(None, chi, name=sharedLabelB)

    uTensor = Tensor(data=u,
                     legs=legA + [outLegForU],
                     shape=shapeA + (chi, ),
                     tensorLikeFlag=TLFlag)
    sTensor = DiagonalTensor(data=s,
                             legs=[inLegForU, inLegForV],
                             shape=(chi, chi),
                             tensorLikeFlag=TLFlag)
    vTensor = Tensor(data=vh,
                     legs=[outLegForV] + legB,
                     shape=(chi, ) + shapeB,
                     tensorLikeFlag=TLFlag)

    # legs should be automatically set by Tensor / DiagonalTensor, so no need for setTensor

    # outLegForU.setTensor(uTensor)
    # outLegForV.setTensor(vTensor)

    # inLegForU.setTensor(sTensor)
    # inLegForV.setTensor(sTensor)

    # remove a- and b-
    for leg in legA:
        if (leg.name.startswith('a-')):
            leg.name = leg.name[2:]

    for leg in legB:
        if (leg.name.startswith('b-')):
            leg.name = leg.name[2:]

    makeLink(outLegForU, inLegForU)
    makeLink(outLegForV, inLegForV)

    return uTensor, sTensor, vTensor
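
A minimal usage sketch of SchimdtDecomposition (import paths and leg names are assumptions; the call pattern matches the canonicalize example at the top of this page):

from CTL.examples.Schimdt import SchimdtDecomposition   # assumed import path
from CTL.tensor.tensor import Tensor                    # assumed import path
from CTL.tensor.contract.link import makeLink           # assumed import path

ta = Tensor(shape=(2, 4), labels=['o', 'r'])
tb = Tensor(shape=(4, 2), labels=['l', 'o'])
makeLink('r', 'l', ta, tb)      # the shared bond the decomposition acts on

u, s, v = SchimdtDecomposition(ta, tb, chi=8)
# u keeps ta's open legs plus a new bond leg, s is a DiagonalTensor of the kept
# singular values, v keeps tb's open legs; the bonds u-s and s-v are already
# created via makeLink inside the function, so u @ s @ v reassembles the pair
# (up to the chi truncation).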