Example #1
File: mps_node_np.py Project: iwrache/catn
    def cano_to(self, idx):
        """
        Move the canonical position to site idx.
        """
        if (idx == -1):
            idx = len(self.mps) - 1
        if (self.cano == idx):  # there is nothing to do
            return
        if (self.cano < idx):
            for i in range(self.cano, idx):
                dl = self.mps[i].shape[0]
                d = self.mps[i].shape[1]
                #Q,R = torch.qr(self.mps[i].reshape(dl * d,-1))
                #U,s,V = torch.svd(self.mps[i].reshape(dl * d,-1))
                U, s, V = svd(self.mps[i].reshape(dl * d, -1))
                #Q=U
                #R=torch.diag(s)@V.t()
                seff = s[s > self.cutoff]  # singular values above the cutoff
                myd = seff.shape[0]
                if (myd == 0):  # everything is below the cutoff: keep the full rank
                    myd = U.shape[1]
                else:
                    s = seff
                Q = U[:, :myd]  # left isometry becomes the new site tensor
                R = np.diag(s) @ (V[:, :myd]).T  # weight absorbed by the right neighbor
                self.mps[i] = Q.reshape(dl, d, -1)
                self.mps[i + 1] = np.einsum("ij,jab->iab", R, self.mps[i + 1])
                self.cano = i + 1
        else:
            for i in range(self.cano, idx, -1):
                dr = self.mps[i].shape[2]
                d = self.mps[i].shape[1]
                #Q,R = torch.qr(self.mps[i].reshape(-1,d*dr).t())
                #U,s,V = torch.svd(self.mps[i].reshape(-1,d*dr).t())
                U, s, V = svd(self.mps[i].reshape(-1, d * dr).T)
                #Q=U
                #R=torch.diag(s)@V.t()
                seff = s[s > self.cutoff]
                myd = seff.shape[0]
                if (myd == 0):
                    myd = U.shape[1]
                else:
                    s = seff
                Q = U[:, :myd]
                R = np.diag(s) @ (V[:, :myd].T)

                self.mps[i] = Q.T.reshape(-1, d, dr)
                self.mps[i - 1] = np.einsum("abc,ci->abi", self.mps[i - 1],
                                            R.T)
                self.cano = i - 1
        return 0
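
The core of cano_to() is a single truncated-SVD split: reshape the current site into a matrix, keep the singular values above self.cutoff, make the left factor the new (left-canonical) site, and absorb the remaining weight into the next site. Below is a minimal standalone sketch of one right-moving step in plain NumPy; the shapes and the cutoff are arbitrary illustration values, and note that np.linalg.svd returns Vh directly, whereas the project's svd wrapper appears to return V (hence the extra .T in the method above).

import numpy as np

rng = np.random.default_rng(0)
cutoff = 1e-12

# two neighboring MPS sites: (left bond, physical, right bond)
A = rng.standard_normal((3, 2, 4))
B = rng.standard_normal((4, 2, 5))

dl, d, dr = A.shape
U, s, Vh = np.linalg.svd(A.reshape(dl * d, dr), full_matrices=False)
keep = max(int((s > cutoff).sum()), 1)       # never keep zero columns

Q = U[:, :keep]                              # new left-canonical site
R = np.diag(s[:keep]) @ Vh[:keep, :]         # weight pushed to the right
A_new = Q.reshape(dl, d, keep)
B_new = np.einsum("ij,jab->iab", R, B)       # absorb R into the neighbor

# the two-site contraction is unchanged (up to truncation)
before = np.einsum("iak,kbj->iabj", A, B)
after = np.einsum("iak,kbj->iabj", A_new, B_new)
print(np.allclose(before, after))            # True when nothing was cut
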
Example #2
File: tn_np.py Project: iwrache/catn
    def cut_bondim(self, i, idx_j_in_i):
        error = 0

        j = self.tensors[i].neighbor[idx_j_in_i]
        idx_i_in_j = self.tensors[j].find_neighbor(i)
        if (self.verbose >= 1):
            sys.stdout.write(
                "  %s,%s --->" %
                (str(list(self.tensors[i].mps[idx_j_in_i].shape)),
                 str(list(self.tensors[j].mps[idx_i_in_j].shape))))
            sys.stdout.flush()
        da_l = self.tensors[i].mps[idx_j_in_i].shape[0]
        da_r = self.tensors[i].mps[idx_j_in_i].shape[2]
        d = self.tensors[i].mps[idx_j_in_i].shape[1]

        db_l = self.tensors[j].mps[idx_i_in_j].shape[0]
        db_r = self.tensors[j].mps[idx_i_in_j].shape[2]

        mati = self.tensors[i].mps[idx_j_in_i].transpose([0, 2,
                                                          1]).reshape(-1, d)

        matj = self.tensors[j].mps[idx_i_in_j].transpose([0, 2,
                                                          1]).reshape(-1, d)
        merged_matrix = mati @ matj.T
        try:
            [U, s, V] = svd(merged_matrix)
        except:
            print("SVD failed: shape of merged_matrix", merged_matrix.shape)
            sys.exit(-1)
        s_eff = s[s > self.cutoff]
        if (len(s_eff) == 0):
            s_eff = s[:1]
        error += s[len(s_eff):].sum()
        myd = min(len(s_eff), self.Dmax)
        if (myd == 0):
            print("Warning: encountered ZERO matrix in cut_bondim()")
            myd = 1
            mati = (U[:, 0] * s[0])[:, None]
            matj = ((s[0] * V[:, 0].T).T)[:, None]
        else:
            error = error + s_eff[myd:].sum()
            s_eff = s_eff[:myd]
            s = np.diag(np.sqrt(s_eff))
            U = U[:, :myd]
            V = V[:, :myd]
            mati = U @ s
            matj = (s @ V.T).T
        mati = mati.reshape(da_l, da_r, mati.shape[1]).transpose([0, 2, 1])
        self.tensors[i].mps[idx_j_in_i] = mati
        matj = matj.reshape(db_l, db_r, matj.shape[1]).transpose([0, 2, 1])
        self.tensors[j].mps[idx_i_in_j] = matj

        print(list(self.tensors[i].mps[idx_j_in_i].shape),
              list(self.tensors[j].mps[idx_i_in_j].shape))
        return error
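
Stripped of the MPS bookkeeping, cut_bondim() compresses the shared leg of two nodes by taking a truncated SVD of the contracted pair and giving each side one factor with a new, smaller bond. A minimal NumPy sketch of that step, with arbitrary illustration shapes and a made-up Dmax:

import numpy as np

rng = np.random.default_rng(1)
cutoff, Dmax = 1e-12, 3

d = 6                                   # current shared bond dimension
mati = rng.standard_normal((8, d))      # node i, other legs flattened into rows
matj = rng.standard_normal((10, d))     # node j, other legs flattened into rows

U, s, Vh = np.linalg.svd(mati @ matj.T, full_matrices=False)
keep = min(max(int((s > cutoff).sum()), 1), Dmax)
error = s[keep:].sum()                  # discarded singular weight

sq = np.diag(np.sqrt(s[:keep]))
new_i = U[:, :keep] @ sq                # (8, keep)
new_j = (sq @ Vh[:keep, :]).T           # (10, keep)

# new_i @ new_j.T approximates mati @ matj.T with a bond of size keep
print(keep, error, np.linalg.norm(mati @ matj.T - new_i @ new_j.T))
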
Example #3
File: mps_node_np.py Project: iwrache/catn
    def compress(self):
        """
        Compress the whole mps.
        First, do a left canonicalization to move self.cano to -1 (the last site).
        Second, do two-site merging and splitting to move self.cano back to 0.
        """
        error = 0
        if len(self.mps) == 0:
            return error
        self.left_canonical()  # now self.cano is at the bottom (right)
        for j in range(len(self.mps) - 1, 0, -1):
            i = j - 1
            tl = self.mps[i]
            tr = self.mps[j]

            d0 = tl.shape[0]
            d1 = tl.shape[1]

            d2 = tr.shape[1]
            d3 = tr.shape[2]  # notice the difference to self.swap()
            mat = np.einsum("ijk,kab->ijab", tl, tr).reshape(
                d0 * d1, d2 * d3)  # notice the difference to self.swap()
            [U, s, V] = svd(mat)
            s_eff = s[s > self.cutoff]
            myd = min(len(s_eff), self.chi)
            if (myd == 0):
                print(
                    "Warning in compress(), probably a zero matrix is encountered !!! myd=",
                    myd)
                sys.exit(-8)
            s_eff = s_eff[:myd]
            error = error + s[myd:].sum()
            U = U[:, :myd]
            V = V[:, :myd]
            s = np.diag(s_eff)
            U = U @ s
            self.mps[i] = U.reshape(d0, d1, myd)
            self.mps[j] = V.T.reshape(myd, d2, d3)
        self.cano = 0
        return error
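
A single merge-split step of this sweep can be checked in isolation: contract two neighboring sites, truncate the SVD of the merged matrix to chi, and split again, with the singular-value weight kept on the left so the canonical center moves one site towards 0. A small sketch with toy shapes and a made-up chi:

import numpy as np

rng = np.random.default_rng(2)
cutoff, chi = 1e-12, 2

# neighboring sites sharing a bond of size 4 that should shrink to chi
tl = rng.standard_normal((3, 2, 4))
tr = rng.standard_normal((4, 2, 3))

d0, d1, _ = tl.shape
_, d2, d3 = tr.shape
mat = np.einsum("ijk,kab->ijab", tl, tr).reshape(d0 * d1, d2 * d3)

U, s, Vh = np.linalg.svd(mat, full_matrices=False)
keep = min(max(int((s > cutoff).sum()), 1), chi)
error = s[keep:].sum()

tl_new = (U[:, :keep] @ np.diag(s[:keep])).reshape(d0, d1, keep)  # carries the weight
tr_new = Vh[:keep, :].reshape(keep, d2, d3)                       # right isometry

approx = np.einsum("ijk,kab->ijab", tl_new, tr_new).reshape(d0 * d1, d2 * d3)
print(error, np.linalg.norm(mat - approx))   # truncation error in two measures
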
Example #4
File: mps_node_np.py Project: iwrache/catn
    def raw2mps(self, tensor):
        if (len(tensor.shape) == 0):  # scalar, an isolated node
            return []
        shape = [1] + list(tensor.shape) + [1]
        if (len(tensor.shape) == 1):  # degree 1, leaf
            return [tensor.reshape(shape)]
        order = len(tensor.shape)
        tensor = tensor.reshape(1, -1)
        mps = []
        for i in range(order - 1):
            dleft = tensor.shape[0]
            tensor = tensor.reshape(dleft * shape[i + 1], -1)
            [U, s, V] = svd(tensor)
            s_eff = s[s > self.cutoff]
            myd = min(len(s_eff), self.chi)
            s_eff = s_eff[:myd]
            U = U[:, :myd]
            V = V[:, :myd]
            s = np.diag(s_eff)
            mps.append(U.reshape(dleft, shape[i + 1], myd))
            tensor = s @ V.T
        mps.append(tensor.reshape(myd, shape[order], 1))
        self.cano = order - 1  # left canonical
        return mps
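
raw2mps() peels off one physical leg at a time with an SVD. On a small tensor the result can be verified by contracting the chain back together; the sketch below uses plain NumPy, no truncation (the cutoff and chi are chosen so they never bite), and mirrors the loop above.

import numpy as np

rng = np.random.default_rng(3)
T = rng.standard_normal((2, 3, 2))        # a degree-3 tensor
cutoff, chi = 1e-12, 64

shape = [1] + list(T.shape) + [1]
work = T.reshape(1, -1)
mps = []
for i in range(T.ndim - 1):
    dleft = work.shape[0]
    work = work.reshape(dleft * shape[i + 1], -1)
    U, s, Vh = np.linalg.svd(work, full_matrices=False)
    keep = min(int((s > cutoff).sum()), chi)
    mps.append(U[:, :keep].reshape(dleft, shape[i + 1], keep))
    work = np.diag(s[:keep]) @ Vh[:keep, :]
mps.append(work.reshape(-1, shape[T.ndim], 1))

# contract the chain back into a dense tensor and compare
rebuilt = mps[0]
for site in mps[1:]:
    rebuilt = np.tensordot(rebuilt, site, axes=([rebuilt.ndim - 1], [0]))
rebuilt = rebuilt.reshape(T.shape)
print(np.allclose(rebuilt, T))            # True when nothing was truncated
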
Example #5
File: tn_np.py Project: iwrache/catn
    def cut_bondim_opt(self, i, idx_j_in_i):
        error = 0

        j = self.tensors[i].neighbor[idx_j_in_i]
        idx_i_in_j = self.tensors[j].find_neighbor(i)
        self.tensors[i].cano_to(idx_j_in_i)
        self.tensors[j].cano_to(idx_i_in_j)
        #        print("cano_i",self.tensors[i].cano,idx_j_in_i)
        #        print("cano_j",self.tensors[j].cano,idx_i_in_j)
        Dold = self.tensors[i].mps[idx_j_in_i].shape[1]
        if (self.verbose >= 1):
            sys.stdout.write(
                "  %s,%s ---> " %
                (str(list(self.tensors[i].mps[idx_j_in_i].shape)),
                 str(list(self.tensors[j].mps[idx_i_in_j].shape))))
            sys.stdout.flush()
        da_l = self.tensors[i].mps[idx_j_in_i].shape[0]
        da_r = self.tensors[i].mps[idx_j_in_i].shape[2]
        d = self.tensors[i].mps[idx_j_in_i].shape[1]

        db_l = self.tensors[j].mps[idx_i_in_j].shape[0]
        db_r = self.tensors[j].mps[idx_i_in_j].shape[2]

        mati = self.tensors[i].mps[idx_j_in_i].transpose([0, 2, 1]).reshape(
            da_l * da_r, d)

        matj = self.tensors[j].mps[idx_i_in_j].transpose([0, 2, 1]).reshape(
            db_l * db_r, d)

        flag = False
        #if(mati.shape[0]*matj.shape[0] < mati.shape[1]*matj.shape[1]):
        #        if(1==2):
        #            merged_matrix = [email protected]
        #        else:
        #            flag=True
        #            qi,ri = np.linalg.qr(mati)
        #            qj,rj = np.linalg.qr(matj)
        #            merged_matrix = [email protected]
        #
        flag_left = False
        if (mati.shape[0] > mati.shape[1]):
            qi, ri = np.linalg.qr(mati)
            flag_left = True
        else:
            ri = mati

        flag_right = False
        if (matj.shape[0] > matj.shape[1]):
            qj, rj = np.linalg.qr(matj)
            flag_right = True
        else:
            rj = matj

        merged_matrix = ri @ rj.T

        [U, s, V] = svd(merged_matrix)
        #        s_str = str(["%.3f"%t for t in s])
        s_bak = s
        s_eff = s[s > self.cutoff]
        if (len(s_eff) == 0):
            s_eff = s[:1]
        error = error + s[len(s_eff):].sum()
        myd = min(len(s_eff), self.Dmax)

        if (myd == 0):
            print("Warning: encountered ZERO matrix in cut_bondim()")
            myd = 1
            mati = (U[:, 0] * s[0])[:, None]
            matj = ((s[0] * V[:, 0].T).T)[:, None]
        else:
            error = error + s[myd:].sum()
            s_eff = s_eff[:myd]
            s = np.diag(np.sqrt(s_eff))
            U = U[:, :myd]
            V = V[:, :myd]
            mati = U @ s
            matj = (s @ V.T).T
#        if flag:
#            mati = qi @ mati
#            matj = qj @ matj
        if flag_left:
            mati = qi @ mati
        if flag_right:
            matj = qj @ matj

        mati = mati.reshape(da_l, da_r, mati.shape[1]).transpose([0, 2, 1])
        self.tensors[i].mps[idx_j_in_i] = mati
        matj = matj.reshape(db_l, db_r, matj.shape[1]).transpose([0, 2, 1])
        self.tensors[j].mps[idx_i_in_j] = matj
        if (self.verbose >= 1):
            if (self.tensors[i].mps[idx_j_in_i].shape[1] < Dold):
                sys.stdout.write(
                    str([
                        list(self.tensors[i].mps[idx_j_in_i].shape),
                        list(self.tensors[j].mps[idx_i_in_j].shape)
                    ]))
                #                sys.stdout.write(" %s"%s_str)
                #                print(s_bak)
                print(" ")
            else:
                print(" ")
        return error
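
The only difference from cut_bondim() is the QR-before-SVD trick: when a factor is tall, its thin QR lets the SVD run on a small core matrix, and the orthogonal Q factors are reapplied afterwards without changing the result. A small NumPy sketch checking that equivalence (shapes are arbitrary):

import numpy as np

rng = np.random.default_rng(4)
mati = rng.standard_normal((50, 4))     # tall, so QR pays off
matj = rng.standard_normal((30, 4))

qi, ri = np.linalg.qr(mati)             # ri is only 4 x 4
qj, rj = np.linalg.qr(matj)

# SVD of the small core instead of the 50 x 30 product
U, s, Vh = np.linalg.svd(ri @ rj.T, full_matrices=False)
left = qi @ U                           # lift back with the orthogonal factors
right = qj @ Vh.T

direct = mati @ matj.T
direct_s = np.linalg.svd(direct, compute_uv=False)
print(np.allclose(left @ np.diag(s) @ right.T, direct))   # same product
print(np.allclose(s, direct_s[: len(s)]))                 # same singular values
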
Example #6
File: tn_np.py Project: iwrache/catn
    def cut_bondim_opt2(self, i, idx_j_in_i):
        error = 0

        j = self.tensors[i].neighbor[idx_j_in_i]
        idx_i_in_j = self.tensors[j].find_neighbor(i)
        self.tensors[i].cano_to(idx_j_in_i)
        self.tensors[j].cano_to(idx_i_in_j)
        #        print("cano_i",self.tensors[i].cano,idx_j_in_i)
        #        print("cano_j",self.tensors[j].cano,idx_i_in_j)
        if (self.verbose >= 1):
            sys.stdout.write(
                "  %s,%s --->" %
                (str(list(self.tensors[i].mps[idx_j_in_i].shape)),
                 str(list(self.tensors[j].mps[idx_i_in_j].shape))))
            sys.stdout.flush()
        da_l = self.tensors[i].mps[idx_j_in_i].shape[0]
        da_r = self.tensors[i].mps[idx_j_in_i].shape[2]
        d = self.tensors[i].mps[idx_j_in_i].shape[1]

        db_l = self.tensors[j].mps[idx_i_in_j].shape[0]
        db_r = self.tensors[j].mps[idx_i_in_j].shape[2]

        mati = self.tensors[i].mps[idx_j_in_i].transpose([0, 2, 1]).reshape(
            da_l * da_r, d)

        matj = self.tensors[j].mps[idx_i_in_j].transpose([0, 2, 1]).reshape(
            db_l * db_r, d)

        flag = False
        #if(mati.shape[0]*matj.shape[0] < mati.shape[1]*matj.shape[1]):
        if (1 == 2):  # direct-product branch disabled; the QR path below is always taken
            merged_matrix = mati @ matj.T
        else:
            flag = True
            qi, ri = np.linalg.qr(mati)
            qj, rj = np.linalg.qr(matj)
            merged_matrix = ri @ rj.T

        [U, s, V] = svd(merged_matrix)
        s_eff = s[s > self.cutoff]
        if (len(s_eff) == 0):
            s_eff = s[:1]
        error = error + s[len(s_eff):].sum()
        myd = min(len(s_eff), self.Dmax)

        if (myd == 0):
            print("Warning: encountered ZERO matrix in cut_bondim()")
            myd = 1
            mati = (U[:, 0] * s[0])[:, None]
            matj = ((s[0] * V[:, 0].T).T)[:, None]
        else:
            error = error + s_eff[myd:].sum()
            s_eff = s_eff[:myd]
            s = np.diag(np.sqrt(s_eff))
            U = U[:, :myd]
            V = V[:, :myd]
            mati = U @ s
            matj = (s @ V.T).T
        if flag:
            mati = qi @ mati
            matj = qj @ matj

        mati = mati.reshape(da_l, da_r, mati.shape[1]).transpose([0, 2, 1])
        self.tensors[i].mps[idx_j_in_i] = mati
        matj = matj.reshape(db_l, db_r, matj.shape[1]).transpose([0, 2, 1])
        self.tensors[j].mps[idx_i_in_j] = matj

        if (self.verbose >= 1):
            print(list(self.tensors[i].mps[idx_j_in_i].shape),
                  list(self.tensors[j].mps[idx_i_in_j].shape))
        return error
Example #7
File: mps_node_np.py Project: iwrache/catn
    def swap(self, i, j):
        """
        swap index i and index j in mps, i and j must be consecutive indices
        Assuming that canonical form is maintained.
        Default direction is i \to j, that is the canonical position will be j after swap
        The canonicalization is maintained.
        """
        error = 0
        #        sys.stdout.write(" swap %d %d cano=%d "%(i,j,self.cano));sys.stdout.flush()
        if (j < 0 or j > len(self.mps)):
            return
#        print("in swap(), move cano")
        if (self.cano != i and self.cano != j):
            self.cano_to(i if abs(self.cano - i) < abs(self.cano - j) else j)

        if (abs(i - j) != 1):
            print(
                "swap(): i and j must be consecutive indices, there must be something wrong"
            )
            sys.exit(3)

        if (i < j):
            tl = self.mps[i]
            tr = self.mps[j]
        else:
            tl = self.mps[j]
            tr = self.mps[i]

        d0 = tl.shape[0]
        d1 = tr.shape[1]
        d2 = tl.shape[1]
        d3 = tr.shape[2]
        mat = np.einsum("ijk,kab->iajb", tl, tr).reshape(d0 * d1,
                                                         d2 * d3)  # swapped
        if (self.swapopt and ((mat.shape[0] > 7000 and mat.shape[1] > 7000) or
                              (mat.shape[0] > 20000 or mat.shape[1] > 20000))):
            [U, s, V] = rsvd(mat, self.chi, 10, 10)
        else:
            [U, s, V] = svd(mat)
        s_eff = s[s > self.cutoff]
        if (len(s_eff) == 0):
            s_eff = s[:1]
        myd = min(len(s_eff), self.chi)
        if (myd == 0):
            print(
                "Warning in swap(), probably a zero matrix is encountered !!! myd=",
                myd)
            sys.exit(-7)
        s_eff = s_eff[:myd]
        error = error + s[myd:].sum()
        U = U[:, :myd]
        V = V[:, :myd]
        s = np.diag(s_eff)
        if (i < j):  #going right
            V = s @ V.T
            self.mps[i] = U.reshape(d0, d1, myd)
            self.mps[j] = V.reshape(myd, d2, d3)
        else:  # going left
            U = U @ s
            self.mps[j] = U.reshape(d0, d1, myd)
            self.mps[i] = V.T.reshape(myd, d2, d3)
        self.cano = j
        return error
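
The exchange itself happens in the einsum string: "ijk,kab->iajb" merges the two sites with the physical legs already swapped, and the truncated SVD then splits the merged block back into two sites whose physical dimensions have traded places. A toy check of that reshape bookkeeping (arbitrary shapes, no truncation):

import numpy as np

rng = np.random.default_rng(5)
cutoff, chi = 1e-12, 16

tl = rng.standard_normal((2, 3, 4))   # (left bond d0, physical d2, shared bond)
tr = rng.standard_normal((4, 5, 2))   # (shared bond, physical d1, right bond d3)

d0, d2 = tl.shape[0], tl.shape[1]
d1, d3 = tr.shape[1], tr.shape[2]

# merge with the physical legs exchanged: rows index (d0, d1), columns index (d2, d3)
mat = np.einsum("ijk,kab->iajb", tl, tr).reshape(d0 * d1, d2 * d3)

U, s, Vh = np.linalg.svd(mat, full_matrices=False)
keep = min(max(int((s > cutoff).sum()), 1), chi)

new_l = U[:, :keep].reshape(d0, d1, keep)                         # left site now carries d1
new_r = (np.diag(s[:keep]) @ Vh[:keep, :]).reshape(keep, d2, d3)  # right site carries d2

rebuilt = np.einsum("iak,kjb->iajb", new_l, new_r)
print(np.allclose(rebuilt, np.einsum("ijk,kab->iajb", tl, tr)))   # True if nothing was cut
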
Example #8
File: mps_node_np.py Project: iwrache/catn
    def compress_opt(self):
        """
        Compress the whole mps.
        First, do a left canonicalization to move self.cano to -1 (the last site).
        Second, do two-site merging and splitting to move self.cano back to 0.
        A QR decomposition is done before each SVD.
        """
        error = 0
        if len(self.mps) == 0:
            return error
        self.left_canonical()  # now self.cano is at the bottom (right)
        for j in range(len(self.mps) - 1, 0, -1):
            i = j - 1
            tl = self.mps[i]
            tr = self.mps[j]

            d0 = tl.shape[0]
            d1 = tl.shape[1]

            d2 = tr.shape[1]
            d3 = tr.shape[2]  # notice the difference to self.swap()

            dd = tl.shape[2]
            assert (dd == tr.shape[0])
            #            mat = torch.einsum("ijk,kab->ijab",tl,tr).reshape(d0*d1,d2*d3)  # notice the difference to self. swap()
            matl = tl.reshape(d0 * d1, dd)
            matr = tr.reshape(dd, d2 * d3)

            #            flag=False
            #if(matl.shape[0]*matr.shape[1] > dd*dd):
            #            if(1==1):
            #                flag=True
            #                Ql,Rl = np.linalg.qr(matl)
            #                Qr,Rr = np.linalg.qr(matr.T)
            #                mat = [email protected]
            #            else:
            #                mat = matl@matr

            flag_left = False
            flag_right = False

            if (matl.shape[0] > matl.shape[1]):
                flag_left = True
                Ql, Rl = np.linalg.qr(matl)
            else:
                Rl = matl

            if (matr.shape[0] < matr.shape[1]):
                flag_right = True
                Qr, Rr = np.linalg.qr(matr.T)
            else:
                Rr = matr.T

            mat = Rl @ Rr.T

            [U, s, V] = svd(mat)
            s_eff = s[s > self.cutoff]
            if (len(s_eff) == 0):
                s_eff = s[:1]
            myd = min(len(s_eff), self.chi)
            if (myd == 0):
                print(
                    "Warning in compress_opt(), probably a zero matrix is encountered !!! myd=",
                    myd)
                sys.exit(-8)
            s_eff = s_eff[:myd]
            error = error + s[myd:].sum()
            U = U[:, :myd]
            V = V[:, :myd]
            s = np.diag(s_eff)
            U = U @ s
            #            if flag:
            #                U = Ql @ U
            #                V =  Qr @ V
            if flag_left:
                U = Ql @ U
            if flag_right:
                V = Qr @ V

            self.mps[i] = U.reshape(d0, d1, myd)
            self.mps[j] = V.T.reshape(myd, d2, d3)
#            print("correct after swap:, error=",self.check_mps())
        self.cano = 0
        return error