def initial_ipeps(self):
    dim = (self.d, ) + (self.chi, ) * self.nVirtual
    if self.iniWay == 'random':
        tensor = tm.symmetrical_rand_peps_tensor(self.d, self.chi, self.nVirtual)
        if self.stateType == 'mixed':
            # symmetrize the two physical legs of the purification tensor
            bond = (self.d0, self.d0) + (self.chi, ) * self.nVirtual
            tensor = tensor.reshape(bond)
            ind = (1, 0) + tuple(range(2, self.nVirtual + 2))
            tensor = (tensor + tensor.transpose(ind)) / 2
            bond = (self.d, ) + (self.chi, ) * self.nVirtual
            tensor = tensor.reshape(bond)
        for n in range(0, self.nTensor):
            self.tensors[n] = tensor.copy()
    elif self.iniWay == 'ones':
        for n in range(0, self.nTensor):
            self.tensors[n] = np.ones(dim)
    elif self.iniWay == 'id':
        if self.stateType == 'mixed':
            if self._is_debug:
                if abs(self.d0 ** 2 - self.d) > 1e-10:
                    bf.print_error('For mixed state, d should be d0^2. '
                                   'Check self.d or self.stateType')
            for n in range(0, self.nTensor):
                self.tensors[n] = np.eye(self.d0).reshape(
                    (self.d, ) + (1, ) * self.nVirtual)
        else:
            bf.print_error('Initial way "id" is only for thermal states')
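# A minimal standalone check (illustrative names, not part of the class) of the
# mixed-state symmetrization above: reshaping the physical bond into (d0, d0)
# and averaging with the physical-leg transpose makes the tensor symmetric in
# its two physical indices, as a density-operator ansatz should be.
import numpy as np

d0, chi, n_virtual = 2, 3, 3
t = np.random.rand(d0, d0, *([chi] * n_virtual))
ind = (1, 0) + tuple(range(2, n_virtual + 2))
t_sym = (t + t.transpose(ind)) / 2
assert np.allclose(t_sym, t_sym.transpose(ind))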
def check_consistency(self):
    if self.dataInfo['NumTotalTrain'] != sum(self.dataInfo['nClassNum']):
        bf.print_error('The total number in the dataset is NOT consistent '
                       'with the sum of the samples of all classes')
    for n in range(0, self.dataInfo['NumClass']):
        start = self.dataInfo['nStart'][n]
        end = start + self.dataInfo['nClassNum'][n]
        tmp = self.labels[start:end]
        if not np.all(tmp == tmp[0]):
            bf.print_error('In the ' + str(tmp[0]) + '-th labels, not all labels are '
                           + str(tmp[0]))
            print(bf.arg_find_array(tmp != tmp[0]))
def initial_lm(self):
    lm = np.random.rand(self.chi, )
    lm /= np.linalg.norm(lm)
    if self.iniWay == 'random':
        for n in range(0, self.nLm):
            self.lm[n] = lm.copy()
    elif self.iniWay == 'ones':
        for n in range(0, self.nLm):
            # NOTE: the original assigned to self.tensors[n] here, which is
            # almost certainly a bug; the 'ones' branch should fill self.lm
            self.lm[n] = np.ones((self.chi, )) / (self.chi ** 0.5)
    elif self.iniWay == 'id':
        if self.stateType == 'mixed':
            for n in range(0, self.nLm):
                self.lm[n] = np.ones(1, )
        else:
            bf.print_error('Initial way "id" is only for thermal states')
def find_pos_of_lm(self):
    # lm_ten_bond[n_lm, i] = (tensor index, virtual-bond index) of the i-th
    # (i = 0, 1) tensor that the n_lm-th lambda connects
    self.lm_ten_bond = np.zeros((self.nLm, 2, 2), dtype=int)
    for n_lm in range(0, self.nLm):
        n_found = 0
        for n in range(0, self.nTensor):
            if n_lm in self.pos_lm[n]:
                self.lm_ten_bond[n_lm, n_found, 0] = n
                self.lm_ten_bond[n_lm, n_found, 1] = self.pos_lm[n].index(n_lm)
                n_found += 1
                if n_found == 2:
                    break
        if self._is_debug and n_found < 2:
            bf.print_error('In "find_pos_of_lm", n_found is only %g. '
                           'It should be 2' % n_found)
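# For intuition (standalone sketch, not part of the class): on the 'honeycomb0'
# lattice, pos_lm = [[0, 1, 2], [0, 1, 2]], so lambda k connects bond k of
# tensor 0 with bond k of tensor 1, and the search above yields
# lm_ten_bond[k] == [[0, k], [1, k]] for k = 0, 1, 2.
import numpy as np

pos_lm = [[0, 1, 2], [0, 1, 2]]
lm_ten_bond = np.zeros((3, 2, 2), dtype=int)
for n_lm in range(3):
    n_found = 0
    for nt in range(2):
        if n_lm in pos_lm[nt]:
            lm_ten_bond[n_lm, n_found] = [nt, pos_lm[nt].index(n_lm)]
            n_found += 1
print(lm_ten_bond[1])  # [[0 1] [1 1]]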
def __init__(self, data, parity=0):
    self.parity = parity
    if type(data) is dict:
        # data is already given as Z2 blocks: data['00'], data['01'], ...
        self.data = data
        one_data = data['00']
        self.ndim = one_data.ndim
        self.shape = [one_data.shape[n] * 2 for n in range(0, self.ndim)]
    else:
        # data is a dense array; decompose it into Z2 blocks
        self.ndim = data.ndim
        self.shape = list(data.shape)
        self.data = dict()
        self.normal_to_z2(data)
    for n in range(0, self.ndim):
        if self.shape[n] % 2 == 1:
            bf.print_error('DimError: for a Z2 tensor, all bond dimensions '
                           'should be even')
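# A minimal sketch of what normal_to_z2 plausibly does (an assumption inferred
# from the dict branch above, where block '00' has half the full bond
# dimensions): split every even-dimensional bond into two parity sectors and
# store the 2**ndim sub-blocks keyed by parity strings such as '01'. A real Z2
# tensor would keep only the blocks whose total parity matches self.parity.
import itertools
import numpy as np

def normal_to_z2_sketch(data):
    halves = [s // 2 for s in data.shape]
    blocks = dict()
    for par in itertools.product((0, 1), repeat=data.ndim):
        key = ''.join(str(p) for p in par)
        slices = tuple(slice(p * h, (p + 1) * h) for p, h in zip(par, halves))
        blocks[key] = data[slices].copy()
    return blocks

blocks = normal_to_z2_sketch(np.random.rand(4, 6))
print(blocks['01'].shape)  # (2, 3)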
def images2vecs(self, classes, numbers, how='random'):
    self.vec_classes = classes
    num_class = len(classes)
    ntot = 0
    if numbers is None:
        numbers = ['all'] * num_class
    for n in range(0, num_class):
        if numbers[n] == 'all':
            ntot += self.dataInfo['nClassNum'][n]
        else:
            ntot += min(numbers[n], self.dataInfo['nClassNum'][n])
    self.numVecSample = ntot
    self.LabelNow = np.zeros((ntot, ), dtype=int)
    self.tmp = np.zeros((self.length, ntot))
    n_now = 0
    for n in range(0, num_class):
        if numbers[n] == 'all':
            start = self.dataInfo['nStart'][n]
            end = start + self.dataInfo['nClassNum'][n]
            # NOTE: the original sliced [n_now:nClassNum[n]], which is only
            # correct for the first class; the n_now offset is added here
            self.tmp[:, n_now:n_now + self.dataInfo['nClassNum'][n]] = \
                self.images[:, start:end]
            self.LabelNow[n_now:n_now + self.dataInfo['nClassNum'][n]] = \
                self.labels[start:end]
            n_now += self.dataInfo['nClassNum'][n]
        else:
            n_sample = numbers[n]
            start = self.dataInfo['nStart'][classes[n]]
            if n_sample >= self.dataInfo['nClassNum'][classes[n]]:
                rand_p = range(start, self.dataInfo['nClassNum'][classes[n]] + start)
            elif how == 'random':
                rand_p = np.random.permutation(
                    self.dataInfo['nClassNum'][classes[n]])[:n_sample] + start
            elif how == 'first':
                rand_p = range(start, n_sample + start)
            else:  # take the last n_sample samples of the class
                rand_p = range(self.dataInfo['nClassNum'][classes[n]] - n_sample + start,
                               self.dataInfo['nClassNum'][classes[n]] + start)
            for ns in rand_p:
                self.tmp[:, n_now] = self.images[:, ns]
                self.LabelNow[n_now] = self.labels[ns]
                n_now += 1
    self.multiple_images2vecs()
    self.clear_tmp_data()
    # NOTE: the original tested a bare `is_debug`; assumed to be the class flag
    if self._is_debug and n_now != ntot:
        bf.print_error('In images2vecs: total number of vectorized '
                       'images NOT consistent')
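# Hypothetical usage (dataset object name assumed): vectorize all samples of
# class 0 and a random subset of 500 samples of class 1,
#
#     dataset.images2vecs(classes=[0, 1], numbers=['all', 500], how='random')
#
# after which multiple_images2vecs() has mapped the selected images to feature
# vectors and the raw sample buffer has been cleared.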
def one_bond_so_transformation(self, nt1, vb1, nt2, vb2):
    # Super-orthogonal transformation on one virtual bond
    # vb does NOT count the physical bond
    if self._is_debug:
        if self.pos_lm[nt1][vb1] != self.pos_lm[nt2][vb2]:
            bf.print_error('In one_bond_so_transformation, the two virtual bonds '
                           'must correspond to the same lambda')
    m1 = self.bond_env_matrix_simple(nt1, vb1)
    m2 = self.bond_env_matrix_simple(nt2, vb2)
    flag = False
    if self._is_debug:
        _lm = self.lm[self.pos_lm[nt1][vb1]].copy()
        flag = (self.chi == self.tensors[nt1].shape[vb1 + 1])
    u1, u2, self.lm[self.pos_lm[nt1][vb1]] = tm.transformation_from_env_mats(
        m1, m2, self.lm[self.pos_lm[nt1][vb1]], self.chi, norm_way=1)[:3]
    if self._is_debug and flag:
        _tmp = u1.dot(np.diag(self.lm[self.pos_lm[nt1][vb1]])).dot(u2.T)
        err = np.linalg.norm(tm.off_diagonal_mat(_tmp).reshape(-1, ))
        if err > 1e-10:
            print('Warning of the transformations from environment: '
                  'not diagonal (%g)' % err)
        _tmp = np.diag(_tmp)
        _tmp = _tmp / np.linalg.norm(_tmp)
        err = np.linalg.norm(_tmp - self.lm[self.pos_lm[nt1][vb1]])
        if err > 1e-10:
            print('Warning of the transformations from environment: '
                  'not recover lm (%g)' % err)
            print(self.lm[self.pos_lm[nt1][vb1]])
    self.tensors[nt1] = tm.absorb_matrix2tensor(self.tensors[nt1], u1, vb1 + 1)
    self.tensors[nt2] = tm.absorb_matrix2tensor(self.tensors[nt2], u2, vb2 + 1)
    # normalize by the largest element to avoid numerical overflow
    self.tensors[nt1] /= np.max(np.abs(self.tensors[nt1]))
    self.tensors[nt2] /= np.max(np.abs(self.tensors[nt2]))
    # self.lm[self.pos_lm[nt1][vb1]] = tm.normalize_tensor(self.lm[self.pos_lm[nt1][vb1]])[0]
    return m1, m2
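# A loudly-hedged sketch (an assumption, not the actual tm code) of what
# tm.transformation_from_env_mats plausibly computes: factor the two Hermitian
# environment matrices, SVD the weighted bond matrix, and return the gauge
# transformations plus the updated, normalized lambda. This is the standard
# recipe consistent with the debug check above, which verifies that
# u1 @ diag(lm) @ u2.T comes out diagonal.
import numpy as np

def transformation_from_env_mats_sketch(m1, m2, lm):
    def sqrt_factor(m):
        # y such that m ≈ y.conj().T @ y (eigenvalues clipped for stability)
        w, v = np.linalg.eigh((m + m.conj().T) / 2)
        return (v * np.sqrt(np.clip(w, 0, None))).conj().T
    y1, y2 = sqrt_factor(m1), sqrt_factor(m2)
    u, s, vh = np.linalg.svd(y1.dot(np.diag(lm)).dot(y2.T))
    u1 = np.linalg.pinv(y1).dot(u)            # to absorb into tensor nt1
    u2 = np.linalg.pinv(y2).dot(vh.conj().T)  # to absorb into tensor nt2
    return u1, u2, s / np.linalg.norm(s)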
def initial_lattice(self):
    # pos_lm[nt][nb] = x denotes that the x-th lm locates at the nb-th bond
    # of the nt-th tensor
    if self.lattice == 'honeycomb0':
        self.nTensor = 2
        self.nLm = 3
        self.nVirtual = 3
        # pos_lm[x] means: for the x-th tensor, the 0th, 1st, and 2nd lm sit
        # on its three virtual bonds
        self.pos_lm = [[], []]
        self.pos_lm[0] = [0, 1, 2]
        self.pos_lm[1] = [0, 1, 2]
    elif self.lattice == 'honeycombTreeDMRG':
        # for tree DMRG of the honeycomb lattice; the TN is square
        # (one tensor with two spins)
        self.nTensor = 5
        self.nVirtual = 4
        self.d = self.d0 ** 2
    elif self.lattice in ['kagome', 'husimi']:
        self.nTensor = 2  # use a symmetrical environment: only one orthogonal tensor
        self.nVirtual = 3
        self.d = self.d0
        self.stateType = 'pure'
    else:
        bf.print_error('Incorrect input of the lattice')
    self.find_pos_of_lm()
def rho_two_body_nlm_simple(self, n_lm):
    nt1 = self.lm_ten_bond[n_lm, 0, 0]
    vb1 = self.lm_ten_bond[n_lm, 0, 1]
    nt2 = self.lm_ten_bond[n_lm, 1, 0]
    vb2 = self.lm_ten_bond[n_lm, 1, 1]
    if self._is_debug:
        if n_lm != self.pos_lm[nt2][vb2]:
            bf.print_error('In rho_two_body_nlm_simple, the two virtual bonds '
                           'must correspond to the same lambda')
    # absorb all lambdas except the shared one into the first tensor,
    # and all lambdas into the second
    bonds = list(range(0, self.nVirtual))
    bonds.remove(vb1)
    tmp1 = self.absorb_lm(nt1, False, bonds)
    tmp2 = self.absorb_lm(nt2, False, 'all')
    if self.stateType == 'pure':
        bonds = list(range(1, self.nVirtual + 1))
        bonds.remove(vb1 + 1)
        tmp1 = np.tensordot(tmp1.conj(), tmp1, (bonds, bonds))
        bonds = list(range(1, self.nVirtual + 1))
        bonds.remove(vb2 + 1)
        tmp2 = np.tensordot(tmp2.conj(), tmp2, (bonds, bonds))
    elif self.stateType == 'mixed':
        # split the physical bond into (d0, d0) before tracing the ancilla
        s = tmp1.shape
        bonds = list(range(1, self.nVirtual + 2))
        bonds.remove(vb1 + 2)
        tmp1 = tmp1.reshape((self.d0, self.d0) + s[1:])
        tmp1 = np.tensordot(tmp1.conj(), tmp1, (bonds, bonds))
        s = tmp2.shape
        bonds = list(range(1, self.nVirtual + 2))
        bonds.remove(vb2 + 2)
        tmp2 = tmp2.reshape((self.d0, self.d0) + s[1:])
        tmp2 = np.tensordot(tmp2.conj(), tmp2, (bonds, bonds))
    rho = tm.cont([tmp1, tmp2], [[-1, 1, -3, 2], [-2, 1, -4, 2]])
    rho = rho.reshape(self.d0 * self.d0, self.d0 * self.d0)
    rho = (rho + rho.conj().T) / 2  # enforce Hermiticity
    rho /= np.trace(rho)
    return rho
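# Typical downstream use: the RDM returned above is Hermitian with unit trace,
# so any two-site expectation value is a plain trace. Standalone stand-ins are
# used here instead of the class (h2 is a hypothetical two-site Hamiltonian):
import numpy as np

d0 = 2
rho = np.eye(d0 * d0) / (d0 * d0)   # stand-in for rho_two_body_nlm_simple(n_lm)
sz = np.diag([0.5, -0.5])
h2 = np.kron(sz, sz)                # hypothetical ZZ interaction term
energy = np.trace(rho.dot(h2)).real
print(energy)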
def observe_by_features(self, features, pos):
    if len(pos) == self.length:
        bf.print_error('Input features cannot be as many as the total features')
    features = np.array(features).reshape(-1, 1)
    features = self.map_to_vectors(features).squeeze()
    data_mps = self.mps.wrap_data()
    mps = MPS(self.length, self.d, self.chi)
    mps.refresh_mps_properties(data_mps)
    # project the physical bonds at the given positions onto the feature vectors
    for n in range(np.array(pos).size):
        mps.mps[pos[n]] = np.tensordot(mps.mps[pos[n]], features[:, n], [[1], [0]])
    # contract each projected (now matrix-like) tensor into its neighbor
    pos = np.sort(pos)
    for n in pos[::-1]:
        if n > 0:
            mps.mps[n - 1] = np.tensordot(
                mps.mps[n - 1], mps.mps[n], [[mps.mps[n - 1].ndim - 1], [0]])
        else:
            mps.mps[n + 1] = np.tensordot(mps.mps[n], mps.mps[n + 1], [[1], [0]])
        del mps.mps[n]
    mps.refresh_mps_properties()
    mps.correct_orthogonal_center(0, normalize=True)
    return mps
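# Hypothetical usage (object and variable names assumed): conditioning the
# learned MPS on known features at sites 3 and 7,
#
#     mps_cond = model.observe_by_features([x3, x7], pos=[3, 7])
#
# returns a shorter MPS over the remaining sites, normalized and with the
# orthogonality center moved to site 0, ready for further measurements.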