# Assumed module-level imports of the source file:
import time
from library import Parameters as pm


def tree_dmrg_ipeps_kagome_gs(para=None, A=None):
    from library.PEPSClass import TreePepsIdmrgKagome as Peps
    is_print = True
    t_start = time.time()
    info = dict()
    if is_print:
        print('Start tree-DMRG on iPEPS kagome (Husimi) calculation')
    if para is None:
        para = pm.generate_parameters_tree_ipeps_kagome()
    para = pm.make_para_consistent_tree_ipeps_kagome(para)
    if A is None:
        A = Peps(para['chi'], para['spin'])
    ob = dict()
    e1 = 0
    de = 1
    A.update_ort_tensor_kagome()
    A.update_bath_onsite_kagome(para['j1'], para['j2'], para['hx'], para['hz'])
    A.update_effective_ops_kagome()
    # sweep
    for t in range(0, para['sweep_time']):
        A.update_central_tensor_kagome(para['tau'], para['j1'], para['j2'],
                                       para['hx'], para['hz'])
        if t % para['dt_ob'] == 0:
            A.rho_from_central_tensor_kagome()
            ob['eb'], ob['mag'], ob['energy_site'], ob['ent'] = A.observation_kagome(
                para['j1'], para['j2'], para['hx'], para['hz'])
            if is_print:
                print('At the %g-th sweep: Eb = ' % t + str(e1))
            de = sum(abs(ob['eb'] - e1)) / len(ob['eb'])
            if de > para['break_tol']:
                e1 = ob['eb']
            else:
                if is_print:
                    print('Converged with de = %g' % de)
                break
    if t == para['sweep_time'] - 1 and de > para['break_tol']:
        print('Not sufficiently converged with de = %g' % de)
    A.update_ort_tensor_kagome()
    A.update_bath_onsite_kagome(para['j1'], para['j2'], para['hx'], para['hz'])
    A.update_effective_ops_kagome()
    info['t_cost'] = time.time() - t_start
    print('Energy per site = %g' % ob['energy_site'])
    print('x-magnetization = ' + str(ob['mag']['x']))
    print('z-magnetization = ' + str(ob['mag']['z']))
    print('Entanglement = ' + str(ob['ent']))
    print('Total time cost: %g' % info['t_cost'])
    return A, ob, info
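# A minimal usage sketch of the function above (hedged: the overridden values
# are illustrative; the keys and defaults come from
# pm.generate_parameters_tree_ipeps_kagome):
if __name__ == '__main__':
    para = pm.generate_parameters_tree_ipeps_kagome()
    para['chi'] = 16            # illustrative virtual bond dimension
    para['sweep_time'] = 200    # illustrative maximal number of sweeps
    A, ob, info = tree_dmrg_ipeps_kagome_gs(para)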
def __init__(self, para=Parameters.gtn(), debug_mode=False, device='cpu'):
    # Initialize Parameters (note: the default para is evaluated once, at import time)
    Programclass.Program.__init__(self, device=device, dtype=para['dtype'])
    MLclass.MachineLearning.__init__(self, para, debug_mode)
    MPSclass.MPS.__init__(self)
    self.initialize_parameters_gtn()
    self.name_md5_generate()
    self.debug_mode = debug_mode
    # Initialize MPS and update info
    if not debug_mode:
        self.load_gtn()
    # If no saved GTN (MPS) model exists, initialize it from scratch
    if len(self.tensor_data) == 0:
        self.initialize_dataset()
        # Initialize info
        self.generate_tensor_info()
        self.generate_update_info()
        self.initialize_mps_gtn()
    if self.tensor_data[0].device != self.device:
        for ii in range(len(self.tensor_data)):
            self.tensor_data[ii] = torch.tensor(self.tensor_data[ii], device=self.device)
        if device == 'cuda':
            torch.cuda.empty_cache()
    # Environment preparation
    self.tensor_input = tuple()
    self.environment_left = tuple()
    self.environment_right = tuple()
    self.environment_zoom = tuple()
# Assumed module-level imports (adjust paths to the repository layout):
from library import Parameters as pm
from library import BasicFunctions as bf
from library import TNmachineLearning


def decision_mps(para=None):
    if para is None:
        para = pm.parameters_decision_mps()
    a = TNmachineLearning.DecisionTensorNetwork(
        para['dataset'], 2, para['chi'], 'mps', para['classes'], para['numbers'],
        if_reducing_samples=para['if_reducing_samples'])
    a.images2vecs_test_samples(para['classes'])
    print(a.vLabel)
    for n in range(0, a.length):
        bf.print_sep()
        print('Calculating the %i-th tensor' % n)
        a.update_tensor_decision_mps_svd(n)
        # a.update_tensor_decision_mps_svd_threshold_algo(n)
        # a.update_tensor_decision_mps_gradient_algo(n)
        a.update_v_ctr_train(n)
        a.calculate_intermediate_accuracy_train(n)
        if len(a.remaining_samples_train) == 0:
            print('All samples are classified correctly. Training stopped.')
            break
        print('The current accuracy = %g' % a.intermediate_accuracy_train[n])
        print('Entanglement: ' + str(a.lm[n].reshape(1, -1)))
        if para['if_reducing_samples']:
            print('Number of remaining samples: ' + str(len(a.remaining_samples_train)))
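# A minimal usage sketch (hedged: the overrides are illustrative; the keys and
# defaults come from pm.parameters_decision_mps):
if __name__ == '__main__':
    para = pm.parameters_decision_mps()
    para['chi'] = 8           # illustrative bond dimension
    para['classes'] = [0, 1]  # illustrative pair of classes
    decision_mps(para)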
def __init__(self, para=Parameters.ml(), debug_mode=False):
    # initialize parameters
    self.para = copy.deepcopy(para)
    self.images_data = dict()
    self.index = dict()
    self.labels_data = dict()
    self.update_info = dict()
    self.data_info = dict()
    self.tmp = {}
    self.generative_model = ['GTN', 'GTN_Net']
    self.discriminative_model = ['DTNC']
# Assumed module-level imports (adjust paths to the repository layout);
# save_exp_gcmpm_one_class and ln_fidelity_per_site are defined elsewhere
# in the same module:
import os
import numpy as np
from library import Parameters as pm
from library import BasicFunctions as bf
from library import TNmachineLearning
from library.BasicFunctions import save_pr


def gcmpm_one_class(para=None):
    if para is None:
        para = pm.parameters_gcmpm_one_class()
    para['save_exp'] = save_exp_gcmpm_one_class(para)
    if para['parallel'] is True:
        par_pool = para['n_nodes']
    else:
        par_pool = None
    if para['if_load'] and os.path.isfile(para['save_exp']):
        a = bf.load_pr(os.path.join(para['data_path'], para['save_exp']), 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'], para['dataset'],
                                                 par_pool=par_pool)
        a.images2vecs([para['class']], [100])
        a.initialize_virtual_vecs_train()
        a.update_virtual_vecs_train('all', 'all', 'both')
        a.mps.correct_orthogonal_center(0, normalize=True)
    a.mps.mps[0] /= np.linalg.norm(a.mps.mps[0].reshape(-1, ))
    mps0 = a.mps.mps.copy()
    for t in range(0, para['sweep_time']):
        # from left to right
        if para['if_print_detail']:
            print('At the ' + str(t) + '-th sweep, from left to right')
        for nt in range(0, a.length):
            a.update_tensor_gradient(nt, para['step'])
            if nt != a.length - 1:
                a.update_virtual_vecs_train('all', nt, 'left')
        # from right to left
        print('At the ' + str(t) + '-th sweep, from right to left')
        for nt in range(a.length - 1, -1, -1):
            a.update_tensor_gradient(nt, para['step'])
            if nt != 0:
                a.update_virtual_vecs_train('all', nt, 'right')
        if t > para['check_time0'] and ((t + 1) % para['check_time'] == 0
                                        or t + 1 == para['sweep_time']):
            fid = ln_fidelity_per_site(mps0, a.mps.mps)
            if fid < (para['step'] * para['ratio_step_tol']):
                print('After ' + str(t + 1) + ' sweeps: fid = %g' % fid)
                para['step'] *= para['step_ratio']
            elif t + 1 == para['sweep_time']:
                print('After all ' + str(t + 1) + ' sweeps finished, fid = %g. '
                      'Consider increasing the sweep time.' % fid)
            else:
                print('After ' + str(t + 1) + ' sweeps, fid = %g.' % fid)
            mps0 = a.mps.mps.copy()
            if para['step'] < para['step_min']:
                print('Now step = ' + str(para['step']) +
                      ' is sufficiently small. Break the loop')
                break
            else:
                print('Now step = ' + str(para['step']))
    if para['if_save']:
        save_pr(para['data_path'], para['save_exp'], [a, para], ['a', 'para'])
    return a, para
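# A minimal usage sketch (hedged: the override is illustrative; the keys and
# defaults come from pm.parameters_gcmpm_one_class):
if __name__ == '__main__':
    para = pm.parameters_gcmpm_one_class()
    para['class'] = 3  # illustrative class: train the generative MPS of digit 3
    a, para = gcmpm_one_class(para)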
def __init__(self, para=Parameters.gtnc(), debug_mode=False, device='cpu'):
    # Initialize Parameters
    Programclass.Program.__init__(self, device=device, dtype=para['dtype'])
    MLclass.MachineLearning.__init__(self, para, debug_mode)
    self.debug_mode = debug_mode
    self.initialize_parameters_gtnc()
    self.name_md5_generate()
    self.inner_product = dict()
    self.data_mapped = dict()
    self.right_label = dict()
    self.accuracy = dict()
    self.test_info = dict()
    self.is_all_gtn_trained = False
    if not self.debug_mode:
        self.load_accuracy()
# Assumed module-level imports (adjust paths to the repository layout);
# save_exp_gtn_one_class and gtn_one_class are defined elsewhere in the
# same module:
import os
import copy
import numpy as np
from library import Parameters as pm
from library import BasicFunctions as bf
from library import TNmachineLearning
from library.BasicFunctions import load_pr, print_dict


def gtnc(para_tot=None):
    print('Preparing parameters')
    if para_tot is None:
        para_tot = pm.parameters_gcmpm()
    n_class = len(para_tot['classes'])
    paras = bf.empty_list(n_class)
    for n in range(0, n_class):
        paras[n] = copy.deepcopy(para_tot)
        paras[n]['class'] = int(para_tot['classes'][n])
        paras[n]['chi'] = para_tot['chi'][n]
        paras[n]['theta'] = para_tot['theta']
        paras[n]['save_exp'] = save_exp_gtn_one_class(paras[n])
    classifiers = bf.empty_list(n_class)
    for n in range(0, n_class):
        print_dict(paras[n])
        data = para_tot['data_path'] + paras[n]['save_exp']
        if para_tot['if_load'] and os.path.isfile(data):
            print('The classifier already exists. Load directly')
            classifiers[n] = load_pr(data, 'a')
        else:
            print('Training the MPS of ' + str(para_tot['classes'][n]))
            classifiers[n] = gtn_one_class(paras[n])[0]
        # if para_tot['if_save']:
        #     save_pr('../data_tnml/gcmpm/', paras[n]['save_exp'],
        #             [classifiers[n]], ['classifier'])
        # classifiers[n].mps.check_orthogonality_by_tensors(tol=1e-12)

    # ==================== Testing accuracy ====================
    print('Calculating the testing accuracy')
    b = TNmachineLearning.MachineLearningFeatureMap(para_tot['d'])
    b.load_data(data_path='..\\..\\..\\MNIST\\',
                file_sample='t10k-images.idx3-ubyte',
                file_label='t10k-labels.idx1-ubyte',
                is_normalize=True)
    b.select_samples(para_tot['classes'])
    if classifiers[0].is_dct:
        b.dct(shift=para_tot['shift'], factor=para_tot['factor'])
    b.images2vecs(para_tot['theta'] * np.pi / 2)
    fid = bf.empty_list(n_class)
    for n in range(0, n_class):
        fid[n] = b.compute_fidelities(classifiers[n].mps.mps)
    # note: argmin is used here, i.e. smaller values of compute_fidelities
    # are treated as closer matches
    max_fid = np.argmin(np.hstack(fid), axis=1)
    predict = np.zeros(max_fid.shape, dtype=int)
    for n in range(0, n_class):
        predict += (max_fid == n) * int(para_tot['classes'][n])
    # plot(predict)
    # plot(b.labels)
    accuracy = np.sum(predict == b.labels, dtype=float) / b.numVecSample
    print(accuracy)
def __init__(self, para=Parameters.gtn_net(), debug_mode=False, device='cuda'):
    # Initialize Parameters
    self.para = para
    Programclass.Program.__init__(self, device=device, dtype=para['dtype'],
                                  debug_mode=debug_mode)
    MPSclass.MPS.__init__(self)
    MLclass.MachineLearning.__init__(self, self.para, debug_mode=debug_mode)
    tc.nn.Module.__init__(self)
    self.initialize_parameters_gtn_net()
    self.name_md5_generate()
    self.debug_mode = debug_mode
    self.update_info = dict()
    self.data_info = dict()
    # Initialize MPS and update info
    self.weight = None
    self.tensor_shape = None
    if not debug_mode:
        self.load_weight()
    if self.weight is None:
        self.initialize_dataset()
        self.generate_tensor_info()
        self.generate_update_info()
        self.initialize_mps_gtn_net()
        self.initialize_tensor_shape()
        self.weight = tc.nn.Parameter(
            tc.empty((self.tensor_info['n_length'],
                      self.tensor_info['tensor_initialize_bond'],
                      self.tensor_info['physical_bond'],
                      self.tensor_info['tensor_initialize_bond']),
                     device=self.device, dtype=self.dtype))
        self.initialize_weight()
    self.tensor_input = None
    self.opt = None
    self.initialize_opt()
    self.fun = tc.nn.LogSoftmax(dim=0)
# Assumed module-level imports (adjust paths to the repository layout);
# save_exp_gcmpm_one_class and gcmpm_one_class are defined elsewhere in
# the same module:
import os
import numpy as np
from library import Parameters as pm
from library import BasicFunctions as bf
from library import TNmachineLearning
from library.BasicFunctions import load_pr, save_pr


def gcmpm(para_tot=None):
    print('Preparing parameters')
    if para_tot is None:
        para_tot = pm.parameters_gcmpm()
    n_class = len(para_tot['classes'])
    paras = bf.empty_list(n_class)
    for n in range(0, n_class):
        paras[n] = para_tot.copy()
        paras[n]['class'] = para_tot['classes'][n]
        paras[n]['chi'] = para_tot['chi'][n]
        paras[n]['save_exp'] = save_exp_gcmpm_one_class(paras[n])
    classifiers = bf.empty_list(n_class)
    for n in range(0, n_class):
        data = '../data_tnml/gcmpm/' + paras[n]['save_exp']
        if para_tot['if_load'] and os.path.isfile(data):
            print('The classifier already exists. Load directly')
            classifiers[n] = load_pr(data, 'classifier')
        else:
            print('Training the MPS of ' + str(para_tot['classes'][n]))
            classifiers[n] = gcmpm_one_class(paras[n])[0]
            if para_tot['if_save']:
                save_pr('../data_tnml/gcmpm/', paras[n]['save_exp'],
                        [classifiers[n]], ['classifier'])

    # Testing accuracy
    print('Calculating the testing accuracy')
    labels = para_tot['classes']
    b = TNmachineLearning.MachineLearningFeatureMap(
        'MNIST', para_tot['d'],
        file_sample='t10k-images.idx3-ubyte',
        file_label='t10k-labels.idx1-ubyte')
    b.images2vecs(para_tot['classes'], ['all', 'all'])
    fid = np.zeros((n_class, ))
    num_wrong = 0
    for ni in range(0, b.numVecSample):
        for n in range(0, n_class):
            fid[n] = b.fidelity_mps_image(classifiers[n].mps.mps, ni)
        n_max = int(np.argmax(fid))
        if labels[n_max] != b.LabelNow[ni]:
            num_wrong += 1
    # num_wrong / numVecSample is the error rate; the accuracy is its complement
    accuracy = 1 - num_wrong / b.numVecSample
    print(accuracy)
from library import MPSMLclass
from library import Parameters

para = Parameters.gtn()
GTN = MPSMLclass.GTN(para=para, device='cpu')  # change device='cuda' to use a GPU
GTN.start_learning()
# Assumed module-level imports of the source file; hamiltonian_heisenberg,
# hamiltonian2cell_tensor and dmrg_infinite_size are defined or imported
# elsewhere in the same module:
import time
import numpy as np
from library import Parameters as pm
is_debug = False  # assumed default of the module-level debug flag


def deep_dmrg_infinite_size(para=None):
    from library.MPSClass import MpsDeepInfinite as Minf
    is_print = True
    t_start = time.time()
    info = dict()
    if is_print:
        print('Start deep DMRG calculation')
    if para is None:
        para = pm.generate_parameters_deep_mps_infinite()
    hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'],
                                    para['jz'], -para['hx'] / 2, -para['hz'] / 2)
    tensor = hamiltonian2cell_tensor(hamilt, para['tau'])
    A = Minf(para['form'], para['d'], para['chi'], para['d'], para['chib0'],
             para['chib'], para['is_symme_env'], n_site=para['n_site'],
             is_debug=is_debug)
    # use the standard DMRG to obtain the ground-state MPS
    A, ob0, info0 = dmrg_infinite_size(para, A, hamilt)
    # obtain the unitary MPO (uMPO) from the MPS
    A.get_unitary_mpo_from_mps()
    if A.n_site == 1:
        e0 = 0
        e1 = 1
    else:
        e0 = np.zeros((1, 3))
        e1 = np.ones((1, 3))
    de = 1
    for t in range(0, para['sweep_time']):
        A.update_ort_tensor_dmps('left')
        A.update_left_env_dmps_simple(tensor)
        if not A.is_symme_env:
            A.update_ort_tensor_dmps('right')
            A.update_right_env_dmps_simple(tensor)
        A.update_central_tensor_dmps(tensor)
        if t % para['dt_ob'] == 0:
            A.rho_from_central_tensor_dmps()
            e1 = A.observe_energy(hamilt)
            if is_print:
                print('At the %g-th sweep: Eb = ' % t + str(e1))
            de = np.sum(abs(e0 - e1))
            if de > para['break_tol']:
                e0 = e1
            else:
                if is_print:
                    print('Converged with de = %g' % de)
                break
    if t == para['sweep_time'] - 1 and de > para['break_tol']:
        print('Not sufficiently converged with de = %g' % de)
    ob = {'eb': e1}
    info['t_cost'] = time.time() - t_start
    if is_print:
        print('Total time cost: %g' % info['t_cost'])
    return A, ob, info, ob0, info0
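# A minimal usage sketch (hedged: the override is illustrative; the keys and
# defaults come from pm.generate_parameters_deep_mps_infinite):
if __name__ == '__main__':
    para = pm.generate_parameters_deep_mps_infinite()
    para['chi'] = 24  # illustrative bond dimension
    A, ob, info, ob0, info0 = deep_dmrg_infinite_size(para)
    print(ob['eb'])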
from algorithms.DMRG_anyH import dmrg_finite_size
from library import Parameters as Pm

para = Pm.generate_parameters_dmrg('husimi')
para['spin'] = 'half'
para['depth'] = 2
# The interactions are assumed to be uniform; if not, use parameter_dmrg_arbitrary instead
para['jxy'] = 1
para['jz'] = 1
para['hx'] = 0
para['hz'] = 0
para['chi'] = 32  # virtual bond dimension cut-off
para['eigWay'] = 1
para = Pm.make_consistent_parameter_dmrg(para)
ob, A, info1, para1 = dmrg_finite_size(para)
print(ob['e_per_site'])
# -*- encoding: utf-8 -*-
from library import MPSMLclass
from library import Parameters
import matplotlib.pyplot as plt
import numpy as np
import json
import copy

para = Parameters.gtnc()
# obtain the relation between the cutting (virtual bond) dimension and the testing accuracy
batch_bond = []
batch_acc = []
saved_para = dict()
for i in range(1, 9):
    bond = 2 ** i  # the current cutting bond dimension
    para['virtual_bond_limitation'] = bond
    print('The current cutting bond is %d' % bond)
    A = MPSMLclass.GTNC(para=para, device='cpu')  # change device='cuda' to use a GPU
    A.training_gtn()  # train the GTNs if they have not been trained yet
    acc = A.calculate_accuracy('test')
    print('Bond {0} acc is {1}'.format(bond, acc))
    batch_bond.append(bond)
    batch_acc.append(acc)
    counterpart_para = copy.deepcopy(para)
    for item in counterpart_para.keys():
        counterpart_para[item] = str(counterpart_para[item])
    counterpart_para['Accuracy'] = acc
    saved_para["bond" + str(bond)] = counterpart_para
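# The script imports matplotlib.pyplot and json but the excerpt above stops
# before using them. A minimal sketch of how the collected results could be
# plotted and dumped (hedged: the output file names are illustrative):
plt.plot(batch_bond, batch_acc, 'o-')
plt.xscale('log', base=2)
plt.xlabel('virtual bond dimension')
plt.ylabel('testing accuracy')
plt.savefig('accuracy_vs_bond.png')
with open('saved_para.json', 'w') as f:
    json.dump(saved_para, f, indent=2)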
from library import TNMLclass
from library import Parameters as Pa

pa = Pa.gtn_net()
A = TNMLclass.GTN_Net(para=pa, device='cuda:0', debug_mode=False)
A.start_learning()
# Assumed module-level imports (adjust paths to the repository layout);
# save_exp_labeled_gtn is defined elsewhere in the same module:
import os
import numpy as np
from library import Parameters as pm
from library import BasicFunctions as bf
from library import TNmachineLearning
from library.BasicFunctions import save_pr


def labeled_gtn(para=None):
    if para is None:
        para = pm.parameters_labeled_gtn()
    para['save_exp'] = save_exp_labeled_gtn(para)
    if para['parallel'] is True:
        par_pool = para['n_nodes']
    else:
        par_pool = None
    # Prepare the testing dataset
    b = TNmachineLearning.MachineLearningFeatureMap(
        para['d'], file_sample='t10k-images.idx3-ubyte',
        file_label='t10k-labels.idx1-ubyte')
    b.load_data()
    b.select_samples(para['classes'])
    b.add_labels_to_images()
    b.images2vecs(para['theta'])
    data_file = os.path.join(para['data_path'], para['save_exp'])
    if para['if_load'] and os.path.isfile(data_file):
        print('Data exist. Load directly.')
        a = bf.load_pr(data_file, 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'], para['dataset'],
                                                 par_pool=par_pool)
        a.load_data()
        a.select_samples(para['classes'])
        a.add_labels_to_images()
        a.images2vecs(para['theta'] * np.pi / 2)
        a.initial_mps()
        a.mps.correct_orthogonal_center(0, normalize=True)
    a.initialize_virtual_vecs_train()
    a.update_virtual_vecs_train_all_tensors('both')
    accuracy0 = 0
    for t in range(0, para['sweep_time']):
        # from left to right
        for nt in range(0, a.length):
            a.update_tensor_gradient(nt, para['step'])
            if nt != a.length - 1:
                a.update_virtual_vecs_train(nt, 'left')
        # from right to left
        for nt in range(a.length - 1, -1, -1):
            a.update_tensor_gradient(nt, para['step'])
            if nt != 0:
                a.update_virtual_vecs_train(nt, 'right')
        if t > para['check_time0'] and ((t + 1) % para['check_time'] == 0
                                        or t + 1 == para['sweep_time']):
            b.input_mps(a.mps)
            accuracy = b.calculate_accuracy()
            print('After the ' + str(t) + '-th sweep, the testing accuracy = ' + str(accuracy))
            if abs(accuracy - accuracy0) < (para['step'] * para['ratio_step_tol']):
                para['step'] *= para['step_ratio']
                accuracy0 = accuracy
                print('Converged. Reduce the gradient step to ' + str(para['step']))
            elif t + 1 == para['sweep_time']:
                print('After all ' + str(t + 1) + ' sweeps finished, not converged. '
                      'Consider increasing the sweep time.')
            else:
                accuracy0 = accuracy
            if para['step'] < para['step_min']:
                print('Now step = ' + str(para['step']) +
                      ' is sufficiently small. Break the loop')
                break
            else:
                print('Now step = ' + str(para['step']))
    a.clear_before_save()
    if para['if_save']:
        save_pr(para['data_path'], para['save_exp'], [a, para], ['a', 'para'])
    accuracy = b.calculate_accuracy()
    print('The final testing accuracy = ' + str(accuracy))
    return a, para
from library import TNMLclass
from library import Parameters as Pa

pa = Pa.gtn()
A = TNMLclass.GTN(para=pa, device='cuda:0', debug_mode=False)
A.start_learning()
# Assumed module-level imports (adjust paths to the repository layout);
# save_exp_gtn_one_class is defined elsewhere in the same module:
import os
import copy
import time
import numpy as np
from library import Parameters as pm
from library import BasicFunctions as bf
from library import TNmachineLearning
from library.BasicFunctions import save_pr


def gtn_one_class(para=None, images=None, labels=None):
    if para is None:
        para = pm.parameters_gtn_one_class()
    if 'to_black_and_white' not in para:
        para['to_black_and_white'] = False
    para['save_exp'] = save_exp_gtn_one_class(para)
    if para['if_load'] and os.path.isfile(
            os.path.join(para['data_path'], para['save_exp'])):
        a = bf.load_pr(os.path.join(para['data_path'], para['save_exp']), 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'], para['dataset'])
        if para['dataset'] == 'custom':
            a.input_data(copy.deepcopy(images), copy.deepcopy(labels))
        else:
            a.load_data()
        if a.is_there_labels:
            a.select_samples([para['class']])
        if para['to_black_and_white']:
            a.to_black_and_white()
        if para['dct'] is True:
            a.dct(shift=para['shift'], factor=para['factor'])
        a.images2vecs(theta_max=para['theta'] * np.pi / 2)
        a.initial_mps(center=0, ini_way='1')
    a.initialize_virtual_vecs_train()
    a.mps.correct_orthogonal_center(0)
    a.update_tensor_gradient(0, para['step'])
    nll0 = a.compute_nll()
    step = copy.deepcopy(para['step'])
    print('Initially, NLL = ' + str(nll0))
    for t in range(0, para['sweep_time']):
        # from left to right
        if para['if_print_detail']:
            print('At the ' + str(t + 1) + '-th sweep, from left to right')
        t0 = time.time()
        tt0 = time.process_time()  # time.clock() was removed in Python 3.8
        for nt in range(0, a.length):
            a.mps.correct_orthogonal_center(nt)
            if nt != 0:
                a.update_virtual_vecs_train(nt - 1, 'left')
            a.update_tensor_gradient(nt, step)
        # from right to left
        if para['if_print_detail']:
            print('At the ' + str(t + 1) + '-th sweep, from right to left')
        for nt in range(a.length - 1, -1, -1):
            a.mps.correct_orthogonal_center(nt)
            if nt != a.length - 1:
                a.update_virtual_vecs_train(nt + 1, 'right')
            a.update_tensor_gradient(nt, step)
        if para['if_print_detail']:
            print('Wall time cost for one loop: %s' % (time.time() - t0))
            print('CPU time cost for one loop: %s' % (time.process_time() - tt0))
        if t > (para['check_time0'] - 2) and ((t + 1) % para['check_time'] == 0
                                              or t + 1 == para['sweep_time']):
            nll = a.compute_nll()
            print('NLL = ' + str(nll))
            # fid = fidelity_per_site(mps0, a.mps.mps)
            fid = abs(nll - nll0) / nll0
            if fid < (step * para['step_ratio']):
                print('After ' + str(t + 1) + ' sweeps: fid = %g' % fid)
                step *= para['step_ratio']
                # mps0 = copy.deepcopy(a.mps.mps)
                nll0 = nll
            elif t + 1 == para['sweep_time']:
                print('After all ' + str(t + 1) + ' sweeps finished, fid = %g. '
                      'Consider increasing the sweep time.' % fid)
            else:
                print('After ' + str(t + 1) + ' sweeps, fid = %g.' % fid)
                # mps0 = copy.deepcopy(a.mps.mps)
                nll0 = nll
            if step < para['step_min']:
                print('Now step = ' + str(step) +
                      ' is sufficiently small. Break the loop')
                break
            else:
                print('Now step = ' + str(step))
    a.clear_before_save()
    if para['if_save']:
        save_pr(para['data_path'], para['save_exp'], [a, para], ['a', 'para'])
    return a, para
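# A minimal usage sketch (hedged: the overrides are illustrative; the keys and
# defaults come from pm.parameters_gtn_one_class):
if __name__ == '__main__':
    para = pm.parameters_gtn_one_class()
    para['class'] = 0  # illustrative class whose distribution is learned
    para['chi'] = 16   # illustrative bond dimension
    a, para = gtn_one_class(para)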
# Assumed module-level imports (adjust paths to the repository layout):
import numpy as np
from library import Parameters as pm
from library import HamiltonianModule as hm
from library.PEPSClass import PepsInfinite
from library.BasicFunctions import save_pr as save


def super_orthogonalization_honeycomb(para=None):
    if para is None:
        para = pm.generate_parameters_so_honeycomb()
    peps = PepsInfinite(para['lattice'], para['chi'], para['state_type'],
                        ini_way=para['ini_way'], is_debug=para['is_debug'])
    h = hm.hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'],
                                  para['jz'], para['hx'], para['hz'])
    op = hm.spin_operators(para['spin'])
    ob = dict()
    if para['state_type'] == 'pure':
        ob['mx'] = np.zeros((len(para['tau']), peps.nTensor))
        ob['mz'] = np.zeros((len(para['tau']), peps.nTensor))
        ob['eb'] = np.zeros((len(para['tau']), peps.nLm))
    else:
        ob['mx'] = np.zeros((len(para['beta']), peps.nTensor))
        ob['mz'] = np.zeros((len(para['beta']), peps.nTensor))
        ob['eb'] = np.zeros((len(para['beta']), peps.nLm))
        ob['e_site'] = np.zeros((len(para['beta']), ))
    if para['state_type'] == 'pure':
        for n_tau in range(0, len(para['tau'])):
            gate_t = hm.hamiltonian2gate_tensors(h, para['tau'][n_tau], 'exp')
            eb0 = np.ones((peps.nLm, ))
            eb = np.zeros((peps.nLm, ))
            for t in range(1, round(para['beta'] / para['tau'][n_tau]) + 1):
                for n_lm in range(0, peps.nLm):
                    peps.evolve_once_tensor_and_lm(gate_t[0], gate_t[1], n_lm)
                    if para['so_time'] == 0:
                        peps.super_orthogonalization(n_lm)
                    else:
                        peps.super_orthogonalization('all', it_time=para['so_time'])
                # if is_debug:
                #     peps.check_super_orthogonality()
                if t % para['dt_ob'] == 0:
                    rho2 = peps.rho_two_body_simple('all')
                    for nr in range(0, peps.nLm):
                        eb[nr] = rho2[nr].reshape(1, -1).dot(h.reshape(-1, 1))
                    if para['if_print']:
                        print('For tau = %g, t = %g: ' % (para['tau'][n_tau], t)
                              + 'bond energy = ' + str(eb))
                    err = np.linalg.norm(eb0 - eb)
                    if err < para['tol']:
                        ob['eb'][n_tau, :] = eb.copy()
                        rho1 = peps.rho_one_body_simple('all')
                        for nr in range(0, peps.nTensor):
                            ob['mx'][n_tau, nr] = rho1[nr].reshape(1, -1).dot(op['sx'].reshape(-1, 1))
                            ob['mz'][n_tau, nr] = rho1[nr].reshape(1, -1).dot(op['sz'].reshape(-1, 1))
                        if para['if_print']:
                            print('Converged with error = %g' % err)
                        break
                    else:
                        eb0 = eb.copy()
    elif para['state_type'] == 'mixed':
        gate_t = hm.hamiltonian2gate_tensors(h, para['tau'], 'exp')
        beta_now = 0
        t_ob = 0
        for t in range(0, int(1e-6 + para['beta'][-1] / para['tau'])):
            for n_lm in range(0, peps.nLm):
                peps.evolve_once_tensor_and_lm(gate_t[0], gate_t[1], n_lm)
                if para['so_time'] == 0:
                    peps.super_orthogonalization(n_lm)
                else:
                    peps.super_orthogonalization('all', it_time=para['so_time'])
            # if is_debug:
            #     peps.check_super_orthogonality()
            beta_now += para['tau']
            if abs(para['beta'][t_ob] - beta_now) < 1e-8:
                rho2 = peps.rho_two_body_simple('all')
                for nr in range(0, peps.nLm):
                    ob['eb'][t_ob, nr] = rho2[nr].reshape(1, -1).dot(h.reshape(-1, 1))
                ob['e_site'][t_ob] = np.sum(ob['eb'][t_ob, :]) / 2
                if para['if_print']:
                    print('For beta = %g: ' % beta_now + 'energy per site = '
                          + str(ob['e_site'][t_ob]))
                rho1 = peps.rho_one_body_simple('all')
                for nr in range(0, peps.nTensor):
                    ob['mx'][t_ob, nr] = rho1[nr].reshape(1, -1).dot(op['sx'].reshape(-1, 1))
                    ob['mz'][t_ob, nr] = rho1[nr].reshape(1, -1).dot(op['sz'].reshape(-1, 1))
                t_ob += 1
    para['data_exp'] = 'HoneycombSO_' + para['state_type'] + '_j(%g,%g)_h(%g,%g)_chi%d' % \
        (para['jxy'], para['jz'], para['hx'], para['hz'], para['chi'])
    save(para['data_path'], para['data_exp'], [peps, para, ob], ['peps', 'para', 'ob'])
# Assumed module-level imports of the source file; hamiltonian_heisenberg and
# hamiltonian2cell_tensor are defined or imported elsewhere in the same module:
import time
import numpy as np
from library import Parameters as pm


def dmrg_infinite_size(para=None, A=None, hamilt=None):
    from library.MPSClass import MpsInfinite as Minf
    is_print = True
    t_start = time.time()
    info = dict()
    if para is None:
        para = pm.generate_parameters_infinite_dmrg_sawtooth()
    if is_print:
        print('Start ' + str(para['n_site']) + '-site iDMRG calculation')
    if hamilt is None:
        hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'],
                                        para['jz'], para['hx'] / 2, para['hz'] / 2)
    if A is None:
        if para['dmrg_type'] == 'mpo':
            d = para['d'] ** para['n_site']
        else:
            d = para['d']
        A = Minf(para['form'], d, para['chi'], para['d'] ** para['n_site'],
                 n_site=para['n_site'], is_symme_env=para['is_symme_env'],
                 dmrg_type=para['dmrg_type'], hamilt_index=para['hamilt_index'])
    if A.dmrg_type == 'mpo':
        tensor = hamiltonian2cell_tensor(hamilt, para['tau'])
    else:
        tensor = np.zeros(0)
    if A.n_site == 1:  # single-site iDMRG
        e0 = 0
        e1 = 1
    else:  # double-site iDMRG (including White's way)
        e0 = np.zeros((1, 3))
        e1 = np.ones((1, 3))
    de = 1
    if A.is_symme_env:
        A.update_ort_tensor_mps('left')
        if A.dmrg_type == 'white':
            A.update_bath_onsite()
            A.update_effective_ops()
        else:
            A.update_left_env(tensor)
    else:
        A.update_ort_tensor_mps('both')
        A.update_left_env(tensor)
        A.update_right_env(tensor)
    # iDMRG sweep
    for t in range(0, para['sweep_time']):
        if A.dmrg_type == 'mpo':
            A.update_central_tensor(tensor)
        else:
            A.update_central_tensor((para['tau'], 'full'))
        if t % para['dt_ob'] == 0:
            A.rho_from_central_tensor()
            e1 = A.observe_energy(hamilt)
            if is_print:
                print('At the %g-th sweep: Eb = ' % t + str(e1))
            de = np.sum(abs(e0 - e1)) / A.n_site
            if de > para['break_tol']:
                e0 = e1
            else:
                if is_print:
                    print('Converged with de = %g' % de)
                break
    if t == para['sweep_time'] - 1 and de > para['break_tol']:
        print('Not sufficiently converged with de = %g' % de)
    if A.is_symme_env:
        A.update_ort_tensor_mps('left')
        if A.dmrg_type == 'mpo':
            A.update_left_env(tensor)
        else:
            A.update_bath_onsite()
            A.update_effective_ops()
    else:
        A.update_ort_tensor_mps('both')
        A.update_left_env(tensor)
        A.update_right_env(tensor)
    ob = {'eb': e1}
    info['t_cost'] = time.time() - t_start
    if is_print:
        print('Total time cost: %g' % info['t_cost'])
    return A, ob, info
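# A minimal usage sketch (hedged: the override is illustrative; the defaults
# come from the parameter generator in library.Parameters, which here is the
# sawtooth generator used as this function's own fallback):
if __name__ == '__main__':
    para = pm.generate_parameters_infinite_dmrg_sawtooth()
    para['chi'] = 32  # illustrative bond dimension
    A, ob, info = dmrg_infinite_size(para)
    print(ob['eb'])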
# Assumed module-level imports (adjust paths to the repository layout):
import numpy as np
from library import Parameters as pm
from library.BasicFunctions import mkdir

# num_samples = 100
# h1 = np.random.rand(num_samples, 1) * delta
# h2 = np.random.rand(num_samples, 1) * delta + (0.82 - delta)
# h = np.vstack((h1, h2))
gap = [0.5]
for delta in gap:
    num_samples = 100
    h1 = np.random.rand(num_samples, 1) * delta
    h2 = np.random.rand(num_samples, 1) * delta + (1 - delta)
    h = np.vstack((h1, h2))
    j = [1]
    tol = 1e-4  # to judge if the state has two-fold degeneracy
    lattice = 'chain'
    para = pm.generate_parameters_dmrg(lattice)
    para['spin'] = 'one'
    para['bound_cond'] = 'periodic'
    para['chi'] = 128
    para['l'] = 12
    para['jxy'] = 1
    para['hx'] = 0
    model = 'Spin_' + para['spin'] + '_' + lattice
    # para['data_path'] = '..\\dataQubism\\states_' + model + '\\'
    para['data_path'] = 'E:\\tmpData\\states_' + model + '\\'
    para['image_path'] = '..\\dataQubism\\images_' + model + '\\train 0-' + str(delta)
    mkdir(para['data_path'])
    mkdir(para['image_path'])
# Assumed module-level flags and imports of the source file; get_bond_energies
# is defined elsewhere in the same module:
import time
from multiprocessing.pool import ThreadPool
from library import Parameters as pm
is_debug = False  # assumed default of the module-level debug flag
n_nodes = 4       # assumed default number of parallel nodes


def dmrg_finite_size(para=None):
    from library.MPSClass import MpsOpenBoundaryClass as Mob
    t_start = time.time()
    info = dict()
    print('Preparing the parameters and MPS')
    if para is None:
        para = pm.generate_parameters_dmrg()
    # Initialize the MPS
    is_parallel = para['isParallel']
    if is_parallel or para['isParallelEnvLMR']:
        par_pool = dict()
        par_pool['n'] = n_nodes
        par_pool['pool'] = ThreadPool(n_nodes)
    else:
        par_pool = None
    A = Mob(length=para['l'], d=para['d'], chi=para['chi'], way='qr', ini_way='r',
            operators=para['op'], debug=is_debug, is_parallel=para['isParallel'],
            par_pool=par_pool, is_save_op=para['is_save_op'], eig_way=para['eigWay'],
            is_env_parallel_lmr=para['isParallelEnvLMR'])
    A.correct_orthogonal_center(para['ob_position'])
    print('Starting to sweep ...')
    e0_per_site = 0
    info['convergence'] = 1
    ob = dict()
    for t in range(0, para['sweep_time']):
        if_ob = ((t + 1) % para['dt_ob'] == 0) or t == (para['sweep_time'] - 1)
        if if_ob:
            print('In the %d-th round of sweep ...' % (t + 1))
        for n in range(para['ob_position'] + 1, para['l']):
            if para['if_print_detail']:
                print('update the %d-th tensor from left to right...' % n)
            A.update_tensor_eigs(n, para['index1'], para['index2'], para['coeff1'],
                                 para['coeff2'], para['tau'], para['is_real'],
                                 tol=para['eigs_tol'])
        for n in range(para['l'] - 2, -1, -1):
            if para['if_print_detail']:
                print('update the %d-th tensor from right to left...' % n)
            A.update_tensor_eigs(n, para['index1'], para['index2'], para['coeff1'],
                                 para['coeff2'], para['tau'], para['is_real'],
                                 tol=para['eigs_tol'])
        for n in range(1, para['ob_position']):
            if para['if_print_detail']:
                print('update the %d-th tensor from left to right...' % n)
            A.update_tensor_eigs(n, para['index1'], para['index2'], para['coeff1'],
                                 para['coeff2'], para['tau'], para['is_real'],
                                 tol=para['eigs_tol'])
        if if_ob:
            ob['eb_full'] = A.observe_bond_energy(para['index2'], para['coeff2'])
            ob['mx'] = A.observe_magnetization(1)
            ob['mz'] = A.observe_magnetization(3)
            ob['e_per_site'] = (sum(ob['eb_full']) - para['hx'] * sum(ob['mx'])
                                - para['hz'] * sum(ob['mz'])) / A.length
            # if para['lattice'] in ('square', 'chain'):
            #     ob['e_per_site'] = (sum(ob['eb_full']) - para['hx']*sum(ob['mx']) -
            #                         para['hz']*sum(ob['mz']))/A.length
            # else:
            #     ob['e_per_site'] = sum(ob['eb_full'])
            #     for n in range(0, para['l']):
            #         ob['e_per_site'] += para['hx'][n] * ob['mx'][n]
            #         ob['e_per_site'] += para['hz'][n] * ob['mz'][n]
            #     ob['e_per_site'] /= A.length
            info['convergence'] = abs(ob['e_per_site'] - e0_per_site)
            if info['convergence'] < para['break_tol']:
                print('Converged at the %d-th sweep with error = %g of energy per site.'
                      % (t + 1, info['convergence']))
                break
            else:
                print('Convergence error of energy per site = %g' % info['convergence'])
                e0_per_site = ob['e_per_site']
        if t == para['sweep_time'] - 1 and info['convergence'] > para['break_tol']:
            print('Not converged with error = %g of eb per bond' % info['convergence'])
            print('Consider increasing para[\'sweep_time\']')
    ob['eb'] = get_bond_energies(ob['eb_full'], para['positions_h2'], para['index2'])
    A.calculate_entanglement_spectrum()
    A.calculate_entanglement_entropy()
    ob['corr_x'] = A.observe_correlators_from_middle(1, 1)
    ob['corr_z'] = A.observe_correlators_from_middle(3, 3)
    info['t_cost'] = time.time() - t_start
    print('Simulation finished in %g seconds' % info['t_cost'])
    A.clean_to_save()
    if A._is_parallel:
        par_pool['pool'].close()
    return ob, A, info, para
# Assumed module-level imports of the source file:
import time
from library import Parameters as pm


def dmrg_infinite_size_sawtooth(para=None, A=None):
    from library.MPSClass import MpsInfiniteSawtooth as Minf
    is_print = True
    t_start = time.time()
    info = dict()
    if para is None:
        para = pm.generate_parameters_infinite_dmrg_sawtooth()
    para = pm.make_para_consistent_idmrg_sawtooth(para)
    if is_print:
        print('Start ' + str(para['n_site']) + '-site iDMRG (sawtooth) calculation')
    if A is None:
        d = para['d']
        A = Minf(para['form'], d, para['chi'], para['d'] ** para['n_site'],
                 n_site=para['n_site'], is_symme_env=para['is_symme_env'],
                 dmrg_type=para['dmrg_type'], spin=para['spin'])
    ob = dict()
    e1 = 0
    de = 1
    A.update_ort_tensor_mps_sawtooth()
    A.update_bath_onsite_sawtooth(para['j1'], para['j2'], para['hx'], para['hz'])
    A.update_effective_ops_sawtooth()
    # iDMRG sweep
    for t in range(0, para['sweep_time']):
        A.update_central_tensor_sawtooth(para['tau'], para['j1'], para['j2'],
                                         para['hx'], para['hz'])
        if t % para['dt_ob'] == 0:
            A.rho_from_central_tensor_sawtooth()
            ob['eb'], ob['mag'], ob['energy_site'], ob['ent'] = A.observation_sawtooth(
                para['j1'], para['j2'], para['hx'], para['hz'])
            if is_print:
                print('At the %g-th sweep: Eb = ' % t + str(e1))
            de = sum(abs(ob['eb'] - e1)) / len(ob['eb'])
            if de > para['break_tol']:
                e1 = ob['eb']
            else:
                if is_print:
                    print('Converged with de = %g' % de)
                break
    if t == para['sweep_time'] - 1 and de > para['break_tol']:
        print('Not sufficiently converged with de = %g' % de)
    A.update_ort_tensor_mps_sawtooth()
    A.update_bath_onsite_sawtooth(para['j1'], para['j2'], para['hx'], para['hz'])
    A.update_effective_ops_sawtooth()
    info['t_cost'] = time.time() - t_start
    print('Energy per site = %g' % ob['energy_site'])
    print('x-magnetization = ' + str(ob['mag']['x']))
    print('z-magnetization = ' + str(ob['mag']['z']))
    print('Entanglement = ' + str(ob['ent']))
    print('Total time cost: %g' % info['t_cost'])
    return A, ob, info
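# A minimal usage sketch (hedged: the overrides are illustrative; the keys,
# defaults and consistency checks come from
# pm.generate_parameters_infinite_dmrg_sawtooth and
# pm.make_para_consistent_idmrg_sawtooth):
if __name__ == '__main__':
    para = pm.generate_parameters_infinite_dmrg_sawtooth()
    para['j1'], para['j2'] = 1, 1  # illustrative couplings of the sawtooth chain
    A, ob, info = dmrg_infinite_size_sawtooth(para)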
from algorithms.DMRG_anyH import dmrg_finite_size
from library import Parameters as Pm
import numpy as np
from os import path
from library.BasicFunctions import load_pr, save_pr, plot, output_txt, get_size
from algorithms.DeepMPSfinite import parameters_dmps, deep_mps_qubit, \
    fidelities_to_original_state, act_umpo_on_mps
from library.TensorBasicModule import open_mps_product_state_spin_up

"""
NOTE: for Linux, add the folder "T-Nalg" to your system path
"""

para_dmrg = Pm.generate_parameters_dmrg('chain')  # set default parameters of DMRG
para_dmrg['spin'] = 'half'        # spin-half model
para_dmrg['bound_cond'] = 'open'  # open boundary condition
para_dmrg['chi'] = 48             # dimension cut-off of DMRG
para_dmrg['l'] = 48               # system size
para_dmrg['jxy'] = 0              # coupling constants jx and jy in the Heisenberg model
para_dmrg['jz'] = 1               # coupling constant jz in the Heisenberg model
para_dmrg['hx'] = 0.3             # magnetic field in the x direction
para_dmrg['hz'] = 0               # magnetic field in the z direction
para_dmrg = Pm.make_consistent_parameter_dmrg(para_dmrg)  # check the consistency of the parameters

para_dmps = parameters_dmps()    # set default parameters of the MPS encoding
para_dmps['num_layers'] = 9      # number of the MPU layers in the circuit
para_dmps['chi_overlap'] = 256   # dimension cut-off of the disentangled MPS

# calculate the ground state by DMRG
pre_fix = path.basename(__file__)[:-3] + '_'
if path.isfile(path.join(para_dmrg['data_path'],