# Standard-library and numpy imports inferred from the code below; the project-level
# modules (bf, pm, TNmachineLearning, MPSClass) and helpers (save_pr, ln_fidelity_per_site,
# save_exp_*) are assumed to be imported elsewhere in the repository.
import copy
import os
import time
import numpy as np


def read_mps(load):
    load_path = load['path']
    load_exp = load['exp']
    if load['Is_continue']:
        data = bf.load_pr(load_path + load_exp + '_.pr')
    else:
        data = bf.load_pr(load_path + load_exp + '.pr')
        print(load_path + load_exp + '.pr')
    A = data['A']
    n = len(A.mps)
    dims = [1]
    for i in range(n - 1):
        dims.append(A.mps[i].shape[2])
    dims.append(1)
    return A.mps, dims
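# Usage sketch (assumption, not part of the original project): read_mps expects a dict
# with the 'path', 'exp' and 'Is_continue' keys accessed above; the values below are
# placeholders for illustration only.
#   load = {'path': './data_pr/', 'exp': 'chainN16_chi32', 'Is_continue': False}
#   mps_tensors, dims = read_mps(load)
#   print('virtual bond dimensions:', dims)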
def gcmpm_one_class(para=None):
    if para is None:
        para = pm.parameters_gcmpm_one_class()
    para['save_exp'] = save_exp_gcmpm_one_class(para)
    if para['parallel'] is True:
        par_pool = para['n_nodes']
    else:
        par_pool = None
    if para['if_load'] and os.path.isfile(para['save_exp']):
        a = bf.load_pr(os.path.join(para['data_path'], para['save_exp']), 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'], para['dataset'],
                                                 par_pool=par_pool)
        a.images2vecs([para['class']], [100])
        a.initialize_virtual_vecs_train()
        a.update_virtual_vecs_train('all', 'all', 'both')
        a.mps.correct_orthogonal_center(0, normalize=True)
        a.mps.mps[0] /= np.linalg.norm(a.mps.mps[0].reshape(-1, ))
        mps0 = a.mps.mps.copy()
        for t in range(0, para['sweep_time']):
            # from left to right
            if para['if_print_detail']:
                print('At the ' + str(t) + '-th sweep, from left to right')
            for nt in range(0, a.length):
                a.update_tensor_gradient(nt, para['step'])
                if nt != a.length - 1:
                    a.update_virtual_vecs_train('all', nt, 'left')
            # from right to left
            print('At the ' + str(t) + '-th sweep, from right to left')
            for nt in range(a.length - 1, -1, -1):
                a.update_tensor_gradient(nt, para['step'])
                if nt != 0:
                    a.update_virtual_vecs_train('all', nt, 'right')
            if t > para['check_time0'] and ((t + 1) % para['check_time'] == 0
                                            or t + 1 == para['sweep_time']):
                fid = ln_fidelity_per_site(mps0, a.mps.mps)
                if fid < (para['step'] * para['ratio_step_tol']):
                    print('After ' + str(t + 1) + ' sweeps: fid = %g' % fid)
                    para['step'] *= para['step_ratio']
                elif t + 1 == para['sweep_time']:
                    print('After all ' + str(t + 1) + ' sweeps finished, fid = %g. '
                          'Consider increasing the sweep times.' % fid)
                else:
                    print('After ' + str(t + 1) + ' sweeps, fid = %g.' % fid)
                mps0 = a.mps.mps.copy()
                if para['step'] < para['step_min']:
                    print('Now step = ' + str(para['step'])
                          + ' is sufficiently small. Break the loop')
                    break
                else:
                    print('Now step = ' + str(para['step']))
        if para['if_save']:
            save_pr(para['data_path'], para['save_exp'], [a, para], ['a', 'para'])
    return a, para
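# Usage sketch (assumption): calling gcmpm_one_class() with para=None falls back to the
# defaults from pm.parameters_gcmpm_one_class(); overriding a key such as 'class' before
# the call is illustrative only.
#   para = pm.parameters_gcmpm_one_class()
#   para['class'] = 3
#   a, para = gcmpm_one_class(para)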
def labeled_gtn(para):
    if para is None:
        para = pm.parameters_labeled_gtn()
    para['save_exp'] = save_exp_labeled_gtn(para)
    if para['parallel'] is True:
        par_pool = para['n_nodes']
    else:
        par_pool = None
    # Prepare the testing dataset
    b = TNmachineLearning.MachineLearningFeatureMap(
        para['d'], file_sample='t10k-images.idx3-ubyte',
        file_label='t10k-labels.idx1-ubyte')
    b.load_data()
    b.select_samples(para['classes'])
    b.add_labels_to_images()
    b.images2vecs(para['theta'])
    data_file = os.path.join(para['data_path'], para['save_exp'])
    if para['if_load'] and os.path.isfile(data_file):
        print('Data exist. Load directly.')
        a = bf.load_pr(data_file, 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'], para['dataset'],
                                                 par_pool=par_pool)
        a.load_data()
        a.select_samples(para['classes'])
        a.add_labels_to_images()
        a.images2vecs(para['theta'] * np.pi / 2)
        a.initial_mps()
        a.mps.correct_orthogonal_center(0, normalize=True)
        a.initialize_virtual_vecs_train()
        a.update_virtual_vecs_train_all_tensors('both')
        accuracy0 = 0
        for t in range(0, para['sweep_time']):
            # from left to right
            for nt in range(0, a.length):
                a.update_tensor_gradient(nt, para['step'])
                if nt != a.length - 1:
                    a.update_virtual_vecs_train(nt, 'left')
            # from right to left
            for nt in range(a.length - 1, -1, -1):
                a.update_tensor_gradient(nt, para['step'])
                if nt != 0:
                    a.update_virtual_vecs_train(nt, 'right')
            if t > para['check_time0'] and ((t + 1) % para['check_time'] == 0
                                            or t + 1 == para['sweep_time']):
                b.input_mps(a.mps)
                accuracy = b.calculate_accuracy()
                print('After the ' + str(t) + '-th sweep, the testing accuracy = '
                      + str(accuracy))
                if abs(accuracy - accuracy0) < (para['step'] * para['ratio_step_tol']):
                    para['step'] *= para['step_ratio']
                    accuracy0 = accuracy
                    print('Converged. Reduce the gradient step to ' + str(para['step']))
                elif t + 1 == para['sweep_time']:
                    print('After all ' + str(t + 1) + ' sweeps finished, not converged. '
                          'Consider increasing the sweep times.')
                else:
                    accuracy0 = accuracy
                if para['step'] < para['step_min']:
                    print('Now step = ' + str(para['step'])
                          + ' is sufficiently small. Break the loop')
                    break
                else:
                    print('Now step = ' + str(para['step']))
        a.clear_before_save()
        if para['if_save']:
            save_pr(para['data_path'], para['save_exp'], [a, para], ['a', 'para'])
    accuracy = b.calculate_accuracy()
    print('The final testing accuracy = ' + str(accuracy))
    return a, para
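# Usage sketch (assumption): labeled_gtn trains on the classes listed in para['classes']
# and evaluates on the t10k MNIST test files loaded above; the two-class choice below is
# a placeholder.
#   para = pm.parameters_labeled_gtn()
#   para['classes'] = [0, 1]
#   a, para = labeled_gtn(para)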
def gtn_one_class(para=None, images=None, labels=None):
    if para is None:
        para = pm.parameters_gtn_one_class()
    if 'to_black_and_white' not in para:
        para['to_black_and_white'] = False
    para['save_exp'] = save_exp_gtn_one_class(para)
    if para['if_load'] and os.path.isfile(
            os.path.join(para['data_path'], para['save_exp'])):
        a = bf.load_pr(os.path.join(para['data_path'], para['save_exp']), 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'], para['dataset'])
        if para['dataset'] == 'custom':
            a.input_data(copy.deepcopy(images), copy.deepcopy(labels))
        else:
            a.load_data()
        if a.is_there_labels:
            a.select_samples([para['class']])
        if para['to_black_and_white']:
            a.to_black_and_white()
        if para['dct'] is True:
            a.dct(shift=para['shift'], factor=para['factor'])
        a.images2vecs(theta_max=para['theta'] * np.pi / 2)
        a.initial_mps(center=0, ini_way='1')
        a.initialize_virtual_vecs_train()
        a.mps.correct_orthogonal_center(0)
        a.update_tensor_gradient(0, para['step'])
        nll0 = a.compute_nll()
        step = copy.deepcopy(para['step'])
        print('Initially, NLL = ' + str(nll0))
        for t in range(0, para['sweep_time']):
            # from left to right
            if para['if_print_detail']:
                print('At the ' + str(t + 1) + '-th sweep, from left to right')
            t0 = time.time()
            tt0 = time.process_time()  # time.clock() was removed in Python 3.8
            for nt in range(0, a.length):
                a.mps.correct_orthogonal_center(nt)
                if nt != 0:
                    a.update_virtual_vecs_train(nt - 1, 'left')
                a.update_tensor_gradient(nt, step)
            # from right to left
            if para['if_print_detail']:
                print('At the ' + str(t + 1) + '-th sweep, from right to left')
            for nt in range(a.length - 1, -1, -1):
                a.mps.correct_orthogonal_center(nt)
                if nt != a.length - 1:
                    a.update_virtual_vecs_train(nt + 1, 'right')
                a.update_tensor_gradient(nt, step)
            if para['if_print_detail']:
                print('Wall time cost for one loop: %s' % (time.time() - t0))
                print('CPU time cost for one loop: %s' % (time.process_time() - tt0))
            if t > (para['check_time0'] - 2) and (
                    (t + 1) % para['check_time'] == 0 or t + 1 == para['sweep_time']):
                nll = a.compute_nll()
                print('NLL = ' + str(nll))
                # fid = fidelity_per_site(mps0, a.mps.mps)
                fid = abs(nll - nll0) / nll0
                if fid < (step * para['step_ratio']):
                    print('After ' + str(t + 1) + ' sweeps: fid = %g' % fid)
                    step *= para['step_ratio']
                    # mps0 = copy.deepcopy(a.mps.mps)
                    nll0 = nll
                elif t + 1 == para['sweep_time']:
                    print('After all ' + str(t + 1) + ' sweeps finished, fid = %g. '
                          'Consider increasing the sweep times.' % fid)
                else:
                    print('After ' + str(t + 1) + ' sweeps, fid = %g.' % fid)
                    # mps0 = copy.deepcopy(a.mps.mps)
                    nll0 = nll
                if step < para['step_min']:
                    print('Now step = ' + str(step)
                          + ' is sufficiently small. Break the loop')
                    break
                else:
                    print('Now step = ' + str(step))
        a.clear_before_save()
        if para['if_save']:
            save_pr(para['data_path'], para['save_exp'], [a, para], ['a', 'para'])
    return a, para
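# Usage sketch (assumption): with para['dataset'] set to 'custom', gtn_one_class takes the
# samples directly from the images/labels arguments instead of loading a dataset file;
# my_images and my_labels are hypothetical arrays, not defined in this project.
#   para = pm.parameters_gtn_one_class()
#   para['dataset'] = 'custom'
#   a, para = gtn_one_class(para, images=my_images, labels=my_labels)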
Np2 = np.size(np.array(P2))
label = np.array(P)
print(label.T)
fe = np.zeros((Np1, Np2))
fe_log = np.zeros((Np1, Np2))
t0 = time.time()
for it1 in range(Np1):
    print('label =', P1[it1])
    for it2 in range(Np2):
        hx1 = P1[it1]
        hx2 = P2[it2]
        # load_path = '..\\..\\..\\data_dmrg\\QTSEN\\' + modelpath + '\\data_pr\\L%g_chi%g\\' % (N, chi)
        load_exp1 = chainname + 'N%d_J(%g,%g)_h(%g,%g)_chi%d' % (
            N, Jxy, Jz, hx1, hz, chi) + bounc + '.pr'
        load_exp2 = chainname + 'N%d_J(%g,%g)_h(%g,%g)_chi%d' % (
            N, Jxy, Jz, hx2, hz, chi) + bounc + '.pr'
        datap1 = bf.load_pr(load_exp1)
        datap2 = bf.load_pr(load_exp2)
        Ap1 = datap1['A']
        Ap2 = datap2['A']
        f_log, f = MPSClass.ln_fidelity_per_site_yy(Ap1.mps, Ap2.mps)
        fe_log[it1, it2] = f_log
        fe[it1, it2] = f
print('Time cost:', time.time() - t0)
# save_path = '..\\..\\..\\data_dmrg\\QTSEN\\' + modelpath + '\\data_fidelity_matrix\\'
save_exp = 'fidelitymatrix_N%d_chi%d_hx_(%g,%g,%g)' % (N, chi, P[0], P[-1], Np) + '.npz'
# mkdir(save_path)
np.savez(save_exp, fe=fe, fe_log=fe_log, label=label, P1=P1, P2=P2)