def get_sequence(gtn, num_f, param, order_way):
    """Return a measurement order for a generative TN.

    Parameters
    ----------
    gtn : object
        Trained generative TN; must expose ``mps`` (for 'SequencedMeasure'
        and 'MaxSEE') or ``length`` (for 'RandomMeasure').
    num_f : int
        Number of leading features kept in ``order_now``.
    param : dict
        Parameter dict; which keys are read depends on ``order_way``
        ('data_path'/'save_exp' for 'SequencedMeasure',
        'd'/'dataset'/'class' for 'Variance').
    order_way : str
        One of 'SequencedMeasure', 'MaxSEE', 'Variance', 'RandomMeasure';
        any other value yields ``(None, None)``.

    Returns
    -------
    (order, order_now)
        The full order and a copy of its first ``num_f`` entries.

    Notes
    -----
    BUGFIX: the branch dispatch previously compared strings with ``is``
    (identity), which only works by accident of CPython string interning
    and raises ``SyntaxWarning`` on Python >= 3.8; replaced with ``==``.
    """
    if order_way == 'SequencedMeasure':
        order_file = os.path.join(param['data_path'],
                                  'Order_' + param['save_exp'])
        if os.path.isfile(order_file):
            # Re-use a previously computed (expensive) measurement order
            order = load_pr(order_file, 'order')
        else:
            order = gtn.mps.markov_measurement(if_restore=True)[0]
            save_pr(param['data_path'], 'Order_' + param['save_exp'],
                    [order], ['order'])
        order_now = copy.copy(order[:num_f])
    elif order_way == 'MaxSEE':
        # Sites with the largest single-site entanglement entropy first
        ent = gtn.mps.calculate_onsite_reduced_density_matrix()[0]
        order = np.argsort(ent)[::-1]
        order_now = copy.copy(order[:num_f])
    elif order_way == 'Variance':
        # Pixels ordered by descending variance over the selected class
        tmp = TNmachineLearning.MachineLearningFeatureMap(
            param['d'], param['dataset'])
        tmp.load_data()
        tmp.select_samples([param['class']])
        variance = tmp.variance_pixels()
        order = np.argsort(variance)[::-1]
        order_now = copy.copy(order[:num_f])
    elif order_way == 'RandomMeasure':
        order = np.random.permutation(gtn.length)
        order_now = copy.copy(order[:num_f])
    else:
        order = None
        order_now = None
    return order, order_now
def get_marked_imgs(gtn, imgs_test, num_f, sample, param, order_way):
    """Reconstruct a test image from ``num_f`` measured features and mark them.

    Parameters
    ----------
    gtn : object
        Trained generative TN exposing ``mps`` and ``generate_features``.
    imgs_test : object
        Test-set feature map exposing ``images``, ``img_size`` and
        ``mark_pixels_on_full_image``.
    num_f : int
        Number of features (pixels) measured before generation.
    sample : int
        Column index of the test sample in ``imgs_test.images``.
    param : dict
        Parameter dict (same keys as in ``get_sequence``).
    order_way : str
        Measurement-order strategy; see ``get_sequence``.

    Returns
    -------
    (original_img, generated_img, marked_img, order)

    Notes
    -----
    BUGFIXES relative to the original:
    - branch dispatch used ``is`` for string comparison (identity instead of
      equality) — replaced with ``==``;
    - the 'SequencedMeasure' save path referenced the undefined name
      ``para`` instead of the parameter ``param`` (NameError whenever the
      order file did not yet exist).
    """
    if order_way == 'SequencedMeasure':
        order_file = os.path.join(param['data_path'],
                                  'Order_' + param['save_exp'])
        if os.path.isfile(order_file):
            order = load_pr(order_file, 'order')
        else:
            order = gtn.mps.markov_measurement(if_restore=True)[0]
            # BUGFIX: was ``para['data_path']`` / ``para['save_exp']``
            save_pr(param['data_path'], 'Order_' + param['save_exp'],
                    [order], ['order'])
        order_now = copy.copy(order.reshape(-1,)[:num_f])
    elif order_way == 'MaxSEE':
        ent = gtn.mps.calculate_onsite_reduced_density_matrix()[0]
        order = np.argsort(ent)[::-1]
        order_now = copy.copy(order.reshape(-1,)[:num_f])
    elif order_way == 'Variance':
        tmp = TNmachineLearning.MachineLearningFeatureMap(
            param['d'], param['dataset'])
        tmp.load_data()
        tmp.select_samples([param['class']])
        variance = tmp.variance_pixels()
        order = np.argsort(variance)[::-1]
        order_now = copy.copy(order.reshape(-1,)[:num_f])
    elif order_way == 'RandomMeasure':
        order = np.random.permutation(gtn.length)
        order_now = copy.copy(order.reshape(-1,)[:num_f])
    else:
        order = None
        order_now = None
    # Features actually observed for this sample
    img_part = imgs_test.images.copy()[order_now, sample]
    # NOTE(review): ``generate_way`` is a free (presumably module-level)
    # name here — confirm it is defined in this module.
    img_new = gtn.generate_features(img_part, pos=order_now, f_max=1,
                                    f_min=0, is_display=False,
                                    way=generate_way)
    img_new = img_new.reshape(imgs_test.img_size)
    marked_img = imgs_test.mark_pixels_on_full_image(img_new, order_now)
    return (imgs_test.images[:, sample].reshape(imgs_test.img_size),
            img_new, marked_img, order)
def gtnc(para_tot=None):
    """Train one generative MPS per class, then classify the test set.

    para_tot: parameter dict; defaults to ``pm.parameters_gcmpm()``.
    Prints the testing accuracy; returns None.
    """
    print('Preparing parameters')
    if para_tot is None:
        para_tot = pm.parameters_gcmpm()
    n_class = len(para_tot['classes'])
    paras = bf.empty_list(n_class)
    for n in range(0, n_class):
        # One independent parameter dict per class (deepcopy so per-class
        # tweaks cannot leak through shared sub-objects)
        paras[n] = copy.deepcopy(para_tot)
        paras[n]['class'] = int(para_tot['classes'][n])
        paras[n]['chi'] = para_tot['chi'][n]
        paras[n]['theta'] = para_tot['theta']
        paras[n]['save_exp'] = save_exp_gtn_one_class(paras[n])
    classifiers = bf.empty_list(n_class)
    for n in range(0, n_class):
        print_dict(paras[n])
        # NOTE(review): plain string concatenation — assumes 'data_path'
        # already ends with a path separator; consider os.path.join.
        data = para_tot['data_path'] + paras[n]['save_exp']
        if para_tot['if_load'] and os.path.isfile(data):
            print('The classifier already exists. Load directly')
            classifiers[n] = load_pr(data, 'a')
        else:
            print('Training the MPS of ' + str(para_tot['classes'][n]))
            classifiers[n] = gtn_one_class(paras[n])[0]
            # if para_tot['if_save']:
            #     save_pr('../data_tnml/gcmpm/', paras[n]['save_exp'],
            #             [classifiers[n]], ['classifier'])
        # classifiers[n].mps.check_orthogonality_by_tensors(tol=1e-12)
    # ==================== Testing accuracy ====================
    print('Calculating the testing accuracy')
    b = TNmachineLearning.MachineLearningFeatureMap(para_tot['d'])
    # NOTE(review): hard-coded Windows-style path — breaks on POSIX systems.
    b.load_data(data_path='..\\..\\..\\MNIST\\',
                file_sample='t10k-images.idx3-ubyte',
                file_label='t10k-labels.idx1-ubyte',
                is_normalize=True)
    b.select_samples(para_tot['classes'])
    if classifiers[0].is_dct:
        # Apply the same DCT preprocessing the classifiers were trained with
        b.dct(shift=para_tot['shift'], factor=para_tot['factor'])
    b.images2vecs(para_tot['theta'] * np.pi / 2)
    fid = bf.empty_list(n_class)
    for n in range(0, n_class):
        fid[n] = b.compute_fidelities(classifiers[n].mps.mps)
    # NOTE(review): despite the name ``max_fid``, argmin is used —
    # presumably ``compute_fidelities`` returns a distance-like quantity
    # (smaller = better match); confirm against its implementation.
    max_fid = np.argmin(np.hstack(fid), axis=1)
    predict = np.zeros(max_fid.shape, dtype=int)
    for n in range(0, n_class):
        # Map the winning classifier index back to its class label
        predict += (max_fid == n) * int(para_tot['classes'][n])
    # plot(predict)
    # plot(b.labels)
    accuracy = np.sum(predict == b.labels, dtype=float) / b.numVecSample
    print(accuracy)
def gcmpm(para_tot=None):
    """Train one generative MPS classifier per class and print test accuracy.

    para_tot: parameter dict; defaults to ``pm.parameters_gcmpm()``.
    Prints the testing accuracy; returns None.

    Notes
    -----
    BUGFIX: the final statistic divided the number of WRONG predictions by
    the sample count — i.e. it printed the error rate while announcing the
    accuracy. It now prints the true accuracy.
    """
    print('Preparing parameters')
    if para_tot is None:
        para_tot = pm.parameters_gcmpm()
    n_class = len(para_tot['classes'])
    paras = bf.empty_list(n_class)
    for n in range(0, n_class):
        # deepcopy (consistent with gtnc) so per-class tweaks cannot leak
        # through sub-objects shared by a shallow copy
        paras[n] = copy.deepcopy(para_tot)
        paras[n]['class'] = para_tot['classes'][n]
        paras[n]['chi'] = para_tot['chi'][n]
        paras[n]['save_exp'] = save_exp_gcmpm_one_class(paras[n])
    classifiers = bf.empty_list(n_class)
    for n in range(0, n_class):
        data = '../data_tnml/gcmpm/' + paras[n]['save_exp']
        if para_tot['if_load'] and os.path.isfile(data):
            print('The classifier already exists. Load directly')
            classifiers[n] = load_pr(data, 'classifier')
        else:
            print('Training the MPS of ' + str(para_tot['classes'][n]))
            classifiers[n] = gcmpm_one_class(paras[n])[0]
            if para_tot['if_save']:
                save_pr('../data_tnml/gcmpm/', paras[n]['save_exp'],
                        [classifiers[n]], ['classifier'])
    # Testing accuracy
    print('Calculating the testing accuracy')
    labels = para_tot['classes']
    b = TNmachineLearning.MachineLearningFeatureMap(
        'MNIST', para_tot['d'], file_sample='t10k-images.idx3-ubyte',
        file_label='t10k-labels.idx1-ubyte')
    b.images2vecs(para_tot['classes'], ['all', 'all'])
    fid = np.zeros((n_class, ))
    num_wrong = 0
    for ni in range(0, b.numVecSample):
        # Fidelity of this sample against every class MPS; predict the
        # class with the largest fidelity
        for n in range(0, n_class):
            fid[n] = b.fidelity_mps_image(classifiers[n].mps.mps, ni)
        n_max = int(np.argmax(fid))
        if labels[n_max] != b.LabelNow[ni]:
            num_wrong += 1
    # BUGFIX: was ``num_wrong / b.numVecSample`` (the error rate)
    accuracy = (b.numVecSample - num_wrong) / b.numVecSample
    print(accuracy)
# =============================================================
# Script fragment: scan one parameter (named by ``var_name``) over
# ``var_values``, training a generative TN for each value and collecting
# PSNR / MSE / SSIM statistics. Relies on the module-level names
# ``dataset``, ``which_class``, ``chi``, ``var_name`` and ``var_values``.
para = parameters_gtn_one_class()
para['dct'] = False
para['d'] = 2
para['step'] = 0.2  # initial gradient step
para['if_save'] = True
para['if_load'] = True
para['dataset'] = dataset
para['class'] = which_class
para['chi'] = chi
psnr_av = np.zeros(var_values.shape)
mse_av = np.zeros(var_values.shape)
ssim = np.zeros(var_values.shape)
b = TNmachineLearning.MachineLearningFeatureMap(para['d'], para['dataset'])
# BUGFIX: was ``var_name is not 'which_class'`` — identity comparison with
# a string literal only works by CPython interning accident (and raises
# SyntaxWarning on Python >= 3.8); use ``!=``.
if var_name != 'which_class':
    # The class is fixed across the scan, so the test data can be loaded
    # once up front. NOTE(review): Windows-style path — breaks on POSIX.
    b.load_data(data_path=os.path.join(
                    b.project_path,
                    '..\\..\\MNIST\\' + para['dataset'] + '\\'),
                file_sample='t10k-images.idx3-ubyte',
                file_label='t10k-labels.idx1-ubyte',
                is_normalize=True)
    b.select_samples([para['class']])
is_order_calculated = False
for t in range(var_values.size):
    # Modify parameter
    print('For ' + var_name + ' = ' + str(var_values[t]))
    # NOTE(review): exec on constructed strings is fragile (and only safe
    # because var_name/var_values are hard-coded in this script); kept
    # as-is to preserve behavior — a dict of setters would be safer.
    exec(var_name + ' = ' + str(var_values[t]))
    if var_name in para:
        exec('para[\'' + var_name + '\'] = ' + str(var_values[t]))
    print('Train the generative TN')
def labeled_gtn(para=None):
    """Train a labeled generative TN (MPS) classifier and report accuracy.

    Parameters
    ----------
    para : dict or None
        Parameter dict; defaults to ``pm.parameters_labeled_gtn()``.
        (Backward-compatible change: the argument is now optional — the
        body already handled ``None`` explicitly.)

    Returns
    -------
    (a, para)
        The trained ``MachineLearningMPS`` and the (possibly updated,
        since 'step' is reduced in place on convergence) parameter dict.
    """
    if para is None:
        para = pm.parameters_labeled_gtn()
    para['save_exp'] = save_exp_labeled_gtn(para)
    # Only the literal True enables the worker pool (preserved behavior)
    par_pool = para['n_nodes'] if para['parallel'] is True else None
    # Preparing testing dataset
    b = TNmachineLearning.MachineLearningFeatureMap(
        para['d'], file_sample='t10k-images.idx3-ubyte',
        file_label='t10k-labels.idx1-ubyte')
    b.load_data()
    b.select_samples(para['classes'])
    b.add_labels_to_images()
    # NOTE(review): testing uses theta directly while training below uses
    # theta * pi / 2 — confirm this asymmetry is intended.
    b.images2vecs(para['theta'])
    data_file = os.path.join(para['data_path'], para['save_exp'])
    if para['if_load'] and os.path.isfile(data_file):
        print('Data exist. Load directly.')
        a = bf.load_pr(data_file, 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(
            para['d'], para['chi'], para['dataset'], par_pool=par_pool)
        a.load_data()
        a.select_samples(para['classes'])
        a.add_labels_to_images()
        a.images2vecs(para['theta'] * np.pi / 2)
        a.initial_mps()
        a.mps.correct_orthogonal_center(0, normalize=True)
        a.initialize_virtual_vecs_train()
        a.update_virtual_vecs_train_all_tensors('both')
        accuracy0 = 0
        for t in range(0, para['sweep_time']):
            # Sweep from left to right
            for nt in range(0, a.length):
                a.update_tensor_gradient(nt, para['step'])
                if nt != a.length - 1:
                    a.update_virtual_vecs_train(nt, 'left')
            # Sweep from right to left (BUGFIX: comment previously said
            # "left to right" for this backward sweep as well)
            for nt in range(a.length - 1, -1, -1):
                a.update_tensor_gradient(nt, para['step'])
                if nt != 0:
                    a.update_virtual_vecs_train(nt, 'right')
            # Periodically check the testing accuracy and adapt the step
            if t > para['check_time0'] and (
                    (t + 1) % para['check_time'] == 0
                    or t + 1 == para['sweep_time']):
                b.input_mps(a.mps)
                accuracy = b.calculate_accuracy()
                print('After the ' + str(t)
                      + '-th sweep, the testing accuracy = ' + str(accuracy))
                if abs(accuracy - accuracy0) < (para['step']
                                                * para['ratio_step_tol']):
                    # Converged at this step size: shrink the gradient step
                    para['step'] *= para['step_ratio']
                    accuracy0 = accuracy
                    print('Converged. Reduce the gradient step to '
                          + str(para['step']))
                elif t + 1 == para['sweep_time']:
                    print('After all ' + str(t + 1)
                          + ' sweeps finished, not converged. '
                          'Consider to increase the sweep times.')
                else:
                    accuracy0 = accuracy
                if para['step'] < para['step_min']:
                    print('Now step = ' + str(para['step'])
                          + ' is sufficiently small. Break the loop')
                    break
                else:
                    print('Now step = ' + str(para['step']))
        a.clear_before_save()
        if para['if_save']:
            save_pr(para['data_path'], para['save_exp'], [a, para],
                    ['a', 'para'])
    # BUGFIX: in the load-from-disk branch, b never received the MPS before
    # the final accuracy computation; feed it in explicitly (harmless when
    # training already did so).
    b.input_mps(a.mps)
    accuracy = b.calculate_accuracy()
    print('The final testing accuracy = ' + str(accuracy))
    return a, para