示例#1
0
def get_marked_imgs(gtn, imgs_test, num_f, sample, param, order_way):
    """Measure the first ``num_f`` features of one test sample in a chosen
    order, regenerate an image from the measured pixels, and mark them.

    :param gtn: trained generative TN (exposes .mps, .length, .generate_features)
    :param imgs_test: test-image container (images, img_size, marking helper)
    :param num_f: number of leading features/pixels to measure
    :param sample: column index of the test sample to use
    :param param: parameter dict (data_path, save_exp, d, dataset, class)
    :param order_way: 'SequencedMeasure' | 'MaxSEE' | 'Variance' |
        'RandomMeasure'; anything else yields a None order
    :return: (original image, regenerated image, marked image, full order)
    """
    # BUG FIX throughout: string comparison must use `==`, not identity `is`
    if order_way == 'SequencedMeasure':
        order_file = os.path.join(param['data_path'], 'Order_' + param['save_exp'])
        if os.path.isfile(order_file):
            order = load_pr(order_file, 'order')
        else:
            order = gtn.mps.markov_measurement(if_restore=True)[0]
            # BUG FIX: was `para[...]` (undefined name); the parameter is `param`
            save_pr(param['data_path'], 'Order_' + param['save_exp'], [order], ['order'])
    elif order_way == 'MaxSEE':
        # order pixels by decreasing single-site entanglement entropy
        ent = gtn.mps.calculate_onsite_reduced_density_matrix()[0]
        order = np.argsort(ent)[::-1]
    elif order_way == 'Variance':
        # order pixels by decreasing variance over the selected class samples
        tmp = TNmachineLearning.MachineLearningFeatureMap(param['d'], param['dataset'])
        tmp.load_data()
        tmp.select_samples([param['class']])
        variance = tmp.variance_pixels()
        order = np.argsort(variance)[::-1]
    elif order_way == 'RandomMeasure':
        order = np.random.permutation(gtn.length)
    else:
        order = None
    order_now = None if order is None else copy.copy(order.reshape(-1,)[:num_f])
    img_part = imgs_test.images.copy()[order_now, sample]
    # NOTE(review): `generate_way` is not defined in this function; presumably
    # a module-level variable -- confirm it is in scope where this is called.
    img_new = gtn.generate_features(img_part, pos=order_now, f_max=1, f_min=0,
                                    is_display=False, way=generate_way)
    img_new = img_new.reshape(imgs_test.img_size)
    marked_img = imgs_test.mark_pixels_on_full_image(img_new, order_now)
    return imgs_test.images[:, sample].reshape(imgs_test.img_size), img_new, marked_img, order
示例#2
0
def get_sequence(gtn, num_f, param, order_way):
    """Compute a feature-measurement order for a generative TN.

    :param gtn: trained generative TN (exposes .mps and .length)
    :param num_f: number of leading positions returned in ``order_now``
    :param param: parameter dict (data_path, save_exp, d, dataset, class)
    :param order_way: 'SequencedMeasure' | 'MaxSEE' | 'Variance' |
        'RandomMeasure'; anything else yields (None, None)
    :return: (full order, first ``num_f`` entries of the order)
    """
    # BUG FIX throughout: string comparison must use `==`, not identity `is`,
    # which only works by accident via CPython string interning
    if order_way == 'SequencedMeasure':
        order_file = os.path.join(param['data_path'],
                                  'Order_' + param['save_exp'])
        if os.path.isfile(order_file):
            # reuse a previously computed (and saved) measurement order
            order = load_pr(order_file, 'order')
        else:
            order = gtn.mps.markov_measurement(if_restore=True)[0]
            save_pr(param['data_path'], 'Order_' + param['save_exp'], [order],
                    ['order'])
    elif order_way == 'MaxSEE':
        # decreasing single-site entanglement entropy
        ent = gtn.mps.calculate_onsite_reduced_density_matrix()[0]
        order = np.argsort(ent)[::-1]
    elif order_way == 'Variance':
        # decreasing pixel variance over the selected class samples
        tmp = TNmachineLearning.MachineLearningFeatureMap(
            param['d'], param['dataset'])
        tmp.load_data()
        tmp.select_samples([param['class']])
        variance = tmp.variance_pixels()
        order = np.argsort(variance)[::-1]
    elif order_way == 'RandomMeasure':
        order = np.random.permutation(gtn.length)
    else:
        order = None
    order_now = None if order is None else copy.copy(order[:num_f])
    return order, order_now
示例#3
0
def decision_mps(para=None):
    """Train a decision MPS classifier tensor by tensor, printing the
    intermediate training accuracy after each tensor update.

    :param para: parameter dict; defaults to ``pm.parameters_decision_mps()``
    """
    if para is None:
        para = pm.parameters_decision_mps()
    a = TNmachineLearning.DecisionTensorNetwork(
        para['dataset'],
        2,
        para['chi'],
        'mps',
        para['classes'],
        para['numbers'],
        if_reducing_samples=para['if_reducing_samples'])
    a.images2vecs_test_samples(para['classes'])
    print(a.vLabel)
    for n in range(0, a.length):
        bf.print_sep()
        print('Calculating the %i-th tensor' % n)
        a.update_tensor_decision_mps_svd(n)
        # Alternative update schemes kept for reference:
        # a.update_tensor_decision_mps_svd_threshold_algo(n)
        # a.update_tensor_decision_mps_gradient_algo(n)
        a.update_v_ctr_train(n)
        a.calculate_intermediate_accuracy_train(n)
        # idiom: len(x) instead of x.__len__()
        if len(a.remaining_samples_train) == 0:
            print('All samples are classified correctly. Training stopped.')
            break
        print('The current accuracy = %g' % a.intermediate_accuracy_train[n])
        print('Entanglement: ' + str(a.lm[n].reshape(1, -1)))
        if para['if_reducing_samples']:
            print('Number of remaining samples: ' +
                  str(len(a.remaining_samples_train)))
def gcmpm_one_class(para=None):
    """Train a generative MPS for a single class with alternating
    left-to-right / right-to-left gradient sweeps, shrinking the gradient
    step when the per-site fidelity change converges.

    :param para: parameter dict; defaults to ``pm.parameters_gcmpm_one_class()``
    :return: (trained MachineLearningMPS instance, parameter dict)
    """
    if para is None:
        para = pm.parameters_gcmpm_one_class()
    para['save_exp'] = save_exp_gcmpm_one_class(para)
    # idiom: truthiness instead of `is True`
    if para['parallel']:
        par_pool = para['n_nodes']
    else:
        par_pool = None
    if para['if_load'] and os.path.isfile(para['save_exp']):
        a = bf.load_pr(os.path.join(para['data_path'], para['save_exp']), 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'], para['dataset'],
                                                 par_pool=par_pool)
    # NOTE(review): the sample count 100 is hard-coded -- confirm it is meant
    # to be fixed rather than read from para
    a.images2vecs([para['class']], [100])
    a.initialize_virtual_vecs_train()
    a.update_virtual_vecs_train('all', 'all', 'both')
    a.mps.correct_orthogonal_center(0, normalize=True)
    a.mps.mps[0] /= np.linalg.norm(a.mps.mps[0].reshape(-1, ))
    mps0 = a.mps.mps.copy()
    for t in range(0, para['sweep_time']):
        # from left to right
        if para['if_print_detail']:
            print('At the ' + str(t) + '-th sweep, from left to right')
        for nt in range(0, a.length):
            a.update_tensor_gradient(nt, para['step'])
            if nt != a.length-1:
                a.update_virtual_vecs_train('all', nt, 'left')
        # from right to left (FIX: comment previously said "from left to right")
        print('At the ' + str(t) + '-th sweep, from right to left')
        for nt in range(a.length-1, -1, -1):
            a.update_tensor_gradient(nt, para['step'])
            if nt != 0:
                a.update_virtual_vecs_train('all', nt, 'right')
        # periodic convergence check
        if t > para['check_time0'] and ((t+1) % para['check_time'] == 0
                                        or t+1 == para['sweep_time']):
            fid = ln_fidelity_per_site(mps0, a.mps.mps)
            if fid < (para['step'] * para['ratio_step_tol']):
                print('After ' + str(t+1) + ' sweeps: fid = %g' % fid)
                para['step'] *= para['step_ratio']
            elif t+1 == para['sweep_time']:
                print('After all ' + str(t+1) + ' sweeps finished, fid = %g. '
                                                'Consider to increase the sweep times.' % fid)
            else:
                print('After ' + str(t+1) + ' sweeps, fid = %g.' % fid)
                mps0 = a.mps.mps.copy()
            if para['step'] < para['step_min']:
                print('Now step = ' + str(para['step']) + ' is sufficiently small. Break the loop')
                break
            else:
                print('Now step = ' + str(para['step']))
    if para['if_save']:
        save_pr(para['data_path'], para['save_exp'], [a, para], ['a', 'para'])
    return a, para
示例#5
0
def gtnc(para_tot=None):
    """Train (or load) one generative MPS per class, then evaluate the
    ensemble as a classifier on the MNIST t10k set and print the accuracy.

    :param para_tot: global parameter dict; defaults to ``pm.parameters_gcmpm()``
    """
    print('Preparing parameters')
    if para_tot is None:
        para_tot = pm.parameters_gcmpm()
    num_classes = len(para_tot['classes'])

    # Build one per-class parameter dict from the global one
    paras = bf.empty_list(num_classes)
    for idx in range(num_classes):
        one = copy.deepcopy(para_tot)
        one['class'] = int(para_tot['classes'][idx])
        one['chi'] = para_tot['chi'][idx]
        one['theta'] = para_tot['theta']
        one['save_exp'] = save_exp_gtn_one_class(one)
        paras[idx] = one

    # Obtain one trained classifier per class (load from disk if available)
    classifiers = bf.empty_list(num_classes)
    for idx in range(num_classes):
        print_dict(paras[idx])
        data = para_tot['data_path'] + paras[idx]['save_exp']
        if para_tot['if_load'] and os.path.isfile(data):
            print('The classifier already exists. Load directly')
            classifiers[idx] = load_pr(data, 'a')
        else:
            print('Training the MPS of ' + str(para_tot['classes'][idx]))
            classifiers[idx] = gtn_one_class(paras[idx])[0]

    # ==================== Testing accuracy ====================
    print('Calculating the testing accuracy')
    b = TNmachineLearning.MachineLearningFeatureMap(para_tot['d'])
    b.load_data(data_path='..\\..\\..\\MNIST\\',
                file_sample='t10k-images.idx3-ubyte',
                file_label='t10k-labels.idx1-ubyte',
                is_normalize=True)
    b.select_samples(para_tot['classes'])
    if classifiers[0].is_dct:
        b.dct(shift=para_tot['shift'], factor=para_tot['factor'])
    b.images2vecs(para_tot['theta'] * np.pi / 2)

    # One fidelity column per class; each sample is assigned to the class
    # whose MPS minimizes the value returned by compute_fidelities
    fid = bf.empty_list(num_classes)
    for idx in range(num_classes):
        fid[idx] = b.compute_fidelities(classifiers[idx].mps.mps)
    winner = np.argmin(np.hstack(fid), axis=1)
    predict = np.zeros(winner.shape, dtype=int)
    for idx in range(num_classes):
        predict += (winner == idx) * int(para_tot['classes'][idx])
    accuracy = np.sum(predict == b.labels, dtype=float) / b.numVecSample
    print(accuracy)
def gcmpm(para_tot=None):
    """Train (or load) one generative MPS per class and print the testing
    accuracy of the resulting fidelity-based classifier.

    :param para_tot: global parameter dict; defaults to ``pm.parameters_gcmpm()``
    """
    print('Preparing parameters')
    if para_tot is None:
        para_tot = pm.parameters_gcmpm()
    n_class = len(para_tot['classes'])
    paras = bf.empty_list(n_class)
    for n in range(0, n_class):
        # deepcopy (consistent with gtnc) so per-class edits cannot leak
        # through mutable values shared by a shallow dict copy
        paras[n] = copy.deepcopy(para_tot)
        paras[n]['class'] = para_tot['classes'][n]
        paras[n]['chi'] = para_tot['chi'][n]
        paras[n]['save_exp'] = save_exp_gcmpm_one_class(paras[n])
    classifiers = bf.empty_list(n_class)
    for n in range(0, n_class):
        data = '../data_tnml/gcmpm/' + paras[n]['save_exp']
        if para_tot['if_load'] and os.path.isfile(data):
            print('The classifier already exists. Load directly')
            classifiers[n] = load_pr(data, 'classifier')
        else:
            print('Training the MPS of ' + str(para_tot['classes'][n]))
            classifiers[n] = gcmpm_one_class(paras[n])[0]
            if para_tot['if_save']:
                save_pr('../data_tnml/gcmpm/', paras[n]['save_exp'],
                        [classifiers[n]], ['classifier'])
    # Testing accuracy
    print('Calculating the testing accuracy')
    labels = para_tot['classes']
    b = TNmachineLearning.MachineLearningFeatureMap('MNIST', para_tot['d'],
                                                    file_sample='t10k-images.idx3-ubyte',
                                                    file_label='t10k-labels.idx1-ubyte')
    b.images2vecs(para_tot['classes'], ['all', 'all'])
    fid = np.zeros((n_class, ))
    num_wrong = 0
    for ni in range(0, b.numVecSample):
        for n in range(0, n_class):
            fid[n] = b.fidelity_mps_image(classifiers[n].mps.mps, ni)
        n_max = int(np.argmax(fid))
        if labels[n_max] != b.LabelNow[ni]:
            num_wrong += 1
    # BUG FIX: num_wrong/total is the ERROR rate; accuracy is its complement
    accuracy = 1 - num_wrong / b.numVecSample
    print(accuracy)
示例#7
0
# =============================================================
# Experiment setup: generative TN for one class, sweeping the variable
# named by `var_name` over `var_values` (both defined earlier in this file).
para = parameters_gtn_one_class()
para['dct'] = False
para['d'] = 2
para['step'] = 0.2  # initial gradient step
para['if_save'] = True
para['if_load'] = True
para['dataset'] = dataset
para['class'] = which_class
para['chi'] = chi

# Per-sweep-point quality metrics, filled in the loop below
psnr_av = np.zeros(var_values.shape)
mse_av = np.zeros(var_values.shape)
ssim = np.zeros(var_values.shape)
b = TNmachineLearning.MachineLearningFeatureMap(para['d'], para['dataset'])
# BUG FIX: `is not` compares identity, not equality; use != for strings.
# When the swept variable IS the class, the test data must be reloaded
# inside the loop instead of once here.
if var_name != 'which_class':
    b.load_data(data_path=os.path.join(b.project_path, '..\\..\\MNIST\\' + para['dataset'] + '\\'),
                file_sample='t10k-images.idx3-ubyte',
                file_label='t10k-labels.idx1-ubyte', is_normalize=True)
    b.select_samples([para['class']])

# Lazily-computed measurement order: recomputed only when needed in the loop
is_order_calculated = False
for t in range(var_values.size):
    # Modify parameter: rebind both the module-level variable and, when it is
    # a key of `para`, the corresponding dict entry.
    print('For ' + var_name + ' = ' + str(var_values[t]))
    # NOTE(review): exec on a formatted string is fragile and unsafe if
    # var_name/var_values ever come from outside this script -- confirm they
    # are hard-coded before reusing this pattern.
    exec(var_name + ' = ' + str(var_values[t]))
    if var_name in para:
        exec('para[\'' + var_name + '\'] = ' + str(var_values[t]))

    print('Train the generative TN')
示例#8
0
def gtn_one_class(para=None, images=None, labels=None):
    """Train (or load from disk) a generative MPS for one class.

    :param para: parameter dict; defaults to ``pm.parameters_gtn_one_class()``
    :param images: custom image data, used when para['dataset'] == 'custom'
    :param labels: labels accompanying the custom image data
    :return: (trained MachineLearningMPS instance, parameter dict)
    """
    # BUG FIX: the None check must run first; `'key' not in None` raises
    # TypeError when the function is called without para.
    if para is None:
        para = pm.parameters_gtn_one_class()
    if 'to_black_and_white' not in para:
        para['to_black_and_white'] = False
    para['save_exp'] = save_exp_gtn_one_class(para)
    if para['if_load'] and os.path.isfile(
            os.path.join(para['data_path'], para['save_exp'])):
        a = bf.load_pr(os.path.join(para['data_path'], para['save_exp']), 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'],
                                                 para['dataset'])
        # BUG FIX: string equality with ==, not identity `is`
        if para['dataset'] == 'custom':
            a.input_data(copy.deepcopy(images), copy.deepcopy(labels))
        else:
            a.load_data()
        if a.is_there_labels:
            a.select_samples([para['class']])
        if para['to_black_and_white']:
            a.to_black_and_white()
        if para['dct']:  # idiom: truthiness instead of `is True`
            a.dct(shift=para['shift'], factor=para['factor'])
        a.images2vecs(theta_max=para['theta'] * np.pi / 2)
        a.initial_mps(center=0, ini_way='1')
        a.initialize_virtual_vecs_train()
        a.mps.correct_orthogonal_center(0)
        a.update_tensor_gradient(0, para['step'])
        nll0 = a.compute_nll()
        step = copy.deepcopy(para['step'])
        print('Initially, NLL = ' + str(nll0))  # typo fix: "Iniitially"
        for t in range(0, para['sweep_time']):
            # from left to right
            if para['if_print_detail']:
                print('At the ' + str(t + 1) + '-th sweep, from left to right')
                t0 = time.time()
                # BUG FIX: time.clock() was removed in Python 3.8;
                # process_time() is its CPU-time replacement
                tt0 = time.process_time()
            for nt in range(0, a.length):
                a.mps.correct_orthogonal_center(nt)
                if nt != 0:
                    a.update_virtual_vecs_train(nt - 1, 'left')
                a.update_tensor_gradient(nt, step)
            # from right to left
            if para['if_print_detail']:
                print('At the ' + str(t + 1) + '-th sweep, from right to left')
            for nt in range(a.length - 1, -1, -1):
                a.mps.correct_orthogonal_center(nt)
                if nt != a.length - 1:
                    a.update_virtual_vecs_train(nt + 1, 'right')
                a.update_tensor_gradient(nt, step)
            if para['if_print_detail']:
                print('Wall time cost for one loop: %s' % (time.time() - t0))
                print('CPU time cost for one loop: %s' % (time.process_time() - tt0))

            # periodic convergence check on the relative NLL change
            if t > (para['check_time0'] - 2) and (
                (t + 1) % para['check_time'] == 0
                    or t + 1 == para['sweep_time']):
                nll = a.compute_nll()
                print('NLL = ' + str(nll))
                fid = abs(nll - nll0) / nll0
                if fid < (step * para['step_ratio']):
                    print('After ' + str(t + 1) + ' sweeps: fid = %g' % fid)
                    step *= para['step_ratio']
                    nll0 = nll
                elif t + 1 == para['sweep_time']:
                    print('After all ' + str(t + 1) +
                          ' sweeps finished, fid = %g. '
                          'Consider to increase the sweep times.' % fid)
                else:
                    print('After ' + str(t + 1) + ' sweeps, fid = %g.' % fid)
                    nll0 = nll
                if step < para['step_min']:
                    print('Now step = ' + str(step) +
                          ' is sufficiently small. Break the loop')
                    break
                else:
                    print('Now step = ' + str(step))
        a.clear_before_save()
        if para['if_save']:
            save_pr(para['data_path'], para['save_exp'], [a, para],
                    ['a', 'para'])
    return a, para
示例#9
0
def labeled_gtn(para):
    """Train (or load) a labeled generative TN (MPS with label features
    added to the images) and print its testing accuracy.

    :param para: parameter dict; defaults to ``pm.parameters_labeled_gtn()``
    :return: (trained MachineLearningMPS instance, parameter dict)
    """
    if para is None:
        para = pm.parameters_labeled_gtn()
    para['save_exp'] = save_exp_labeled_gtn(para)
    # idiom: truthiness instead of `is True`
    if para['parallel']:
        par_pool = para['n_nodes']
    else:
        par_pool = None

    # Preparing testing dataset
    b = TNmachineLearning.MachineLearningFeatureMap(
        para['d'],
        file_sample='t10k-images.idx3-ubyte',
        file_label='t10k-labels.idx1-ubyte')
    b.load_data()
    b.select_samples(para['classes'])
    b.add_labels_to_images()
    b.images2vecs(para['theta'])

    data_file = os.path.join(para['data_path'], para['save_exp'])
    if para['if_load'] and os.path.isfile(data_file):
        print('Data exist. Load directly.')
        a = bf.load_pr(data_file, 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'],
                                                 para['chi'],
                                                 para['dataset'],
                                                 par_pool=par_pool)
        a.load_data()
        a.select_samples(para['classes'])
        a.add_labels_to_images()
        a.images2vecs(para['theta'] * np.pi / 2)
        a.initial_mps()
        a.mps.correct_orthogonal_center(0, normalize=True)
        a.initialize_virtual_vecs_train()
        a.update_virtual_vecs_train_all_tensors('both')
        accuracy0 = 0
        for t in range(0, para['sweep_time']):
            # from left to right
            for nt in range(0, a.length):
                a.update_tensor_gradient(nt, para['step'])
                if nt != a.length - 1:
                    a.update_virtual_vecs_train(nt, 'left')
            # from right to left
            for nt in range(a.length - 1, -1, -1):
                a.update_tensor_gradient(nt, para['step'])
                if nt != 0:
                    a.update_virtual_vecs_train(nt, 'right')
            # periodic convergence check on the testing accuracy
            if t > para['check_time0'] and ((t + 1) % para['check_time'] == 0
                                            or t + 1 == para['sweep_time']):
                b.input_mps(a.mps)
                accuracy = b.calculate_accuracy()
                print('After the ' + str(t) +
                      '-th sweep, the testing accuracy = ' + str(accuracy))
                if abs(accuracy - accuracy0) < (para['step'] *
                                                para['ratio_step_tol']):
                    para['step'] *= para['step_ratio']
                    accuracy0 = accuracy
                    print('Converged. Reduce the gradient step to ' +
                          str(para['step']))
                elif t + 1 == para['sweep_time']:
                    print('After all ' + str(t + 1) +
                          ' sweeps finished, not converged. '
                          'Consider to increase the sweep times.')
                else:
                    accuracy0 = accuracy
                if para['step'] < para['step_min']:
                    print('Now step = ' + str(para['step']) +
                          ' is sufficiently small. Break the loop')
                    break
                else:
                    print('Now step = ' + str(para['step']))
        a.clear_before_save()
        if para['if_save']:
            save_pr(para['data_path'], para['save_exp'], [a, para],
                    ['a', 'para'])
    # BUG FIX: when the classifier is loaded from disk, b never received the
    # MPS (b.input_mps is only called inside the training branch); feed it
    # before computing the final accuracy.
    b.input_mps(a.mps)
    accuracy = b.calculate_accuracy()
    print('The final testing accuracy = ' + str(accuracy))

    return a, para
示例#10
0
from library import TNmachineLearning
from library.Parameters import parameters_lazy_learning
import numpy as np
from library.TensorBasicModule import khatri
from library.BasicFunctions import plot
import torch

if_torch = True  # use the torch-based branch below
num_parts = 20  # to save GPU memory

para = parameters_lazy_learning()
print_dtime = 20  # NOTE(review): presumably a progress-print interval -- confirm where used

# Training set: feature-mapped samples of the selected classes
a = TNmachineLearning.MachineLearningFeatureMap(para['d'], para['dataset'])
a.load_data()
a.select_samples(para['classes'], para['num_samples'])
a.images2vecs(theta_max=para['theta'] * np.pi)

# Testing set (MNIST t10k). NOTE(review): Windows-style relative path --
# confirm it resolves on the platform where this script runs.
b = TNmachineLearning.MachineLearningFeatureMap(para['d'], para['dataset'])
b.load_data(data_path='..\\..\\..\\MNIST\\',
            file_sample='t10k-images.idx3-ubyte',
            file_label='t10k-labels.idx1-ubyte',
            is_normalize=True)
b.select_samples(para['classes'], para['num_samples'])
b.images2vecs(theta_max=para['theta'] * np.pi)

# Accumulators for per-class results, filled by the loop below
accuracy = list()
num = list()
if if_torch:
    for c2 in para['classes']:
        fid = list()