Example #1
def prepare_bath_hamilts(para):
    print('Starting iDMRG for the entanglement bath')
    bath_data = opath.join(para['bath_path'], para['bath_exp'])
    if para['if_load_bath'] and opath.isfile(bath_data):
        print('Bath data found. Load the bath.')
        bath, ob0, hamilt = load_pr(bath_data, ['A', 'ob0', 'hamilt'])
    else:
        print('Bath data not found. Calculate bath by iDMRG.')
        hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'],
                                        para['jz'], para['hx'] / 2,
                                        para['hz'] / 2)
        bath, ob0 = dmrg_infinite_size(para, hamilt=hamilt)[:2]
        save_pr(para['bath_path'], para['bath_exp'], [bath, ob0, hamilt],
                ['A', 'ob0', 'hamilt'])
    if (bath.is_symme_env is True) and (bath.dmrg_type == 'mpo'):
        bath.env[1] = bath.env[0]

    print('Preparing the physical-bath Hamiltonians')
    qes = QES_1D(para['d'], para['chi'], para['d'] * para['d'], para['l_phys'],
                 para['tau'])
    if bath.dmrg_type == 'mpo':
        qes.obtain_physical_gate_tensors(hamilt)
        qes.obtain_bath_h(bath.env, 'both')
    else:
        qes.obtain_bath_h_by_effective_ops_1d(bath.bath_op_onsite,
                                              bath.effective_ops,
                                              bath.hamilt_index)
    hamilts = [hamilt] + qes.hamilt_bath
    return hamilts, bath, ob0
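The function above relies on the repository's `save_pr`/`load_pr` helpers to cache the iDMRG bath on disk and recompute it only when no cached file exists. Below is a minimal, self-contained sketch of that load-or-compute-and-save pattern using the standard `pickle` module; the `cached_compute` name and the `compute` callable are illustrative, not part of the original code.

import os
import pickle


def cached_compute(path, name, compute):
    """Load a cached result if present; otherwise compute it, save it, and return it."""
    full = os.path.join(path, name)
    if os.path.isfile(full):
        with open(full, 'rb') as f:
            return pickle.load(f)      # cached result found, load directly
    result = compute()                 # e.g. run iDMRG here
    os.makedirs(path, exist_ok=True)
    with open(full, 'wb') as f:
        pickle.dump(result, f)         # cache the result for later runs
    return result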
Example #2
def get_sequence(gtn, num_f, param, order_way):
    if order_way == 'SequencedMeasure':
        order_file = os.path.join(param['data_path'],
                                  'Order_' + param['save_exp'])
        if os.path.isfile(order_file):
            order = load_pr(order_file, 'order')
        else:
            order = gtn.mps.markov_measurement(if_restore=True)[0]
            save_pr(param['data_path'], 'Order_' + param['save_exp'], [order],
                    ['order'])
        order_now = copy.copy(order[:num_f])
    elif order_way == 'MaxSEE':
        ent = gtn.mps.calculate_onsite_reduced_density_matrix()[0]
        order = np.argsort(ent)[::-1]
        order_now = copy.copy(order[:num_f])
    elif order_way == 'Variance':
        tmp = TNmachineLearning.MachineLearningFeatureMap(
            param['d'], param['dataset'])
        tmp.load_data()
        tmp.select_samples([param['class']])
        variance = tmp.variance_pixels()
        order = np.argsort(variance)[::-1]
        order_now = copy.copy(order[:num_f])
    elif order_way == 'RandomMeasure':
        order = np.random.permutation(gtn.length)
        order_now = copy.copy(order[:num_f])
    else:
        order = None
        order_now = None
    return order, order_now
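Several branches of `get_sequence` ('MaxSEE' and 'Variance') reduce to sorting a per-feature score in descending order and keeping the first `num_f` indices. A small NumPy sketch of that step, where `scores` stands in for either the one-site entropies or the pixel variances:

import numpy as np

scores = np.array([0.2, 0.9, 0.1, 0.5])   # per-feature score (entropy or variance)
order = np.argsort(scores)[::-1]          # feature indices, largest score first
num_f = 2
order_now = order[:num_f].copy()          # keep the num_f highest-scoring features
print(order, order_now)                   # [1 3 0 2] [1 3]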
Example #3
def get_marked_imgs(gtn, imgs_test, num_f, sample, param, order_way):
    if order_way == 'SequencedMeasure':
        order_file = os.path.join(param['data_path'], 'Order_' + param['save_exp'])
        if os.path.isfile(order_file):
            order = load_pr(order_file, 'order')
        else:
            order = gtn.mps.markov_measurement(if_restore=True)[0]
            save_pr(param['data_path'], 'Order_' + param['save_exp'], [order], ['order'])
        order_now = copy.copy(order.reshape(-1,)[:num_f])
    elif order_way == 'MaxSEE':
        ent = gtn.mps.calculate_onsite_reduced_density_matrix()[0]
        order = np.argsort(ent)[::-1]
        order_now = copy.copy(order.reshape(-1,)[:num_f])
    elif order_way == 'Variance':
        tmp = TNmachineLearning.MachineLearningFeatureMap(param['d'], param['dataset'])
        tmp.load_data()
        tmp.select_samples([param['class']])
        variance = tmp.variance_pixels()
        order = np.argsort(variance)[::-1]
        order_now = copy.copy(order.reshape(-1,)[:num_f])
    elif order_way == 'RandomMeasure':
        order = np.random.permutation(gtn.length)
        order_now = copy.copy(order.reshape(-1,)[:num_f])
    else:
        order = None
        order_now = None
    img_part = imgs_test.images.copy()[order_now, sample]
    img_new = gtn.generate_features(img_part, pos=order_now, f_max=1, f_min=0,
                                    is_display=False, way=generate_way)
    img_new = img_new.reshape(imgs_test.img_size)
    marked_img = imgs_test.mark_pixels_on_full_image(img_new, order_now)
    return imgs_test.images[:, sample].reshape(imgs_test.img_size), img_new, marked_img, order
def gcmpm_one_class(para=None):
    if para is None:
        para = pm.parameters_gcmpm_one_class()
    para['save_exp'] = save_exp_gcmpm_one_class(para)
    if para['parallel'] is True:
        par_pool = para['n_nodes']
    else:
        par_pool = None
    if para['if_load'] and os.path.isfile(para['save_exp']):
        a = bf.load_pr(os.path.join(para['data_path'], para['save_exp']), 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'], para['dataset'],
                                                 par_pool=par_pool)
    a.images2vecs([para['class']], [100])
    a.initialize_virtual_vecs_train()
    a.update_virtual_vecs_train('all', 'all', 'both')
    a.mps.correct_orthogonal_center(0, normalize=True)
    a.mps.mps[0] /= np.linalg.norm(a.mps.mps[0].reshape(-1, ))
    mps0 = a.mps.mps.copy()
    for t in range(0, para['sweep_time']):
        # from left to right
        if para['if_print_detail']:
            print('At the ' + str(t) + '-th sweep, from left to right')
        for nt in range(0, a.length):
            a.update_tensor_gradient(nt, para['step'])
            if nt != a.length-1:
                a.update_virtual_vecs_train('all', nt, 'left')
        # from right to left
        print('At the ' + str(t) + '-th sweep, from right to left')
        for nt in range(a.length-1, -1, -1):
            a.update_tensor_gradient(nt, para['step'])
            if nt != 0:
                a.update_virtual_vecs_train('all', nt, 'right')
        if t > para['check_time0'] and ((t+1) % para['check_time'] == 0
                                        or t+1 == para['sweep_time']):
            fid = ln_fidelity_per_site(mps0, a.mps.mps)
            if fid < (para['step'] * para['ratio_step_tol']):
                print('After ' + str(t+1) + ' sweeps: fid = %g' % fid)
                para['step'] *= para['step_ratio']
            elif t+1 == para['sweep_time']:
                print('After all ' + str(t+1) + ' sweeps finished, fid = %g. '
                      'Consider increasing the number of sweeps.' % fid)
            else:
                print('After ' + str(t+1) + ' sweeps, fid = %g.' % fid)
                mps0 = a.mps.mps.copy()
            if para['step'] < para['step_min']:
                print('Now step = ' + str(para['step']) + ' is sufficiently small. Break the loop')
                break
            else:
                print('Now step = ' + str(para['step']))
    if para['if_save']:
        save_pr(para['data_path'], para['save_exp'], [a, para], ['a', 'para'])
    return a, para
def gcmpm(para_tot=None):
    print('Preparing parameters')
    if para_tot is None:
        para_tot = pm.parameters_gcmpm()
    n_class = len(para_tot['classes'])
    paras = bf.empty_list(n_class)
    for n in range(0, n_class):
        paras[n] = para_tot.copy()
        paras[n]['class'] = para_tot['classes'][n]
        paras[n]['chi'] = para_tot['chi'][n]
        paras[n]['save_exp'] = save_exp_gcmpm_one_class(paras[n])
    classifiers = bf.empty_list(n_class)
    for n in range(0, n_class):
        data = '../data_tnml/gcmpm/' + paras[n]['save_exp']
        if para_tot['if_load'] and os.path.isfile(data):
            print('The classifier already exists. Load directly')
            classifiers[n] = load_pr(data, 'classifier')
        else:
            print('Training the MPS of ' + str(para_tot['classes'][n]))
            classifiers[n] = gcmpm_one_class(paras[n])[0]
            if para_tot['if_save']:
                save_pr('../data_tnml/gcmpm/', paras[n]['save_exp'],
                        [classifiers[n]], ['classifier'])
    # Testing accuracy
    print('Calculating the testing accuracy')
    labels = para_tot['classes']
    b = TNmachineLearning.MachineLearningFeatureMap('MNIST', para_tot['d'],
                                                    file_sample='t10k-images.idx3-ubyte',
                                                    file_label='t10k-labels.idx1-ubyte')
    b.images2vecs(para_tot['classes'], ['all', 'all'])
    fid = np.zeros((n_class, ))
    num_wrong = 0
    for ni in range(0, b.numVecSample):
        for n in range(0, n_class):
            fid[n] = b.fidelity_mps_image(classifiers[n].mps.mps, ni)
        n_max = int(np.argmax(fid))
        if labels[n_max] != b.LabelNow[ni]:
            num_wrong += 1
    accuracy = 1 - num_wrong / b.numVecSample  # fraction of correctly classified samples
    print(accuracy)
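`gcmpm` labels each test sample with the class whose MPS gives the largest fidelity. A toy, self-contained sketch of that argmax-over-models decision rule; the score functions here are plain vector overlaps, standing in for `fidelity_mps_image`:

import numpy as np


def classify_by_score(score_fns, sample):
    """Return the index of the model that assigns the highest score to the sample."""
    scores = np.array([fn(sample) for fn in score_fns])
    return int(np.argmax(scores))


refs = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]        # one reference per class
score_fns = [lambda x, r=r: float(abs(x @ r)) for r in refs]
print(classify_by_score(score_fns, np.array([0.2, 0.9])))  # -> 1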
Example #6
    print('Train the generative TN')
    a, para_gtn = gtn_one_class(para)
    if var_name == 'which_class':
        b.load_data(data_path='..\\..\\..\\MNIST\\' + para['dataset'] + '\\',
                    file_sample='t10k-images.idx3-ubyte',
                    file_label='t10k-labels.idx1-ubyte', is_normalize=True)
        b.select_samples([para['class']])

    if select_way == 'SequencedMeasure':
        print('Calculate the sequence of the measurements')
        order_file = os.path.join(para['data_path'], 'Order_'+para['save_exp'])
        if (not is_order_calculated) and os.path.isfile(order_file):
            order = load_pr(order_file, 'order')
        else:
            order = a.mps.markov_measurement(if_restore=True)[0]
            save_pr(para['data_path'], 'Order_'+para['save_exp'], [order], ['order'])
        order_now = copy.copy(order.reshape(-1, )[:num_features])
        is_order_calculated = True
    elif select_way == 'MaxSEE':
        if not is_order_calculated:
            ent = a.mps.calculate_onsite_reduced_density_matrix()[0]
            order = np.argsort(ent)[::-1]
            is_order_calculated = True
        order_now = copy.copy(order.reshape(-1, )[:num_features])
    elif select_way == 'Variance':
        if not is_order_calculated:
            variance = b.variance_pixels()
            order = np.argsort(variance)[::-1]
            is_order_calculated = True
        order_now = copy.copy(order.reshape(-1, )[:num_features])
    else:
Example #7
    nj = len(j)
    nh = len(h)
    for n1 in range(0, nj):
        for n2 in range(0, nh):
            para['jz'] = j[n1]
            para['hz'] = h[n2]
            para = pm.make_consistent_parameter_dmrg(para)
            # Run DMRG
            if path.isfile(
                    path.join(para['data_path'], para['data_exp'] + '.pr')):
                print('Load existing data ...')
                a = load_pr(
                    path.join(para['data_path'], para['data_exp'] + '.pr'),
                    'a')
            else:
                print('Start DMRG calculation ...')
                ob, a, info, para = dmrg.dmrg_finite_size(para)
                save_pr(para['data_path'], para['data_exp'] + '.pr',
                        (ob, a, info, para), ('ob', 'a', 'info', 'para'))
            print('The entanglement gap is ' +
                  str((a.lm[n_mid][0] - a.lm[n_mid][1]) / a.lm[n_mid][0]))
            if (a.lm[n_mid][0] - a.lm[n_mid][1]) / a.lm[n_mid][0] < tol:
                exp_image = 'Phase1_' + para['data_exp']
            else:
                exp_image = 'Phase2_' + para['data_exp']
            state = a.full_coefficients_mps()
            image = Qubism.state2image(state * 256, para['d'], is_rescale=True)
            # image = Image.fromarray(image.astype(np.uint8))
            image = Qubism.image2rgb(image, if_rescale_1=False)
            image.save(path.join(para['image_path'], exp_image + '.jpg'))
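This example decides the phase label from the relative entanglement gap (lm[0] - lm[1]) / lm[0] of the mid-chain Schmidt spectrum. A minimal sketch of that criterion with a made-up spectrum and tolerance:

import numpy as np

lm_mid = np.array([0.92, 0.35, 0.12, 0.05])   # hypothetical mid-bond Schmidt values
gap = (lm_mid[0] - lm_mid[1]) / lm_mid[0]     # relative entanglement gap
tol = 0.5
exp_image_prefix = 'Phase1_' if gap < tol else 'Phase2_'
print(gap, exp_image_prefix)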
Example #8
def qes_1d_ed(para=None):
    if para is None:
        para = parameter_qes_by_ed()
    print('Starting iDMRG for the entanglement bath')
    bath_data = opath.join(para['bath_path'], para['bath_exp'])
    if para['if_load_bath'] and opath.isfile(bath_data):
        print('Bath data found. Load the bath.')
        bath, ob0, hamilt = load_pr(bath_data, ['A', 'ob0', 'hamilt'])
    else:
        print('Bath data not found. Calculate bath by iDMRG.')
        hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'],
                                        para['jz'], para['hx'] / 2,
                                        para['hz'] / 2)
        bath, ob0 = dmrg_infinite_size(para, hamilt=hamilt)[:2]
        save_pr(para['bath_path'], para['bath_exp'], [bath, ob0, hamilt],
                ['A', 'ob0', 'hamilt'])
    if (bath.is_symme_env is True) and (bath.dmrg_type == 'mpo'):
        bath.env[1] = bath.env[0]

    print('Preparing the physical-bath Hamiltonians')
    qes = QES_1D(para['d'], para['chi'], para['d'] * para['d'], para['l_phys'],
                 para['tau'])
    if bath.dmrg_type == 'mpo':
        qes.obtain_physical_gate_tensors(hamilt)
        qes.obtain_bath_h(bath.env, 'both')
    else:
        qes.obtain_bath_h_by_effective_ops_1d(bath.bath_op_onsite,
                                              bath.effective_ops,
                                              bath.hamilt_index)

    print('Starting ED for the entanglement bath')
    dims = [para['d'] for _ in range(para['l_phys'])]
    dims = [para['chi']] + dims + [para['chi']]
    hamilts = [hamilt] + qes.hamilt_bath
    ob = dict()
    solver = EDbasic(dims)
    heff = LinearOp((solver.dim_tot, solver.dim_tot),
                    lambda x: solver.project_all_hamilt(
                        x, hamilts, para['tau'], para['couplings']))
    ob['e_eig'], solver.v = eigs(heff,
                                 k=1,
                                 which='LM',
                                 v0=solver.v.reshape(-1, ).copy())
    solver.is_vec = True
    ob['e_eig'] = (1 - ob['e_eig']) / para['tau']
    ob['mx'], ob['mz'] = solver.observe_magnetizations(para['phys_sites'])
    ob['eb'] = solver.observe_bond_energies(
        hamilt, para['positions_h2'][1:para['num_h2'] - 1, :])
    ob['lm'] = solver.calculate_entanglement()
    ob['ent'] = entanglement_entropy(ob['lm'])
    ob['e_site'] = sum(ob['eb']) / (para['l_phys'] - 1)
    ob['corr_xx'] = solver.observe_correlations(para['pos4corr'],
                                                para['op'][1])
    ob['corr_zz'] = solver.observe_correlations(para['pos4corr'],
                                                para['op'][3])
    for n in range(para['pos4corr'].shape[0]):
        p1 = para['pos4corr'][n, 0] - 1
        p2 = para['pos4corr'][n, 1] - 1
        ob['corr_xx'][n] -= ob['mx'][p1] * ob['mx'][p2]
        ob['corr_zz'][n] -= ob['mz'][p1] * ob['mz'][p2]
    return bath, solver, ob0, ob
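`qes_1d_ed` never builds the effective Hamiltonian as a dense matrix: it wraps the projection routine in a `LinearOperator` and lets `eigs` find the dominant eigenpair matrix-free. A self-contained sketch of the same pattern, with a random symmetric matrix standing in for `solver.project_all_hamilt`:

import numpy as np
from scipy.sparse.linalg import LinearOperator, eigs

dim = 64
rng = np.random.default_rng(0)
m = rng.standard_normal((dim, dim))
m = m + m.T                                   # symmetric stand-in for the Hamiltonian


def matvec(x):
    return m @ x                              # here: project_all_hamilt(x, hamilts, tau, ...)


heff = LinearOperator((dim, dim), matvec=matvec)
val, vec = eigs(heff, k=1, which='LM')        # dominant eigenpair without forming the matrix
print(val.real)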
Example #9
def quantum_jpeg(para_tot=None):
    if para_tot is None:
        para_tot = parameters_qjpg()
    print('Preparing image')
    exp = expression_save(para_tot)

    exp = os.path.join(para_tot['data_path'], exp)
    if os.path.isfile(exp) and para_tot['if_load']:
        a, generator = load_pr(exp, ['a', 'mps'])
    else:
        a = qjpg(file=para_tot['file'],
                 b_size=para_tot['block_size'],
                 is_grey=True)
        a.cut2blocks()
        a.dct_blocks()
        a.pre_process_data_before_ml(which=2)

        para = parameters_gtn_one_class()
        para['chi'] = para_tot['chi']
        para['dataset'] = 'custom'
        para['if_save'] = False
        para['if_load'] = False
        para['dct'] = False

        generator = None
        if 'real' in para_tot['tasks']:
            print('Train in the real space')
            for n in range(para_tot['reorder_time']):
                generator = gtn_one_class(para=para,
                                          images=a.blocks.squeeze())[0]
                if n != (para_tot['reorder_time'] - 1):
                    order = generator.mps.markov_measurement(
                        if_restore=False)[0]
                    # ent = generator.mps.calculate_single_entropy()[0]
                    # order = np.argsort(ent)[::-1]
                    a.reorder_features(order, which=0)
        if 'freq' in para_tot['tasks']:
            for n in range(para_tot['reorder_time']):
                print('Train in the frequency space: reorder time = ' + str(n))
                generator = gtn_one_class(para=para,
                                          images=a.blocks_dct.squeeze())[0]
                if n != (para_tot['reorder_time'] - 1):
                    order = generator.mps.markov_measurement(
                        if_restore=False)[0]
                    # ent = generator.mps.calculate_single_entropy()[0]
                    # order = np.argsort(ent)[::-1]
                    a.reorder_features(order, which=1)
        save_pr(para_tot['data_path'], exp, [a, generator, para_tot],
                ['a', 'mps', 'para_tot'])
    # a.show_image()
    if 'recover' in para_tot['tasks']:
        blocks = a.encode_with_cutoff_dct(para_tot['pixel_cutoff'])
        image_gtn1 = decode_with_generative_mps(blocks, generator)
        image_cutoff = decode_with_cutoff_dct(blocks)
        blocks_jpg = a.encode_standard_jpeg(para_tot['pixel_cutoff'])
        image_jpg = decode_jpeg(blocks_jpg)
        io.imsave('../data_QJPG/before.jpg', a.image.squeeze())
        io.imsave('../data_QJPG/0cut.jpg', image_cutoff.squeeze())
        io.imsave('../data_QJPG/1JPGway.jpg', image_jpg.squeeze())
        io.imsave('../data_QJPG/2GTNway.jpg', image_gtn1.squeeze())
        p1 = psnr(a.image0, image_jpg)
        p2 = psnr(a.image0, image_cutoff)
        p3 = psnr(a.image0, image_gtn1)
        print('The PSNRs for jpg, cut-off, and gtn are %g, %g, and %g' %
              (p1, p2, p3))
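The comparison at the end of `quantum_jpeg` uses the peak signal-to-noise ratio. A minimal NumPy sketch of PSNR, assuming pixel values scaled to [0, 1]; the original `psnr` helper may differ in its peak convention:

import numpy as np


def psnr(original, reconstructed, peak=1.0):
    """Peak signal-to-noise ratio in dB; `peak` is the maximum possible pixel value."""
    mse = np.mean((np.asarray(original, float) - np.asarray(reconstructed, float)) ** 2)
    return float('inf') if mse == 0 else 10.0 * np.log10(peak ** 2 / mse)


img = 0.5 * np.ones((8, 8))
noisy = img + 0.01 * np.random.default_rng(1).standard_normal(img.shape)
print(psnr(img, noisy))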
Example #10
para_dmps['num_layers'] = 1
para_dmps['chi_overlap'] = 256
para_dmps['theta'] = 0
para_dmps['num_theta'] = 1

for n in range(num_len):
    print('var_para = ' + str(length[n]))
    para_dmrg['l'] = int(length[n])
    para_dmrg = Pm.make_consistent_parameter_dmrg(para_dmrg)

    if path.isfile(path.join(para_dmrg['data_path'], para_dmrg['data_exp'] + '.pr')):
        print('Load existing MPS data ...')
        a, ob = load_pr(path.join(para_dmrg['data_path'], para_dmrg['data_exp'] + '.pr'), ['a', 'ob'])
    else:
        ob, a, info, para_dmrg = dmrg_finite_size(para_dmrg)
        save_pr(para_dmrg['data_path'], para_dmrg['data_exp'] + '.pr', (ob, a, info, para_dmrg),
                ('ob', 'a', 'info', 'para'))
    print('Energy per site = ' + str(ob['e_per_site']))

    a.calculate_entanglement_spectrum()
    a.calculate_entanglement_entropy()
    ent0_mid[n] = a.ent[round(a.length/2)]
    fid_ini[n] = a.fidelity_log_to_product_state()
    # print('Mid entanglement entropy and fid0 = %.12g, %.12g' % (ent0_mid, fid_ini))

    save_path = path.join(para_dmrg['project_path'], 'data_dMPS/')
    save_exp = 'UMPO_layer' + str(para_dmps['num_layers']) + para_dmrg['data_exp'] + '.pr'
    if path.isfile(path.join(save_path, save_exp)):
        print('Load existing MPO data ...')
        mpo, fid_tmp = load_pr(path.join(save_path, save_exp), ['mpo', 'fid'])
    else:
        fid_tmp, _, _, mpo, _ = deep_mps_qubit(a, para_dmps)
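This snippet records the mid-chain entanglement entropy of the DMRG ground state. A short sketch of the von Neumann entropy computed from a Schmidt spectrum, S = -sum_k p_k ln p_k with p_k = lambda_k**2 renormalized to sum to one (the spectrum below is made up):

import numpy as np


def entanglement_entropy(schmidt_values):
    """Von Neumann entropy of a Schmidt spectrum: S = -sum p ln p with p = lambda**2."""
    p = np.asarray(schmidt_values, float) ** 2
    p = p / p.sum()                  # renormalize in case of truncation
    p = p[p > 1e-15]                 # drop numerical zeros before taking the log
    return float(-np.sum(p * np.log(p)))


print(entanglement_entropy([0.9, 0.4, 0.15]))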
Example #11
def gtn_one_class(para=None, images=None, labels=None):
    if para is None:
        para = pm.parameters_gtn_one_class()
    if 'to_black_and_white' not in para:
        para['to_black_and_white'] = False
    para['save_exp'] = save_exp_gtn_one_class(para)
    if para['if_load'] and os.path.isfile(
            os.path.join(para['data_path'], para['save_exp'])):
        a = bf.load_pr(os.path.join(para['data_path'], para['save_exp']), 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'], para['chi'],
                                                 para['dataset'])
        if para['dataset'] == 'custom':
            a.input_data(copy.deepcopy(images), copy.deepcopy(labels))
        else:
            a.load_data()
        if a.is_there_labels:
            a.select_samples([para['class']])
        if para['to_black_and_white']:
            a.to_black_and_white()
        if para['dct'] is True:
            a.dct(shift=para['shift'], factor=para['factor'])
        a.images2vecs(theta_max=para['theta'] * np.pi / 2)
        a.initial_mps(center=0, ini_way='1')
        a.initialize_virtual_vecs_train()
        a.mps.correct_orthogonal_center(0)
        a.update_tensor_gradient(0, para['step'])
        nll0 = a.compute_nll()
        step = copy.deepcopy(para['step'])
        print('Initially, NLL = ' + str(nll0))
        for t in range(0, para['sweep_time']):
            # from left to right
            if para['if_print_detail']:
                print('At the ' + str(t + 1) + '-th sweep, from left to right')
                t0 = time.time()
                tt0 = time.process_time()  # time.clock() was removed in Python 3.8
            for nt in range(0, a.length):
                a.mps.correct_orthogonal_center(nt)
                if nt != 0:
                    a.update_virtual_vecs_train(nt - 1, 'left')
                a.update_tensor_gradient(nt, step)
            # from right to left
            if para['if_print_detail']:
                print('At the ' + str(t + 1) + '-th sweep, from right to left')
            for nt in range(a.length - 1, -1, -1):
                a.mps.correct_orthogonal_center(nt)
                if nt != a.length - 1:
                    a.update_virtual_vecs_train(nt + 1, 'right')
                a.update_tensor_gradient(nt, step)
            if para['if_print_detail']:
                print('Wall time cost for one loop: %s' % (time.time() - t0))
                print('CPU time cost for one loop: %s' % (time.process_time() - tt0))

            if t > (para['check_time0'] - 2) and (
                (t + 1) % para['check_time'] == 0
                    or t + 1 == para['sweep_time']):
                nll = a.compute_nll()
                print('NLL = ' + str(nll))
                # fid = fidelity_per_site(mps0, a.mps.mps)
                fid = abs(nll - nll0) / nll0
                if fid < (step * para['step_ratio']):
                    print('After ' + str(t + 1) + ' sweeps: fid = %g' % fid)
                    step *= para['step_ratio']
                    # mps0 = copy.deepcopy(a.mps.mps)
                    nll0 = nll
                elif t + 1 == para['sweep_time']:
                    print('After all ' + str(t + 1) +
                          ' sweeps finished, fid = %g. '
                          'Consider increasing the number of sweeps.' % fid)
                else:
                    print('After ' + str(t + 1) + ' sweeps, fid = %g.' % fid)
                    # mps0 = copy.deepcopy(a.mps.mps)
                    nll0 = nll
                if step < para['step_min']:
                    print('Now step = ' + str(step) +
                          ' is sufficiently small. Break the loop')
                    break
                else:
                    print('Now step = ' + str(step))
        a.clear_before_save()
        if para['if_save']:
            save_pr(para['data_path'], para['save_exp'], [a, para],
                    ['a', 'para'])
    return a, para
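The convergence logic in `gtn_one_class` shrinks the gradient step whenever the relative change of the NLL falls below a step-dependent threshold, and stops once the step becomes tiny. A stripped-down sketch of that step-decay rule (the function name and default thresholds are illustrative):

def maybe_decay_step(nll, nll0, step, ratio_tol=1.0, step_ratio=0.5, step_min=1e-4):
    """Shrink the step when the relative NLL change stalls; return (new_step, stop_flag)."""
    change = abs(nll - nll0) / max(abs(nll0), 1e-12)
    if change < step * ratio_tol:
        step *= step_ratio           # converged at this step size: decay it
    return step, step < step_min     # stop the sweeps once the step is tiny


step, stop = maybe_decay_step(nll=102.0, nll0=102.5, step=0.1)
print(step, stop)                    # 0.05 False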
Example #12
def labeled_gtn(para):
    if para is None:
        para = pm.parameters_labeled_gtn()
    para['save_exp'] = save_exp_labeled_gtn(para)
    if para['parallel'] is True:
        par_pool = para['n_nodes']
    else:
        par_pool = None

    # Preparing testing dataset
    b = TNmachineLearning.MachineLearningFeatureMap(
        para['d'],
        file_sample='t10k-images.idx3-ubyte',
        file_label='t10k-labels.idx1-ubyte')
    b.load_data()
    b.select_samples(para['classes'])
    b.add_labels_to_images()
    b.images2vecs(para['theta'])

    data_file = os.path.join(para['data_path'], para['save_exp'])
    if para['if_load'] and os.path.isfile(data_file):
        print('Data exist. Load directly.')
        a = bf.load_pr(data_file, 'a')
    else:
        a = TNmachineLearning.MachineLearningMPS(para['d'],
                                                 para['chi'],
                                                 para['dataset'],
                                                 par_pool=par_pool)
        a.load_data()
        a.select_samples(para['classes'])
        a.add_labels_to_images()
        a.images2vecs(para['theta'] * np.pi / 2)
        a.initial_mps()
        a.mps.correct_orthogonal_center(0, normalize=True)
        a.initialize_virtual_vecs_train()
        a.update_virtual_vecs_train_all_tensors('both')
        accuracy0 = 0
        for t in range(0, para['sweep_time']):
            # from left to right
            for nt in range(0, a.length):
                a.update_tensor_gradient(nt, para['step'])
                if nt != a.length - 1:
                    a.update_virtual_vecs_train(nt, 'left')
            # from right to left
            for nt in range(a.length - 1, -1, -1):
                a.update_tensor_gradient(nt, para['step'])
                if nt != 0:
                    a.update_virtual_vecs_train(nt, 'right')
            if t > para['check_time0'] and ((t + 1) % para['check_time'] == 0
                                            or t + 1 == para['sweep_time']):
                b.input_mps(a.mps)
                accuracy = b.calculate_accuracy()
                print('After the ' + str(t) +
                      '-th sweep, the testing accuracy = ' + str(accuracy))
                if abs(accuracy - accuracy0) < (para['step'] *
                                                para['ratio_step_tol']):
                    para['step'] *= para['step_ratio']
                    accuracy0 = accuracy
                    print('Converged. Reduce the gradient step to ' +
                          str(para['step']))
                elif t + 1 == para['sweep_time']:
                    print('After all ' + str(t + 1) +
                          ' sweeps finished, not converged. '
                          'Consider increasing the number of sweeps.')
                else:
                    accuracy0 = accuracy
                if para['step'] < para['step_min']:
                    print('Now step = ' + str(para['step']) +
                          ' is sufficiently small. Break the loop')
                    break
                else:
                    print('Now step = ' + str(para['step']))
        a.clear_before_save()
        if para['if_save']:
            save_pr(para['data_path'], para['save_exp'], [a, para],
                    ['a', 'para'])
    accuracy = b.calculate_accuracy()
    print('The final testing accuracy = ' + str(accuracy))

    return a, para
Example #13
                            para['hz'], para['chi']) + para['bound_cond']
    para['index1'] = np.mat(np.arange(0, para['l']))
    para['index1'] = np.vstack((para['index1'], 6 * np.ones((1, para['l'])))).T.astype(int)
    para['index2'] = hm.interactions_position2full_index_heisenberg_two_body(para['positions_h2'])
    para['coeff1'] = np.ones((para['l'], 1))
    para['coeff2'] = np.ones((para['positions_h2'].shape[0]*3, 1))
    for n in range(0, para['positions_h2'].shape[0]):
        para['coeff2'][n * 3, 0] = para['jxy']
        para['coeff2'][n * 3 + 1, 0] = para['jxy']
        para['coeff2'][n * 3 + 2, 0] = para['jz']
else:
    from library.Parameters import generate_parameters_dmrg
    para = generate_parameters_dmrg()

data_full_name = para['data_path'] + para['data_exp'] + '.pr'
save_pr('.\\para_dmrg\\', '_para.pr', (para,), ('para',))
print('The parameters have been saved as ' + colored('.\\para_dmrg\\_para.pr', 'green'))

print_sep('Start DMRG simulation')
if is_load_data and o_path.isfile(data_full_name) and (para['lattice'] in ('chain', 'square')):
    print('The data exists in ' + para['data_path'].rstrip("\\") + '. Load directly...')
    ob, A = load_pr(data_full_name, ('ob', 'A'))
else:
    ob, A, info, para = dmrg_finite_size(para)
    save_pr(para['data_path'], para['data_exp'] + '.pr', (ob, A, info, para), ('ob', 'A', 'info', 'para'))
    print('The data have been saved in ' + colored(para['data_path'].rstrip("\\"), 'green'))

print_sep('DMRG simulation finished')
if is_from_input:
    end_plot = False
    while not end_plot: