def save_embedding_vocab(model, embed_path, vocab_path):
    """
    Save the embedding matrix and the vocabulary of a gensim model.

    Args:
        model: gensim model, a Word2Vec or KeyedVectors instance.
        embed_path: path the embedding matrix (2-D array) is written to.
        vocab_path: path the vocabulary (token -> id dict) is written to.
    """
    # Word2Vec keeps its KeyedVectors under .wv; unwrap so both input
    # types are handled uniformly below.
    if isinstance(model, Word2Vec):
        model = model.wv

    # Save the embedding matrix: 2d-array (one row per vocabulary entry).
    with open(embed_path, 'wb') as f:
        save_model(model.vectors, f)

    # Save the vocabulary: token -> row index into the embedding matrix.
    # NOTE(review): model.index2word is the gensim < 4.0 attribute name
    # (renamed index_to_key in gensim 4) — confirm the pinned version.
    vocab = {token: i for i, token in enumerate(model.index2word)}
    with open(vocab_path, 'wb') as f:
        save_model(vocab, f)
Beispiel #2
0
def main():
    """Adversarially learn random pure target states for each system size.

    For every size in cf.system_size, repeat cf.replications times: build a
    random target state by applying Rz*Ry*Rx rotations (angles pi/k with a
    random integer k in [1, 9]) to every qubit of |0...0>, then alternate
    discriminator/generator updates until the fidelity between generated and
    target state reaches 0.99.
    """
    for size in cf.system_size:

        zero_state = get_zero_state(size)

        fidelities = list()
        losses = list()

        for i in range(cf.replications):

            # Random target: per-qubit Rz(pi/a2) * Ry(pi/a1) * Rx(pi/a0)
            # applied to the all-zero state.
            angle = np.random.randint(1, 10, size=[size, 3])
            matrix = Identity(size)
            for j in range(size):
                row_i_mat = np.matmul(Z_Rotation(size, j, np.pi / angle[j][2], False),
                                      np.matmul(Y_Rotation(size, j, np.pi / angle[j][1], False),
                                                X_Rotation(size, j, np.pi / angle[j][0], False)))
                matrix = np.matmul(row_i_mat, matrix)
            real_state = np.matmul(matrix, zero_state)

            # define generator
            gen = Generator(size)
            gen.set_qcircuit(construct_qcircuit(gen.qc,size))

            # define discriminator
            herm = [I, X, Y, Z]

            dis = Discriminator(herm, size)

            f = compute_fidelity(gen,zero_state,real_state)
            # Re-draw the generator angles until the starting fidelity is not
            # vanishingly small (avoids a degenerate starting point).
            # optional term, this is for controlling the initial fidelity is small.
            # while(compute_fidelity(gen,zero_state,real_state)>0.5):
            #     gen.reset_angles()
            while(compute_fidelity(gen,zero_state,real_state)<0.001):
                gen.reset_angles()

            # Keep running full training epochs until the fidelity target is met.
            while(f < 0.99):
                starttime = datetime.now()
                for iter in range(cf.epochs):
                    print("==================================================")
                    print("Epoch {}, Step_size {}".format(iter + 1, cf.eta))

                    if iter % cf.step_size == 0:
                        # Generator gradient descent
                        gen.update_gen(dis,real_state)
                        print("Loss after generator step: {}".format(compute_cost(gen, dis,real_state)))

                    # Discriminator gradient ascent
                    dis.update_dis(gen,real_state)
                    print("Loss after discriminator step: {}".format(compute_cost(gen, dis,real_state)))

                    cost = compute_cost(gen, dis, real_state)
                    fidelity = compute_fidelity(gen, zero_state, real_state)

                    losses.append(cost)
                    fidelities.append(fidelity)

                    print("Fidelity between real and fake state: {}".format(fidelity))
                    print("==================================================")

                    # Log progress every 10 epochs.
                    if iter % 10 == 0:
                        endtime = datetime.now()
                        # BUG FIX: np.float was removed in NumPy 1.24; the
                        # builtin float is the documented replacement.
                        training_duration = (endtime - starttime).seconds / float(3600)
                        param = 'epoches:{:4d} | fidelity:{:8f} | time:{:10s} | duration:{:8f}\n'.format(iter,round(fidelity,6),time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()),round(training_duration,2))
                        train_log(param, './{}qubit_log_pure.txt'.format(cf.system_size))

                    if (cf.decay):
                        # NOTE(review): eta is assigned but never read, so the
                        # decay has no effect here; a similar loop elsewhere in
                        # this file writes cf.eta instead — confirm intent.
                        eta = (cf.initial_eta * (cf.epochs - iter - 1) +
                               (cf.initial_eta) * iter) / cf.epochs

                f = compute_fidelity(gen,zero_state,real_state)

            plt_fidelity_vs_iter(fidelities, losses, cf, indx=i)
            save_model(gen, cf.model_gen_path)
            save_model(dis, cf.model_dis_path)

            # Clear the histories in place before the next replication.
            fidelities[:]=[]
            losses[:]=[]
    print("end")
Beispiel #3
0
def main():
    """Learn an FRQI-encoded 4x4 image state with a quantum GAN.

    The target state encodes `testimg` via FRQI on 6 qubits; a generator
    with an FRQI-encoder circuit is trained adversarially against a
    discriminator until the fidelity reaches 0.99, after which the real and
    generated states are decoded back into images.
    """
    # The image to be generated (4x4 grayscale pixel values).
    testimg = [[255,255,255,255], [0, 0, 0, 0], [255, 255, 255, 255], [0, 0, 0, 0]]

    nqubits = 6
    control = [num for num in range(1,5)]
    target = 0
    anc = [nqubits-1]

    # Fidelity recorded at each training step.
    fidelities = list()

    # Cost function value recorded at each training step.
    losses = list()

    # Target quantum state; see get_real_state4 in ./frqi/frqi.py
    real_state = get_real_state4(QuantumCircuit(nqubits), testimg, control, target, anc).T

    # Generator
    # BUG FIX: `size` was undefined in this function (NameError at runtime);
    # the intended qubit count is nqubits.
    zero_state = get_zero_state(nqubits)
    gen = Generator(nqubits)

    # Install the generator circuit; see circ_frqiEncoder in
    # ./generator/circuit.py and ./generator/gates.py
    gen.set_qcircuit(circ_frqiEncoder(gen.qc, testimg, control, target, anc))

    # Discriminator
    herm = [I, Pauli_X, Pauli_Y, Pauli_Z]

    dis = Discriminator(herm, nqubits)

    f = compute_fidelity(gen,zero_state,real_state)
    # Re-draw the generator angles until the starting fidelity is not
    # vanishingly small (avoids a degenerate starting point).
    # optional term, this is for controlling the initial fidelity is small.
    # while(compute_fidelity(gen,zero_state,real_state)>0.5):
    #     gen.reset_angles()
    while(compute_fidelity(gen,zero_state,real_state)<0.001):
        gen.reset_angles()

    # Training: keep running full epochs until the fidelity target is met.
    while(f < 0.99):
        starttime = datetime.now()
        for iter in range(cf.epochs):
            print("==================================================")
            print("Epoch {}, Step_size {}".format(iter + 1, cf.eta))

            if iter % cf.step_size == 0:
                # Generator gradient descent
                gen.update_gen(dis,real_state)
                print("Loss after generator step: {}".format(compute_cost(gen, dis,real_state)))

            # Discriminator gradient ascent
            dis.update_dis(gen,real_state)
            print("Loss after discriminator step: {}".format(compute_cost(gen, dis,real_state)))

            cost = compute_cost(gen, dis, real_state)
            fidelity = compute_fidelity(gen, zero_state, real_state)

            losses.append(cost)
            fidelities.append(fidelity)

            print("Fidelity between real and fake state: {}".format(fidelity))
            print("==================================================")

            # Log progress every 10 epochs.
            if iter % 10 == 0:
                endtime = datetime.now()
                # BUG FIX: np.float was removed in NumPy 1.24; the builtin
                # float is the documented replacement.
                training_duration = (endtime - starttime).seconds / float(3600)
                param = 'epoches:{:4d} | fidelity:{:8f} | time:{:10s} | duration:{:8f}\n'.format(iter,round(fidelity,6),time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()),round(training_duration,2))
                train_log(param, './{}qubit_log_pure.txt'.format(cf.system_size))

            if (cf.decay):
                # NOTE(review): eta is assigned but never read, so the decay
                # has no effect here — confirm intent.
                eta = (cf.initial_eta * (cf.epochs - iter - 1) +
                       (cf.initial_eta) * iter) / cf.epochs

        f = compute_fidelity(gen,zero_state,real_state)

    plt_fidelity_vs_iter(fidelities, losses, cf, indx=0)
    save_model(gen, cf.model_gen_path)
    save_model(dis, cf.model_dis_path)

    # Clear the histories in place.
    fidelities[:]=[]
    losses[:]=[]

    # Decode both states back to pixel matrices and save the real image.
    real_matrix = frqiDecoder2(real_state)
    fake_matrix = frqiDecoder2(gen.getState())

    real_image = Image.fromarray(real_matrix)
    real_image.save('real_image.png')
Beispiel #4
0
def main():
    """Learn a random target density matrix with a mixed-state quantum GAN.

    The target is built by applying per-qubit rotations followed by six
    two-qubit XX rotations to |0...0> and forming the projector onto the
    result; a mixture generator and discriminator are then trained
    adversarially until the fidelity reaches 0.99.
    """
    # Random single-qubit layer: per-qubit Rz(pi*a2) * Ry(pi*a1) * Rx(pi*a0).
    angle = np.random.randint(1,10,size=[cf.system_size,3])
    matrix = Identity(cf.system_size)
    for j in range(cf.system_size):
        row_i_mat = np.matmul(Z_Rotation(cf.system_size, j, np.pi * angle[j][2], False),
                                  np.matmul(Y_Rotation(cf.system_size, j, np.pi * angle[j][1], False),
                                            X_Rotation(cf.system_size, j, np.pi * angle[j][0], False)))
        matrix = np.matmul(row_i_mat, matrix)

    # Entangling layer: XX rotations on every pair of qubits 0..3 with
    # random parameters.
    param = np.random.rand(6)
    XX1 = XX_Rotation(cf.system_size, 0, 1, param[0], False)
    XX2 = XX_Rotation(cf.system_size, 0, 2, param[1], False)
    XX3 = XX_Rotation(cf.system_size, 0, 3, param[2], False)
    XX4 = XX_Rotation(cf.system_size, 1, 2, param[3], False)
    XX5 = XX_Rotation(cf.system_size, 1, 3, param[4], False)
    XX6 = XX_Rotation(cf.system_size, 2, 3, param[5], False)

    zero_state = get_zero_state(cf.system_size)

    real_state_tmp = np.matmul(XX6 ,np.matmul( XX5 ,np.matmul( XX4 ,np.matmul( XX3 ,np.matmul(XX2 , np.matmul(XX1 ,np.matmul( matrix , zero_state)))))))
    # Density matrix |psi><psi|; .getH() is the conjugate transpose, so
    # real_state_tmp is presumably an np.matrix — confirm upstream types.
    real_state = np.matmul(real_state_tmp , real_state_tmp.getH())

    starttime = datetime.now()

    # define generator
    gen = Gen(cf.system_size, cf.num_to_mix, cf.mu, cf.sigma)
    gen.set_qcircuit(construct_qcircuit(gen.qc_list))

    # define discriminator
    herm = [I, X, Y, Z]
    dis = Dis(herm, cf.system_size, cf.mu,cf.sigma)

    fidelities = list()
    losses = list()

    f = compute_fidelity(gen, zero_state, real_state)

    # Keep running full training epochs until the fidelity target is met.
    while (f < 0.99):

        starttime = datetime.now()
        for iter in range(cf.epochs):
            print("==================================================")
            print("Epoch {}, Step_size {}".format(iter + 1, cf.eta))

            compute_cost(gen, dis, real_state)
            print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
            if iter % cf.step_size == 0:
                # Generator gradient descent
                gen.update_gen(dis,real_state)
                print("Loss after generator step: {}".format(compute_cost(gen, dis,real_state)))

            # Discriminator gradient ascent
            dis.update_dis(gen,real_state)
            print("Loss after discriminator step: {}".format(compute_cost(gen, dis, real_state)))

            cost = compute_cost(gen, dis, real_state)
            fidelity = compute_fidelity(gen, zero_state,real_state)

            losses.append(cost)
            fidelities.append(fidelity)

            print("Fidelity between real and fake state: {}".format(fidelity))
            print("==================================================")

            # Log progress every 10 epochs.
            if iter % 10 == 0:
                endtime = datetime.now()
                # BUG FIX: np.float was removed in NumPy 1.24; the builtin
                # float is the documented replacement.
                training_duration = (endtime - starttime).seconds / float(3600)
                param = 'epoches:{:4d} | fidelity:{:8f} | time:{:10s} | duration:{:8f}\n'.format(iter, round(fidelity, 6),time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()),round(training_duration, 2))
                train_log(param, './{}qubit_log_noise.txt'.format(cf.system_size))

            if (cf.decay):
                # NOTE(review): eta is assigned but never read, so the decay
                # has no effect here — confirm intent.
                eta = (cf.initial_eta * (cf.epochs - iter - 1) +
                       (1e-2 * cf.initial_eta) * iter) / cf.epochs

        f = compute_fidelity(gen, zero_state, real_state)

    plt_fidelity_vs_iter(fidelities, losses, cf)
    save_model(gen, cf.model_gen_path)
    save_model(dis, cf.model_dis_path)

    fidelities[:] = []
    losses[:] = []

    print("end")
    # NOTE(review): everything below references names not defined in this
    # function or visible in this file (probe, callbacks, config, valid_ds,
    # train_X, train_y, train_vocab, valid_X, valid_y, pos_probe_results,
    # utils, accuracy, NeuralNetClassifier, predefined_split) — it appears
    # to be a skorch POS-probe training script fused in from another source.
    # Confirm where these are defined before relying on this section.
    net: NeuralNetClassifier = NeuralNetClassifier(
        probe,
        callbacks=callbacks,
        max_epochs=config.pos_probe_train_epoch,
        batch_size=config.pos_probe_train_batch_size,
        lr=config.pos_probe_train_lr,
        train_split=predefined_split(valid_ds),
        iterator_train__shuffle=True,
        optimizer=torch.optim.Adam,
    )

    net.fit(train_X, train_y)

    path_to_model = f'storage/saved_models/pos_probes/{config.run_label}_{config.feature_model_type}_pos_probe'
    utils.save_model(f'{path_to_model}.pt', net.module)
    utils.save_vocab(f'{path_to_model}_vocab.p', train_vocab)

    print('Done Training Probe')

    # Get accuracy score
    valid_predictions = net.predict(valid_X)
    valid_acc = accuracy(valid_predictions, valid_y)
    print(f'Validation Accuracy: {valid_acc:.4f}')

    # Get validation losses / accuracies from skorch's history
    net_history = net.history
    validation_losses = net_history[:, 'valid_loss']
    validation_accs = net_history[:, 'valid_acc']
    pos_probe_results['validation_losses'] = validation_losses
    pos_probe_results['validation_accs'] = validation_accs
Beispiel #6
0
def main():
    """Learn a target unitary with a quantum GAN via a maximally entangled state.

    A maximally entangled input state is propagated through the ideal target
    unitary (loaded from exp_ideal_{n}_qubit.mat) to form the target state;
    generator and discriminator are trained adversarially until the fidelity
    reaches 0.99.
    """
    input_state = get_maximally_entangled_state(cf.system_size)

    # Target state: (U ⊗ I) applied to the entangled input, where U is the
    # ideal unitary loaded from disk.
    target_unitary = scio.loadmat('./exp_ideal_{}_qubit.mat'.format(
        cf.system_size))['exp_ideal']
    real_state_tmp = np.matmul(
        np.kron(target_unitary, Identity(cf.system_size)), input_state)
    real_state = real_state_tmp

    step_size = 1

    # define generator
    gen = Generator(cf.system_size)
    gen.set_qcircuit(construct_qcircuit(gen.qc, cf.system_size, cf.layer))

    # define discriminator — acts on the doubled register holding the
    # entangled state.
    herm = [I, X, Y, Z]
    dis = Discriminator(herm, cf.system_size * 2)

    f = compute_fidelity(gen, input_state, real_state)

    fidelities = []
    losses = []

    # Keep running full training epochs until the fidelity target is met;
    # histories are reset at the start of each pass.
    while (f < 0.99):
        fidelities[:] = []
        losses[:] = []
        starttime = datetime.now()
        for iter in range(cf.epochs):
            print("==================================================")
            print("Epoch {}, Step_size {}".format(iter + 1, cf.eta))

            if iter % step_size == 0:
                # Generator gradient descent
                gen.update_gen(dis, real_state)
                print("Loss after generator step: {}".format(
                    compute_cost(gen, dis, real_state)))

            # Discriminator gradient ascent
            dis.update_dis(gen, real_state)
            print("Loss after discriminator step: {}".format(
                compute_cost(gen, dis, real_state)))

            cost = compute_cost(gen, dis, real_state)
            fidelity = compute_fidelity(gen, input_state, real_state)
            losses.append(cost)
            fidelities.append(fidelity)

            # Log progress every 10 epochs.
            if iter % 10 == 0:
                endtime = datetime.now()
                # BUG FIX: np.float was removed in NumPy 1.24; the builtin
                # float is the documented replacement.
                training_duration = (endtime -
                                     starttime).seconds / float(3600)
                param = 'epoches:{:4d} | fidelity:{:8f} | time:{:10s} | duration:{:8f}\n'.format(
                    iter, round(fidelity, 6),
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                    round(training_duration, 2))
                train_log(param, './{}qubit_log_hs.txt'.format(cf.system_size))

            print("Fidelity between real and fake state: {}".format(fidelity))
            print("==================================================")

            # Linear step-size decay applied in place on the config.
            if (cf.decay):
                cf.eta = (cf.initial_eta * (cf.epochs - iter - 1) +
                          (1e-2 * cf.initial_eta) * iter) / cf.epochs
        f = compute_fidelity(gen, input_state, real_state)

    plt_fidelity_vs_iter(fidelities, losses, cf)
    save_model(gen, cf.model_gen_path)
    save_model(dis, cf.model_dis_path)

    print("end")