Example No. 1
def optimize(feats_train, feats_val, Y_train, Y_val, features, Y):
    num_qubits = 2
    num_layers = 6
    var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)

    opt = NesterovMomentumOptimizer(0.01)
    batch_size = 5

    # train the variational classifier
    var = var_init
    for it in range(60):

        # Update the weights by one optimizer step
        batch_index = np.random.randint(0, len(feats_train), (batch_size, ))
        feats_train_batch = feats_train[batch_index]
        Y_train_batch = Y_train[batch_index]
        var = opt.step(lambda v: cost(v, feats_train_batch, Y_train_batch),
                       var)

        # Compute predictions on train and validation set
        predictions_train = [
            np.sign(variational_classifier(var, angles=f)) for f in feats_train
        ]
        predictions_val = [
            np.sign(variational_classifier(var, angles=f)) for f in feats_val
        ]

        # Compute accuracy on train and validation set
        acc_train = accuracy(Y_train, predictions_train)
        acc_val = accuracy(Y_val, predictions_val)

        print(
            "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
            "".format(it + 1, cost(var, features, Y), acc_train, acc_val))
    return var
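# The snippets in this collection call `variational_classifier`, `cost`,
# `square_loss`, and `accuracy` without defining them. Below is a minimal
# sketch of typical PennyLane definitions; the embedding, layer template, and
# 2-qubit device are assumptions chosen to match the (6, 2, 3) weight shape
# above, not necessarily the original files' exact circuits.
import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def _circuit(weights, angles):
    # Encode the features as rotation angles, then apply the trainable layers.
    qml.AngleEmbedding(angles, wires=range(2))
    qml.StronglyEntanglingLayers(weights, wires=range(2))
    return qml.expval(qml.PauliZ(0))

def variational_classifier(var, angles):
    weights, bias = var
    return _circuit(weights, angles) + bias

def square_loss(labels, predictions):
    # Mean squared error between labels and raw model outputs.
    return np.mean((np.array(labels) - np.array(predictions)) ** 2)

def cost(var, features, labels):
    predictions = [variational_classifier(var, angles=f) for f in features]
    return square_loss(labels, predictions)

def accuracy(labels, predictions):
    # Fraction of predictions that exactly match the labels.
    return np.mean(np.isclose(labels, predictions))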
Example No. 2
def tensorflow_interface(init_node, init_var, n, lr, steps, batch_size, X, X_train, X_val, Y, Y_train, Y_val):
    print('\n\nTensorFlow interface:')

    import tensorflow as tf

    best = [0, 1.0, 0.0, 0.0, 0.0, []]  # [best iter, cost, acc train, acc val, bias, weights]

    node = init_node.to_tf()
    var = tf.Variable(init_var, dtype=tf.float64)
    opt = tf.optimizers.Adam(learning_rate=lr)

    time1 = time.time()
    for it in range(steps):
        # Update the weights by one optimizer step
        batch_index = np.random.randint(0, len(X_train), (batch_size,)) # pylint: disable=no-member
        X_train_batch = X_train[batch_index]
        Y_train_batch = Y_train[batch_index]

        with tf.GradientTape() as tape:
            loss = cost(node, n, var, X_train_batch, Y_train_batch)
        # Compute the gradients after leaving the tape context
        grads = tape.gradient(loss, [var])

        opt.apply_gradients(zip(grads, [var]))

        # Compute predictions on train and validation set
        predictions_train = [np.sign(variational_classifier(node, var, state_vector=f, n=n)) for f in X_train] # pylint: disable=no-member
        predictions_val = [np.sign(variational_classifier(node, var, state_vector=f, n=n)) for f in X_val]     # pylint: disable=no-member
        
        # Compute accuracy on train and validation set
        acc_train = accuracy(Y_train, predictions_train)
        acc_val = accuracy(Y_val, predictions_val)

        # Compute cost on complete dataset
        cost_set = cost(node, n, var, X, Y)

        if cost_set < best[1]:
            best[0] = it + 1
            best[1] = cost_set
            best[2] = acc_train
            best[3] = acc_val
            best[4] = var[-1]
            best[5] = var[:-1]
          
        print(
            "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
            "".format(it + 1, cost_set, acc_train, acc_val)
        )
    time2 = time.time()

    print("Optimized rotation angles: {}".format(var[:-1]))
    print("Optimized bias: {}".format(var[-1]))
    print(f'Run time: {(time2 - time1) * 1000.0:.3f} ms')

    return best
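# The update step above follows the standard TF2 pattern: record the forward
# pass on a GradientTape, then compute and apply the gradients after leaving
# the tape context. A stripped-down, self-contained sketch of just that
# pattern, with a placeholder quadratic loss:
import tensorflow as tf

var = tf.Variable([0.5, -0.3], dtype=tf.float64)
opt = tf.optimizers.Adam(learning_rate=0.01)

for _ in range(100):
    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(var ** 2)  # placeholder loss; minimum at var = 0
    grads = tape.gradient(loss, [var])
    opt.apply_gradients(zip(grads, [var]))

print(var.numpy())  # should approach [0, 0]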
Example No. 3
def train_and_test(X_train, Y_train, X_test, Y_test):
    opt = NesterovMomentumOptimizer(0.01)
    batch_size = 5

    # train the variational classifier
    var = var_init

    test_accuracies = []
    train_accuracies = []
    costs = []
    for it in range(num_iterations):

        # Update the weights by one optimizer step
        batch_index = np.random.randint(0, len(X_train), (batch_size, ))
        X_train_batch = X_train[batch_index]
        Y_train_batch = Y_train[batch_index]
        var = opt.step(lambda v: cost(v, X_train_batch, Y_train_batch), var)

        # Compute predictions on train and test set
        predictions_train = [np.sign(variational_classifier(var, f)) for f in X_train]
        predictions_test = [np.sign(variational_classifier(var, f)) for f in X_test]

        # Compute accuracy on train and test set
        acc_train = accuracy(Y_train, predictions_train)
        acc_test = accuracy(Y_test, predictions_test)

        # Compute cost on all samples (X and Y are assumed to be module-level)
        c = cost(var, X, Y)

        costs.append(c)
        test_accuracies.append(acc_test)
        train_accuracies.append(acc_train)

        print("Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
              "".format(it+1, c, acc_train, acc_test))

    return train_accuracies, test_accuracies, costs, var
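# `train_and_test` returns per-iteration histories, which are convenient to
# inspect visually. A minimal plotting sketch (matplotlib assumed available;
# X_train, Y_train, X_test, Y_test must already be defined by the caller):
import matplotlib.pyplot as plt

train_acc, test_acc, costs, var = train_and_test(X_train, Y_train, X_test, Y_test)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(costs)
ax1.set_xlabel("iteration")
ax1.set_ylabel("cost")
ax2.plot(train_acc, label="train")
ax2.plot(test_acc, label="test")
ax2.set_xlabel("iteration")
ax2.set_ylabel("accuracy")
ax2.legend()
plt.show()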
Example No. 4
def optimize(var, opt, batch_size, X, Y):
    for it in range(25):

        # Update the weights by one optimizer step
        batch_index = np.random.randint(0, len(X), (batch_size, ))
        X_batch = X[batch_index]
        Y_batch = Y[batch_index]
        var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)

        # Compute accuracy
        predictions = [np.sign(variational_classifier(var, x=x)) for x in X]
        acc = accuracy(Y, predictions)

        print("Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} ".format(
            it + 1, cost(var, X, Y), acc))
Example No. 5
data = np.loadtxt("data/parity.txt")
X = data[:, :-1]
Y = data[:, -1]
Y = Y * 2 - np.ones(len(Y))  # shift labels from {0, 1} to {-1, 1}, e.g. [0, 1, 1] -> [-1, 1, 1]

# initialize weight layers
np.random.seed(0)
num_qubits = 4
num_layers = 2
var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)

# create optimizer
opt = NesterovMomentumOptimizer(0.5)
batch_size = 5

# train the variational classifier
var = var_init
for it in range(25):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X), (batch_size, ))
    X_batch = X[batch_index]
    Y_batch = Y[batch_index]
    var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)

    # Compute accuracy
    predictions = [np.sign(variational_classifier(var, x=x)) for x in X]
    acc = accuracy(Y, predictions)

    print("Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} ".format(it + 1, cost(var, X, Y), acc))
Example No. 6
# …and train the optimizer. We track the accuracy, i.e. the share of correctly
# classified data samples. For this we compute the outputs of the
# variational classifier and turn them into predictions in
# :math:`\{-1,1\}` by taking the sign of the output.

var = var_init
for it in range(25):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X), (batch_size, ))
    X_batch = X[batch_index]
    Y_batch = Y[batch_index]
    var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)

    # Compute accuracy
    predictions = [np.sign(variational_classifier(var, x=x)) for x in X]
    acc = accuracy(Y, predictions)

    print("Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} ".format(
        it + 1, cost(var, X, Y), acc))

##############################################################################
# 2. Iris classification
# ----------------------
#
# Quantum and classical nodes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# To encode real-valued vectors into the amplitudes of a quantum state, we
# use a 2-qubit simulator.
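# For reference, a minimal sketch of amplitude encoding with PennyLane's
# built-in AmplitudeEmbedding (2 qubits hold a length-4 feature vector;
# normalize=True takes care of the required unit norm). This is an
# illustration, not necessarily the encoding used in the original tutorial.
import pennylane as qml
from pennylane import numpy as np

dev_enc = qml.device("default.qubit", wires=2)

@qml.qnode(dev_enc)
def encode(features):
    # Load a length-4 real vector into the amplitudes of 2 qubits.
    qml.AmplitudeEmbedding(features, wires=range(2), normalize=True)
    return qml.state()

print(encode(np.array([0.1, 0.2, 0.3, 0.4])))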
Example No. 7
var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)

opt = NesterovMomentumOptimizer(0.01)
batch_size = 5

# train the variational classifier
var = var_init
for it in range(60):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, num_train, (batch_size, ))
    feats_train_batch = feats_train[batch_index]
    Y_train_batch = Y_train[batch_index]
    var = opt.step(lambda v: cost(v, feats_train_batch, Y_train_batch), var)

    # Compute predictions on train and validation set
    predictions_train = [
        np.sign(variational_classifier(var, angles=f)) for f in feats_train
    ]
    predictions_val = [
        np.sign(variational_classifier(var, angles=f)) for f in feats_val
    ]

    # Compute accuracy on train and validation set
    acc_train = accuracy(Y_train, predictions_train)
    acc_val = accuracy(Y_val, predictions_val)

    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost(var, features, Y), acc_train, acc_val))
Example No. 8
var = var_init
for it in range(25):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X), (batch_size,))
    X_batch = X[batch_index]
    Y_batch = Y[batch_index]
    var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)
    
    # Compute accuracy
    predictions = [np.sign(variational_classifier(var, x)) for x in X]
    acc = accuracy(Y, predictions)
    
    print(
        "Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} ".format(
            it + 1, cost(var, X, Y), acc
        )
    )


# 2. Iris classification
Example No. 9
# opt = torch.optim.SGD([Q_circuit, Q_bias], lr = 1e-2)

batch_size = 5
opt = torch.optim.RMSprop([Q_circuit, Q_bias],
                          lr=0.01,
                          alpha=0.99,
                          eps=1e-08,
                          weight_decay=0,
                          momentum=0,
                          centered=False)

for it in range(50):
    # Sample a batch; `closure` reads these variables from the enclosing scope
    batch_index = np.random.randint(0, len(X_sample), (batch_size, ))
    X_batch = X_sample[batch_index]
    Y_batch = Y_sample[batch_index]

    loss = opt.step(closure)  # opt.step(closure) re-evaluates the model and returns the loss
    # Compute accuracy

    predictions = [
        np.sign(variational_classifier(Q_circuit, Q_bias, x=x).item())
        for x in X_sample
    ]
    print("===================")
    print(Q_circuit)
    print(Q_bias)
    print("===================")
    # predictions = [variational_classifier(Q_circuit, x=x).item() for x in X]
    acc = accuracy(Y_sample, predictions)

    # print("Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} ".format(it + 1, cost(Q_circuit, Q_bias, X_sample, Y_sample), acc))
Example No. 10
Y_val = Y[index[num_train:]]

# initialize weight layers
num_qubits = 2
num_layers = 6
var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)

opt = NesterovMomentumOptimizer(0.01)
batch_size = 5

# train the variational classifier
var = var_init
for it in range(60):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, num_train, (batch_size, ))
    feats_train_batch = feats_train[batch_index]
    Y_train_batch = Y_train[batch_index]
    var = opt.step(lambda v: cost(v, feats_train_batch, Y_train_batch), var)

    # Compute predictions on train and validation set
    predictions_train = [np.sign(variational_classifier(var, angles=f)) for f in feats_train]
    predictions_val = [np.sign(variational_classifier(var, angles=f)) for f in feats_val]

    # Compute accuracy on train and validation set
    acc_train = accuracy(Y_train, predictions_train)
    acc_val = accuracy(Y_val, predictions_val)

    print("Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
          "".format(it+1, cost(var, features, Y), acc_train, acc_val))
Example No. 11
def classify_ising_data(ising_configs, labels):
    """Learn the phases of the classical Ising model.

    Args:
        - ising_configs (np.ndarray): 250 rows of binary (0 and 1) Ising model configurations
        - labels (np.ndarray): 250 rows of labels (1 or -1)

    Returns:
        - predictions (list(int)): Your final model predictions

    Feel free to add, within the "# QHACK #" markers, any functions you might
    need besides `cost` and `circuit`.
    """

    # QHACK #

    num_wires = ising_configs.shape[1]
    dev = qml.device("default.qubit", wires=num_wires)

    # Define a variational circuit below with your needed arguments and return something meaningful
    @qml.qnode(dev)
    def circuit(params, ising_config):
        """ Variational quantum circuit """
        # data encoding
        qml.BasisState(ising_config, wires=range(num_wires))

        # variational quantum circuit
        for i in range(len(params)):
            for j in range(num_wires):
                qml.Rot(*params[i][j], wires=j)
            for j in range(num_wires - 1):
                qml.CNOT(wires=(j, j + 1))
            qml.CNOT(wires=(num_wires - 1, 0))

        # Measure each spin individually. (Alternatives tried in the original:
        # the parity of the whole chain as a tensor product of PauliZ
        # operators, or nearest-neighbour PauliZ @ PauliZ correlators.)
        return [qml.expval(qml.PauliZ(i)) for i in range(num_wires)]

    def variational_classifier(params, bias, ising_config):
        """ Decodes the quantum circuit output into a classification """
        magnetisation = np.sum(circuit(params, ising_config))
        return magnetisation + bias

    # Define a cost function below with your needed arguments
    def cost(params, bias, X, Y):

        # QHACK #

        # Insert an expression for your model predictions here
        predictions = [variational_classifier(params, bias, x) for x in X]

        # QHACK #

        return square_loss(Y, predictions)  # DO NOT MODIFY this line

    # optimize your circuit here
    opt = qml.NesterovMomentumOptimizer()
    batch_size = 5  # number of ising_configs to train on in each iter
    num_layers = 3  # number of mixing layers in variational quantum circuit
    params = np.ones([num_layers, num_wires, 3])  # initial guess
    bias = np.array(0.0)  # initial guess
    for _ in range(100):  # iteratively optimise
        batch_index = np.random.randint(0, len(labels), (batch_size, ))
        X = ising_configs[batch_index]
        Y = labels[batch_index]
        params, bias, _, _ = opt.step(cost, params, bias, X, Y)

    # make predictions w/ optimised circuit
    predictions = []
    for ising_config in ising_configs:
        predict = variational_classifier(params, bias, ising_config)
        predictions.append(int(np.sign(predict)))

    # QHACK #

    return predictions
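# A usage sketch with randomly generated data. The real exercise supplies 250
# labelled spin configurations; the shapes, the placeholder labels, and the
# local square_loss definition below are assumptions made so the sketch is
# self-contained.
import pennylane as qml
from pennylane import numpy as np

def square_loss(labels, predictions):
    # Provided by the QHACK template; redefined here so the sketch runs standalone.
    return np.mean((np.array(labels) - np.array(predictions)) ** 2)

if __name__ == "__main__":
    np.random.seed(0)
    configs = np.random.randint(0, 2, size=(250, 4))   # 250 four-spin configurations
    labels = np.where(configs.sum(axis=1) > 2, 1, -1)  # placeholder labels in {-1, 1}
    preds = classify_ising_data(configs, labels)
    print("accuracy:", np.mean(np.array(preds) == labels))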