Example #1

##############################################################################
# Optimization
# ~~~~~~~~~~~~
#
# Let’s now load and preprocess some data.
#
# .. note::
#
#     The parity dataset can be downloaded
#     :html:`<a href="https://raw.githubusercontent.com/XanaduAI/qml/master/demonstrations/variational_classifier/data/parity.txt"
#     download="parity.txt" target="_blank">here</a>` and
#     should be placed in the subfolder ``variational_classifier/data``.

data = np.loadtxt("variational_classifier/data/parity.txt")
X = data[:, :-1]
Y = data[:, -1]
Y = Y * 2 - np.ones(len(Y))  # shift label from {0, 1} to {-1, 1}

for i in range(5):
    print("X = {}, Y = {: d}".format(X[i], int(Y[i])))

print("...")

##############################################################################
# We initialize the variables randomly (but fix a seed for
# reproducibility). The first variable in the list is used as a bias,
# while the rest is fed into the gates of the variational circuit.

np.random.seed(0)
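
##############################################################################
# The excerpt stops at the seed. For completeness, a sketch of how the
# initialization plausibly continues; the shapes follow the ``var_init``
# shown in Example #11 and are an assumption here, not part of this excerpt.

num_qubits = 4
num_layers = 2
var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)  # gate weights and a scalar bias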
Example #2

# This is a sample of four images:
#
# .. figure:: ../implementations/embedding_metric_learning/data_example.png
#    :align: center
#    :width: 50%
#
# For convenience, instead of coding up the classical neural network, we
# load `pre-extracted feature vectors of the images
# <https://github.com/XanaduAI/qml/blob/master/implementations/embedding_metric_learning/X_antbees.txt>`_.
# These were created by
# resizing, cropping and normalizing the images, and passing them through
# PyTorch's pretrained ResNet, keeping the 512-dimensional activations
# before the final linear layer.
#

X = np.loadtxt("embedding_metric_learning/X_antbees.txt", ndmin=2)  #1  pre-extracted inputs
Y = np.loadtxt("embedding_metric_learning/Y_antbees.txt")  # labels
X_val = np.loadtxt(
    "embedding_metric_learning/X_antbees_test.txt", ndmin=2
)  # pre-extracted validation inputs
Y_val = np.loadtxt("embedding_metric_learning/Y_antbees_test.txt")  # validation labels

# split data into two classes
A = X[Y == -1]
B = X[Y == 1]
A_val = X_val[Y_val == -1]
B_val = X_val[Y_val == 1]

print(A.shape)
print(B.shape)
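
##############################################################################
# For reference, a minimal sketch of how 512-dimensional features of this
# kind can be produced with torchvision. This is an illustrative addition:
# the excerpt does not specify the exact network or preprocessing, and
# ``resnet18`` (whose penultimate layer has 512 units) is an assumption.

import torch
import torchvision.models as models

resnet = models.resnet18(pretrained=True)
resnet.fc = torch.nn.Identity()  # drop the final linear layer
resnet.eval()

with torch.no_grad():
    feats = resnet(torch.randn(1, 3, 224, 224))  # stand-in for a preprocessed image batch
print(feats.shape)  # torch.Size([1, 512])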
Example #3

def cost(var, features, labels):
    preds = [quantum_neural_net(var, x=x) for x in features]
    return square_loss(labels, preds)
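

##############################################################################
# ``square_loss`` is not shown in this excerpt; a minimal mean-squared-error
# helper matching its use here (the same helper appears verbatim in a later
# excerpt of this listing) would be:

def square_loss(labels, predictions):
    """Mean squared error between targets and predictions."""
    loss = 0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2
    return loss / len(labels)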


##############################################################################
# Optimization
# ~~~~~~~~~~~~
#
# We load noisy data samples of a sine function from the external file ``sine.txt``
# (:html:`<a href="https://raw.githubusercontent.com/XanaduAI/pennylane/v0.3.0/examples/data/sine.txt"
# download="sine.txt" target="_blank">download the file here</a>`).

data = np.loadtxt("sine.txt")
X = data[:, 0]
Y = data[:, 1]

##############################################################################
# Before training a model, let's examine the data.
#
# *Note: For the next cell to work you need the matplotlib library.*

import matplotlib.pyplot as plt

plt.figure()
plt.scatter(X, Y)
plt.xlabel("x", fontsize=18)
plt.ylabel("f(x)", fontsize=18)
plt.tick_params(axis="both", which="major", labelsize=16)
Example #4

def accuracy(labels, predictions):
    """Share of predictions that match the labels."""
    loss = 0
    for l, p in zip(labels, predictions):
        if abs(l - p) < 1e-5:
            loss = loss + 1
    loss = loss / len(labels)

    return loss

# For learning tasks, the cost depends on the data; here, on the features and
# labels considered in the current iteration of the optimization routine.
def cost(var, X, Y):
    predictions = [variational_classifier(var, x) for x in X]
    return square_loss(Y, predictions)



# Optimization
# Let’s now load and preprocess some data.

data = np.loadtxt("variational_classifer\parity.txt")
X = np.array(data[:, :-1], requires_grad=False)
Y = np.array(data[:, -1], requires_grad=False)
Y = Y * 2 - np.ones(len(Y))  # shift label from {0, 1} to {-1, 1}

for i in range(5):
    print("X = {}, Y = {: d}".format(X[i], int(Y[i])))


print("... now we initialize the variables randomly")


# We initialize the variables randomly (but fix a seed for reproducibility).
# The first variable in the list is used as a bias, while the rest is fed
# into the gates of the variational circuit.
np.random.seed(0)
num_qubits = 4
num_layers = 2
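
# The excerpt stops before the weights are created; the demo continues with a
# pair of gate weights and a scalar bias, as in Example #11:
var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)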
Example #5

def cost(var, features, labels):
    """Cost (error) function to be minimized.

    Args:
        var (array[float]): array of variables
        features (array[float]): 2-d array of input vectors
        labels (array[float]): 1-d array of targets

    Returns:
        float: loss
    """
    # Compute prediction for each input in data batch
    preds = [quantum_neural_net(var, x=x) for x in features]

    return square_loss(labels, preds)


# load function data
data = np.loadtxt("data/sine.txt")
X = data[:, 0]
Y = data[:, 1]

# initialize weights
np.random.seed(0)
num_layers = 4
var_init = 0.05 * np.random.randn(num_layers, 5)

# create optimizer
opt = AdamOptimizer(0.01, beta1=0.9, beta2=0.999)

# train
var = var_init
for it in range(500):
    var = opt.step(lambda v: cost(v, X, Y), var)
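
# After training, the fit can be inspected on a dense grid of inputs
# (an illustrative sketch; the input range and the plotting are assumptions):
x_pred = np.linspace(-1, 1, 50)
predictions = [quantum_neural_net(var, x=x_) for x_ in x_pred]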

Example #6

def cost(var, features, labels):
    preds = [quantum_neural_net(var, x=x) for x in features]
    return square_loss(labels, preds)


##############################################################################
# Optimization
# ~~~~~~~~~~~~
#
# We load noisy data samples of a sine function from the external file ``sine.txt``
# (:html:`<a href="https://raw.githubusercontent.com/XanaduAI/pennylane/v0.3.0/examples/data/sine.txt"
# download="sine.txt" target="_blank">download the file here</a>`).

data = np.loadtxt("/workspace/my-examples-for-quantum-computing/pennylaneai/qml-demos/alldata/sine.txt")
X = data[:, 0]
Y = data[:, 1]

##############################################################################
# Before training a model, let's examine the data.
#
# *Note: For the next cell to work you need the matplotlib library.*

import matplotlib.pyplot as plt

plt.figure()
plt.scatter(X, Y)
plt.xlabel("x", fontsize=18)
plt.ylabel("f(x)", fontsize=18)
plt.tick_params(axis="both", which="major", labelsize=16)
Example #7

def cost(Q_circuit, Q_bias, X, Y):
    predictions = [
        variational_classifier(Q_circuit, Q_bias, x=x).item() for x in X
    ]
    return square_loss(Y, predictions)


def closure():
    opt.zero_grad()
    # loss = cost(Q_circuit = Q_circuit, Q_bias = Q_bias, features = X, labels = Y)
    loss = cost(Q_circuit=Q_circuit, Q_bias=Q_bias, X=X_batch, Y=Y_batch)
    loss.backward()
    print(loss)
    print("++++++")
    return loss
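
# A closure of this form is what PyTorch's closure-based optimizers expect.
# Illustrative usage (the optimizer choice and its arguments are assumptions,
# shown commented out because the batches are only defined further below):
#
#     opt = torch.optim.LBFGS([Q_circuit, Q_bias], lr=0.1)
#     for step in range(10):
#         opt.step(closure)  # re-evaluates the cost and gradients via the closure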


data = np.loadtxt("parity.txt")

X_sample = data[:, :-1]
Y_sample = data[:, -1]
Y_sample = Y_sample * 2 - np.ones(len(Y_sample))

X_sample = torch.from_numpy(X_sample)
Y_sample = torch.from_numpy(Y_sample)
X_sample.requires_grad = True
Y_sample.requires_grad = True

# for i in range(5):
#   print("X = {}, Y = {: d}".format(X[i], int(Y[i])))
# print("...")

num_qubits = 4
Example #8

def square_loss(labels, predictions):
    loss = 0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2

    loss = loss / len(labels)
    return loss


def cost(var, features, labels):
    preds = [quantum_neural_net(var, x=x) for x in features]
    return square_loss(labels, preds)


data = np.loadtxt("covid.csv", skiprows=1, delimiter=",")
X = data[:, 0]
xmax = np.max(X)
X = X / xmax  # rescale inputs to [0, 1]
Y = data[:, 1]
ymax = np.max(Y)
Y = Y / ymax  # rescale targets to [0, 1]

import matplotlib.pyplot as plt

plt.figure()
plt.scatter(X, Y)
plt.xlabel("x", fontsize=18)
plt.ylabel("f(x)", fontsize=18)
plt.tick_params(axis="both", which="major", labelsize=16)
plt.tick_params(axis="both", which="minor", labelsize=16)
Example #9
##############################################################################
# Optimization
# ~~~~~~~~~~~~
#
# Let’s now load and preprocess some data.
#
# .. note::
#
#     The parity dataset can be downloaded
#     :html:`<a href="https://raw.githubusercontent.com/XanaduAI/qml/master/demonstrations/variational_classifier/data/parity.txt"
#     download="parity.txt" target="_blank">here</a>` and
#     should be placed in the subfolder ``variational_classifier/data``.

data = np.loadtxt(
    "/workspace/my-examples-for-quantum-computing/pennylaneai/qml-demos/variational_classifier/data/parity.txt"
)
X = data[:, :-1]
Y = data[:, -1]
Y = Y * 2 - np.ones(len(Y))  # shift label from {0, 1} to {-1, 1}

for i in range(5):
    print("X = {}, Y = {: d}".format(X[i], int(Y[i])))

print("...")

##############################################################################
# We initialize the variables randomly (but fix a seed for
# reproducibility). The first variable in the list is used as a bias,
# while the rest is fed into the gates of the variational circuit.
Example #10
# For learning tasks, the cost depends on the data - here the features and
# labels considered in the iteration of the optimization routine.


def cost(var, X, Y):
    predictions = [variational_classifier(var, x=x) for x in X]
    return square_loss(Y, predictions)


##############################################################################
# Optimization
# ~~~~~~~~~~~~
#
# Let’s now load and preprocess some data.

data = np.loadtxt("/home/vince/labs/quantumComputing/xanadu/parity.txt")
X = data[:, :-1]
Y = data[:, -1]
Y = Y * 2 - np.ones(len(Y))  # shift label from {0, 1} to {-1, 1}

for i in range(5):
    print("X = {}, Y = {: d}".format(X[i], int(Y[i])))

print("...")

##############################################################################
# We initialize the variables randomly (but fix a seed for
# reproducibility). The first variable in the list is used as a bias,
# while the rest is fed into the gates of the variational circuit.

np.random.seed(0)
Example #11
def load():
    data = np.loadtxt("data/parity.txt")
    X = data[:, :-1]
    Y = data[:, -1]
    Y = Y * 2 - np.ones(len(Y))  # shift label from {0, 1} to {-1, 1}
    return (X, Y)
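
# Illustrative usage of the helper above (an added example, not in the excerpt):
X, Y = load()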


def accuracy(labels, predictions):
    """Share of predictions that match the labels."""
    loss = 0
    for l, p in zip(labels, predictions):
        if abs(l - p) < 1e-5:
            loss = loss + 1
    loss = loss / len(labels)

    return loss


def cost(weights, features, labels):
    """Cost (error) function to be minimized."""

    predictions = [variational_classifier(weights, angles=f) for f in features]

    return square_loss(labels, predictions)


# load Iris data and normalise feature vectors
data = np.loadtxt("data/iris_classes1and2_scaled.txt")
X = data[:, 0:2]

# pad the vectors to size 2^2 = 4 with constant values
padding = 0.3 * np.ones((len(X), 1))
X_pad = np.c_[np.c_[X, padding], np.zeros((len(X), 1))]

# normalize each input
normalization = np.sqrt(np.sum(X_pad ** 2, -1))
X_norm = (X_pad.T / normalization).T

# angles for state preparation are the new features
features = np.array([get_angles(x) for x in X_norm])
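
# Sanity check (an illustrative addition): amplitude encoding requires unit
# norm, so every normalized vector should now have squared norm 1.
assert np.allclose(np.sum(X_norm ** 2, axis=-1), 1.0)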

Y = data[:, -1]


def accuracy(labels, predictions):
    """Share of predictions that match the labels."""
    loss = 0
    for l, p in zip(labels, predictions):
        if abs(l - p) < 1e-5:
            loss += 1
    loss = loss / len(labels)

    return loss


def cost(var, X, Y):
    """Cost (error) function to be minimized."""

    predictions = [variational_classifier(var, x=x) for x in X]

    return square_loss(Y, predictions)


# load parity data
data = np.loadtxt("data/parity.txt")
X = data[:, :-1]
Y = data[:, -1]
Y = Y * 2 - np.ones(len(Y))  # shift label from {0, 1} to {-1, 1}

# initialize weight layers
np.random.seed(0)
num_qubits = 4
num_layers = 2
var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)

# create optimizer
opt = NesterovMomentumOptimizer(0.5)
batch_size = 5

# train the variational classifier
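# The excerpt breaks off before the loop itself; a minimal sketch of the
# batched update, following the standard PennyLane demo pattern (the batch
# sampling via np.random.randint and the step count are assumptions):
var = var_init
for it in range(25):
    batch_index = np.random.randint(0, len(X), (batch_size,))
    var = opt.step(lambda v: cost(v, X[batch_index], Y[batch_index]), var)
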
########################
# Settings
########################
# NOTE: for technical reasons, define all kwargs with fixed values (here
# n_layers=4), and make sure these are consistent with the variables below.
def featmap(*args):
    """Wrapper for feature map to define specific keyword arguments."""
    return qaoa(*args, n_layers=4)
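
# An equivalent, arguably more idiomatic way to fix the keyword argument is
# functools.partial (an alternative shown for illustration, not what the
# original uses):
#
#     from functools import partial
#     featmap = partial(qaoa, n_layers=4)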


n_layers = 4  # number of layers for featuremap, if applicable
n_inp = 1  # number of wires that feature map acts on
n_steps = 300  # steps of GD performed
log_step = 5  # how often the test error is calculated
batch_size = 1  # how many pairs are sampled in each training step
X = np.loadtxt("./data/X_1d_sep.txt", ndmin=2)  # load features
Y = np.loadtxt("./data/Y_1d_sep.txt")  # load labels
name_output = "./trained_embeddings/1d_sep-l2-" + str(n_steps) + "s-" + \
              str(n_layers) + "l-" + str(n_inp) + "w"   # name of output file
init_pars = pars_qaoa(
    n_wires=n_inp,
    n_layers=n_layers)  # generate initial parameters with helper function
pennylane_dev = 'default.qubit'
optimizer = qml.RMSPropOptimizer(stepsize=0.01)
plot = True
save_featmap = True
save_plot = True
save_intermediate = True  # whether to save feature map in any log_step of training
#########################

# Use settings to calculate other settings
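
# For instance, the quantum device can be instantiated from the settings
# above (a plausible next step, assumed rather than shown in the excerpt):
dev = qml.device(pennylane_dev, wires=n_inp)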