Example 1
    def load_weights(self, weights_file):
        """Loads weights from file.

        Args:
            weights_file (string): file name containing the weights
        """
        # np.load on an .npz archive returns a mapping from array names to arrays
        weights_dict = np.load(weights_file)
        weights_list = []
        for _, value in weights_dict.items():  # .iteritems() is Python 2 only
            weights_list.append(value)
        self.var = weights_list

        if self.interface == "tf":
            if self.architecture == "cv":
                self.var = [tf.Variable(v) for v in self.var]
            else:
                self.var = tf.Variable(self.var)
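Below is a minimal, self-contained sketch of the same save/load round trip; the file name "weights.npz" and the array names are illustrative assumptions, not taken from the snippet above:

import numpy as np

# Save two toy weight arrays, then load them back the way load_weights does
np.savez("weights.npz", layer0=np.zeros((4, 4)), layer1=np.zeros(4))

archive = np.load("weights.npz")           # mapping from names to arrays
weights = [v for _, v in archive.items()]  # same collection logic as above
print([w.shape for w in weights])          # [(4, 4), (4,)]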
Example 2
        q_train_images.append(quanv(img))
    q_train_images = np.asarray(q_train_images)

    q_test_images = []
    print("\nQuantum pre-processing of test images:")
    for idx, img in enumerate(test_images):
        print("{}/{}        ".format(idx + 1, n_test), end="\r")
        q_test_images.append(quanv(img))
    q_test_images = np.asarray(q_test_images)

    # Save pre-processed images
    np.save(SAVE_PATH + "q_train_images.npy", q_train_images)
    np.save(SAVE_PATH + "q_test_images.npy", q_test_images)

# Load pre-processed images
q_train_images = np.load(SAVE_PATH + "q_train_images.npy")
q_test_images = np.load(SAVE_PATH + "q_test_images.npy")

##############################################################################
# Let us visualize the effect of the quantum convolution
# layer on a batch of samples:

n_samples = 4
n_channels = 4
fig, axes = plt.subplots(1 + n_channels, n_samples, figsize=(10, 10))
for k in range(n_samples):
    axes[0, 0].set_ylabel("Input")
    if k != 0:
        axes[0, k].yaxis.set_visible(False)
    axes[0, k].imshow(train_images[k, :, :, 0], cmap="gray")
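    # The excerpt ends here in the source; a plausible completion (an
    # assumption, not shown in the original) fills the remaining figure rows
    # with the quantum output channels, assuming q_train_images has shape
    # (n_samples, H, W, n_channels):
    for c in range(n_channels):
        axes[c + 1, 0].set_ylabel("Output [ch. {}]".format(c))
        if k != 0:
            axes[c + 1, k].yaxis.set_visible(False)
        axes[c + 1, k].imshow(q_train_images[k, :, :, c], cmap="gray")

plt.tight_layout()
plt.show()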
        print("Cost on validation set {:2f}".format(cst))


######################################################################
# Optimizing a hybrid quantum-classical model with 1024 + 12 parameters
# takes an awfully long time. We will
# therefore load a set of `already trained parameters
# <https://github.com/XanaduAI/qml/blob/master/implementations/embedding_metric_learning/pretrained_parameters.npy>`_
# (from running the cell above for 1500 steps).
#
# .. note:: Training is sensitive to the hyperparameters
#     such as the batch size, initial parameters and
#     optimizer used.
#

pretrained_pars = np.load("embedding_metric_learning/pretrained_parameters.npy",
                          allow_pickle=True)

print(pretrained_pars)


######################################################################
# Analysis
# --------
#
# Let us analyze the effect of training. To speed up the script, we will
# only look at a reduced version of the training and validation set,
# selecting the first 10 points from each class.
#

select = 10
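A plausible illustration of that selection, using toy stand-ins for the data arrays (X, Y and their names are assumptions; the original continuation is not shown):

import numpy as np

X = np.random.random((100, 2))      # toy data in place of the demo's embeddings
Y = np.array([1, -1] * 50)          # alternating class labels +1 / -1

A_reduced = X[Y == 1][:select]      # first 10 points of class A
B_reduced = X[Y == -1][:select]     # first 10 points of class B
print(A_reduced.shape, B_reduced.shape)   # (10, 2) (10, 2)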
Example 4
plt.ylabel("Cost function value")
plt.xlabel("Optimization steps")
plt.legend()
plt.show()

##############################################################################
# Or we can visualize the optimization path in the parameter space using a contour plot.
# Energies at different grid points have been pre-computed, and they can be downloaded by
# clicking :download:`here<../demonstrations/vqe_qng/param_landscape.npy>`.

# Discretize the parameter space
theta0 = np.linspace(0.0, 2.0 * np.pi, 100)
theta1 = np.linspace(0.0, 2.0 * np.pi, 100)

# Load energy value at each point in parameter space
parameter_landscape = np.load("vqe_qng/param_landscape.npy")

# Plot energy landscape
fig, axes = plt.subplots(figsize=(6, 6))
# Pass the colormap by name (plt.cm.get_cmap was removed in Matplotlib 3.9)
cmap = "coolwarm"
contour_plot = plt.contourf(theta0, theta1, parameter_landscape, cmap=cmap)
plt.xlabel(r"$\theta_0$")
plt.ylabel(r"$\theta_1$")

# Plot optimization path for gradient descent. Plot every 10th point.
gd_color = "g"
plt.plot(
    np.array(gd_param_history)[::10, 0],
    np.array(gd_param_history)[::10, 1],
    ".",
    color=gd_color,
)
Example 5
import pickle

import numpy as np


def predict(x_new,
            path_to_featmap,
            n_samples=None,
            probs_A=None,
            probs_B=None,
            binary=True,
            implementation=None,
            seed=None):
    """
    Predicts which class the new input is from, using either exact numerical simulation
    or a simulated quantum circuit.

    As a convention, the class labeled by +1 is 'A', the class labeled by -1 is 'B'.

    :param x_new: new input to predict the label for
    :param path_to_featmap: where to load the feature map from
    :param n_samples: how many samples to use; if None, use the full class
        (simulating a perfect measurement)
    :param probs_A: probabilities with which to draw each sample from A. If None, use uniform.
    :param probs_B: probabilities with which to draw each sample from B. If None, use uniform.
    :param binary: if True, return a class label from {-1, 0, 1} (0 on a tie);
        if False, return the continuous score overlap_A - overlap_B
    :param implementation: string that chooses the backend implementation. Can be 'exact',
        'fast' or 'circuit'
    :param seed: if not None, seed for NumPy's random number generator
    :return: predicted class label or score for x_new
    """

    if seed is not None:
        np.random.seed(seed)

    # Load settings from result of featmap learning function
    settings = np.load(path_to_featmap, allow_pickle=True).item()
    featmap = pickle.loads(settings['featmap'])
    pars = settings['pars']
    n_inp = settings['n_wires']
    X = settings['X']
    Y = settings['Y']
    A = X[Y == 1]
    B = X[Y == -1]

    if probs_A is not None and len(probs_A) != len(A):
        raise ValueError(
            "probs_A and A must have the same length, got {} and {}.".
            format(len(probs_A), len(A)))
    if probs_B is not None and len(probs_B) != len(B):
        raise ValueError(
            "probs_B and B must have the same length, got {} and {}.".
            format(len(probs_B), len(B)))

    # Sample subsets from A and B
    if n_samples is None:
        # Consider all samples from A, B
        A_samples = A
        B_samples = B
    else:
        selectA = np.random.choice(range(len(A)),
                                   size=(n_samples, ),
                                   replace=True,
                                   p=probs_A)
        A_samples = A[selectA]
        selectB = np.random.choice(range(len(B)),
                                   size=(n_samples, ),
                                   replace=True,
                                   p=probs_B)
        B_samples = B[selectB]

    if implementation == "exact":
        overlap_A, overlap_B = _exact(x_new=x_new,
                                      A_samples=A_samples,
                                      B_samples=B_samples,
                                      featmap=featmap,
                                      n_inp=n_inp,
                                      pars=pars)
    elif implementation == "circuit":
        overlap_A, overlap_B = _circuit(x_new=x_new,
                                        A_samples=A_samples,
                                        B_samples=B_samples,
                                        featmap=featmap,
                                        pars=pars,
                                        n_inp=n_inp)
    elif implementation == "fast":
        overlap_A, overlap_B = _fast(x_new=x_new,
                                     A_samples=A_samples,
                                     B_samples=B_samples,
                                     featmap=featmap,
                                     pars=pars,
                                     n_inp=n_inp)
    else:
        raise ValueError("Implementation not recognized.")

    if binary:
        if overlap_A > overlap_B:
            return 1
        elif overlap_A < overlap_B:
            return -1
        else:
            return 0
    else:
        return overlap_A - overlap_B
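To make the decision rule above concrete, here is a runnable toy version in which a classical similarity (negative squared distance) stands in for the quantum overlaps; the data and the similarity measure are assumptions, not part of predict():

import numpy as np

rng = np.random.default_rng(42)
A = rng.normal(loc=+1.0, size=(50, 2))   # toy samples for class A (label +1)
B = rng.normal(loc=-1.0, size=(50, 2))   # toy samples for class B (label -1)
x_new = np.array([0.8, 0.9])

# Mean similarity to each class, mimicking overlap_A and overlap_B
overlap_A = np.mean([-np.sum((x_new - a) ** 2) for a in A])
overlap_B = np.mean([-np.sum((x_new - b) ** 2) for b in B])

# Same decision convention as predict() with binary=True
label = 1 if overlap_A > overlap_B else -1 if overlap_A < overlap_B else 0
print(label)   # expected: 1, since x_new lies closer to class A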