Example #1
    def test_gradient_descent_optimizer_multivar_multidim(self, bunch, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multi-variate functions and with higher dimensional inputs."""

        for gradf, f, name in zip(grad_mvar_mdim_funcs, mvar_mdim_funcs, fnames):
            for jdx in range(len(x_vals[:-3])):
                x_vec = x_vals[jdx:jdx+4]
                x_vec_multidim = np.reshape(x_vec, (2, 2))
                x_new = bunch.sgd_opt.step(f, x_vec_multidim)
                x_correct = x_vec_multidim - gradf(x_vec_multidim) * stepsize
                x_new_flat = x_new.flatten()
                x_correct_flat = x_correct.flatten()
                assert x_new_flat == pytest.approx(x_correct_flat, abs=tol)
Example #2
    def test_gradient_descent_optimizer_multivar_multidim(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multi-variate functions and with higher dimensional inputs."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_mvar_mdim_funcs,
                                  self.mvar_mdim_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-3])):
                    x_vec = x_vals[jdx:jdx + 4]
                    x_vec_multidim = np.reshape(x_vec, (2, 2))
                    x_new = self.sgd_opt.step(f, x_vec_multidim)
                    x_correct = x_vec_multidim - gradf(
                        x_vec_multidim) * stepsize
                    x_new_flat = x_new.flatten()
                    x_correct_flat = x_correct.flatten()
                    self.assertAllAlmostEqual(x_new_flat,
                                              x_correct_flat,
                                              delta=self.tol)
Example #3
    def test_reps_per_factor_not_1(self, mocker):
        """Tests if mitigation proceeds as expected when reps_per_factor is not 1 (default)"""
        scale_factors = [1, 2, -4]
        spy_fold = mocker.spy(self, "folding")
        spy_extrapolate = mocker.spy(self, "extrapolate")
        tapes, fn = mitigate_with_zne(
            tape, scale_factors, self.folding, self.extrapolate, reps_per_factor=2
        )
        random_results = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]

        args = spy_fold.call_args_list
        for i in range(6):
            same_tape(args[i][0][0], tape_base)
        assert [args[i][0][1] for i in range(6)] == [1, 1, 2, 2, -4, -4]

        fn(random_results)

        args = spy_extrapolate.call_args
        assert args[0][0] == scale_factors
        assert np.allclose(args[0][1], np.mean(np.reshape(random_results, (3, 2)), axis=1))
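The final assertion above encodes the expected behaviour: with reps_per_factor=2 the transform produces two folded tapes per scale factor, and the two executions belonging to each factor are averaged before being handed to the extrapolation function. A minimal stand-alone NumPy sketch of that reduction, reusing the placeholder results from the test (nothing here calls PennyLane):

import numpy as np

scale_factors = [1, 2, -4]
raw_results = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]  # two executions per scale factor

# Group the raw results by scale factor and average each pair,
# matching the reshape/mean check in the test.
per_factor = np.mean(np.reshape(raw_results, (len(scale_factors), 2)), axis=1)
print(per_factor)  # [0.15 0.35 0.55]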
Example #4
    def test_gradient_descent_optimizer_multivar_multidim(self, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multivariate functions and with higher dimensional inputs."""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[
                1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x]),
        ]
        grad_mvar_mdim_funcs = [
            lambda x: (np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                 [-np.sin(x[1, 0]), 1.0]]), ),
            lambda x: (np.array([
                [
                    np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                    np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2),
                ],
                [0.0, 0.0],
            ]), ),
            lambda x: (np.array([[2 * x_[0], 0.0] for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_mvar_mdim_funcs, mvar_mdim_funcs):
            for jdx in range(len(x_vals[:-3])):
                x_vec = x_vals[jdx:jdx + 4]
                x_vec_multidim = np.reshape(x_vec, (2, 2))
                x_new = sgd_opt.step(f, x_vec_multidim)
                x_correct = x_vec_multidim - gradf(
                    x_vec_multidim)[0] * stepsize
                x_new_flat = x_new.flatten()
                x_correct_flat = x_correct.flatten()
                assert np.allclose(x_new_flat, x_correct_flat, atol=tol)
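All three test variants above check the same update rule: a single gradient-descent step x_new = x - stepsize * grad f(x), applied elementwise to a (2, 2) input. A minimal self-contained NumPy sketch of the reference value computed in the assertions, using the first test function (no PennyLane optimizer involved):

import numpy as np

stepsize = 0.1
f = lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1]
gradf = lambda x: np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                            [-np.sin(x[1, 0]), 1.0]])

x = np.reshape(np.linspace(-10, 10, 16, endpoint=False)[:4], (2, 2))
x_new = x - stepsize * gradf(x)  # one gradient-descent step, as asserted above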
Example #5
class AutogradBox(qml.math.TensorBox):
    """Implements the :class:`~.TensorBox` API for ``pennylane.numpy`` tensors.

    For more details, please refer to the :class:`~.TensorBox` documentation.
    """

    abs = wrap_output(lambda self: np.abs(self.data))
    angle = wrap_output(lambda self: np.angle(self.data))
    arcsin = wrap_output(lambda self: np.arcsin(self.data))
    cast = wrap_output(lambda self, dtype: np.tensor(self.data, dtype=dtype))
    diag = staticmethod(wrap_output(lambda values, k=0: np.diag(values, k=k)))
    expand_dims = wrap_output(
        lambda self, axis: np.expand_dims(self.data, axis=axis))
    ones_like = wrap_output(lambda self: np.ones_like(self.data))
    reshape = wrap_output(lambda self, shape: np.reshape(self.data, shape))
    sqrt = wrap_output(lambda self: np.sqrt(self.data))
    sum = wrap_output(lambda self, axis=None, keepdims=False: np.sum(
        self.data, axis=axis, keepdims=keepdims))
    T = wrap_output(lambda self: self.data.T)
    squeeze = wrap_output(lambda self: self.data.squeeze())

    @staticmethod
    def astensor(tensor):
        return np.tensor(tensor)

    @staticmethod
    @wrap_output
    def concatenate(values, axis=0):
        return np.concatenate(AutogradBox.unbox_list(values), axis=axis)

    @staticmethod
    @wrap_output
    def dot(x, y):
        x, y = AutogradBox.unbox_list([x, y])

        if x.ndim == 0 and y.ndim == 0:
            return x * y

        if x.ndim == 2 and y.ndim == 2:
            return x @ y

        return np.dot(x, y)

    @property
    def interface(self):
        return "autograd"

    def numpy(self):
        if hasattr(self.data, "_value"):
            # Catches the edge case where the data is an Autograd arraybox,
            # which only occurs during backpropagation.
            return self.data._value

        return self.data.numpy()

    @property
    def requires_grad(self):
        return self.data.requires_grad

    @wrap_output
    def scatter_element_add(self, index, value):
        size = self.data.size
        flat_index = np.ravel_multi_index(index, self.shape)
        t = [0] * size
        t[flat_index] = value
        self.data = self.data + np.array(t).reshape(self.shape)
        return self.data

    @property
    def shape(self):
        return self.data.shape

    @staticmethod
    @wrap_output
    def stack(values, axis=0):
        return np.stack(AutogradBox.unbox_list(values), axis=axis)

    @wrap_output
    def take(self, indices, axis=None):
        indices = self.astensor(indices)

        if axis is None:
            return self.data.flatten()[indices]

        fancy_indices = [slice(None)] * axis + [indices]
        return self.data[tuple(fancy_indices)]

    @staticmethod
    @wrap_output
    def where(condition, x, y):
        return np.where(condition, *AutogradBox.unbox_list([x, y]))
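The dot method above dispatches on the operands' dimensionality: two scalars are simply multiplied, two 2-D arrays use matrix multiplication, and everything else falls back to np.dot. A small stand-alone sketch of the same dispatch rule using vanilla NumPy (the class itself operates on pennylane.numpy tensors):

import numpy as np

def dot(x, y):
    # Mirrors AutogradBox.dot: scalars multiply, 2-D arrays matmul,
    # anything else falls back to np.dot.
    if x.ndim == 0 and y.ndim == 0:
        return x * y
    if x.ndim == 2 and y.ndim == 2:
        return x @ y
    return np.dot(x, y)

dot(np.array(2.0), np.array(3.0))                 # 6.0
dot(np.eye(2), np.ones((2, 2)))                   # 2x2 matrix product
dot(np.array([1.0, 2.0]), np.array([3.0, 4.0]))   # inner product -> 11.0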
Example #6
# make data for decision regions
xx, yy = np.meshgrid(np.linspace(0.0, 1.5, 20), np.linspace(0.0, 1.5, 20))
X_grid = [np.array([x, y]) for x, y in zip(xx.flatten(), yy.flatten())]

# preprocess grid points like data inputs above
padding = 0.3 * np.ones((len(X_grid), 1))
X_grid = np.c_[np.c_[X_grid, padding],
               np.zeros((len(X_grid), 1))]  # pad each input
normalization = np.sqrt(np.sum(X_grid**2, -1))
X_grid = (X_grid.T / normalization).T  # normalize each input
features_grid = np.array([get_angles(x) for x in X_grid
                          ])  # angles for state preparation are new features
predictions_grid = [
    variational_classifier(var, angles=f) for f in features_grid
]
Z = np.reshape(predictions_grid, xx.shape)

# plot decision regions
cnt = plt.contourf(xx,
                   yy,
                   Z,
                   levels=np.arange(-1, 1.1, 0.1),
                   cmap=cm,
                   alpha=0.8,
                   extend="both")
plt.contour(xx,
            yy,
            Z,
            levels=[0.0],
            colors=("black", ),
            linestyles=("--", ),
            linewidths=(0.8, ))
Example #7
                H = tfi_chain(num_qubits, h, g=g)
                grad_vals = []

                cost_fn = qml.ExpvalCost(mod_ansatz, H, dev, optimize=True)
                grad = qml.grad(cost_fn)

                # Use the same random numbers for each set of parameters/ansatz.
                np.random.seed(seed)
                for i in range(num_samples):
                    if init in ["Zero |0...0>", "Zero |+...+>"]:
                        params = np.zeros(num_params)
                    else:
                        params = np.pi * (np.random.rand(num_params) - 1.0)

                    if "HEA" in ansatz_name:
                        params = np.reshape(params, (len(params) // 3, 3))

                    # Convert the values to plain NumPy arrays, not the special pennylane.numpy tensors.

                    gradient = np2.array(grad(params)[0]).flatten()
                    params = np2.array(params).flatten()

                    data_dict = {
                        "num_qubits": num_qubits,
                        "ansatz_name": ansatz_name,
                        "h": h,
                        "init": init,
                        "params": params,
                        "grad": gradient,
                        "sample": i
                    }
Example #8
def visualize_trained(var, X_train, X_val, Y_train, Y_val):
    plt.figure()
    cm = plt.cm.RdBu

    # make data for decision regions
    xx, yy = np.meshgrid(np.linspace(0.0, 1.5, 20), np.linspace(0.0, 1.5, 20))
    X_grid = [np.array([x, y]) for x, y in zip(xx.flatten(), yy.flatten())]

    # preprocess grid points like data inputs above
    padding = 0.3 * np.ones((len(X_grid), 1))
    X_grid = np.c_[np.c_[X_grid, padding],
                   np.zeros((len(X_grid), 1))]  # pad each input
    normalization = np.sqrt(np.sum(X_grid**2, -1))
    X_grid = (X_grid.T / normalization).T  # normalize each input
    features_grid = np.array(
        [get_angles(x)
         for x in X_grid])  # angles for state preparation are new features
    predictions_grid = [
        variational_classifier(var, angles=f) for f in features_grid
    ]
    Z = np.reshape(predictions_grid, xx.shape)

    # plot decision regions
    cnt = plt.contourf(xx,
                       yy,
                       Z,
                       levels=np.arange(-1, 1.1, 0.1),
                       cmap=cm,
                       alpha=.8,
                       extend='both')
    plt.contour(xx,
                yy,
                Z,
                levels=[0.0],
                colors=('black', ),
                linestyles=('--', ),
                linewidths=(0.8, ))
    plt.colorbar(cnt, ticks=[-1, 0, 1])

    # plot data
    plt.scatter(X_train[:, 0][Y_train == 1],
                X_train[:, 1][Y_train == 1],
                c='b',
                marker='o',
                edgecolors='k',
                label="class 1 train")
    plt.scatter(X_val[:, 0][Y_val == 1],
                X_val[:, 1][Y_val == 1],
                c='b',
                marker='^',
                edgecolors='k',
                label="class 1 validation")
    plt.scatter(X_train[:, 0][Y_train == -1],
                X_train[:, 1][Y_train == -1],
                c='r',
                marker='o',
                edgecolors='k',
                label="class -1 train")
    plt.scatter(X_val[:, 0][Y_val == -1],
                X_val[:, 1][Y_val == -1],
                c='r',
                marker='^',
                edgecolors='k',
                label="class -1 validation")

    plt.legend()
    plt.show()