Example #1
    def weighted_random_sampling(qnodes, coeffs, shots, argnums, *args,
                                 **kwargs):
        """Returns an array of length ``shots`` containing single-shot estimates
        of the Hamiltonian gradient. The shots are distributed randomly over
        the terms in the Hamiltonian, as per a multinomial distribution.

        Args:
            qnodes (Sequence[.QNode]): Sequence of QNodes, each one when evaluated
                returning the corresponding expectation value of a term in the Hamiltonian.
            coeffs (Sequence[float]): Sequence of coefficients corresponding to
                each term in the Hamiltonian. Must be the same length as ``qnodes``.
            shots (int): The number of shots used to estimate the Hamiltonian expectation
                value. These shots are distributed over the terms in the Hamiltonian,
                as per a multinomial distribution.
            argnums (Sequence[int]): the QNode argument indices which are trainable
            *args: Arguments to the QNodes
            **kwargs: Keyword arguments to the QNodes

        Returns:
            array[float]: the single-shot gradients of the Hamiltonian expectation value
        """

        # determine the shot probability per term
        prob_shots = np.abs(coeffs) / np.sum(np.abs(coeffs))

        # construct the multinomial distribution, and sample
        # from it to determine how many shots to apply per term
        si = multinomial(n=shots, p=prob_shots)
        shots_per_term = si.rvs()[0]

        grads = []

        for h, c, p, s in zip(qnodes, coeffs, prob_shots, shots_per_term):

            # if the number of shots is 0, do nothing
            if s == 0:
                continue

            # set the QNode device shots
            h.device.shots = [(1, s)]

            jacs = []
            for i in argnums:
                j = qml.jacobian(h, argnum=i)(*args, **kwargs)

                if s == 1:
                    j = np.expand_dims(j, 0)

                # Divide each term by the probability per shot. This is
                # because we are sampling one at a time.
                jacs.append(c * j / p)

            grads.append(jacs)

        return [np.concatenate(i) for i in zip(*grads)]
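As a standalone sketch of the shot-allocation step above (assuming `multinomial` is `scipy.stats.multinomial`, which matches the `.rvs()[0]` usage; the coefficient values are made up):

import numpy as np
from scipy.stats import multinomial

coeffs = np.array([0.4, -0.2, 0.1])   # hypothetical Hamiltonian coefficients
shots = 100

# probability of assigning a shot to each term, proportional to |c_i|
prob_shots = np.abs(coeffs) / np.sum(np.abs(coeffs))

# sample how many of the 100 shots go to each term
shots_per_term = multinomial(n=shots, p=prob_shots).rvs()[0]

print(prob_shots)       # [0.571... 0.285... 0.142...]
print(shots_per_term)   # e.g. [59 27 14]; always sums to 100, varies run to run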
Example #2
 def EnsembleEntropy(self, ProbDist):
     '''
     Compute the ensemble entropy from the given
     probability distributions.
     '''
     ent = 0.0
     # The total entropy is the sum of single-qubit entropies,
     # since the ansatz is a product state
     for dist in ProbDist:
         ent += -1 * np.sum(dist * np.log(dist))
     return ent
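A minimal standalone sketch of the same sum-of-entropies computation (the two single-qubit distributions are made up):

import numpy as np

prob_dist = [np.array([0.5, 0.5]), np.array([0.9, 0.1])]   # hypothetical per-qubit distributions
ent = sum(-np.sum(d * np.log(d)) for d in prob_dist)
print(ent)   # ln(2) + H([0.9, 0.1]) ≈ 0.693 + 0.325 ≈ 1.018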
Example #3
def qft_U_loss(thetas):
    print(thetas[0].val if isinstance(thetas[0], Variable) else thetas[0])
    loss = 0
    for i in range(2**n_qubits):
        input_state_vector = I16[i]
        exp_output_eigenvalues = basisvector2eigenvalues(input_state_vector)
        output_eigenvalues = qft_U_iqft(thetas, input_state=input_state_vector)
        # Loss is the sum of squared differences between the expected and actual PauliZ eigenvalues
        loss += np.sum((np.array(exp_output_eigenvalues) -
                        np.array(output_eigenvalues))**2)
    return loss
Example #4
    def resample(self, spike_times, frequency_sampling):
        frequency_simulation = 1000 / self.simulation["dt"]
        n_increment_per_sample = int(frequency_simulation / frequency_sampling)
        ts_spikes = []

        for neuron in range(self.simulation["brain"].N):
            timeSerie = self.simulation["spike_times"][neuron].toarray()[0]
            timeSerie = timeSerie[0:n_increment_per_sample * int(
                len(timeSerie) / n_increment_per_sample)]  # keep only a whole number of bins
            ts_spikes.append(np.sum(timeSerie.reshape(-1, n_increment_per_sample), axis=1))
        return np.array(ts_spikes)
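A minimal sketch of the rebinning trick used above: truncate to a whole number of bins, then reshape and sum along the new axis (the spike train below is made up):

import numpy as np

time_series = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1])   # 11 simulation steps
n_increment_per_sample = 4

trimmed = time_series[:n_increment_per_sample * (len(time_series) // n_increment_per_sample)]
binned = trimmed.reshape(-1, n_increment_per_sample).sum(axis=1)
print(binned)   # [1 3]: spike counts per 4-step bin; the trailing 3 steps are dropped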
Example #5
    def test_linear(self):
        """Tests gradients with multivariate multidimensional linear func."""
        x_vec = np.random.uniform(-5, 5, size=(2))
        x_vec_multidim = np.expand_dims(x_vec, axis=1)

        gradf = lambda x: np.array([[2 * x_[0]] for x_ in x])
        f = lambda x: np.sum([x_[0]**2 for x_ in x])

        g = qml.grad(f, 0)
        auto_grad = g(x_vec_multidim)
        correct_grad = gradf(x_vec_multidim)
        assert np.allclose(auto_grad, correct_grad)
Example #6
def estimate_shadow_obervable(shadow, observable, k=10):
    """
    Adapted from https://github.com/momohuang/predicting-quantum-properties
    Calculate the estimator E[O] = median(Tr{rho_(k) O}), where the rho_(k) are
    reconstructed from k disjoint groups of snapshots in the shadow. The median of
    means ameliorates the effect of outliers.

    Args:
        shadow (tuple): A shadow tuple obtained from `calculate_classical_shadow`.
        observable (qml.Observable): A single PennyLane observable built from
            single-qubit Pauli operators, e.g. qml.PauliX(0) @ qml.PauliY(1).
        k (int): number of splits in the median of means estimator.

    Returns:
        Scalar corresponding to the estimate of the observable.
    """
    shadow_size, num_qubits = shadow[0].shape

    # convert PennyLane observables to indices
    map_name_to_int = {"PauliX": 0, "PauliY": 1, "PauliZ": 2}
    if isinstance(observable, (qml.PauliX, qml.PauliY, qml.PauliZ)):
        target_obs, target_locs = np.array(
            [map_name_to_int[observable.name]]
        ), np.array([observable.wires[0]])
    else:
        target_obs, target_locs = np.array(
            [map_name_to_int[o.name] for o in observable.obs]
        ), np.array([o.wires[0] for o in observable.obs])

    # classical values
    b_lists, obs_lists = shadow
    means = []

    # loop over the splits of the shadow:
    for i in range(0, shadow_size, shadow_size // k):

        # assign the splits temporarily
        b_lists_k, obs_lists_k = (
            b_lists[i : i + shadow_size // k],
            obs_lists[i : i + shadow_size // k],
        )

        # find the exact matches for the observable of interest at the specified locations
        indices = np.all(obs_lists_k[:, target_locs] == target_obs, axis=1)

        # catch the edge case where there is no match in the chunk
        if sum(indices) > 0:
            # take the product and sum
            product = np.prod(b_lists_k[indices][:, target_locs], axis=1)
            means.append(np.sum(product) / sum(indices))
        else:
            means.append(0)

    return np.median(means)
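The median-of-means idea used above can be illustrated on synthetic data, independently of the shadow format (a sketch with made-up numbers):

import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(loc=1.0, scale=0.1, size=1000)
samples[:5] = 100.0                    # a few gross outliers
k = 10
chunk = len(samples) // k
means = [np.mean(samples[i:i + chunk]) for i in range(0, len(samples), chunk)]
print(np.mean(samples))    # pulled towards the outliers (roughly 1.5)
print(np.median(means))    # close to 1.0: the outliers corrupt only one of the k chunks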
Example #7
 def test_default_value_shrink(self):
     mp = MaskedCircuit.full_circuit(
         parameters=pnp.random.uniform(low=-pnp.pi,
                                       high=pnp.pi,
                                       size=(4, 3, 2)),
         layers=4,
         wires=3,
         default_value=0,
     )
     mp.mask(Axis.LAYERS)[:] = True
     mp.shrink(axis=Axis.LAYERS)
     assert pnp.sum(mp.parameters == 0) == 6
Example #8
 def test_default_value_perturb(self):
     mp = MaskedCircuit.full_circuit(
         parameters=pnp.random.uniform(low=-pnp.pi,
                                       high=pnp.pi,
                                       size=(4, 3, 2)),
         layers=4,
         wires=3,
         default_value=0,
     )
     mp.mask(Axis.PARAMETERS)[:] = True
     mp.perturb(axis=Axis.PARAMETERS, amount=0.5, mode=Mode.INVERT)
     assert pnp.sum(mp.parameters == 0) == round(0.5 * 4 * 3 * 2)
Example #9
 def test_setting(self):
     size = 3
     mp = DropoutMask((size, ))
     assert mp
     assert len(mp.mask) == mp.mask.size
     assert pnp.sum(mp.mask) == 0
     mp[1] = True
     assert mp[1] == True  # noqa: E712
     with pytest.raises(IndexError):
         mp[size] = True
     assert pnp.sum(mp.mask) == 1
     mp.clear()
     assert pnp.sum(mp.mask) == 0
     mp[:] = True
     result = mp[:]
     assert len(result) == size
     assert pnp.all(result)
     assert pnp.sum(mp.mask) == size
     mp.clear()
     with pytest.raises(IndexError):
         mp[1, 2] = True
Example #10
def matrix_norm(mixed_state, pure_state):
    """Computes the entrywise one-norm (sum of absolute values) of the difference
    between the mixed state and the density matrix of the pure state.

    Args:
        - mixed_state (np.tensor): A density matrix
        - pure_state (np.tensor): A pure state

    Returns:
        - (float): The entrywise one-norm of the difference
    """

    return np.sum(np.abs(mixed_state - np.outer(pure_state, np.conj(pure_state))))
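A hypothetical usage sketch, comparing the maximally mixed single-qubit state with |0>:

import numpy as np

mixed_state = np.eye(2) / 2            # maximally mixed density matrix
pure_state = np.array([1.0, 0.0])      # |0>
print(matrix_norm(mixed_state, pure_state))   # |0.5 - 1| + 0 + 0 + |0.5| = 1.0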
Example #11
    def test_partial_subsystem(self, mocker):
        """Test applying a state vector to a subset of wires of the full subsystem"""

        dev = DefaultQubitAutograd(wires=['a', 'b', 'c'])
        state = np.array([1, 0, 1, 0]) / np.sqrt(2.)
        state_wires = qml.wires.Wires(['a', 'c'])

        spy = mocker.spy(dev, "_scatter")
        dev._apply_state_vector(state=state, device_wires=state_wires)
        res = np.sum(dev._state, axis=(1, )).flatten()

        assert np.all(res == state)
        spy.assert_called()
Example #12
    def test_adam_optimizer_multivar(self, tol):
        """Tests that adam optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma, delta = 0.1, 0.5, 0.8
        adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                adam_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = adam_opt.step(f, x_vec)
                adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
                firstmoment = (1 - gamma) * gradf(x_vec)[0]
                secondmoment = (1 - delta) * gradf(x_vec)[0] * gradf(x_vec)[0]
                x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
                    np.sqrt(secondmoment) + 1e-8
                )
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = adam_opt.step(f, x_onestep)
                adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
                firstmoment = gamma * firstmoment + (1 - gamma) * gradf(x_onestep)[0]
                secondmoment = (
                    delta * secondmoment + (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
                )
                x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                    np.sqrt(secondmoment) + 1e-8
                )
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
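For reference, the update rule this test reconstructs step by step is the bias-corrected Adam step with beta_1 = gamma, beta_2 = delta and epsilon = 1e-8 (a reading of the test, not a quote from the optimizer docs):

    m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t
    v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2
    x_{t+1} = x_t - \eta \, \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t} \, \frac{m_t}{\sqrt{v_t} + \epsilon}

with m_0 = v_0 = 0 and all operations applied element-wise.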
Example #13
 def GenHamiltonian(self):
     '''
     Define and diagonalize the chain's Hamiltonian.
     '''
     # Define Pauli operators
     PauliX = np.array([
         [0, 1],
         [1, 0]
     ])
     PauliY = np.array([
         [0, -1j],
         [1j, 0]
     ])
     PauliZ = np.array([
         [1, 0],
         [0, -1]
     ])
     PauliOps = [PauliX, PauliY, PauliZ]
     # Definition of two-qubit (exchange) Hamiltonian; use the built-in sum,
     # since np.sum over a generator is deprecated
     Hij = sum(Jint * np.kron(Pauli, Pauli)
               for Jint, Pauli in zip(self.ExchangeIntegrals, PauliOps))
     # Definition of one-qubit (external field) Hamiltonian
     Hi = sum(hcomp * Pauli
              for hcomp, Pauli in zip(self.ExternalField, PauliOps))
     # Definition of chain Hamiltonian
     Hchain = sum(
         np.kron(np.identity(2**idx),
                 np.kron(Hij, np.identity(2**(self.num_spins-(idx + 2))))) +
         np.kron(np.identity(2**idx),
                 np.kron(Hi, np.identity(2**(self.num_spins-(idx + 1)))))
         for idx in range(self.num_spins-1)
     ) + np.kron(np.identity(2**(self.num_spins - 1)), Hi)
     # Diagonalization of Hamiltonian
     self.HamMatEnergies, self.HamMatEstates = np.linalg.eig(Hchain)
     # Storing system hamiltonian
     self.SysHamiltonian = qml.Hermitian(
         Hchain, wires=range(self.num_spins))
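Read off from the construction above (an interpretation of the code, not stated in the source), the assembled matrix is the open spin chain

    H = \sum_{i=0}^{N-2} \sum_{a \in \{x,y,z\}} J_a \, \sigma_i^a \sigma_{i+1}^a
        + \sum_{i=0}^{N-1} \sum_{a \in \{x,y,z\}} h_a \, \sigma_i^a

where the J_a are self.ExchangeIntegrals, the h_a are self.ExternalField, and N = self.num_spins.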
Example #14
def mse(labels, predictions):
    """

    Args:
      labels:
      predictions:

    Returns:

    """
    # print(labels.shape, predictions.shape)
    loss = 0
    for l, p in zip(labels, predictions):
        loss += np.sum((l - p)**2)
    return loss / labels.shape[0]
Example #15
 def test_copy(self):
     mp = create_freezable_circuit(3)
     mp.perturb(amount=5, mode=Mode.SET)
     mp.perturb(amount=2, axis=Axis.LAYERS, mode=Mode.SET, mask=FreezeMask)
     mp_copy = mp.copy()
     assert pnp.array_equal(mp.full_mask(FreezeMask),
                            mp_copy.full_mask(FreezeMask))
     mp.perturb(amount=5, mode=Mode.RESET)
     mp.perturb(amount=2,
                axis=Axis.LAYERS,
                mode=Mode.RESET,
                mask=FreezeMask)
     assert pnp.sum(mp.full_mask(FreezeMask)) == 0
     assert not pnp.array_equal(mp.full_mask(FreezeMask),
                                mp_copy.full_mask(FreezeMask))
Example #16
    def check_learning_rate(self, coeffs):
        r"""Verifies that the learning rate is less than 2 over the Lipschitz constant,
        where the Lipschitz constant is given by :math:`\sum |c_i|` for Hamiltonian
        coefficients :math:`c_i`.

        Args:
            coeffs (Sequence[float]): the coefficients of the terms in the Hamiltonian

        Raises:
            ValueError: if the learning rate is larger than :math:`2/\sum |c_i|`
        """
        self.lipschitz = np.sum(np.abs(coeffs))

        if self.stepsize > 2 / self.lipschitz:
            raise ValueError(f"The learning rate must be less than {2 / self.lipschitz}")
Example #17
    def test_multidimensional_indexing_along_axis_autograd(self):
        """Test that indexing with a sequence properly extracts
        the elements from the specified tensor axis"""
        t = np.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]])
        indices = np.array([[0, 0], [1, 0]])

        def cost_fn(t):
            return fn.sum(fn.take(t, indices, axis=1))

        res = cost_fn(t)
        expected = np.sum(
            np.array([[[[1, 2], [1, 2]], [[3, 4], [1, 2]]], [[[5, 6], [5, 6]], [[0, -1], [5, 6]]]])
        )
        assert fn.allclose(res, expected)

        grad = qml.grad(cost_fn)(t)
        expected = np.array([[[3, 3], [1, 1], [0, 0]], [[3, 3], [1, 1], [0, 0]]])
        assert fn.allclose(grad, expected)
Example #18
    def test_adagrad_optimizer_multivar(self, tol):
        """Tests that adagrad optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize = 0.1
        adag_opt = AdagradOptimizer(stepsize)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                adag_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = adag_opt.step(f, x_vec)
                past_grads = gradf(x_vec)[0] * gradf(x_vec)[0]
                adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                x_onestep_target = x_vec - gradf(x_vec)[0] * adapt_stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = adag_opt.step(f, x_onestep)
                past_grads = (
                    gradf(x_vec)[0] * gradf(x_vec)[0] + gradf(x_onestep)[0] * gradf(x_onestep)[0]
                )
                adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
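For reference, the update this test reconstructs is the Adagrad step with epsilon = 1e-8 (a reading of the test), applied element-wise:

    x_{t+1} = x_t - \frac{\eta}{\sqrt{\sum_{s \le t} g_s^2 + \epsilon}} \, g_t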
Example #19
    def encode_data(self, features):
        """Encodes data according to encoding method."""

        wires = range(self.num_q)

        # amplitude encoding mode
        if self.encoding == "amplitude":
            qml.templates.embeddings.AmplitudeEmbedding(features,
                                                        wires=wires,
                                                        normalize=True)
        # angle encoding mode
        elif self.encoding == "angle":
            qml.templates.embeddings.AngleEmbedding(features,
                                                    wires=wires)
        elif self.encoding == "mottonen":
            norm = np.sum(np.abs(features) ** 2)
            features = features / math.sqrt(norm)
            qml.templates.state_preparations.MottonenStatePreparation(
                features, wires=wires)
Example #20
    def cost(self, var, features, labels):
        """Cost to be optimized during training.

        Args:
            var (list): weights of the model
            features (array): observations to be evaluated by the model
            labels (array): labels associated with the features

        Returns:
            float: loss of the model for the given observations
        """
        model_output = \
            [self.neural_network(var, features=x_) for x_ in features]

        # if the interface is autograd, call custom losses
        if self.interface == "autograd":
            if self.type_problem == "regression":
                loss = square_loss(labels, model_output)
            elif self.type_problem == "classification":
                loss = square_loss(labels, model_output)
            elif self.type_problem == "multiclassification":
                model_output = np.array(model_output)
                preds = np.exp(model_output) / \
                        np.sum(np.exp(model_output), axis=1)[:, None]
                loss = cross_entropy(labels, preds)
            elif self.type_problem == "reinforcement_learning":
                loss = np.mean(square_loss(labels, model_output))

        # if the interface is tensorflow, call tensorflow losses
        elif self.interface == "tf":
            if self.type_problem == "regression":
                loss = tf.math.reduce_mean(tf.losses.MSE(labels, model_output))
            elif self.type_problem == "classification":
                loss = tf.math.reduce_mean(tf.losses.MSE(labels, model_output))
            elif self.type_problem == "multiclassification":
                loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
                    labels, model_output),
                                      keepdims=True)
            elif self.type_problem == "reinforcement_learning":
                loss = tf.math.reduce_mean(tf.losses.MSE(labels, model_output))
        return loss
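The multiclassification branch converts the raw model outputs into class probabilities with a row-wise softmax before computing the cross entropy. A minimal standalone sketch of that normalization (made-up outputs):

import numpy as np

model_output = np.array([[2.0, 0.5, -1.0],
                         [0.1, 0.1, 0.1]])
preds = np.exp(model_output) / np.sum(np.exp(model_output), axis=1)[:, None]
print(preds.sum(axis=1))   # [1. 1.]: each row is a valid probability distribution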
Example #21
    def predict(self, features):
        """Predicts certain obervations.

        Args:
            features (array):observations to be predicted

        Returns:
            preds: float or int
                prediction of the model
        """
        model_output = np.array(
            [self.neural_network(self.var, features=x_) for x_ in features])

        if self.type_problem == "classification":
            return np.where(model_output > 0., 1, 0)
        elif self.type_problem == "multiclassification":
            soft_outputs = np.exp(model_output) / \
                    np.sum(np.exp(model_output), axis=1)[:, None]
            return np.argmax(soft_outputs, axis=1)
        return model_output
Example #22
def CFIM(weights, phi, gamma):
    p = experiment(weights, phi, gamma=gamma)
    dp = []

    for idx in range(3):
        # We use the parameter-shift rule explicitly
        # to compute the derivatives
        shift = np.zeros_like(phi)
        shift[idx] = np.pi / 2

        plus = experiment(weights, phi + shift, gamma=gamma)
        minus = experiment(weights, phi - shift, gamma=gamma)

        dp.append(0.5 * (plus - minus))

    matrix = [0] * 9
    for i in range(3):
        for j in range(3):
            matrix[3 * i + j] = np.sum(dp[i] * dp[j] / p)

    return np.array(matrix).reshape((3, 3))
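In terms of the measurement probabilities p_m(phi) returned by `experiment`, the matrix assembled above is the classical Fisher information, with each derivative estimated via the parameter-shift rule (a reading of the code):

    I_{jk}(\phi) = \sum_m \frac{1}{p_m(\phi)} \, \frac{\partial p_m}{\partial \phi_j} \, \frac{\partial p_m}{\partial \phi_k},
    \qquad
    \frac{\partial p_m}{\partial \phi_j} = \frac{1}{2} \Big( p_m(\phi + \tfrac{\pi}{2} e_j) - p_m(\phi - \tfrac{\pi}{2} e_j) \Big)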
Example #23
    def test_shape(self, wires, cutoff_dim):
        """Test that the probabilities and jacobian are returned with the expected shape"""
        dev = qml.device("strawberryfields.gbs", wires=wires, cutoff_dim=cutoff_dim)
        a = np.ones((wires, wires), requires_grad=False)
        params = np.ones(wires)

        @qnode_decorator(dev)
        def vgbs(params):
            ParamGraphEmbed(params, a, 1, wires=range(wires))
            return qml.probs(wires=range(wires))

        d_vgbs = qml.jacobian(vgbs, argnum=0)

        p = vgbs(params)
        dp = d_vgbs(params)

        assert p.shape == (cutoff_dim ** wires,)
        assert dp.shape == (cutoff_dim ** wires, wires)
        assert (p >= 0).all()
        assert (p <= 1).all()
        assert np.sum(p) <= 1
Example #24
    def test_nesterovmomentum_optimizer_multivar(self, tol):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma = 0.1, 0.5
        nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                nesmom_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = nesmom_opt.step(f, x_vec)
                x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = nesmom_opt.step(f, x_onestep)
                momentum_term = gamma * gradf(x_vec)[0]
                shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
                x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
Example #25
    def test_shape_reduced_wires(self, wires, cutoff_dim):
        """Test that the probabilities and jacobian are returned with the expected shape when
        probabilities are measured on a subset of wires"""
        dev = qml.device("strawberryfields.gbs", wires=wires, cutoff_dim=cutoff_dim)
        a = np.ones((wires, wires))
        params = np.ones(wires)

        @qml.qnode(dev)
        def vgbs(params):
            ParamGraphEmbed(params, a, 1, wires=range(wires))
            return qml.probs(wires=[0, 1])

        d_vgbs = qml.jacobian(vgbs, argnum=0)

        p = vgbs(params)
        dp = d_vgbs(params)

        assert p.shape == (cutoff_dim ** 2,)
        assert dp.shape == (cutoff_dim ** 2, wires)
        assert (p >= 0).all()
        assert (p <= 1).all()
        assert np.sum(p) <= 1
Example #26
    def test_learning_error(self):
        """Test that an exception is raised if the learning rate is beyond the
        lipschitz bound"""
        coeffs = [0.3, 0.1]
        H = qml.Hamiltonian(coeffs, [qml.PauliX(0), qml.PauliZ(0)])
        dev = qml.device("default.qubit", wires=1, shots=100)
        expval_cost = qml.ExpvalCost(lambda x, **kwargs: qml.RX(x, wires=0), H, dev)

        opt = qml.ShotAdaptiveOptimizer(min_shots=10, stepsize=100.)

        # lipschitz constant is given by sum(|coeffs|)
        lipschitz = np.sum(np.abs(coeffs))

        assert opt._stepsize > 2 / lipschitz

        with pytest.raises(ValueError, match=f"The learning rate must be less than {2 / lipschitz}"):
            opt.step(expval_cost, 0.5)

        # for a single QNode, the lipschitz constant is simply 1
        opt = qml.ShotAdaptiveOptimizer(min_shots=10, stepsize=100.)
        with pytest.raises(ValueError, match=f"The learning rate must be less than {2 / 1}"):
            opt.step(expval_cost.qnodes[0], 0.5)
Example #27
def load():
    data = np.loadtxt("data/iris_classes1and2_scaled.txt")
    X = data[:, 0:2]
    print("First X sample (original)  :", X[0])

    # pad the vectors to size 2^2 with constant values
    padding = 0.3 * np.ones((len(X), 1))
    X_pad = np.c_[np.c_[X, padding], np.zeros((len(X), 1))]
    print("First X sample (padded)    :", X_pad[0])

    # normalize each input
    normalization = np.sqrt(np.sum(X_pad**2, -1))
    X_norm = (X_pad.T / normalization).T
    print("First X sample (normalized):", X_norm[0])

    # angles for state preparation are new features
    features = np.array([get_angles(x) for x in X_norm])
    print("First features sample      :", features[0])

    Y = data[:, -1]

    return (X, X_norm, features, Y)
Example #28
 def test_apply_mask(self):
     size = 3
     mp = self._create_circuit(size)
     with pytest.raises(ValueError):
         mp.apply_mask(pnp.ones((size, size - 1)))
     mp.mask(Axis.WIRES)[:size - 1] = True
     result = mp.apply_mask(pnp.ones((size, size), dtype=bool))
     assert pnp.sum(~mp.full_mask(DropoutMask)) == size
     assert pnp.sum(result) == size
     mp.mask(Axis.LAYERS)[:size - 1] = True
     result = mp.apply_mask(pnp.ones((size, size), dtype=bool))
     assert pnp.sum(~mp.full_mask(DropoutMask)) == 1
     assert pnp.sum(result) == 1
     mp.mask(Axis.PARAMETERS)[(size - 1, size - 1)] = True
     result = mp.apply_mask(pnp.ones((size, size), dtype=bool))
     assert pnp.sum(~mp.full_mask(DropoutMask)) == 0
     assert pnp.sum(result) == 0
Example #29
    def test_copy(self):
        size = 3
        mp = self._create_circuit(size)
        mp.mask(Axis.LAYERS)[0] = True
        new_mp = mp.copy()

        mp.mask(Axis.WIRES)[0] = True
        mp.mask(Axis.PARAMETERS)[0, 0] = True

        assert pnp.sum(mp.full_mask(DropoutMask)) > pnp.sum(
            new_mp.full_mask(DropoutMask))
        assert pnp.sum(new_mp.mask(Axis.WIRES)) == 0
        assert pnp.sum(new_mp.mask(Axis.LAYERS)) == pnp.sum(
            mp.mask(Axis.LAYERS))
        assert pnp.sum(new_mp.mask(Axis.PARAMETERS)) == 0
        assert Axis.ENTANGLING not in new_mp.masks

        # also test copying of existing entanglement mask
        mp = self._create_circuit_with_entangling_gates(size)
        assert mp.mask(Axis.ENTANGLING) is not None
        new_mp = mp.copy()
        mp.mask(Axis.ENTANGLING)[0, 0] = True
        assert pnp.sum(mp.mask(Axis.ENTANGLING)) == 1
        assert pnp.sum(new_mp.mask(Axis.ENTANGLING)) == 0
Example #30
    def test_second_derivative(self, dev_name, diff_method, mocker, tol):
        """Test second derivative calculation of a scalar valued QNode"""
        if diff_method not in {"parameter-shift", "backprop"}:
            pytest.skip("Test only supports parameter-shift or backprop")

        dev = qml.device(dev_name, wires=1)

        @qnode(dev, diff_method=diff_method, interface="autograd")
        def circuit(x):
            qml.RY(x[0], wires=0)
            qml.RX(x[1], wires=0)
            return qml.expval(qml.PauliZ(0))

        x = np.array([1.0, 2.0], requires_grad=True)
        res = circuit(x)
        g = qml.grad(circuit)(x)

        spy = mocker.spy(JacobianTape, "hessian")
        g2 = qml.grad(lambda x: np.sum(qml.grad(circuit)(x)))(x)

        if diff_method == "parameter-shift":
            spy.assert_called_once()
        elif diff_method == "backprop":
            spy.assert_not_called()

        a, b = x

        expected_res = np.cos(a) * np.cos(b)
        assert np.allclose(res, expected_res, atol=tol, rtol=0)

        expected_g = [-np.sin(a) * np.cos(b), -np.cos(a) * np.sin(b)]
        assert np.allclose(g, expected_g, atol=tol, rtol=0)

        expected_g2 = [
            -np.cos(a) * np.cos(b) + np.sin(a) * np.sin(b),
            np.sin(a) * np.sin(b) - np.cos(a) * np.cos(b),
        ]
        assert np.allclose(g2, expected_g2, atol=tol, rtol=0)