Example #1
    def test_variance_squeezed_numberstate(self):
        """test correct variance for number state expectation |<n|S(r)>|^2
        on a squeezed state
        """
        self.logTestName()
        dev = qml.device('default.gaussian', wires=1, hbar=hbar)

        n = 1
        r = 0.4523
        dev.apply('SqueezedState', wires=[0], par=[r, 0])
        var = dev.var('NumberState', [0], [np.array([2 * n])])
        mean = np.abs(
            np.sqrt(fac(2 * n)) / (2**n * fac(n)) * (-np.tanh(r))**n /
            np.sqrt(np.cosh(r)))**2
        self.assertAlmostEqual(var, mean * (1 - mean), delta=self.tol)
Example #2
 def assertAllAlmostEqual(self, first, second, delta, msg=None):
     """
     Like assertAlmostEqual, but works with arrays. All the corresponding elements have to be almost equal.
     """
     if isinstance(first, tuple):
         # check each element of the tuple separately (needed for when the tuple elements are themselves batches)
         if np.all([
                 np.all(first[idx] == second[idx])
                 for idx, _ in enumerate(first)
         ]):
             return
         if np.all([
                 np.all(np.abs(first[idx] - second[idx]) <= delta)
                 for idx, _ in enumerate(first)
         ]):
             return
     else:
         if np.all(first == second):
             return
         if np.all(np.abs(first - second) <= delta):
             return
     standardMsg = '{} != {} within {} delta'.format(first, second, delta)
     msg = self._formatMessage(msg, standardMsg)
     raise self.failureException(msg)
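The element-wise comparison at the heart of this helper can be exercised on its own; a minimal sketch with illustrative values:

import numpy as np

first = np.array([0.1, 0.2, 0.3])
second = first + 1e-9
delta = 1e-6
# same condition the helper checks in the non-tuple branch
assert np.all(np.abs(first - second) <= delta)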
Example #3
    def check_learning_rate(self, coeffs):
        r"""Verifies that the learning rate is less than 2 over the Lipschitz constant,
        where the Lipschitz constant is given by :math:`\sum |c_i|` for Hamiltonian
        coefficients :math:`c_i`.

        Args:
            coeffs (Sequence[float]): the coefficients of the terms in the Hamiltonian

        Raises:
            ValueError: if the learning rate is larger than :math:`2/\sum |c_i|`
        """
        self.lipschitz = np.sum(np.abs(coeffs))

        if self._stepsize > 2 / self.lipschitz:
            raise ValueError(
                f"The learning rate must be less than {2 / self.lipschitz}")
Example #4
def optimize_circuit(params):
    """Minimize the variational circuit and return its minimum value.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers. You should create a device and convert the
    variational_circuit function into an executable QNode. Next, you should minimize the variational
    circuit using gradient-based optimization to update the input params. Return the optimized value
    of the QNode as a single floating-point number.

    Args:
        params (np.ndarray): Input parameters to be optimized, of dimension 30

    Returns:
        float: the value of the optimized QNode
    """

    optimal_value = 0.0

    # QHACK #

    # Initialize the device
    dev = qml.device("default.qubit", wires=2)

    # Instantiate the QNode
    circuit = qml.QNode(variational_circuit, dev)

    # Minimize the circuit
    opt = qml.AdagradOptimizer(stepsize=0.4)

    max_iterations = 500
    conv_tol = 1e-6

    for n in range(max_iterations):
        params, prev_optimal_value = opt.step_and_cost(circuit, params)
        optimal_value = circuit(params)
        conv = np.abs(optimal_value - prev_optimal_value)

        # if n % 20 == 0:
        #     print('Iteration = {:},  Energy = {:.8f} Ha'.format(n, optimal_value))

        if conv <= conv_tol:
            break

    # QHACK #

    # Return the value of the minimized QNode
    return optimal_value
Example #5
def flip_matrix(K):
    r"""Remove negative eigenvalues from the given kernel matrix by taking the absolute value.

    This method keeps the eigenvectors of the matrix intact.

    Args:
        K (array[float]): Kernel matrix, assumed to be symmetric.

    Returns:
        array[float]: Kernel matrix with flipped negative eigenvalues.

    Reference:
        This method is introduced in `arXiv:2103.16774 <https://arxiv.org/abs/2103.16774>`_.

    **Example:**

    Consider a symmetric matrix with both positive and negative eigenvalues:

    .. code-block:: pycon

        >>> K = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 2]])
        >>> np.linalg.eigvalsh(K)
        array([-1.,  1.,  2.])

    We then can invert the sign of all negative eigenvalues of the matrix, obtaining
    non-negative eigenvalues only:

    .. code-block:: pycon

        >>> K_flipped = qml.kernels.flip_matrix(K)
        >>> np.linalg.eigvalsh(K_flipped)
        array([1., 1., 2.])

    If the input matrix does not have negative eigenvalues, ``flip_matrix``
    does not have any effect.
    """
    w, v = np.linalg.eigh(K)

    if w[0] < 0:
        # Transform spectrum: absolute value
        w_abs = np.abs(w)

        return (v * w_abs) @ np.transpose(v)

    return K
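The reconstruction (v * w_abs) @ np.transpose(v) is the eigendecomposition V diag(|w|) V^T written with broadcasting; a quick numeric check in plain NumPy, using the matrix from the docstring:

import numpy as np

K = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 2.0]])
w, v = np.linalg.eigh(K)
K_flipped = (v * np.abs(w)) @ v.T  # eigenvalues -1, 1, 2 become 1, 1, 2
print(np.linalg.eigvalsh(K_flipped))  # [1. 1. 2.]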
Example #6
    def test_finite_diff_squeezed(self, tol):
        """Test that the jacobian of the probability for a squeezed states is
        approximated well with finite differences"""
        cutoff = 5

        dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=cutoff)

        @qml.qnode(dev)
        def circuit(r, phi):
            qml.Squeezing(r, phi, wires=0)
            return qml.probs(wires=[0])

        r = 0.4
        phi = -0.12

        n = np.arange(cutoff)

        # construct tape
        circuit.construct([r, phi], {})

        # differentiate with respect to parameter a
        circuit.qtape.trainable_params = {0}
        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        res_F = fn(dev.batch_execute(tapes)).flatten()
        assert res_F.shape == (cutoff,)

        expected_gradient = (
            np.abs(np.tanh(r)) ** n
            * (1 + 2 * n - np.cosh(2 * r))
            * fac(n)
            / (2 ** (n + 1) * np.cosh(r) ** 2 * np.sinh(r) * fac(n / 2) ** 2)
        )
        expected_gradient[n % 2 != 0] = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)

        # re-construct tape to reset trainable_params
        circuit.construct([r, phi], {})

        # differentiate with respect to parameter phi
        circuit.qtape.trainable_params = {1}

        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        res_F = fn(dev.batch_execute(tapes)).flatten()
        expected_gradient = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)
Example #7
def hinge_loss(labels, predictions, type='L2'):
    """

    Args:
      labels:
      predictions:
      type:  (Default value = 'L2')

    Returns:

    """
    loss = 0
    for l, p in zip(labels, predictions):
        if type == 'L1':
            loss = loss + np.abs(l - p)  # L1 loss
        elif type == 'L2':
            loss = loss + (l - p)**2  # L2 loss
    loss = loss / labels.shape[0]
    return loss
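A minimal usage sketch, assuming the function above is in scope (values illustrative):

import numpy as np

labels = np.array([1.0, -1.0, 1.0])
predictions = np.array([0.9, -0.8, 0.7])
print(hinge_loss(labels, predictions, type='L2'))  # (0.01 + 0.04 + 0.09) / 3 ~= 0.0467
print(hinge_loss(labels, predictions, type='L1'))  # (0.1 + 0.2 + 0.3) / 3 = 0.2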
Example #8
    def encode_data(self, features):
        """Encodes data according to encoding method."""

        wires = range(self.num_q)

        # amplitude encoding mode
        if self.encoding == "amplitude":
            qml.templates.embeddings.AmplitudeEmbedding(features,
                                                        wires=wires,
                                                        normalize=True)
        # angle encoding mode
        elif self.encoding == "angle":
            qml.templates.embeddings.AngleEmbedding(features,
                                                    wires=wires)
        elif self.encoding == "mottonen":
            norm = np.sum(np.abs(features) ** 2)
            features = features / math.sqrt(norm)
            qml.templates.state_preparations.MottonenStatePreparation(
                features, wires=wires)
Example #9
    def term(params, sign_term, sign_i, i, sign_j, j):
        qml.BasisState(np.array([0, 0, 0, 0, 0, 0]), wires=range(6))

        variational_circuit_wire_list(params)

        shift = np.zeros_like(params)
        shift[i] = sign_i * one
        shift[j] = sign_j * one
        variational_circuit_wire_list(params + shift, wire_list=[3, 4, 5])

        # return qml.DiagonalQubitUnitary([1] * 6, wires=[0, 1, 2]), qml.DiagonalQubitUnitary([1] * 6, wires=[3, 4, 5])
        # return [
        #     [qml.expval(qml.PauliX(wire)) for wire in [0, 1, 2]],
        #     [qml.expval(qml.PauliX(wire)) for wire in [3, 4, 5]],
        # ]
        return sign_term * np.abs(
            qml.dot(
                [qml.expval(qml.PauliX(wire)) for wire in [0, 1, 2]],
                [qml.expval(qml.PauliX(wire)) for wire in [3, 4, 5]],
            ))**2
Example #10
    def test_second_order_observable(self, diff_method, kwargs, tol):
        """Test variance of a second order CV expectation value"""
        dev = qml.device("default.gaussian", wires=1)

        n = 0.12
        a = 0.765

        @qnode(dev, interface="jax", diff_method=diff_method, **kwargs)
        def circuit(n, a):
            qml.ThermalState(n, wires=0)
            qml.Displacement(a, 0, wires=0)
            return qml.var(qml.NumberOperator(0))

        res = circuit(n, a)
        expected = n ** 2 + n + np.abs(a) ** 2 * (1 + 2 * n)
        assert np.allclose(res, expected, atol=tol, rtol=0)

        # circuit jacobians
        res = jax.grad(circuit, argnums=[0, 1])(n, a)
        expected = np.array([2 * a ** 2 + 2 * n + 1, 2 * a * (2 * n + 1)])
        assert np.allclose(res, expected, atol=tol, rtol=0)
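The expected Jacobian used in this test follows from differentiating Var(n) = n**2 + n + |a|**2 * (1 + 2n) with respect to n and a; a quick finite-difference check in plain NumPy (step size illustrative):

import numpy as np

f = lambda n, a: n ** 2 + n + np.abs(a) ** 2 * (1 + 2 * n)
n, a, eps = 0.12, 0.765, 1e-7
dn = (f(n + eps, a) - f(n - eps, a)) / (2 * eps)
da = (f(n, a + eps) - f(n, a - eps)) / (2 * eps)
# matches the analytic gradients asserted in the test
assert np.allclose([dn, da], [2 * a ** 2 + 2 * n + 1, 2 * a * (2 * n + 1)])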
Example #11
    def test_second_order_cv(self, tol):
        """Test variance of a second order CV expectation value"""
        dev = qml.device("strawberryfields.gaussian", wires=1)

        @qml.qnode(dev)
        def circuit(n, a):
            qml.ThermalState(n, wires=0)
            qml.Displacement(a, 0, wires=0)
            return qml.var(qml.NumberOperator(0))

        n = 0.12
        a = 0.765

        var = circuit(n, a)
        expected = n**2 + n + np.abs(a)**2 * (1 + 2 * n)
        assert np.allclose(var, expected, atol=tol, rtol=0)

        # circuit jacobians
        gradF = circuit.jacobian([n, a], method="F")
        expected = np.array([2 * a**2 + 2 * n + 1, 2 * a * (2 * n + 1)])
        assert np.allclose(gradF, expected, atol=tol, rtol=0)
Example #12
def test_simulator_qvm_default_agree(tol, qvm, compiler):
    """Test that forest.wavefunction, forest.qvm, and default.qubit agree
    on the calculation of quantum gradients."""
    w = 2

    dev1 = qml.device("default.qubit", wires=w)
    dev2 = qml.device("forest.wavefunction", wires=w)
    dev3 = qml.device("forest.qvm", device="9q-square-qvm", shots=5000)

    in_state = np.zeros([w], requires_grad=False)
    in_state[0] = 1
    in_state[1] = 1

    def func(x, y):
        """Reference QNode"""
        qml.BasisState(in_state, wires=list(range(w)))
        qml.RY(x, wires=0)
        qml.RX(y, wires=1)
        qml.CNOT(wires=[0, 1])
        return qml.expval(qml.PauliZ(1))

    func1 = qml.QNode(func, dev1)
    func2 = qml.QNode(func, dev2)
    func3 = qml.QNode(func, dev3)

    params = [
        np.array(0.2, requires_grad=True),
        np.array(0.453, requires_grad=True)
    ]

    # check all evaluate to the same value
    # NOTE: we increase the tolerance when using the QVM
    assert np.all(np.abs(func1(*params) - func2(*params)) <= tol)
    assert np.all(np.abs(func1(*params) - func3(*params)) <= 0.1)
    assert np.all(np.abs(func2(*params) - func3(*params)) <= 0.1)

    df1 = qml.grad(func1)
    df2 = qml.grad(func2)
    df3 = qml.grad(func3)

    # check all gradients evaluate to the same value
    # NOTE: we increase the tolerance when using the QVM
    assert np.all(
        np.abs(np.array(df1(*params)) - np.array(df2(*params))) <= tol)
    assert np.all(
        np.abs(np.array(df1(*params)) - np.array(df3(*params))) <= 0.1)
    assert np.all(
        np.abs(np.array(df2(*params)) - np.array(df3(*params))) <= 0.1)
Example #13
    def test_learning_error(self):
        """Test that an exception is raised if the learning rate is beyond the
        Lipschitz bound"""
        coeffs = [0.3, 0.1]
        H = qml.Hamiltonian(coeffs, [qml.PauliX(0), qml.PauliZ(0)])
        dev = qml.device("default.qubit", wires=1, shots=100)
        expval_cost = qml.ExpvalCost(lambda x, **kwargs: qml.RX(x, wires=0), H, dev)

        opt = qml.ShotAdaptiveOptimizer(min_shots=10, stepsize=100.)

        # lipschitz constant is given by sum(|coeffs|)
        lipschitz = np.sum(np.abs(coeffs))

        assert opt._stepsize > 2 / lipschitz

        with pytest.raises(ValueError, match=f"The learning rate must be less than {2 / lipschitz}"):
            opt.step(expval_cost, 0.5)

        # for a single QNode, the lipschitz constant is simply 1
        opt = qml.ShotAdaptiveOptimizer(min_shots=10, stepsize=100.)
        with pytest.raises(ValueError, match=f"The learning rate must be less than {2 / 1}"):
            opt.step(expval_cost.qnodes[0], 0.5)
Example #14
    def test_second_order_cv(self, tol):
        """Test variance of a second order CV expectation value"""
        dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=15)

        @qml.qnode(dev)
        def circuit(n, a):
            qml.ThermalState(n, wires=0)
            qml.Displacement(a, 0, wires=0)
            return qml.var(qml.NumberOperator(0))

        n = np.array(0.12, requires_grad=True)
        a = np.array(0.105, requires_grad=True)

        var = circuit(n, a)
        expected = n**2 + n + np.abs(a) ** 2 * (1 + 2 * n)
        assert np.allclose(var, expected, atol=tol, rtol=0)

        # circuit jacobians
        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        gradF = fn(dev.batch_execute(tapes))
        expected = np.array([2 * a**2 + 2 * n + 1, 2 * a * (2 * n + 1)])
        assert np.allclose(gradF, expected, atol=tol, rtol=0)
Example #15
    def test_parametrized_transform_qnode(self, mocker):
        """Test that a parametrized transform can be applied
        to a QNode"""

        a = 0.1
        b = 0.4
        x = 0.543

        dev = qml.device("default.qubit", wires=2)

        @qml.qnode(dev)
        def circuit(x):
            qml.Hadamard(wires=0)
            qml.RX(x, wires=0)
            return qml.expval(qml.PauliX(0))

        transform_fn = self.my_transform(circuit, a, b)

        spy = mocker.spy(self.my_transform, "construct")
        res = transform_fn(x)

        spy.assert_called()
        tapes, fn = spy.spy_return

        assert len(tapes[0].operations) == 2
        assert tapes[0].operations[0].name == "Hadamard"
        assert tapes[0].operations[1].name == "RY"
        assert tapes[0].operations[1].parameters == [a * np.abs(x)]

        assert len(tapes[1].operations) == 2
        assert tapes[1].operations[0].name == "Hadamard"
        assert tapes[1].operations[1].name == "RZ"
        assert tapes[1].operations[1].parameters == [b * np.sin(x)]

        expected = fn(dev.batch_execute(tapes))
        assert res == expected
Example #16
    def test_second_order_cv(self, tol):
        """Test variance of a second order CV expectation value"""
        dev = qml.device("default.gaussian", wires=1)

        n = 0.12
        a = 0.765

        with qml.tape.JacobianTape() as tape:
            qml.ThermalState(n, wires=0)
            qml.Displacement(a, 0, wires=0)
            qml.var(qml.NumberOperator(0))

        tape.trainable_params = {0, 1}

        res = tape.execute(dev)
        expected = n**2 + n + np.abs(a)**2 * (1 + 2 * n)
        assert np.allclose(res, expected, atol=tol, rtol=0)

        # circuit jacobians
        tapes, fn = qml.gradients.finite_diff(tape)
        grad_F = fn(dev.batch_execute(tapes))

        expected = np.array([[2 * a**2 + 2 * n + 1, 2 * a * (2 * n + 1)]])
        assert np.allclose(grad_F, expected, atol=tol, rtol=0)
Example #17
    def test_parametrized_transform_tape_decorator(self):
        """Test that a parametrized transform can be applied
        to a tape"""

        a = 0.1
        b = 0.4
        x = 0.543

        with qml.tape.QuantumTape() as tape:
            qml.Hadamard(wires=0)
            qml.RX(x, wires=0)
            qml.expval(qml.PauliX(0))

        tapes, fn = self.my_transform(a, b)(tape)

        assert len(tapes[0].operations) == 2
        assert tapes[0].operations[0].name == "Hadamard"
        assert tapes[0].operations[1].name == "RY"
        assert tapes[0].operations[1].parameters == [a * np.abs(x)]

        assert len(tapes[1].operations) == 2
        assert tapes[1].operations[0].name == "Hadamard"
        assert tapes[1].operations[1].name == "RZ"
        assert tapes[1].operations[1].parameters == [b * np.sin(x)]
Example #18
def squared_difference(x, y):
    """Classical node to compute the squared
    difference between two inputs"""
    return np.abs(x - y)**2
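A quick check of this classical node (illustrative values; it also works element-wise on arrays):

import numpy as np

assert squared_difference(3.0, 1.0) == 4.0
assert np.allclose(squared_difference(np.array([1.0, 2.0]), np.array([0.0, 4.0])), [1.0, 4.0])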
Example #19
                    def circuit():
                        if hasattr(qml, operation):
                            operation_class = getattr(qml, operation)
                        else:
                            operation_class = getattr(pennylane_qiskit,
                                                      operation)
                        if hasattr(qml.expval, observable):
                            observable_class = getattr(qml.expval, observable)
                        else:
                            observable_class = getattr(pennylane_qiskit.expval,
                                                       observable)

                        if operation_class.num_wires > self.num_subsystems:
                            raise IgnoreOperationException(
                                'Skipping in automatic test because the operation '
                                + operation +
                                " acts on more than the default number of wires "
                                + str(self.num_subsystems) +
                                ". Maybe you want to increase that?")
                        if observable_class.num_wires > self.num_subsystems:
                            raise IgnoreOperationException(
                                'Skipping in automatic test because the observable '
                                + observable +
                                " acts on more than the default number of wires "
                                + str(self.num_subsystems) +
                                ". Maybe you want to increase that?")

                        if operation_class.par_domain == 'N':
                            operation_pars = rnd_int_pool[:operation_class.
                                                          num_params]
                        elif operation_class.par_domain == 'R':
                            operation_pars = np.abs(
                                rnd_float_pool[:operation_class.num_params]
                            )  #todo: some operations/expectations fail when parameters are negative (e.g. thermal state) but par_domain is not fine grained enough to capture this
                        elif operation_class.par_domain == 'A':
                            if str(operation) == "QubitUnitary":
                                operation_pars = [np.array([[1, 0], [0, -1]])]
                            elif str(operation) == "QubitStateVector":
                                operation_pars = [np.array(random_ket)]
                            elif str(operation) == "BasisState":
                                operation_pars = [
                                    random_zero_one_pool[:self.num_subsystems]
                                ]
                                operation_class.num_wires = self.num_subsystems
                            else:
                                raise IgnoreOperationException(
                                    'Skipping in automatic test because I don\'t know how to generate parameters for the operation '
                                    + operation)
                        else:
                            operation_pars = {}

                        if observable_class.par_domain == 'N':
                            observable_pars = rnd_int_pool[:observable_class.
                                                           num_params]
                        elif observable_class.par_domain == 'R':
                            observable_pars = np.abs(
                                rnd_float_pool[:observable_class.num_params]
                            )  #todo: some operations/expectations fail when parameters are negative (e.g. thermal state) but par_domain is not fine grained enough to capture this
                        elif observable_class.par_domain == 'A':
                            if str(observable) == "Hermitian":
                                observable_pars = [
                                    np.array([[1, 1j], [-1j, 0]])
                                ]
                            else:
                                raise IgnoreOperationException(
                                    'Skipping in automatic test because I don\'t know how to generate parameters for the observable '
                                    + observable)
                        else:
                            observable_pars = {}

                        # apply to the first wires
                        operation_wires = list(
                            range(operation_class.num_wires
                                  )) if operation_class.num_wires > 1 else 0
                        observable_wires = list(
                            range(observable_class.num_wires)
                        ) if observable_class.num_wires > 1 else 0

                        operation_class(*operation_pars, operation_wires)
                        return observable_class(*observable_pars,
                                                observable_wires)
Example #20
def cost_fn(params):
    cost = 0
    for k in range(3):
        cost += np.abs(circuit(params, A=Paulis[k]) - bloch_v[k])

    return cost
Example #21
print(params)

##############################################################################
# We carry out the optimization over a maximum of 200 steps, aiming to reach a convergence
# tolerance (difference in cost function for subsequent optimization steps) of :math:`\sim 10^{-6}`.

max_iterations = 200
conv_tol = 1e-06


for n in range(max_iterations):
    params, prev_energy = opt.step_and_cost(cost_fn, params)
    energy = cost_fn(params)
    conv = np.abs(energy - prev_energy)

    if n % 20 == 0:
        print('Iteration = {:},  Energy = {:.8f} Ha'.format(n, energy))

    if conv <= conv_tol:
        break

print()
print('Final convergence parameter = {:.8f} Ha'.format(conv))
print('Final value of the ground-state energy = {:.8f} Ha'.format(energy))
print('Accuracy with respect to the FCI energy: {:.8f} Ha ({:.8f} kcal/mol)'.format(
    np.abs(energy - (-1.136189454088)), np.abs(energy - (-1.136189454088))*627.503
    )
)
print()
Example #22
def train_circuit(circuit, parameter_shape, X_train, Y_train, batch_size,
                  learning_rate, **kwargs):
    """
    Train a circuit classifier.

    Args:
        circuit (qml.QNode): A circuit that you want to train
        parameter_shape (tuple): A tuple describing the shape of the parameters. The first entry is
            the number of qubits, the second one is the number of layers in the circuit architecture.
        X_train (np.ndarray): An array of floats of size (M, n) to be used as training data.
        Y_train (np.ndarray): An array of size (M,) which are the categorical labels
            associated to the training data.

        batch_size (int): Batch size for the circuit training.

        learning_rate (float): The learning rate/step size of the optimizer.

        kwargs: Hyperparameters for the training (passed as keyword arguments). The following hyperparameters are supported:

            nsteps (int) : Number of training steps.

            optim (pennylane.optimize instance): Optimizer used during the training of the circuit.
                Pass as qml.OptimizerName.

            Tmax (list): Maximum point T as defined in https://arxiv.org/abs/2010.08512. (Definition 8)
                    The first element is the maximum number of parameters among all architectures,
                    the second is the maximum inference time among all architectures in terms of computing time,
                    the third one is the maximum inference time among all architectures in terms of the number of CNOTS
                    in the circuit

            rate_type (string): Determines the type of error rate in the W-coefficient.
                    If rate_type == 'accuracy', the inference time of the circuit
                    is equal to the time it takes to evaluate the accuracy of the trained circuit with
                    respect to a validation batch three times the size of the training batch, and
                    the error rate is equal to 1 - accuracy (w.r.t. a validation batch).

                    If rate_type == 'batch_cost', the inference time of the circuit is equal to the time
                    it takes to train the circuit (for nsteps training steps) and compute the cost at
                    each step, and the error rate is equal to the cost after nsteps training steps.

    Returns:
        (W_,weights): W-coefficient, trained weights
    """

    #print('batch_size',batch_size)
    # fix the seed while debugging
    #np.random.seed(1337)
    def ohe_cost_fcn(params, circuit, ang_array, actual):
        '''
        compute the one-hot readout cost with mean squared error
        '''
        predictions = (np.stack([circuit(params, x)
                                 for x in ang_array]) + 1) * 0.5
        return mse(actual, predictions)

    def wn_cost_fcn(params, circuit, ang_array, actual):
        '''
        compute the weighted-neuron readout cost with mean squared error
        '''
        w = params[:, -1]

        theta = params[:, :-1]
        #print(w.shape,w,theta.shape,theta)
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) -
            1. for x in ang_array
        ])
        return mse(actual, predictions)

    if kwargs['readout_layer'] == 'one_hot':
        var = np.zeros(parameter_shape)
    elif kwargs['readout_layer'] == "weighted_neuron":
        var = np.hstack(
            (np.zeros(parameter_shape), np.random.random(
                (kwargs['nqubits'], 1)) - 0.5))
    rate_type = kwargs['rate_type']
    inf_time = kwargs['inf_time']
    optim = kwargs['optim']
    numcnots = kwargs['numcnots']

    Tmax = kwargs[
        'Tmax']  #Tmax[0] is maximum parameter size, Tmax[1] maximum inftime (timeit),Tmax[2] maximum number of entangling gates
    num_train = len(Y_train)
    validation_size = int(0.1 * num_train)
    opt = optim(
        stepsize=learning_rate
    )  #all optimizers in autograd module take in argument stepsize, so this works for all
    start = time.time()
    for _ in range(kwargs['nsteps']):
        batch_index = np.random.randint(0, num_train, (batch_size, ))
        X_train_batch = np.asarray(X_train[batch_index])
        Y_train_batch = np.asarray(Y_train[batch_index])
        if kwargs['readout_layer'] == 'one_hot':
            var, cost = opt.step_and_cost(
                lambda v: ohe_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                       ), var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            var, cost = opt.step_and_cost(
                lambda v: wn_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                      ), var)
    end = time.time()
    cost_time = (end - start)

    if kwargs['rate_type'] == 'accuracy':
        validation_batch = np.random.randint(0, num_train, (validation_size, ))
        X_validation_batch = np.asarray(X_train[validation_batch])
        Y_validation_batch = np.asarray(Y_train[validation_batch])
        start = time.time()  # add in timeit function from Wbranch
        if kwargs['readout_layer'] == 'one_hot':
            predictions = np.stack(
                [circuit(var, x) for x in X_validation_batch])
        elif kwargs['readout_layer'] == 'weighted_neuron':
            n = kwargs.get('nqubits')
            w = var[:, -1]
            theta = var[:, :-1]
            predictions = [
                int(
                    np.round(
                        2. *
                        (1.0 /
                         (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) -
                        1., 1)) for x in X_validation_batch
            ]
        end = time.time()
        inftime = (end - start) / len(X_validation_batch)
        if kwargs['readout_layer'] == 'one_hot':
            err_rate = (
                1.0 - ohe_accuracy(Y_validation_batch, predictions)
            ) + 10**-7  #add small epsilon to prevent divide by 0 errors
            #print('error rate:',err_rate)
            #print('weights: ',var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            err_rate = (
                1.0 - wn_accuracy(Y_validation_batch, predictions)
            ) + 10**-7  #add small epsilon to prevent divide by 0 errors
            #print('error rate:',err_rate)
            #print('weights: ',var)
    elif kwargs['rate_type'] == 'batch_cost':
        err_rate = (
            cost) + 10**-7  #add small epsilon to prevent divide by 0 errors
        #print('error rate:',err_rate)
        #print('weights: ',var)
        inftime = cost_time
    # QHACK #

    if kwargs['inf_time'] == 'timeit':

        W_ = np.abs((Tmax[0] - len(var)) / (Tmax[0])) * np.abs(
            (Tmax[1] - inftime) / (Tmax[1])) * (1. / err_rate)

    elif kwargs['inf_time'] == 'numcnots':
        nc_ = numcnots
        W_ = np.abs((Tmax[0] - len(var)) / (Tmax[0])) * np.abs(
            (Tmax[2] - nc_) / (Tmax[2])) * (1. / err_rate)

    return W_, var
Example #23
params = init_params

gd_param_history = [params]
gd_cost_history = []

for n in range(max_iterations):

    # Take step
    params, prev_energy = opt.step_and_cost(cost_fn, params)
    gd_param_history.append(params)
    gd_cost_history.append(prev_energy)

    energy = cost_fn(params)

    # Calculate difference between new and old energies
    conv = np.abs(energy - prev_energy)

    if n % 20 == 0:
        print(
            "Iteration = {:},  Energy = {:.8f} Ha,  Convergence parameter = {"
            ":.8f} Ha".format(n, energy, conv)
        )

    if conv <= conv_tol:
        break

print()
print("Final value of the energy = {:.8f} Ha".format(energy))
print("Number of iterations = ", n)

##############################################################################
Example #24
def natural_gradient(params):
    """Calculate the natural gradient of the qnode() cost function.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    You should evaluate the metric tensor and the gradient of the QNode, and then combine these
    together using the natural gradient definition. The natural gradient should be returned as a
    NumPy array.

    The metric tensor should be evaluated using the equation provided in the problem text. Hint:
    you will need to define a new QNode that returns the quantum state before measurement.

    Args:
        params (np.ndarray): Input parameters, of dimension 6

    Returns:
        np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
    """

    # QHACK #
    def get_state(params):
        """ Get the state before a measurement """
        qnode(params)
        return dev.state

    # Calculate the unshifted state (its conjugate transpose)
    state_unshifted = np.conjugate(get_state(params)).T

    def shift_vector(i):
        vector = np.zeros(6)
        vector[i] = 1
        return vector

    metric_tensor = np.zeros((6, 6))

    for i in range(6):
        for j in range(i + 1):

            state_shifted_1 = get_state(params +
                                        (shift_vector(i) + shift_vector(j)) *
                                        np.pi / 2)
            state_shifted_2 = get_state(params +
                                        (shift_vector(i) - shift_vector(j)) *
                                        np.pi / 2)
            state_shifted_3 = get_state(params +
                                        (-shift_vector(i) + shift_vector(j)) *
                                        np.pi / 2)
            state_shifted_4 = get_state(params -
                                        (shift_vector(i) + shift_vector(j)) *
                                        np.pi / 2)

            metric_tensor[
                i,
                j] = (-np.abs(np.dot(state_unshifted, state_shifted_1))**2 +
                      np.abs(np.dot(state_unshifted, state_shifted_2))**2 +
                      np.abs(np.dot(state_unshifted, state_shifted_3))**2 -
                      np.abs(np.dot(state_unshifted, state_shifted_4))**2) / 8

            if i != j:
                metric_tensor[j, i] = metric_tensor[i, j]

    grad = qml.grad(qnode)
    gradient = grad(params)[0]

    metric_tensor_inv = np.linalg.inv(metric_tensor)

    natural_grad = np.dot(metric_tensor_inv, gradient)

    # QHACK #

    return natural_grad
Example #25
class AutogradBox(qml.math.TensorBox):
    """Implements the :class:`~.TensorBox` API for ``pennylane.numpy`` tensors.

    For more details, please refer to the :class:`~.TensorBox` documentation.
    """

    abs = wrap_output(lambda self: np.abs(self.data))
    angle = wrap_output(lambda self: np.angle(self.data))
    arcsin = wrap_output(lambda self: np.arcsin(self.data))
    cast = wrap_output(lambda self, dtype: np.tensor(self.data, dtype=dtype))
    diag = staticmethod(wrap_output(lambda values, k=0: np.diag(values, k=k)))
    expand_dims = wrap_output(
        lambda self, axis: np.expand_dims(self.data, axis=axis))
    ones_like = wrap_output(lambda self: np.ones_like(self.data))
    reshape = wrap_output(lambda self, shape: np.reshape(self.data, shape))
    sqrt = wrap_output(lambda self: np.sqrt(self.data))
    sum = wrap_output(lambda self, axis=None, keepdims=False: np.sum(
        self.data, axis=axis, keepdims=keepdims))
    T = wrap_output(lambda self: self.data.T)
    squeeze = wrap_output(lambda self: self.data.squeeze())

    @staticmethod
    def astensor(tensor):
        return np.tensor(tensor)

    @staticmethod
    @wrap_output
    def concatenate(values, axis=0):
        return np.concatenate(AutogradBox.unbox_list(values), axis=axis)

    @staticmethod
    @wrap_output
    def dot(x, y):
        x, y = AutogradBox.unbox_list([x, y])

        if x.ndim == 0 and y.ndim == 0:
            return x * y

        if x.ndim == 2 and y.ndim == 2:
            return x @ y

        return np.dot(x, y)

    @property
    def interface(self):
        return "autograd"

    def numpy(self):
        if hasattr(self.data, "_value"):
            # Catches the edge case where the data is an Autograd arraybox,
            # which only occurs during backpropagation.
            return self.data._value

        return self.data.numpy()

    @property
    def requires_grad(self):
        return self.data.requires_grad

    @wrap_output
    def scatter_element_add(self, index, value):
        size = self.data.size
        flat_index = np.ravel_multi_index(index, self.shape)
        t = [0] * size
        t[flat_index] = value
        self.data = self.data + np.array(t).reshape(self.shape)
        return self.data

    @property
    def shape(self):
        return self.data.shape

    @staticmethod
    @wrap_output
    def stack(values, axis=0):
        return np.stack(AutogradBox.unbox_list(values), axis=axis)

    @wrap_output
    def take(self, indices, axis=None):
        indices = self.astensor(indices)

        if axis is None:
            return self.data.flatten()[indices]

        fancy_indices = [slice(None)] * axis + [indices]
        return self.data[tuple(fancy_indices)]

    @staticmethod
    @wrap_output
    def where(condition, x, y):
        return np.where(condition, *AutogradBox.unbox_list([x, y]))
Example #26
print(params)

##############################################################################
# We carry out the optimization over a maximum of 200 steps, aiming to reach a convergence
# tolerance (difference in cost function for subsequent optimization steps) of :math:`\sim 10^{-6}`.

max_iterations = 200
conv_tol = 1e-06

prev_energy = cost_fn(params)
for n in range(max_iterations):
    params = opt.step(cost_fn, params)
    energy = cost_fn(params)
    conv = np.abs(energy - prev_energy)

    if n % 20 == 0:
        print('Iteration = {:},  Energy = {:.8f} Ha'.format(n, energy))

    if conv <= conv_tol:
        break

    prev_energy = energy

print()
print('Final convergence parameter = {:.8f} Ha'.format(conv))
print('Final value of the ground-state energy = {:.8f} Ha'.format(energy))
print('Accuracy with respect to the FCI energy: {:.8f} Ha ({:.8f} kcal/mol)'.
      format(np.abs(energy - (-1.136189454088)),
             np.abs(energy - (-1.136189454088)) * 627.503))
Example #27
params = np.random.normal(0, np.pi, len(singles) + len(doubles))
print(params)

##############################################################################
# We carry out the optimization over a maximum of 100 steps, aiming to reach a convergence
# tolerance of :math:`\sim 10^{-6}`. Furthermore, we track the value of
# the total spin :math:`S` of the prepared state as it is optimized through
# the iterative procedure.

max_iterations = 100
conv_tol = 1e-06
prev_energy = cost_fn(params)
for n in range(max_iterations):
    params = opt.step(cost_fn, params)
    energy = cost_fn(params)
    conv = np.abs(energy - prev_energy)

    spin = total_spin(params)

    if n % 4 == 0:
        print("Iteration = {:},  E = {:.8f} Ha,  S = {:.4f}".format(
            n, energy, spin))

    if conv <= conv_tol:
        break

    prev_energy = energy

print()
print("Final convergence parameter = {:.8f} Ha".format(conv))
print("Final value of the ground-state energy = {:.8f} Ha".format(energy))
Example #28
 def cost(a):
     """A function of the device quantum state, as a function
     of input QNode parameters."""
     circuit(a)
     res = np.abs(dev.state)**2
     return res[1] - res[0]
Example #29
 def expZ(state):
     return np.abs(state[0])**2 - np.abs(state[1])**2
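A quick sanity check on single-qubit states, assuming the function above is in scope (amplitudes assumed normalized):

import numpy as np

assert expZ(np.array([1.0, 0.0])) == 1.0    # |0> gives <Z> = +1
assert expZ(np.array([0.0, 1.0])) == -1.0   # |1> gives <Z> = -1
assert np.isclose(expZ(np.array([1.0, 1.0]) / np.sqrt(2)), 0.0)  # |+> gives <Z> = 0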
Example #30
params = init_params

gd_param_history = [params]
gd_cost_history = []

for n in range(max_iterations):

    # Take step
    params, prev_energy = opt.step_and_cost(cost_fn, params)
    gd_param_history.append(params)
    gd_cost_history.append(prev_energy)

    energy = cost_fn(params)

    # Calculate difference between new and old energies
    conv = np.abs(energy - prev_energy)

    if n % 20 == 0:
        print(
            "Iteration = {:},  Energy = {:.8f} Ha,  Convergence parameter = {"
            ":.8f} Ha".format(n, energy, conv))

    if conv <= conv_tol:
        break

print()
print("Final value of the energy = {:.8f} Ha".format(energy))
print("Number of iterations = ", n)

##############################################################################
# We then repeat the process for the optimizer employing quantum natural gradients: