Example #1
    def test_grad_tf(self, qnodes, skip_if_no_tf_support, parallel, interface):
        """Test correct gradient of the QNodeCollection using
        the tf interface"""
        if parallel and qml.tape_mode_active():
            pytest.skip("There appears to be a race condition when constructing TF tapes in parallel")

        qnode1, qnode2 = qnodes

        # calculate the gradient of the collection using tf
        params = Variable([0.5643, -0.45])
        qc = qml.QNodeCollection([qnode1, qnode2])

        with tf.GradientTape() as tape:
            tape.watch(params)

            if parallel:
                with pytest.warns(UserWarning):
                    cost = sum(qc(params, parallel=parallel))
            else:
                cost = sum(qc(params, parallel=parallel))

            # compute the gradient of the cost with respect to the parameters
            res = tape.gradient(cost, params).numpy()

        # calculate the gradient of the QNodes individually using tf
        params = Variable([0.5643, -0.45])

        with tf.GradientTape() as tape:
            tape.watch(params)
            cost = sum(qnode1(params) + qnode2(params))
            expected = tape.gradient(cost, params).numpy()

        assert np.all(res == expected)
Example #2
def test_astensor_array():
    """Test conversion of numpy arrays to PennyLane tensors"""
    x = np.array([0.1, 0.2, 0.3])
    y = np.array([0.4, 0.5, 0.6])

    res = qml.proc.TensorBox(x).astensor(y)
    assert isinstance(res, np.tensor)
    assert np.all(res == y)
Example #3
def test_creation():
    """Test that an AutogradBox is automatically created from a PennyLane numpy tensor"""
    x = np.array([0.1, 0.2, 0.3])
    res = qml.proc.TensorBox(x)
    assert isinstance(res, AutogradBox)
    assert res.interface == "autograd"
    assert isinstance(res.unbox(), np.ndarray)
    assert np.all(res == x)
Example #4
    def test_stack_array_jax(self):
        """Test that stack, called without the axis arguments, stacks vertically"""
        t1 = onp.array([0.6, 0.1, 0.6])
        t2 = jnp.array([0.1, 0.2, 0.3])
        t3 = jnp.array([5.0, 8.0, 101.0])

        res = fn.stack([t1, t2, t3])
        assert np.all(res == np.stack([t1, t2, t3]))
Example #5
def test_numpy():
    """Test that calling numpy() returns a NumPy array representation
    of the TensorBox"""
    x = np.array([[1, 2], [3, 4]])
    xT = qml.proc.TensorBox(x)
    assert isinstance(xT.numpy(), np.ndarray)
    assert not isinstance(xT.numpy(), np.tensor)
    assert np.all(xT == x)
Example #6
    def test_get_parameters(self):
        """Test that the get_parameters function correctly gets the trainable parameters and all
        parameters, depending on the trainable_only argument"""
        a = np.array(0.1, requires_grad=True)
        b = np.array(0.2, requires_grad=False)
        c = np.array(0.3, requires_grad=True)
        d = np.array(0.4, requires_grad=False)

        with AutogradInterface.apply(QuantumTape()) as tape:
            qml.Rot(a, b, c, wires=0)
            qml.RX(d, wires=1)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliX(0))

        assert tape.trainable_params == {0, 2}
        assert np.all(tape.get_parameters(trainable_only=True) == [a, c])
        assert np.all(tape.get_parameters(trainable_only=False) == [a, b, c, d])
Example #7
def test_astensor_list():
    """Test conversion of a list to PennyLane tensors"""
    x = np.array([0.1, 0.2, 0.3])
    y = [0.4, 0.5, 0.6]

    res = qml.math.TensorBox(x).astensor(y)
    assert isinstance(res, np.tensor)
    assert np.all(res == y)
Example #8
    def test_zero_dy(self):
        """An all-zero dy will return an all-zero VJP"""
        dy = np.zeros([2, 2])
        jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]],
                        [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])

        vjp = qml.gradients.compute_vjp(dy, jac)
        assert np.all(vjp == np.zeros([3]))
Example #9
    def test_step_and_cost_autograd_rotosolve_multid_array(self, bunch):
        """Test that the correct cost is returned via the step_and_cost method for the
        Rotosolve optimizer"""
        _, res = bunch.rotosolve_opt.step_and_cost(quant_fun_mdarr,
                                                   multid_array)
        expected = quant_fun_mdarr(multid_array)

        assert np.all(res == expected)
Example #10
def test_stack():
    """Test that arrays are correctly stacked together"""
    x = np.array([[1, 2], [3, 4]])
    y = np.array([[1, 0], [0, 1]])

    xT = qml.proc.TensorBox(x)
    res = xT.stack([y, xT, x])

    assert np.all(res == np.stack([y, x, x]))
Example #11
def test_ones_like():
    """Test that all ones arrays are correctly created"""
    x = np.array([[1, 2, 3], [4, 5, 6]])
    xT = qml.proc.TensorBox(x)

    res = xT.ones_like()
    expected = np.ones_like(x)
    assert isinstance(res, AutogradBox)
    assert np.all(res == expected)
Example #12
def test_expand_dims():
    """Test that dimension expansion works"""
    x = np.array([1, 2, 3])
    xT = qml.proc.TensorBox(x)

    res = xT.expand_dims(axis=1)
    expected = np.expand_dims(x, axis=1)
    assert isinstance(res, AutogradBox)
    assert np.all(res == expected)
Example #13
def test_unbox_list():
    """Test that unboxing a mixed list works correctly"""
    x = np.array([[1, 2], [3, 4]])
    y = np.array([[1, 0], [0, 1]])

    xT = qml.proc.TensorBox(x)
    res = xT.unbox_list([y, xT, x])

    assert np.all(res == [y, x, x])
Example #14
def test_cast():
    """Test that arrays can be cast to different dtypes"""
    x = np.array([1, 2, 3])

    res = qml.proc.TensorBox(x).cast(np.float64)
    expected = np.array([1.0, 2.0, 3.0])
    assert np.all(res == expected)
    assert res.numpy().dtype.type is np.float64

    res = qml.proc.TensorBox(x).cast(np.dtype("int8"))
    expected = np.array([1, 2, 3], dtype=np.int8)
    assert np.all(res == expected)
    assert res.numpy().dtype == np.dtype("int8")

    res = qml.proc.TensorBox(x).cast("complex128")
    expected = np.array([1, 2, 3], dtype=np.complex128)
    assert np.all(res == expected)
    assert res.numpy().dtype.type is np.complex128
Example #15
    def test_concatenate_array(self):
        """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""
        t1 = [0.6, 0.1, 0.6]
        t2 = np.array([0.1, 0.2, 0.3])
        t3 = onp.array([5.0, 8.0, 101.0])

        res = fn.concatenate([t1, t2, t3])
        assert isinstance(res, np.ndarray)
        assert np.all(res == np.concatenate([t1, t2, t3]))
Example #16
    def test_stack_torch(self):
        """Test that stack, called without the axis arguments, stacks vertically"""
        t1 = onp.array([5.0, 8.0, 101.0], dtype=np.float64)
        t2 = torch.tensor([0.6, 0.1, 0.6], dtype=torch.float64)
        t3 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64)

        res = fn.stack([t1, t2, t3])
        assert isinstance(res, torch.Tensor)
        assert np.all(res.numpy() == np.stack([t1, t2.numpy(), t3.numpy()]))
Example #17
    def test_convert_array(self):
        """Test that a PennyLane tensor successfully converts to a vanilla NumPy array"""
        data = np.array([1, 2, 3])
        res = data.numpy()

        assert np.shares_memory(res, data)
        assert np.all(res == data)
        assert isinstance(res, np.ndarray)
        assert not isinstance(res, np.tensor)
Example #18
    def test_stack_tensorflow(self):
        """Test that stack, called without the axis arguments, stacks vertically"""
        t1 = tf.constant([0.6, 0.1, 0.6])
        t2 = tf.Variable([0.1, 0.2, 0.3])
        t3 = onp.array([5.0, 8.0, 101.0])

        res = fn.stack([t1, t2, t3])
        assert isinstance(res, tf.Tensor)
        assert np.all(res.numpy() == np.stack([t1.numpy(), t2.numpy(), t3]))
Example #19
    def test_concatenate_torch(self):
        """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""
        t1 = onp.array([5.0, 8.0, 101.0], dtype=np.float64)
        t2 = torch.tensor([0.6, 0.1, 0.6], dtype=torch.float64)
        t3 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64)

        res = fn.concatenate([t1, t2, t3])
        assert isinstance(res, torch.Tensor)
        assert np.all(res.numpy() == np.concatenate([t1, t2.numpy(), t3.numpy()]))
Example #20
    def test_concatenate_tensorflow(self):
        """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""
        t1 = tf.constant([0.6, 0.1, 0.6])
        t2 = tf.Variable([0.1, 0.2, 0.3])
        t3 = onp.array([5.0, 8.0, 101.0])

        res = fn.concatenate([t1, t2, t3])
        assert isinstance(res, tf.Tensor)
        assert np.all(res.numpy() == np.concatenate([t1.numpy(), t2.numpy(), t3]))
Example #21
    def test_wrapped_function_on_array(self):
        """Test behaviour of a wrapped function on a vanilla NumPy
        array."""
        res = np.sin(onp.array([0, 1, 2]))
        expected = onp.sin(onp.array([0, 1, 2]))
        assert np.all(res == expected)

        # the result has been converted into a tensor
        assert isinstance(res, np.tensor)
        assert res.requires_grad
Example #22
    def test_eval_tf(self, qnodes, skip_if_no_tf_support):
        """Test correct evaluation of the QNodeCollection using
        the tf interface"""
        qnode1, qnode2 = qnodes
        qc = qml.QNodeCollection([qnode1, qnode2])
        params = [0.5643, -0.45]

        res = qc(params).numpy()
        expected = np.vstack([qnode1(params), qnode2(params)])
        assert np.all(res == expected)
Example #23
    def test_eval_autograd(self, qnodes):
        """Test correct evaluation of the QNodeCollection using
        the Autograd interface"""
        qnode1, qnode2 = qnodes
        qc = qml.QNodeCollection([qnode1, qnode2])
        params = [0.5643, -0.45]

        res = qc(params)
        expected = np.vstack([qnode1(params), qnode2(params)])
        assert np.all(res == expected)
Example #24
    def test_computation(self):
        """Test that the correct VJP is returned"""
        dy = np.array([[1.0, 2.0], [3.0, 4.0]])
        jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]],
                        [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])

        vjp = qml.gradients.compute_vjp(dy, jac)

        assert vjp.shape == (3, )
        assert np.all(vjp == np.tensordot(dy, jac, axes=[[0, 1], [0, 1]]))
Example #25
def generate_symmetries(qubit_op, num_qubits):
    r"""Compute the generator set of the symmetries :math:`\mathbf{\tau}` and the corresponding single-qubit
    set of the Pauli-X operators :math:`\mathbf{\sigma^x}` that are used to build the Clifford operators
    :math:`U`, according to the following relation:

    .. math::

        U_i = \frac{1}{\sqrt{2}}(\tau_i+\sigma^{x}_{q}).

    Here, :math:`\sigma^{x}_{q}` is the Pauli-X operator acting on qubit :math:`q`. These :math:`U_i` can be
    used to transform the Hamiltonian :math:`H` in such a way that it acts trivially, or with at most one
    Pauli gate, on a subset of qubits, which allows us to taper off those qubits from the simulation
    using :func:`~.transform_hamiltonian`.

    Args:
        qubit_op (Hamiltonian): Hamiltonian for which symmetries are to be generated to perform tapering
        num_qubits (int): number of wires required to define the Hamiltonian

    Returns:
        tuple (list[Hamiltonian], list[Operation]):

            * list[Hamiltonian]: list of generators of symmetries, :math:`\mathbf{\tau}`,
              for the Hamiltonian.
            * list[Operation]: list of single-qubit Pauli-X operators which will be used
              to build the Clifford operators :math:`U`.

    **Example**

    >>> symbols = ['H', 'H']
    >>> geometry = np.array([[0., 0., -0.66140414], [0., 0., 0.66140414]])
    >>> mol = qml.hf.Molecule(symbols, geometry)
    >>> H, qubits = qml.hf.generate_hamiltonian(mol)(geometry), 4
    >>> generators, paulix_ops = qml.hf.tapering.generate_symmetries(H, qubits)
    >>> generators, paulix_ops
    ([(1.0) [Z0 Z1], (1.0) [Z0 Z2], (1.0) [Z0 Z3]],
    [PauliX(wires=[1]), PauliX(wires=[2]), PauliX(wires=[3])])
    """
    # Generate binary matrix for qubit_op
    binary_matrix = _binary_matrix(qubit_op.ops, num_qubits)

    # Get reduced row echelon form of binary matrix
    rref_binary_matrix = _reduced_row_echelon(binary_matrix)
    rref_binary_matrix_red = rref_binary_matrix[~np.all(
        rref_binary_matrix == 0, axis=1)]  # remove all-zero rows

    # Get the kernel (i.e., nullspace) of the trimmed binary matrix using Gaussian elimination
    nullspace = _kernel(rref_binary_matrix_red)

    # Get generators tau from the calculated nullspace
    generators = get_generators(nullspace, num_qubits)

    # Get the single-qubit Pauli-X operators (used to build the Clifford unitaries) from the generators
    paulix_ops = generate_paulis(generators, num_qubits)

    return generators, paulix_ops
Example #26
    def test_full_subsystem(self, mocker):
        """Test applying a state vector to the full subsystem"""
        dev = DefaultQubitAutograd(wires=['a', 'b', 'c'])
        state = np.array([1, 0, 0, 0, 1, 0, 1, 1]) / 2.
        state_wires = qml.wires.Wires(['a', 'b', 'c'])

        spy = mocker.spy(dev, "_scatter")
        dev._apply_state_vector(state=state, device_wires=state_wires)

        assert np.all(dev._state.flatten() == state)
        spy.assert_not_called()
Example #27
def test_autodifferentiation():
    """Test that autodifferentiation is preserved when writing
    a cost function that uses TensorBox method chaining"""
    x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

    cost_fn = lambda a: (qml.proc.TensorBox(a).T**2).unbox()[0, 1]
    grad_fn = qml.grad(cost_fn)

    res = grad_fn(x)[0]
    expected = np.array([[0.0, 0.0, 0.0], [8.0, 0.0, 0.0]])
    assert np.all(res == expected)
Example #28
    def test_gradient(self):
        """Test gradient computations continue to work"""
        def cost(x):
            return np.sum(np.sin(x))

        grad_fn = qml.grad(cost, argnum=[0])
        arr1 = np.array([0., 1., 2.])

        res = grad_fn(arr1)
        expected = np.cos(arr1)

        assert np.all(res == expected)
Example #29
    def test_seed_reproducible(self):
        """Tests that setting a seed to ``default_rng`` gives reproducible results."""

        seed = 42
        size = (3, 2)

        rng1 = random.default_rng(seed)
        rng2 = random.default_rng(seed)

        assert isinstance(rng1, random.Generator)
        assert isinstance(rng2, random.Generator)

        mat1 = rng1.random(size=size)
        mat2 = rng2.random(size=size)

        assert np.all(mat1 == mat2)

        mat1_2 = rng1.normal(size=size)
        mat2_2 = rng2.normal(size=size)

        assert np.all(mat1_2 == mat2_2)
Example #30
def estimate_shadow_observable(shadow, observable, k=10):
    """
    Adapted from https://github.com/momohuang/predicting-quantum-properties
    Calculate the estimator E[O] = median(Tr{rho_{(k)} O}), where rho_{(k)} is the state estimate built from
    the k-th chunk of snapshots in the shadow. The median of means is used to ameliorate the effects of outliers.

    Args:
        shadow (tuple): A shadow tuple obtained from `calculate_classical_shadow`.
        observable (qml.Observable): Single PennyLane observable consisting of single-qubit Pauli
            operators, e.g. qml.PauliX(0) @ qml.PauliY(1).
        k (int): number of splits in the median of means estimator.

    Returns:
        Scalar corresponding to the estimate of the observable.
    """
    shadow_size, num_qubits = shadow[0].shape

    # convert PennyLane observables to indices
    map_name_to_int = {"PauliX": 0, "PauliY": 1, "PauliZ": 2}
    if isinstance(observable, (qml.PauliX, qml.PauliY, qml.PauliZ)):
        target_obs, target_locs = np.array(
            [map_name_to_int[observable.name]]
        ), np.array([observable.wires[0]])
    else:
        target_obs, target_locs = np.array(
            [map_name_to_int[o.name] for o in observable.obs]
        ), np.array([o.wires[0] for o in observable.obs])

    # classical values
    b_lists, obs_lists = shadow
    means = []

    # loop over the splits of the shadow:
    for i in range(0, shadow_size, shadow_size // k):

        # assign the splits temporarily
        b_lists_k, obs_lists_k = (
            b_lists[i : i + shadow_size // k],
            obs_lists[i : i + shadow_size // k],
        )

        # find the exact matches for the observable of interest at the specified locations
        indices = np.all(obs_lists_k[:, target_locs] == target_obs, axis=1)

        # catch the edge case where there is no match in the chunk
        if sum(indices) > 0:
            # take the product and sum
            product = np.prod(b_lists_k[indices][:, target_locs], axis=1)
            means.append(np.sum(product) / sum(indices))
        else:
            means.append(0)

    return np.median(means)
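
The docstring above only requires `shadow` to be a `(b_lists, obs_lists)` pair of arrays with shape
`(shadow_size, num_qubits)`, where `b_lists` holds the ±1 measurement outcomes and `obs_lists` the
Pauli-basis indices. Under that assumption, a minimal usage sketch with a synthetic, purely illustrative
shadow might look as follows; a real shadow would come from `calculate_classical_shadow`.

# A minimal usage sketch, assuming a (b_lists, obs_lists) shadow tuple of
# shape (shadow_size, num_qubits). The random shadow below is synthetic and
# purely illustrative, not the output of `calculate_classical_shadow`.
import numpy as onp
import pennylane as qml

shadow_size, num_qubits = 1000, 2
rng = onp.random.default_rng(42)

# measurement outcomes in {-1, +1} and Pauli-basis indices in {0, 1, 2}
b_lists = rng.choice([-1, 1], size=(shadow_size, num_qubits))
obs_lists = rng.integers(0, 3, size=(shadow_size, num_qubits))
shadow = (b_lists, obs_lists)

# median-of-means estimate of <Z_0> over k = 10 equally sized chunks
estimate = estimate_shadow_observable(shadow, qml.PauliZ(0), k=10)
print(estimate)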