def _asarray(array, dtype=None):
        res = np.asarray(array, dtype=dtype)

        if res.dtype is np.dtype("O"):
            return np.hstack(array).flatten().astype(dtype)

        return res
Example #2
0
    def _execute(self, params, device):
        """Run the tape on ``device`` with constant parameters unwrapped.

        Zero-dimensional NumPy arrays in ``params`` are converted to Python
        scalars before execution, and the tape's original parameter values
        are restored once the device call completes.
        """
        # Replace 0-d NumPy arrays with plain Python literals.
        literal_params = []
        for p in params:
            literal_params.append(p.item() if p.shape == tuple() else p)
        params = autograd.builtins.tuple(literal_params)

        # Strip the autodiff wrapper from any constant (tensor) parameters.
        self._all_params_unwrapped = [
            value.numpy() if isinstance(value, np.tensor) else value
            for value in self._all_parameter_values
        ]

        # Evaluate the tape with unwrapped values, then restore the originals.
        self.set_parameters(self._all_params_unwrapped, trainable_only=False)
        res = self.execute_device(params, device=device)
        self.set_parameters(self._all_parameter_values, trainable_only=False)

        if self.is_sampled:
            return res

        # Ragged (object-dtype) outputs are flattened into a single array.
        if res.dtype == np.dtype("object"):
            return np.hstack(res)

        # The result tracks gradients only when trainable parameters exist.
        requires_grad = bool(self.trainable_params)
        return np.array(res, requires_grad=requires_grad)
Example #3
0
    def test_sample_shape_and_dtype(self, gaussian_device_2_wires, observable,
                                    n_sample):
        """Test that the sample function outputs samples of the right size"""
        result = gaussian_device_2_wires.sample(observable, [0], [], n_sample)

        # One sample per shot, returned as floating-point values.
        expected_shape = (n_sample, )
        assert np.array_equal(result.shape, expected_shape)
        assert result.dtype == np.dtype("float")
Example #4
0
def test_cast():
    """Test that arrays can be cast to different dtypes"""
    original = np.array([1, 2, 3])

    # Cast using a NumPy scalar type object.
    boxed = qml.proc.TensorBox(original).cast(np.float64)
    assert np.all(boxed == np.array([1.0, 2.0, 3.0]))
    assert boxed.numpy().dtype.type is np.float64

    # Cast using a dtype instance.
    boxed = qml.proc.TensorBox(original).cast(np.dtype("int8"))
    assert np.all(boxed == np.array([1, 2, 3], dtype=np.int8))
    assert boxed.numpy().dtype == np.dtype("int8")

    # Cast using a dtype string.
    boxed = qml.proc.TensorBox(original).cast("complex128")
    assert np.all(boxed == np.array([1, 2, 3], dtype=np.complex128))
    assert boxed.numpy().dtype.type is np.complex128
Example #5
0
    def _execute(self, params, device):
        """Execute the tape on ``device`` and post-process the raw result."""
        wrapped_params = autograd.builtins.tuple(params)
        res = self.execute_device(wrapped_params, device=device)

        # Ragged (object-dtype) outputs are flattened into a single array.
        if res.dtype == np.dtype("object"):
            return np.hstack(res)

        # The result tracks gradients only when trainable parameters exist.
        requires_grad = bool(self.trainable_params)
        return np.array(res, requires_grad=requires_grad)
Example #6
0
def _execute(
    parameters,
    tapes=None,
    device=None,
    execute_fn=None,
    gradient_fn=None,
    gradient_kwargs=None,
    _n=1,
    max_diff=2,
):  # pylint: disable=dangerous-default-value,unused-argument
    """Autodifferentiable wrapper around ``Device.batch_execute``.

    The signature of this function is designed to work around Autograd
    restrictions. ``parameters`` is dependent on the provided ``tapes``, so
    this function should always be invoked as:

    >>> parameters = [autograd.builtins.list(t.get_parameters()) for t in tapes])
    >>> parameters = autograd.builtins.tuple(parameters)
    >>> _execute(parameters, tapes=tapes, device=device)

    Note that ``tapes`` and ``device`` are *required* keyword arguments.

    The private argument ``_n`` tracks the nesting of derivatives (e.g. when
    an nth-order derivative is requested). Do not set this argument unless
    you understand the consequences!
    """
    with qml.tape.Unwrap(*tapes):
        res, jacs = execute_fn(tapes, **gradient_kwargs)

    for idx, value in enumerate(res):
        if isinstance(value, np.ndarray):
            # For backwards compatibility, ragged (object-dtype) tape
            # outputs are flattened when there is no sampling.
            if value.dtype == np.dtype("object"):
                value = np.hstack(value)
            res[idx] = np.tensor(value)
        elif isinstance(value, tuple):
            res[idx] = tuple(np.tensor(v) for v in value)
        else:
            res[idx] = qml.math.toarray(value)

    return res, jacs
Example #7
0
    def _execute(self, params, device):
        """Execute the tape on ``device`` with scalar parameters unwrapped."""
        # Zero-dimensional NumPy arrays are replaced by Python scalars
        # before being handed to the device.
        scalar_params = [p.item() if p.shape == tuple() else p for p in params]
        res = self.execute_device(
            autograd.builtins.tuple(scalar_params), device=device
        )

        # Ragged (object-dtype) outputs are flattened into a single array.
        if res.dtype == np.dtype("object"):
            return np.hstack(res)

        # The result tracks gradients only when trainable parameters exist.
        requires_grad = bool(self.trainable_params)
        return np.array(res, requires_grad=requires_grad)
 def cost_fn(x, y):
     """Cost built from the first and third output probabilities of ``circuit``."""
     res = circuit(x, y)
     # Compare dtypes with ``==``: ``is`` only happens to work because NumPy
     # interns built-in dtypes, and is not a documented guarantee.
     assert res.dtype == np.dtype("complex128")
     probs = jnp.abs(res) ** 2
     return probs[0] + probs[2]