Example #1
def test_ones_like():
    """Test that all ones arrays are correctly created"""
    x = np.array([[1, 2, 3], [4, 5, 6]])
    xT = qml.proc.TensorBox(x)

    res = xT.ones_like()
    expected = np.ones_like(x)
    assert isinstance(res, AutogradBox)
    assert np.all(res == expected)
Example #2
def parameter_shift(weights):
    """Compute the gradient of the variational circuit given by the
    ansatz function using the parameter-shift rule.

    Write your code below between the # QHACK # markers—create a device with
    the correct number of qubits, create a QNode that applies the above ansatz,
    and compute the gradient of the provided ansatz using the parameter-shift rule.

    Args:
        weights (array): An array of floating-point numbers with size (2, 3).

    Returns:
        array: The gradient of the variational circuit. The shape should match
        the input weights array.
    """
    dev = qml.device("default.qubit", wires=3)

    @qml.qnode(dev)
    def vq_circuit(weights):
        for i in range(len(weights)):
            qml.RX(weights[i, 0], wires=0)
            qml.RY(weights[i, 1], wires=1)
            qml.RZ(weights[i, 2], wires=2)

            qml.CNOT(wires=[0, 1])
            qml.CNOT(wires=[1, 2])
            qml.CNOT(wires=[2, 0])

        return qml.expval(qml.PauliY(0) @ qml.PauliZ(2))

    gradient = np.zeros_like(weights)

    # QHACK #
    for i in range(len(weights)):  # row index
        for j in range(len(weights[0])):  # column index
            # Parameter-shift rule: df/dθ = [f(θ + π/2) - f(θ - π/2)] / 2
            shifted_par = weights.copy()
            shifted_par[i][j] += np.pi / 2
            fwd_shift = vq_circuit(shifted_par)

            shifted_par[i][j] -= np.pi
            bkwd_shift = vq_circuit(shifted_par)

            gradient[i][j] = 0.5 * (fwd_shift - bkwd_shift)
    # QHACK #

    return gradient
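
# A quick usage sketch for the routine above (illustration only: the weight
# values are made up, and ``import pennylane as qml`` together with
# ``from pennylane import numpy as np`` are assumed):
weights = np.array([[0.1, 0.2, 0.3],
                    [0.4, 0.5, 0.6]])
grad = parameter_shift(weights)
print(grad.shape)  # (2, 3): one partial derivative per trainable parameter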
Example #3
def expand_num_freq(num_freq, param):
    """Expand ``num_freq`` so that its nesting matches the structure of ``param``."""
    if np.isscalar(num_freq):
        num_freq = [num_freq] * len(param)
    expanded = []
    for _num_freq, par in zip(num_freq, param):
        if np.isscalar(_num_freq) and np.isscalar(par):
            expanded.append(_num_freq)
        elif np.isscalar(_num_freq):
            # Broadcast a scalar frequency count across an array-valued parameter.
            expanded.append(np.ones_like(par) * _num_freq)
        elif np.isscalar(par):
            raise ValueError(
                f"Received array-valued num_freq entry {_num_freq} "
                f"for scalar parameter {par}."
            )
        elif len(_num_freq) == len(par):
            expanded.append(_num_freq)
        else:
            raise ValueError(
                f"Length of num_freq entry {_num_freq} does not match "
                f"parameter {par}."
            )
    return expanded
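
# A small, hypothetical illustration of the branches above (the inputs are
# made up; ``np`` is assumed to be NumPy or ``pennylane.numpy``):
param = [0.5, np.array([0.1, 0.2, 0.3])]
expand_num_freq(1, param)                # -> [1, array([1., 1., 1.])]
expand_num_freq([2, [1, 1, 3]], param)   # -> [2, [1, 1, 3]]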
Example #4
class AutogradBox(qml.math.TensorBox):
    """Implements the :class:`~.TensorBox` API for ``pennylane.numpy`` tensors.

    For more details, please refer to the :class:`~.TensorBox` documentation.
    """

    abs = wrap_output(lambda self: np.abs(self.data))
    angle = wrap_output(lambda self: np.angle(self.data))
    arcsin = wrap_output(lambda self: np.arcsin(self.data))
    cast = wrap_output(lambda self, dtype: np.tensor(self.data, dtype=dtype))
    diag = staticmethod(wrap_output(lambda values, k=0: np.diag(values, k=k)))
    expand_dims = wrap_output(
        lambda self, axis: np.expand_dims(self.data, axis=axis))
    ones_like = wrap_output(lambda self: np.ones_like(self.data))
    reshape = wrap_output(lambda self, shape: np.reshape(self.data, shape))
    sqrt = wrap_output(lambda self: np.sqrt(self.data))
    sum = wrap_output(lambda self, axis=None, keepdims=False: np.sum(
        self.data, axis=axis, keepdims=keepdims))
    T = wrap_output(lambda self: self.data.T)
    squeeze = wrap_output(lambda self: self.data.squeeze())

    @staticmethod
    def astensor(tensor):
        return np.tensor(tensor)

    @staticmethod
    @wrap_output
    def concatenate(values, axis=0):
        return np.concatenate(AutogradBox.unbox_list(values), axis=axis)

    @staticmethod
    @wrap_output
    def dot(x, y):
        x, y = AutogradBox.unbox_list([x, y])

        if x.ndim == 0 and y.ndim == 0:
            return x * y

        if x.ndim == 2 and y.ndim == 2:
            return x @ y

        return np.dot(x, y)

    @property
    def interface(self):
        return "autograd"

    def numpy(self):
        if hasattr(self.data, "_value"):
            # Catches the edge case where the data is an Autograd arraybox,
            # which only occurs during backpropagation.
            return self.data._value

        return self.data.numpy()

    @property
    def requires_grad(self):
        return self.data.requires_grad

    @wrap_output
    def scatter_element_add(self, index, value):
        size = self.data.size
        flat_index = np.ravel_multi_index(index, self.shape)
        t = [0] * size
        t[flat_index] = value
        self.data = self.data + np.array(t).reshape(self.shape)
        return self.data

    @property
    def shape(self):
        return self.data.shape

    @staticmethod
    @wrap_output
    def stack(values, axis=0):
        return np.stack(AutogradBox.unbox_list(values), axis=axis)

    @wrap_output
    def take(self, indices, axis=None):
        indices = self.astensor(indices)

        if axis is None:
            return self.data.flatten()[indices]

        fancy_indices = [slice(None)] * axis + [indices]
        return self.data[tuple(fancy_indices)]

    @staticmethod
    @wrap_output
    def where(condition, x, y):
        return np.where(condition, *AutogradBox.unbox_list([x, y]))
Example #5
#
# The latter two observables, on the other hand, appear to be
# entirely unaffected by the noise.
#
# We can see that even when noise is present, there may still be subspaces
# or observables which are minimally affected or unaffected.
# This gives us some hope that variational algorithms can learn
# to find and exploit such noise-free substructures on otherwise
# noisy devices.
#
# We can also plot the CHSH observable in the noisy case. Remember,
# values greater than 2 can safely be considered "quantum".
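#
# Concretely, the boundary at 2 is the CHSH inequality: for the usual
# combination of correlators (the sign convention behind ``CHSH_expvals``
# is assumed here, not checked),
#
# .. math::
#
#     S = \langle A_1 B_1 \rangle + \langle A_1 B_2 \rangle
#         + \langle A_2 B_1 \rangle - \langle A_2 B_2 \rangle,
#
# local hidden-variable models obey :math:`|S| \leq 2`, while quantum
# mechanics allows values up to :math:`2\sqrt{2}` (Tsirelson's bound).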

plt.plot(noise_vals, CHSH_expvals, label="CHSH")
plt.plot(noise_vals,
         2 * np.ones_like(noise_vals),
         label="Quantum-classical boundary")
plt.xlabel('Noise parameter')
plt.ylabel('CHSH Expectation value')
plt.legend()
plt.show()

##############################################################################
# Too much noise (around 0.2 in this example), and we lose the
# quantumness we created in our circuit. But if we only have a little
# noise, the quantumness undeniably remains. So there is still hope
# that quantum algorithms can do something useful, even on noisy
# near-term devices, so long as the noise is not too high.
#
# .. note::
#
Example #6
    def ones_like(self):
        return AutogradBox(np.ones_like(self.data))
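
# A minimal sketch of how a ``ones_like`` method like the one above is
# exercised, mirroring the test in Example #1 and the ``AutogradBox`` class
# from Example #4. Constructing ``AutogradBox`` directly from a
# ``pennylane.numpy`` tensor is an assumption; import paths vary across
# PennyLane versions.
x = np.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
box = AutogradBox(x)
ones = box.ones_like()                  # new AutogradBox wrapping an all-ones tensor
assert isinstance(ones, AutogradBox)
assert np.all(ones == np.ones_like(x))  # same comparison as in Example #1's test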