Example no. 1
    def test_scalar_jacobian(self, execute_kwargs, tol):
        """Test scalar jacobian calculation"""
        a = np.array(0.1, requires_grad=True)
        dev = qml.device("default.qubit", wires=2)

        def cost(a):
            with qml.tape.JacobianTape() as tape:
                qml.RY(a, wires=0)
                qml.expval(qml.PauliZ(0))
            return execute([tape], dev, **execute_kwargs)[0]

        res = qml.jacobian(cost)(a)
        assert res.shape == (1,)

        # compare to standard tape jacobian
        with qml.tape.JacobianTape() as tape:
            qml.RY(a, wires=0)
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {0}
        tapes, fn = param_shift(tape)
        expected = fn(dev.batch_execute(tapes))

        assert expected.shape == (1, 1)
        assert np.allclose(res, np.squeeze(expected), atol=tol, rtol=0)
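As a sanity check on the comparison above: for RY(a) applied to |0>, the expectation <Z> equals cos(a), so the exact derivative is -sin(a), and the two-term parameter-shift rule recovers it directly. A minimal NumPy sketch, independent of PennyLane:

import numpy as np

a = 0.1
f = lambda x: np.cos(x)  # <Z> after RY(x) acting on |0>
shift = np.pi / 2

# two-term parameter-shift rule
grad = (f(a + shift) - f(a - shift)) / 2
print(np.allclose(grad, -np.sin(a)))  # True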
Example no. 2
                def hessian_product(ddy):
                    """Returns the vector-Hessian product with given
                    parameter values p, output gradient dy, and output
                    second-order gradient ddy"""
                    hessian = _evaluate_grad_matrix(p, "hessian")

                    if dy.size > 1:
                        # multiple output values: contract the Hessian with
                        # the first-order output gradient on both sides
                        vhp = dy @ ddy @ hessian @ dy.T
                    else:
                        # scalar output: the vector-Hessian product reduces to
                        # a single contraction with the second-order cotangent
                        vhp = np.squeeze(ddy @ hessian)

                    return vhp
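hessian_product above closes over p, dy, and _evaluate_grad_matrix from its enclosing scope. The sketch below isolates just the scalar-output branch; the array shapes and values are illustrative assumptions:

import numpy as np

num_params = 3
# Hessian of a scalar cost with respect to the trainable parameters
hessian = np.arange(9.0).reshape(num_params, num_params)
# second-order output cotangent supplied by the autodiff framework
ddy = np.array([1.0, 0.0, 0.0])

# vector-Hessian product: vhp[j] = sum_i ddy[i] * hessian[i, j]
vhp = np.squeeze(ddy @ hessian)
print(vhp)  # [0. 1. 2.]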
Example no. 3
    def test_scalar_jacobian(self, tol):
        """Test scalar jacobian calculation"""
        a = np.array(0.1, requires_grad=True)

        def cost(a, device):
            with AutogradInterface.apply(QuantumTape()) as tape:
                qml.RY(a, wires=0)
                expval(qml.PauliZ(0))
            assert tape.trainable_params == {0}
            return tape.execute(device)

        dev = qml.device("default.qubit", wires=2)
        res = qml.jacobian(cost)(a, device=dev)
        assert res.shape == (1,)

        # compare to standard tape jacobian
        with QuantumTape() as tape:
            qml.RY(a, wires=0)
            expval(qml.PauliZ(0))

        tape.trainable_params = {0}
        expected = tape.jacobian(dev)
        assert expected.shape == (1, 1)
        assert np.allclose(res, np.squeeze(expected), atol=tol, rtol=0)
Example no. 4
    def test_scalar_jacobian(self, tol, mocker):
        """Test scalar jacobian calculation"""
        spy = mocker.spy(QuantumTape, "jacobian")
        a = np.array(0.1, requires_grad=True)

        def cost(a, device):
            with QuantumTape() as tape:
                qml.RY(a, wires=0)
                expval(qml.PauliZ(0))
            return tape.execute(device)

        dev = qml.device("default.qubit.autograd", wires=2)
        res = qml.jacobian(cost)(a, device=dev)
        spy.assert_not_called()
        assert res.shape == (1,)

        # compare to standard tape jacobian
        with QuantumTape() as tape:
            qml.RY(a, wires=0)
            expval(qml.PauliZ(0))

        expected = tape.jacobian(dev)
        assert expected.shape == (1, 1)
        assert np.allclose(res, np.squeeze(expected), atol=tol, rtol=0)
Example no. 5
    def step(self, objective_fn, *args, **kwargs):
        """Update trainable arguments with one step of the optimizer.

        Args:
            objective_fn (function): the objective function for optimization
            *args: variable length argument list for the objective function
            **kwargs: variable length keyword arguments for the objective function

        Returns:
            list[array]: The new variable values :math:`x^{(t+1)}`.
            If a single argument is provided, an array is returned
            instead of a list.
        """

        self.trainable_args = set()

        for index, arg in enumerate(args):
            if getattr(arg, "requires_grad", True):
                self.trainable_args |= {index}

        if self.s is None:
            # Number of shots per parameter
            self.s = [
                np.zeros_like(a, dtype=np.int64) + self.min_shots
                for i, a in enumerate(args)
                if i in self.trainable_args
            ]

        # keep track of the number of shots run
        s = np.concatenate([i.flatten() for i in self.s])
        self.max_shots = max(s)
        self.shots_used = int(2 * np.sum(s))
        self.total_shots_used += self.shots_used

        # compute the gradient, as well as the variance in the gradient,
        # using the number of shots determined by the array s.
        grads, grad_variances = self.compute_grad(objective_fn, args, kwargs)
        new_args = self.apply_grad(grads, args)

        if self.xi is None:
            self.chi = [np.zeros_like(g, dtype=np.float64) for g in grads]
            self.xi = [np.zeros_like(g, dtype=np.float64) for g in grads]

        # running average of the gradient
        self.chi = [self.mu * c + (1 - self.mu) * g for c, g in zip(self.chi, grads)]

        # running average of the gradient variance
        self.xi = [self.mu * x + (1 - self.mu) * v for x, v in zip(self.xi, grad_variances)]

        for idx, (c, x) in enumerate(zip(self.chi, self.xi)):
            xi = x / (1 - self.mu ** (self.k + 1))
            chi = c / (1 - self.mu ** (self.k + 1))

            # determine the new optimum shots distribution for the next
            # iteration of the optimizer
            s = np.ceil(
                (2 * self.lipschitz * self.stepsize * xi)
                / ((2 - self.lipschitz * self.stepsize) * (chi ** 2 + self.b * (self.mu ** self.k)))
            )

            # apply an upper and lower bound on the new shot distributions,
            # to avoid the number of shots reducing below min(2, min_shots),
            # or growing too significantly.
            gamma = (
                (self.stepsize - self.lipschitz * self.stepsize ** 2 / 2) * chi ** 2
                - xi * self.lipschitz * self.stepsize ** 2 / (2 * s)
            ) / s

            argmax_gamma = np.unravel_index(np.argmax(gamma), gamma.shape)
            smax = max(s[argmax_gamma], 2)
            self.s[idx] = np.squeeze(np.int64(np.clip(s, min(2, self.min_shots), smax)))

        self.k += 1

        # unwrap from the list if only a single argument was provided, for a cleaner return
        if len(new_args) == 1:
            return new_args[0]

        return new_args
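A shot-adaptive step rule of this form is implemented by PennyLane's qml.ShotAdaptiveOptimizer; assuming that is the surrounding class, a minimal usage sketch follows (the circuit and hyperparameters are illustrative; the device must run with a finite number of shots, since the optimizer adapts the per-parameter shot budget):

import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=2, shots=100)

@qml.qnode(dev)
def cost(params):
    qml.RY(params[0], wires=0)
    qml.RX(params[1], wires=1)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

params = np.array([0.1, 0.2], requires_grad=True)
opt = qml.ShotAdaptiveOptimizer(min_shots=10)

for i in range(5):
    params = opt.step(cost, params)
    print(f"step {i}: total shots used = {opt.total_shots_used}")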
Example no. 6
import numpy as np

def one_hot(a: np.ndarray, num_classes: int) -> np.ndarray:
    # Rows of the identity matrix are the one-hot vectors; the squeeze
    # drops the batch axis when a single label is passed.
    return np.squeeze(np.eye(num_classes)[a.astype(int).reshape(-1)])
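A quick usage check with illustrative values:

labels = np.array([0, 2, 1])
print(one_hot(labels, num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]

# the squeeze means a single label collapses to a 1D vector
print(one_hot(np.array([2]), num_classes=3))  # [0. 0. 1.]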