Example 1
def cost_fn(params, generators):
    # test single-parameter inputs: duplicate the single value so that
    # both circuit parameters are set
    if np.ndim(params) == 1:
        params = [params[0], params[0]]
    Z_1, Y_2 = circuit_1(params, generators=generators)
    X_1 = circuit_2(params, generators=generators)
    # weighted combination of the three expectation values
    return 0.5 * Y_2 + 0.8 * Z_1 - 0.2 * X_1
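Below is a minimal sketch of how cost_fn could be wired up and evaluated. The device, the two circuits, and the generator observables are assumptions for illustration, not the original test fixtures; each circuit takes two parameters, which is why cost_fn duplicates a single input value.

import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def circuit_1(params, generators=None):
    qml.RX(params[0], wires=0)
    qml.RY(params[1], wires=1)
    # measure the first two generator observables
    return qml.expval(generators[0]), qml.expval(generators[1])

@qml.qnode(dev)
def circuit_2(params, generators=None):
    qml.RX(params[0], wires=0)
    qml.RY(params[1], wires=1)
    return qml.expval(generators[2])

generators = [qml.PauliZ(0), qml.PauliY(1), qml.PauliX(0)]
params = np.array([0.54], requires_grad=True)  # single value, duplicated by cost_fn
print(cost_fn(params, generators))                      # scalar cost
print(qml.grad(cost_fn, argnum=0)(params, generators))  # gradient w.r.t. params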
Example 2
        def gradient_product(dy):
            """Returns the vector-Jacobian product with given
            parameter values p and output gradient dy"""

            if all(np.ndim(p) == 0 for p in params):
                # only flatten dy if all parameters are single values
                dy = dy.flatten()

            @autograd.extend.primitive
            def jacobian(p):
                """Returns the Jacobian for parameters p"""
                return _evaluate_grad_matrix(p, "jacobian")

            def vhp(ans, p):
                # backward rule for `jacobian`: differentiating the Jacobian
                # again yields a vector-Hessian product
                def hessian_product(ddy):
                    """Returns the vector-Hessian product with given
                    parameter values p, output gradient dy, and output
                    second-order gradient ddy"""
                    hessian = _evaluate_grad_matrix(p, "hessian")

                    if dy.size > 1:
                        vhp = dy @ ddy @ hessian @ dy.T
                    else:
                        vhp = ddy @ hessian
                        vhp = vhp.flatten()

                    return vhp

                return hessian_product

            # register vhp as the backward method of the jacobian function
            autograd.extend.defvjp(jacobian, vhp, argnums=[0])

            # contract the output gradient dy with the (differentiable) Jacobian
            vjp = dy @ jacobian(params)
            return vjp
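For context, this relies on autograd's extension API: wrapping a function with @autograd.extend.primitive hides its internals from the tracer, and defvjp supplies the backward rule by hand. Registering vhp as the VJP of jacobian is what lets autograd differentiate through the Jacobian a second time, producing the vector-Hessian product. A self-contained toy illustration of the same mechanism (square is a made-up stand-in, not PennyLane code):

import autograd
from autograd.extend import primitive, defvjp

@primitive
def square(x):
    # treated as an opaque computation by the tracer
    return x ** 2

# hand-written backward rule: the VJP of square at x is g -> g * 2 * x
defvjp(square, lambda ans, x: lambda g: g * 2 * x)

grad_fn = autograd.grad(lambda x: 3.0 * square(x))
print(grad_fn(2.0))  # 12.0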
Example 3
        def gradient_product(g):
            # In autograd, the forward pass always runs before the backward
            # pass, so the unwrapped parameter values cached during the
            # forward pass can be reused here rather than unwrapping again.
            self.set_parameters(self._all_params_unwrapped,
                                trainable_only=False)
            # evaluate the Jacobian with the unwrapped parameters...
            jac = self.jacobian(device, params=params, **self.jacobian_options)
            # ...then restore the original parameter values
            self.set_parameters(self._all_parameter_values,
                                trainable_only=False)

            # only flatten g if all parameters are single values
            if all(np.ndim(p) == 0 for p in params):
                vjp = g.flatten() @ jac
            else:
                vjp = g @ jac
            return vjp
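The branch on scalar parameters mirrors the flattening in Example 2: when every parameter is a single value, the Jacobian is a plain (num_outputs, num_params) matrix and the incoming gradient g may carry an extra output axis that must be flattened before the contraction. A plain-NumPy illustration with made-up shapes:

import numpy as onp

jac = onp.array([[0.3, -0.1],
                 [0.7,  0.2]])    # (num_outputs, num_params), scalar parameters
g = onp.array([[1.0], [0.5]])     # upstream gradient with a trailing axis
vjp = g.flatten() @ jac           # shape (num_params,)
print(vjp)                        # [0.65 0.  ]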
    def rotosolve_step(f, x):
        """Helper function to test the Rotosolve and Rotoselect optimizers"""
        # make sure that x is an array
        if np.ndim(x) == 0:
            x = np.array([x])

        # helper that sets x[d] = theta in place and returns x
        def insert(xf, d, theta):
            xf[d] = theta
            return xf

        for d, _ in enumerate(x):
            # evaluate the cost with coordinate d set to 0 and +/- pi/2
            H_0 = float(f(insert(x, d, 0)))
            H_p = float(f(insert(x, d, np.pi / 2)))
            H_m = float(f(insert(x, d, -np.pi / 2)))
            # closed-form minimizer of the sinusoidal cost along coordinate d
            a = onp.arctan2(2 * H_0 - H_p - H_m, H_p - H_m)

            x[d] = -np.pi / 2 - a

            # wrap the updated angle back into (-pi, pi]
            if x[d] <= -np.pi:
                x[d] += 2 * np.pi
        return x
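rotosolve_step performs one Rotosolve sweep: for each coordinate d, the cost restricted to x[d] is assumed to be sinusoidal, so three evaluations (at 0 and +/- pi/2) pin down its closed-form minimizer

    theta_d = -pi/2 - arctan2(2*H(0) - H(pi/2) - H(-pi/2), H(pi/2) - H(-pi/2))

A hedged usage sketch, assuming rotosolve_step is available at module scope; the cost function and starting point below are made up, and the imports cover both np and onp used inside the helper:

import numpy as onp
from pennylane import numpy as np

def cost(x):
    # separable and sinusoidal in each coordinate, as Rotosolve assumes
    return np.cos(x[0]) + 0.5 * np.sin(x[1])

x = np.array([0.3, -1.1])
x = rotosolve_step(cost, x)
print(x, cost(x))  # each coordinate jumps to its per-coordinate minimizer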