Example #1
def test_vector_jacobian_product():
    # This function will have an asymmetric jacobian matrix.
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5)
    V = npr.randn(5)
    J = jacobian(fun)(a)
    check_equivalent(np.dot(V.T, J), vector_jacobian_product(fun)(a, V))
Example #2
    def _evaluate_constraint_jacobians(self, x):
        return numpy.vstack([
            autograd.jacobian(
                lambda x: responsible_class.evaluate(x, block.parameter_array.array())
            )(x)
            for responsible_class, block in self._constraints.items()
        ])
Example #3
File: terms.py Project: dfm/GenRP
    def get_coeffs_jacobian(self, include_frozen=False):
        if not HAS_AUTOGRAD:
            raise ImportError("'autograd' must be installed to compute "
                              "gradients")
        jac = jacobian(lambda p: np.concatenate(self.get_all_coefficients(p)))
        jac = jac(self.get_parameter_vector(include_frozen=True)).T
        if include_frozen:
            return jac
        return jac[self.unfrozen_mask]
Example #4
def test_make_jvp():
    A = npr.randn(3, 5)
    x = npr.randn(5)
    v = npr.randn(5)
    fun = lambda x: np.tanh(np.dot(A, x))

    jvp_explicit = lambda x: lambda v: np.dot(jacobian(fun)(x), v)
    jvp = make_jvp(fun)

    check_equivalent(jvp_explicit(x)(v), jvp(x)(v))
Example #5
def elementwise_hess(fun, argnum=0):
    """
    From https://github.com/HIPS/autograd/issues/60
    """
    def sum_latter_dims(x):
        return anp.sum(x.reshape(x.shape[0], -1), 1)

    def sum_grad_output(*args, **kwargs):
        return sum_latter_dims(elementwise_grad(fun)(*args, **kwargs))
    return jacobian(sum_grad_output, argnum)
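A minimal usage sketch (not from the source: softplus and the input shape are illustrative, and the helper above is assumed to live in a module with these imports):

import autograd.numpy as anp
from autograd import elementwise_grad, jacobian

def softplus(x):
    return anp.log(1.0 + anp.exp(x))

x = anp.array([[0.1, 0.2], [0.3, 0.4]])
H = elementwise_hess(softplus)(x)   # shape (2, 2, 2): one 2x2 second-derivative block per row of x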
Example #6
def test_jacobian_against_wrapper():
    A = npr.randn(3,3,3)
    fun = lambda x: np.einsum(
        'ijk,jkl->il',
        A, np.sin(x[...,None] * np.tanh(x[None,...])))

    B = npr.randn(3,3)
    jac1 = jacobian(fun)(B)
    jac2 = old_jacobian(fun)(B)

    assert np.allclose(jac1, jac2)
Example #7
def loss_function(W, x, y):
    loss_sum = 0.

    for xi in x:
        for yi in y:
            input_point = np.array([xi, yi])
            net_out = neural_network(W, input_point)[0]

            psy_t = psy_trial(input_point, net_out)
            psy_t_hessian = jacobian(jacobian(psy_trial))(input_point, net_out)

            gradient_of_trial_d2x = psy_t_hessian[0][0]
            gradient_of_trial_d2y = psy_t_hessian[1][1]

            func = f(input_point)  # right part function

            err_sqr = ((gradient_of_trial_d2x + gradient_of_trial_d2y) -
                       func)**2
            loss_sum += err_sqr

    return loss_sum
Example #8
def descent_overlap_si(N, patterns, weights, biases, sc, incremental, tol,
                       lmbd, alpha):
    r'''
    Newton's method for minimising \sum_{\mu = 1}^{p} -(h_i^{\mu} \sigma_i^{\mu}) / (\sum_j w_{ij}^2 + b_i^2)^{1/2}
    '''
    jac = egrad(overlap_si, 0)
    hess = jacobian(jac)
    if incremental:
        for i in range(N):  # for each neuron independently
            for j in range(patterns.shape[0]):
                pattern = np.array(deepcopy(patterns[j].reshape(1, N)))
                w_i = weights[i, :]
                b_i = biases[i]
                x0 = np.append(w_i, b_i)
                bnds = list(
                    zip(-100 * np.ones(x0.shape[-1]),
                        100 * np.ones(x0.shape[-1])))
                if sc == False:
                    bnds[i] = (0, 0)
                res = minimize(overlap_si,
                               x0,
                               args=(pattern, i, lmbd, alpha),
                               jac=jac,
                               hess=hess,
                               bounds=bnds,
                               method='Newton-CG',
                               tol=tol,
                               options={'disp': False})
                weights[i, :] = deepcopy(res['x'][:-1])
                biases[i] = deepcopy(res['x'][-1])
    if incremental == False:
        patterns = np.array(deepcopy(patterns.reshape(-1, N)))
        for i in range(N):  # for each neuron independently
            w_i = weights[i, :]
            b_i = biases[i]
            x0 = np.append(w_i, b_i)
            bnds = list(
                zip(-100 * np.ones(x0.shape[-1]), 100 * np.ones(x0.shape[-1])))
            if sc == False:
                bnds[i] = (0, 0)
            res = minimize(overlap_si,
                           x0,
                           args=(patterns, i, lmbd, alpha),
                           jac=jac,
                           hess=hess,
                           bounds=bnds,
                           method='Newton-CG',
                           tol=tol,
                           options={'disp': False})
            weights[i, :] = deepcopy(res['x'][:-1])
            biases[i] = deepcopy(res['x'][-1])
    return weights, biases
Example #9
def test_rwstickysoftmaxagent():
    lr = 0.1
    B = 1.5
    p = 0.01
    task = TwoArmedBandit()
    q = RWStickySoftmaxAgent(task,
                             learning_rate=lr,
                             inverse_softmax_temp=B,
                             perseveration=p)

    x = np.array([1., 0., 0.])
    u1 = np.array([1., 0.])
    u2 = np.array([0., 1.])
    x_1 = np.array([0., 1., 0.])
    x_2 = np.array([0., 0., 1.])
    r1 = 1.0
    r2 = 0.0

    q.log_prob(x, u1)
    q.learning(x, u1, r1, x_1, None)
    q.log_prob(x, u2)
    q.learning(x, u2, r2, x_2, None)
    q.log_prob(x, u2)
    q.learning(x, u2, r1, x_1, None)
    q.log_prob(x, u1)
    q.learning(x, u1, r2, x_2, None)
    q.log_prob(x, u1)
    q.learning(x, u1, r1, x_1, None)

    def f(w):
        m = RWStickySoftmaxAgent(task,
                                 learning_rate=w[0],
                                 inverse_softmax_temp=w[1],
                                 perseveration=w[2])
        m._log_prob_noderivatives(x, u1)
        m.critic._update_noderivatives(x, u1, r1, x_1, None)
        m._log_prob_noderivatives(x, u2)
        m.critic._update_noderivatives(x, u2, r2, x_2, None)
        m._log_prob_noderivatives(x, u2)
        m.critic._update_noderivatives(x, u2, r1, x_1, None)
        m._log_prob_noderivatives(x, u1)
        m.critic._update_noderivatives(x, u1, r2, x_2, None)
        m._log_prob_noderivatives(x, u1)
        m.critic._update_noderivatives(x, u1, r1, x_1, None)
        return m.logprob_

    w = np.array([lr, B, p])
    ag = jacobian(f)(w)
    aH = hessian(f)(w)

    assert (np.linalg.norm(q.grad_ - ag) < 1e-6)
    assert (np.linalg.norm(q.hess_ - aH) < 1e-6)
Example #10
    def featureGradJacobNormalizer(self,
                                   feature,
                                   singulervaluenorm=SINGULARVLAUENORM,
                                   computeHessian=True):
        featurefun = self.features[feature]
        # forward = featurefun(self.vec)
        # assert( not np.isnan(forward).any() )
        g = jacobian(featurefun)(self.vec)[:]
        # assert( not np.isnan(g).any() )
        if (computeHessian):
            H = jacobian(grad(featurefun))(self.vec)[:, :]
            assert (not np.isnan(H).any())
        else:
            H = 1
        if (computeHessian and singulervaluenorm):
            normalizer = np.linalg.svd(H)[1][0]
        else:
            normalizer = 1

        g, H = g / normalizer, H / normalizer

        return g, H, normalizer
Example #11
    def test_gradient_exception_on_sample(self, qubit_device_2_wires):
        """Tests that the proper exception is raised if differentiation of sampling is attempted."""
        @qml.qnode(qubit_device_2_wires)
        def circuit(x):
            qml.RX(x, wires=[0])
            return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))

        with pytest.raises(
                qml.QuantumFunctionError,
                match=
                "Circuits that include sampling can not be differentiated."):
            grad_fn = autograd.jacobian(circuit)
            grad_fn(1.0)
Example #12
def test_jacobian_against_stacked_grads():
    scalar_funs = [
        lambda x: np.sum(x**3), lambda x: np.prod(np.sin(x) + np.sin(x)),
        lambda x: grad(lambda y: np.exp(y) * np.tanh(x[0]))(x[1])
    ]

    vector_fun = lambda x: np.array([f(x) for f in scalar_funs])

    x = npr.randn(5)
    jac = jacobian(vector_fun)(x)
    grads = [grad(f)(x) for f in scalar_funs]

    assert np.allclose(jac, np.vstack(grads))
Example #13
    def gen_net_f_auton_jac(h, Wrec, b, Win=0, x=0, phi=phi):
        """
        generate net_f_auton function's Jacobian.

        Returns:
            net_f_auton_jac (function): Jacobian for net_f_auton.

        """
        def net_f_auton_reduced(h):
            return net_f_auton(h, Wrec, b, Win, x, phi)

        f_auton_jac = autograd.jacobian(net_f_auton_reduced)
        return f_auton_jac
Example #14
    def reverse_kinematic_step(self, x, y, z):
        '''
        Calculate error and Jacobian for updating reverse kinematic
        '''
        def cal_loss(thetas):
            pos = self.get_arm_pos(3, *thetas)
            return pos - np.array([x,y,z])
        
        diff = cal_loss(self.get_angle())
        jacobian_loss = jacobian(cal_loss)
        jac = jacobian_loss(self.get_angle())

        return diff, jac
Example #15
    def get_param_sensitivity(self, interesting_moments):
        # interesting moments should be a function that takes in the global
        # free parameters, and returns the posterior moments you want to examine

        get_moment_jac = autograd.jacobian(interesting_moments)
        moment_jac = get_moment_jac(self.optimal_global_free_params)

        sensitivity_operator = np.linalg.solve(self.kl_hessian, moment_jac.T)

        alpha_jac = self.get_alpha_jac(self.optimal_global_free_params,\
                                        self.alpha)

        return np.dot(sensitivity_operator.T, -1 * alpha_jac)
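Read as a formula, the method above computes the usual linear-response sensitivity (my reconstruction from the code, writing H_KL = self.kl_hessian, J_m = moment_jac, J_alpha = alpha_jac and using the symmetry of the KL Hessian):

d m(\hat{\eta}(\alpha)) / d\alpha = - J_m H_{KL}^{-1} J_\alpha

since np.linalg.solve(self.kl_hessian, moment_jac.T) forms H_KL^{-1} J_m^T and the final dot applies its transpose to -alpha_jac.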
Example #16
def get_resid_wave_eq_first_order(u):
    """
    Generate the wave equation residual function from the solution ansatz u.
    This is for the first-order form.
    u must be of the form
    [u1, u2, u3] = u(params, x, t), where params are going to be optimized over.
    u1 is u, the position
    u2 is ut, the unknown corresponding to the derivative w.r.t. time.
    u3 is ux, the unknown corresponding to the derivative w.r.t. space.
        NOTE: the derivative of u1 w.r.t. t, i.e. du/dt, is only equal to u2=ut if the PDE system is solved exactly.

    Returns
    --------
    resid: scalar valued function resid(params,x,t).
    Returns residual of PDE from neural network parameters and x,t (space and time positions). 
    """
    #See autograd docs for jacobian documentation.
    #This code treats u as a vector valued function of the arguments x,t
    #So have to compute two jacobians, one for each. Consider changing depending on efficiency.
    #Is one jacobian w.r.t single vector [x,t] much faster than two jacobians w.r.t. (x,t)?

    #Jx is vector valued function of params,x,t
    #Jx(params,x,t) is d([u,ut,ux])/dx(params,x,t)
    Jx = jacobian(u, 1)
    Jt = jacobian(u, 2)


    elementwise_error=lambda params,x,t: np.array([\
        Jx(params,x,t)[0]-Jt(params,x,t)[0]-u(params,x,t)[2]+u(params,x,t)[1], \
        Jx(params,x,t)[1]-Jt(params,x,t)[2], \
        Jx(params,x,t)[2]-Jt(params,x,t)[1]
        ])

    #elementwise_error=lambda params,x,t: np.array([\
    #   Jx(params,x,t)[0], 0., 0.])

    resid = lambda params, x, t: np.linalg.norm(
        (elementwise_error(params, x, t)), ord=2)
    return resid
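Reading the three residual rows back as equations (subscripts denote partial derivatives), the code enforces

\partial_x u_1 - \partial_t u_1 = u_3 - u_2, \qquad \partial_x u_2 = \partial_t u_3, \qquad \partial_x u_3 = \partial_t u_2,

so that once u_2 = \partial_t u_1 and u_3 = \partial_x u_1 hold, the last equation is the wave equation u_{tt} = u_{xx}.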
Example #17
    def _compute_alternative_bound(self, x, r, cube):

        n, m, problem = self.n, self.m, self.problem

        def axlogy(x, y):
            # pylint: disable=no-member
            return anp.where(y > 0, x * anp.log(y), 0)

        def obj(xi, cnst):
            # pylint: disable=no-member
            xi_ = xi.reshape((n, m))
            out = anp.sum(
                (axlogy(xi_, xi_ /
                        (x + r)) * problem.S * problem.w).T / problem.b) / cnst
            return out

        jac = jacobian(obj)  # pylint: disable=no-value-for-parameter
        lb = np.outer(self.z_lb, np.maximum(x - r, self.problem.x_lb)).reshape(
            (n * m, ))
        ub = np.outer(self.z_ub, np.minimum(x + r, self.problem.x_ub)).reshape(
            (n * m, ))
        bounds = list(zip(lb, ub))

        def cnstr(xi):
            xi_ = xi.reshape((n, m))
            S_xi = np.sum(problem.S * xi_, axis=0)
            return np.hstack([
                S_xi - np.maximum(1 - x - r, 1 - self.problem.x_ub),
                np.minimum(1 - x + r, 1 - self.problem.x_lb) - S_xi,  #
            ])

        cnstrs = [{"type": "ineq", "fun": cnstr}]
        xi_start = lb
        cnst = np.linalg.norm(jac(xi_start,
                                  1.0))  # for scaling the optimization problem

        assert np.isfinite(cnst)

        with np.errstate(
                all="ignore"):  # suppress overflows during optimization
            opt = minimize(
                obj,
                xi_start,
                args=(cnst, ),
                bounds=bounds,
                constraints=cnstrs,
                jac=jac,
                tol=TOL,
            )

        return opt, cnst
Example #18
def get_obj_and_obj_gradient(KMM_get_K, B_max, KMM_eps, SDR_get_K, SDR_get_Ky, obj_from_ws_and_Ks, dobj_dwsopt, get_dobj_dP_thru_Ku, lin_solver, cvxopt_solver, xs_train, xs_test, ys_train, use_yu=False, dobj_dB=None):
    # get_dobj_dP_thru_Ku accepts xs_train, SDR_get_K, Ky
    
    the_f = functools.partial(ws_obj_f, xs_train, xs_test, KMM_get_K)
    df_dws = autograd.jacobian(the_f) # ans dim: |x|
    def the_f_reverse(P,ws):
        return df_dws(ws,P)
    #d_dP_df_dws = autograd.jacobian(lambda P,ws: df_dws(ws,P)) # ans dim: |x| x |p|
    d_dP_df_dws = autograd.jacobian(the_f_reverse) # ans dim: |x| x |p|
    d_dws_df_dws = autograd.jacobian(autograd.jacobian(the_f))

    Ky = SDR_get_Ky(ys_train, ys_train)

    #dobj_dwsopt = autograd.jacobian(lambda wsopt, Ky, Ku: obj_from_ws_and_Ks(Ky, Ku, wsopt))

    #def obj_from_P_and_wsopt(P, wsopt):
    #    us_train = np.dot(xs_train, P)
    #    Ku = SDR_get_K(us_train, us_train)
    #    return obj_from_ws_and_Ks(Ky, Ku, wsopt)

    #dobj_dP_thru_Ku = autograd.jacobian(obj_from_P_and_wsopt)

    if not use_yu:
        dobj_dP_thru_Ku = get_dobj_dP_thru_Ku(xs_train, SDR_get_K, Ky)
    else:
        dobj_dP_thru_Ku = get_dobj_dP_thru_Ku(xs_train, SDR_get_K, Ky, ys_train)

    A, b = get_KMM_ineq_constraints(len(xs_train), B_max, KMM_eps)

    dobj_dP = functools.partial(get_dobj_dP, use_yu, xs_train, xs_test, Ky, ys_train, SDR_get_K, KMM_get_K, dobj_dP_thru_Ku, lin_solver, cvxopt_solver, df_dws, d_dP_df_dws, d_dws_df_dws, dobj_dwsopt, A, b)

    obj = functools.partial(get_obj, use_yu, obj_from_ws_and_Ks, xs_train, xs_test, Ky, ys_train, KMM_get_K, SDR_get_K, cvxopt_solver, A, b)

    if dobj_dB is None:
        return obj, dobj_dP
    else:
        dobj_dB_actual = functools.partial(get_dobj_dB, use_yu, xs_train, xs_test, Ky, ys_train, SDR_get_K, KMM_get_K, dobj_dB, A, b, dobj_dB)
        return obj, dobj_dP, dobj_dB_actual
Example #19
    def __init__(self):
        self.counter = 0
        self.targetUpdatefreq = 100 # Not being used

        self.max_action = 0.01
        self.world_box = np.array([[5.0, 5.0], [-5.0, -5.0]])
        #self.min_position = np.array([-5.0, -5.0])
        self.xlow = np.append(self.world_box[1], [0., -1., -1.])
        self.xhigh = np.append(self.world_box[0], [2*pi, 1., 1.])
        self.low_state = np.append(self.xlow, -10*np.ones(15))       #
        self.high_state = np.append(self.xhigh, 10*np.ones(15))       #

        self.action_space = spaces.Box(-np.ones(2), np.ones(2))
        self.observation_space = spaces.Box(self.low_state, self.high_state)

        self.viewer = None
        #self.state = self.observation_space.sample()

        self.noise = np.array([0.01]*5 + [0.2]*2) #std
        dt = 0.1
        self.Q = np.eye(5) * (0.01**2)
        self.R = np.eye(2) * (0.2**2)
        self.P = np.eye(5) * (0.0001**2)
        self.Id = np.eye(5)

        # initialize state
        pos = random.uniform(self.world_box[0], self.world_box[1])
        ang = math.atan2(pos[1], pos[0]) - pi + random.uniform(-pi/8, pi/8)
        ang %= 2*pi
        self.x = np.append(pos, [ang, 0., 0.])
        self.L = np.linalg.cholesky(self.P)

        self.goal_position = np.array([0., 0.])

        self.A = jacobian(self.dynamics)
        self.H = jacobian(self.obs)
        self.seed()
        self.reset()
Example #20
    def __init__(self, name='', size=2, diag_lb=0.0, val=None):
        self.name = name
        self.__size = int(size)
        self.__vec_size = int(size * (size + 1) / 2)
        self.__diag_lb = diag_lb
        assert diag_lb >= 0
        if val is None:
            self.__val = np.diag(np.full(self.__size, diag_lb + 1.0))
        else:
            self.set(val)

        # These will be dense, so just use autograd directly.
        self.free_to_vector_jac_dense = autograd.jacobian(self.free_to_vector)
        self.free_to_vector_hess_dense = autograd.hessian(self.free_to_vector)
Example #21
def rhess(cost, proj):
    """
    Generates the Riemannian hessian of the cost. Specifically, rhess(cost,
    proj)(x, u) is the directional derivative of the gradient of cost at point
    x on the manifold, in direction u.
    cost and proj must be defined using autograd.numpy.
    See http://sites.uclouvain.be/absil/2013-01/Weingarten_07PA_techrep.pdf
    for some discussion.
    Currently this is correct but not efficient, because of the jacobian-
    vector product. Hopefully this can be fixed in future.
    """
    return lambda x, u: proj(
        x, np.tensordot(jacobian(rgrad(cost, proj))(x), u, axes=u.ndim))
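A small usage sketch (purely illustrative: rgrad is not shown in the snippet above, so a common project-the-Euclidean-gradient definition is assumed here, together with a unit-sphere proj):

import autograd.numpy as np
from autograd import grad, jacobian

def rgrad(cost, proj):
    # assumed definition: project the Euclidean gradient onto the tangent space
    return lambda x: proj(x, grad(cost)(x))

A = np.diag(np.array([3.0, 2.0, 1.0]))
cost = lambda x: np.dot(x, np.dot(A, x))
proj = lambda x, u: u - np.dot(x, u) * x      # tangent projection on the unit sphere

x = np.array([1.0, 0.0, 0.0])                 # a point on the sphere
u = np.array([0.0, 1.0, 0.0])                 # a tangent direction at x
Hu = rhess(cost, proj)(x, u)                  # Riemannian Hessian applied to u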
Example #22
def sga(z_0, alpha=0.05, lamb=0.1, num_iter = 100):
    z = [z_0]
    grad_fn = grad(target)
    hessian = jacobian(grad_fn)
    for i in range(num_iter):
        g = grad_fn(z[-1])
        w = g * np.array([1,-1])
        H = hessian(z[-1])
        HH = np.array([[1, -lamb*H[0,1]],[lamb*H[0,1],1]])
        v = HH @ w
        z1 = z[-1] - alpha*v
        z.append(z1)
    z = np.array(z)
    return z
Example #23
def co(z_0, alpha=0.01, gamma=0.1, num_iter=100):
    z = [z_0]
    grads = []
    grad_fn = grad(target)
    hessian = jacobian(grad_fn)
    for i in range(num_iter):
        g = grad_fn(z[-1])
        H = hessian(z[-1])
        #print(np.matmul(H,g), z[-1])
        v = g*np.array([1,-1]) + gamma*np.matmul(H,g)
        z1 = z[-1] - alpha*v
        z.append(z1)
    z = np.array(z)
    return z
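Both sga and co above differentiate a free function named target. A toy target (a bilinear saddle, assumed here only to make the snippets runnable in one module) and example calls:

import autograd.numpy as np
from autograd import grad, jacobian

def target(z):
    # z = [x, y]; the [1, -1] factor inside sga/co makes x descend and y ascend
    return z[0] * z[1]

traj_sga = sga(np.array([1.0, 1.0]), alpha=0.05, lamb=0.1, num_iter=50)
traj_co = co(np.array([1.0, 1.0]), alpha=0.01, gamma=0.1, num_iter=50)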
Example #24
    def gradient(self, params):
        # use the gradient if the model provides it.
        # if not, compute it using autograd.
        if not hasattr(self.mean, 'gradient'):
            _grad = lambda mean, argnum, params: jacobian(mean, argnum)(*params)
        else:
            _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]
        n_params = len(np.atleast_1d(params))
        grad_likelihood = np.array([])
        for i in range(n_params):
            grad = _grad(self.mean, i, params)
            grad_likelihood = np.append(grad_likelihood,
                                        np.nansum(grad * (1 - self.data / self.mean(*params))))
        return grad_likelihood
Example #25
def main():
    J = jacobian(fun)

    def wrapper(x):
        return fun(x), J(x)

    xlb = np.array([0.6, 0.2])
    xub = np.array([1.6, 1.2])
    clb = np.array([0, 1.0])
    cub = np.array([0, 1.0])
    x0 = numpy.random.random(2)
    # test grad fun
    rst = gradSolve(wrapper, x0, nf=None, xlb=xlb, xub=xub, clb=clb, cub=cub)
    print(rst)
Example #26
def calc_hier_norm_X_fourth_moment(mu_x, Tau):

    M = lambda t: npa.exp(npa.dot(t.T, mu_x) + .5 * npa.dot(npa.dot(t.T, Tau), t))

    M_1 = jacobian(M)
    M_2 = jacobian(M_1)
    M_3 = jacobian(M_2)
    M_4 = jacobian(M_3)

    t = npa.array([0])

    d = len(mu_x) + 1
    if d > 2:
        raise ValueError('Extend this to work for multivariate covariate data') # TODO
    E_vals = [M(t), M_1(t).squeeze(), M_2(t).squeeze(), M_3(t).squeeze(), M_4(t).squeeze()]
    Ex4 = np.empty((d, d, d, d))
    for i in range(d):
        for j in range(d):
            for k in range(d):
                for l in range(d):
                    Ex4[i, j, k, l] = E_vals[[i, j, k, l].count(0)]

    return Ex4
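The chain of jacobian calls above differentiates the moment-generating function at t = 0. For the scalar-covariate case the code supports (it raises for d > 2),

M(t) = \exp\!\big(t^\top \mu_x + \tfrac{1}{2}\, t^\top \mathrm{Tau}\, t\big), \qquad \mathrm{E}[x^n] = \frac{d^n M}{dt^n}\Big|_{t=0},

so E_vals holds E[x^0], ..., E[x^4], and [i, j, k, l].count(0) picks how many covariate factors appear in E[X_i X_j X_k X_l]; treating the remaining index as a constant regressor is my reading of the code, not something stated in the source.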
Example #28
    def _sample_x_map(self):
        """Compute the maximum a posteriori estimation of `X`.
        """
        def _neg_log_posterior_x(X_flat):
            X = X_flat.reshape(self.N, self.D)
            return -1 * self._log_posterior_x(X)

        resp = minimize(_neg_log_posterior_x,
                        x0=np.copy(self.X).flatten(),
                        jac=jacobian(_neg_log_posterior_x),
                        method='L-BFGS-B',
                        options=dict(maxiter=self.max_iters))
        X_map = resp.x
        self.X = X_map.reshape(self.N, self.D)
Example #30
def test_jacobian_higher_order():
    fun = lambda x: np.sin(np.outer(x,x)) + np.cos(np.dot(x,x))

    assert jacobian(fun)(npr.randn(3)).shape == (3,3,3)
    assert jacobian(jacobian(fun))(npr.randn(3)).shape == (3,3,3,3)
    # assert jacobian(jacobian(jacobian(fun)))(npr.randn(3)).shape == (3,3,3,3,3)

    check_grads(lambda x: np.sum(np.sin(jacobian(fun)(x))), npr.randn(3))
    check_grads(lambda x: np.sum(np.sin(jacobian(jacobian(fun))(x))), npr.randn(3))
Example #31
def get_optimal_coverage_prob(G,
                              unbiased_probs,
                              U,
                              initial_distribution,
                              budget,
                              omega=4,
                              options={},
                              method='SLSQP',
                              initial_coverage_prob=None,
                              tol=0.1):
    """
    Inputs: 
        G is the graph object with dictionaries G[node], G[graph] etc. 
        phi is the attractiveness function, for each of the N nodes, phi(v, Fv)
    """
    n = nx.number_of_nodes(G)
    m = nx.number_of_edges(G)

    # Randomly initialize coverage probability distribution
    if initial_coverage_prob is None:
        initial_coverage_prob = np.random.rand(m)
        initial_coverage_prob = budget * (initial_coverage_prob /
                                          np.sum(initial_coverage_prob))

    # Bounds and constraints
    bounds = [(0.0, 1.0) for _ in range(m)]

    eq_fn = lambda x: budget - sum(x)
    constraints = [{
        'type': 'eq',
        'fun': eq_fn,
        'jac': autograd.jacobian(eq_fn)
    }]
    edge_set = list(range(m))

    # Optimization step
    coverage_prob_optimal = minimize(objective_function_matrix_form,
                                     initial_coverage_prob,
                                     args=(G, unbiased_probs, torch.Tensor(U),
                                           torch.Tensor(initial_distribution),
                                           edge_set, omega, np),
                                     method=method,
                                     jac=dobj_dx_matrix_form,
                                     bounds=bounds,
                                     constraints=constraints,
                                     tol=tol,
                                     options=options)

    return coverage_prob_optimal
Example #32
    def test_pinv(self):
        def check_pinv(a):
            w = anp.linalg.pinv(a)
            return w

        for i in range(50):
            x = jt.random((2, 2, 4, 3))
            c_a = x.data
            mx = jt.linalg.pinv(x)
            tx = check_pinv(c_a)
            assert np.allclose(mx.data, tx)
            jx = jt.grad(mx, x)
            check_grad = jacobian(check_pinv)
            gx = check_grad(c_a)
            assert np.allclose(gx, jx.data)
Example #33
    def __init__(self,
                 coeff,
                 dynamics_model,
                 umax,
                 state_dim,
                 action_dim,
                 forward_dynamics=None,
                 reg_type=0,
                 pred_time=50
                 ):
        self.reg_type = reg_type
        self.apply_control_constrains = True
        self.control_limits = np.array([[-umax, umax]])  # ,[-1, 1]])

        self.alpha = 10**np.linspace(0, -4, 11)

        self.a1, self.a2, self.a3 = coeff
        self.forward_dynamics = forward_dynamics

        self.pred_time = pred_time
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.umax = umax

        self.lf = self.final_cost
        self.lf_x = grad(self.lf)
        self.lf_xx = jacobian(self.lf_x)

        self.l = lambda x, u: 0.5*np.sum(np.square(u), axis=0 if len(u.shape) == 1 else 1)
        self.l_x = grad(self.l, 0)
        self.l_u = grad(self.l, 1)
        self.l_xx = jacobian(self.l_x, 0)
        self.l_uu = jacobian(self.l_u, 1)
        self.l_ux = jacobian(self.l_u, 0)

        self.f = dynamics_model
        self.f_x = jacobian(self.f, 0)
        self.f_u = jacobian(self.f, 1)
        self.f_xx = jacobian(self.f_x, 0)
        self.f_uu = jacobian(self.f_u, 1)
        self.f_ux = jacobian(self.f_u, 0)
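A note on shapes (standard autograd.jacobian behaviour, assuming 1-D state and control vectors): the Jacobian stacks output axes before input axes, so f_x is (n_x, n_x), f_u is (n_x, n_u), and the second-order objects f_xx, f_uu, f_ux are rank-3 tensors of shapes (n_x, n_x, n_x), (n_x, n_u, n_u) and (n_x, n_u, n_x) respectively.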
Example #34
    def test_gradient_exception_on_sample(self):
        """Tests that the proper exception is raised if differentiation of sampling is attempted."""
        dev = qml.device("default.qubit", wires=2, shots=1000)

        @qml.qnode(dev, diff_method="parameter-shift")
        def circuit(x):
            qml.RX(x, wires=[0])
            return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))

        with pytest.raises(
                qml.QuantumFunctionError,
                match=
                "Circuits that include sampling can not be differentiated."):
            grad_fn = autograd.jacobian(circuit)
            grad_fn(1.0)
Example #35
    def __init__(self):
        self.time_elapsed = 0.0

        self.world_pos = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        self.world_force = np.array(
            [0.0, -9.8, 0.0, -9.8, 0.0, -9.8, 0.0, -9.8])

        self.gen_pos = np.array([0.0, 0.0, 0.0])
        self.gen_vel = np.array([0.0, 0.0, 0.0])
        self.gen_acc = np.array([0.0, 0.0, 0.0])

        self.state = np.array([*self.gen_pos, *self.gen_vel])

        self.jac_x_wrt_q = jacobian(
            boxes.explicit_decode)  # Preconstruct the jacobian function
Example #36
    def test_simplex_derivatives(self):
        k = 3
        free_param = np.arange(0., float(k), 1.)
        z = constrain_simplex_vector(free_param)

        get_constrain_hess = hessian(constrain_simplex_vector)
        target_hess = get_constrain_hess(free_param)
        hess = constrain_hess_from_moment(z)
        np_test.assert_array_almost_equal(target_hess, hess)

        get_constrain_jac = jacobian(constrain_simplex_vector)
        target_jac = get_constrain_jac(free_param)
        jac = constrain_grad_from_moment(z)

        np_test.assert_array_almost_equal(target_jac, jac)
Example #37
    def __init__(self, par1, par2, fun):
        self.par1 = par1
        self.par2 = par2
        self.fun = fun

        self.ag_fun_free_grad1 = autograd.grad(self.fun_free, argnum=0)
        self.ag_fun_free_grad2 = autograd.grad(self.fun_free, argnum=1)

        self.ag_fun_vector_grad1 = autograd.grad(self.fun_vector, argnum=0)
        self.ag_fun_vector_grad2 = autograd.grad(self.fun_vector, argnum=1)

        # hessian12 has par1 in the rows and par2 in the columns.
        # hessian21 has par2 in the rows and par1 in the columns.
        self.ag_fun_free_hessian12 = \
            autograd.jacobian(self.ag_fun_free_grad1, argnum=1)

        self.ag_fun_free_hessian21 = \
            autograd.jacobian(self.ag_fun_free_grad2, argnum=0)

        self.ag_fun_vector_hessian12 = \
            autograd.jacobian(self.ag_fun_vector_grad1, argnum=1)

        self.ag_fun_vector_hessian21 = \
            autograd.jacobian(self.ag_fun_vector_grad2, argnum=0)
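Spelled out, the pattern autograd.jacobian(autograd.grad(f, argnum=0), argnum=1) used here builds the mixed second-derivative block

(H_{12})_{ij} = \frac{\partial^2 f}{\partial \theta^{(1)}_i \, \partial \theta^{(2)}_j},

with the inner grad over argnum 0 supplying the row index and the outer jacobian over argnum 1 supplying the column index, which is exactly the row/column convention stated in the comment.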
Example #38
def prob22(r, x0, tol=1e-5, maxiter=100):

    J = jacobian(r)
    #repeat maxiter times
    for k in range(maxiter):
        #update points

        x1 = x0 - la.solve(J(x0).T@J(x0), J(x0).T@r(x0))

        #check if it converged
        if la.norm(x1 - x0) < tol:
            return x1, True, k+1
        x0 = x1

    return x1, False, maxiter
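For reference, the update inside the loop is the Gauss-Newton step for the nonlinear least-squares problem with residual r and Jacobian J:

x_{k+1} = x_k - \big(J(x_k)^\top J(x_k)\big)^{-1} J(x_k)^\top r(x_k).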
Example #39
    def test_gradient_exception_on_sample(self):
        """Tests that the proper exception is raised if differentiation of sampling is attempted."""
        self.logTestName()

        @qml.qnode(self.qubit_dev2)
        def circuit(x):
            qml.RX(x, wires=[0])
            return qml.sample(qml.PauliZ(0), 1), qml.sample(qml.PauliX(1), 1)

        with self.assertRaisesRegex(
            qml.QuantumFunctionError,
            "Circuits that include sampling can not be differentiated."
        ):
            grad_fn = autograd.jacobian(circuit)
            grad_fn(1.0)
Example #40
def ehess2rhess(proj):
    """
    Generates an ehess2rhess function for a manifold which is a sub-manifold
    of Euclidean space.
    ehess2rhess(proj)(x, egrad, ehess, u) converts the Euclidean hessian ehess
    at the point x to a Riemannian hessian. That is the directional
    derivative of the gradient in the direction u.
    proj must be defined using autograd.numpy.
    This will not be an efficient implementation because of missing support
    for efficient jacobian-vector products in autograd.
    """
    # Differentiate proj w.r.t. the first argument
    d_proj = jacobian(proj)
    return lambda x, egrad, ehess, u: proj(x, ehess +
                                           np.tensordot(d_proj(x, egrad), u,
                                                        axes=u.ndim))
Example #41
    def _autograd_ctoi(self, atom, bond, angle, torsion, positions):
        import autograd
        import autograd.numpy as np

        positions = positions/positions.unit
        atom_position = positions[atom]


        def _cartesian_to_internal(xyz):
            """
            Autograd-based jacobian of transformation from cartesian to internal.
            Returns without units!
            """
            a = positions[bond] - xyz
            b = positions[angle] - positions[bond]
            #3-4 bond
            c = positions[angle] - positions[torsion]
            a_u = a / np.linalg.norm(a)
            b_u = b / np.linalg.norm(b)
            c_u = c / np.linalg.norm(c)

            #bond length
            r = np.linalg.norm(a)

            #bond angle
            theta = np.arccos(np.dot(-a_u, b_u))

            #torsion angle
            plane1 = np.cross(a, b)
            plane2 = np.cross(b, c)

            phi = np.arccos(np.dot(plane1, plane2)/(np.linalg.norm(plane1)*np.linalg.norm(plane2)))

            if np.dot(np.cross(plane1, plane2), b_u) < 0:
                phi = -phi

            return np.array([r, theta, phi])

        j = autograd.jacobian(_cartesian_to_internal)
        internal_coords = _cartesian_to_internal(atom_position)
        jacobian_det = np.linalg.det(j(atom_position))
        return internal_coords, np.abs(jacobian_det)
Example #42
    def ggnvp_maker(x):
        J = jacobian(f)(x)
        H = hessian(g)(f(x))
        def ggnvp(v):
            return np.dot(J.T, np.dot(H, np.dot(J, v)))
        return ggnvp
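For a composition g(f(x)), the closure above returns the generalized Gauss-Newton vector product v \mapsto J_f(x)^\top \nabla^2 g(f(x))\, J_f(x)\, v, i.e. the Hessian of g \circ f with the second-order term of f dropped; materializing J and H as dense arrays, as done here, is only practical for small problems.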
Example #43
def test_jacobian_against_grad():
    fun = lambda x: np.sum(np.sin(x), axis=1, keepdims=True)
    A = npr.randn(1, 3)
    assert np.allclose(grad(fun)(A), jacobian(fun)(A))
Example #44
    def __init__(self, T, plant_dyn, cost, constraints=None, use_autograd=True):
        """
        T:              Length of horizon
        plant_dyn:      Discrete time plant dynamics, can be nonlinear
        cost:           instantaneous cost function; the terminal cost can be defined by checking the time index
        constraints:    constraints on state/control; will be incorporated into cost

        All the functions should accept (x, u, t, aux) but not necessarily depend on all of them. 
        aux indicates the auxiliary arguments to be evaluated in the functions
        """
        self.T = T
        self.plant_dyn = plant_dyn
        self.cost = cost                  
        self.constraints = constraints
        #auxiliary arguments for function evaluations; particularly useful for cost evaluation
        self.aux = None
        self.use_autograd = has_autograd and use_autograd

        """
        Gradient of dynamics and costs with respect to state/control
        Default is None, so finite differences/automatic differentiation will be used
        Otherwise the given functions should again accept (x, u, t, aux)
        Constraints should mean self.constraints(x, u, t, aux) <= 0
        """
        self.plant_dyn_dx = None        #Df/Dx
        self.plant_dyn_du = None        #Df/Du
        self.cost_dx = None             #Dl/Dx
        self.cost_du = None             #Dl/Du
        self.cost_dxx = None            #D2l/Dx2
        self.cost_duu = None            #D2l/Du2
        self.cost_dux = None            #D2l/DuDx

        self.constraints_dx = None      #Dc/Dx
        self.constraints_du = None      #Dc/Du
        self.constraints_dxx = None     #D2c/Dx2
        self.constraints_duu = None     #D2c/Du2
        self.constraints_dux = None     #D2c/DuDx

        self.constraints_lambda = 1000
        self.finite_diff_eps = 1e-5
        self.reg = .1

        #candidate alphas for line search in the direction of gradients
        #10 line steps
        self.alpha_array = 1.1 ** (-np.arange(10)**2)

        #adapting the regularizer
        self.reg_max = 1000
        self.reg_min = 1e-6
        self.reg_factor = 10

        if self.use_autograd:
            #generate gradients and hessians using autograd
            #note in this case, the plant_dyn, cost and constraints must be specified with the autograd numpy
            self.plant_dyn_dx = jacobian(self.plant_dyn, 0)  #with respect to the first argument x
            self.plant_dyn_du = jacobian(self.plant_dyn, 1)  #with respect to the second argument   u

            self.cost_dx = grad(self.cost, 0)
            self.cost_du = grad(self.cost, 1)
            self.cost_dxx = jacobian(self.cost_dx, 0)
            self.cost_duu = jacobian(self.cost_du, 1)
            self.cost_dux = jacobian(self.cost_du, 0)

            if constraints is not None:
                self.constraints_dx = jacobian(self.constraints, 0)
                self.constraints_du = jacobian(self.constraints, 1)
                self.constraints_dxx = jacobian(self.constraints_dx, 0)
                self.constraints_duu = jacobian(self.constraints_du, 1)
                self.constraints_dux = jacobian(self.constraints_du, 0)
        return
Example #45
def test_jacobian_scalar_to_vector():
    fun = lambda x: np.array([x, x ** 2, x ** 3])
    val = npr.randn()
    assert np.allclose(jacobian(fun)(val), np.array([1.0, 2 * val, 3 * val ** 2]))
Example #46
    def _autograd_itoc(self, bond, angle, torsion, r, theta, phi, positions):
        """
        Autograd based coordinate conversion internal -> cartesian

        Arguments
        ---------
        bond : int
            index of the bonded atom
        angle : int
            index of the angle atom
        torsion : int
            index of the torsion atom
        r : float, Quantity nm
            bond length
        theta : float, Quantity rad
            bond angle
        phi : float, Quantity rad
            torsion angle
        positions : [n, 3] np.array Quantity nm
            positions of the atoms in the molecule

        Returns
        -------
        atom_xyz : [1, 3] np.array Quantity nm
            The atomic positions in cartesian space
        detJ : float
            The absolute value of the determinant of the jacobian
        """
        import autograd
        import autograd.numpy as np
        positions = positions/positions.unit
        r = r/r.unit
        theta = theta/theta.unit
        phi = phi/phi.unit
        rtp = np.array([r, theta, phi])

        def _internal_to_cartesian(rthetaphi):

            a = positions[angle] - positions[bond]
            b = positions[angle] - positions[torsion]

            a_u = a / np.linalg.norm(a)
            b_u = b / np.linalg.norm(b)


            d_r = rthetaphi[0]*a_u

            normal = np.cross(a, b)

            #construct the angle rotation matrix
            axis_angle = normal / np.linalg.norm(normal)
            a = np.cos(rthetaphi[1]/2)
            b, c, d = -axis_angle*np.sin(rthetaphi[1]/2)
            angle_rotation_matrix =  np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
                            [2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
                            [2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
            #apply it
            d_ang = np.dot(angle_rotation_matrix, d_r)

            #construct the torsion rotation matrix and apply it
            axis = a_u
            a = np.cos(rthetaphi[2]/2)
            b, c, d = -axis*np.sin(rthetaphi[2]/2)
            torsion_rotation_matrix = np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
                            [2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
                            [2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
            #apply it
            d_torsion = np.dot(torsion_rotation_matrix, d_ang)

            #add the positions of the bond atom
            xyz = positions[bond] + d_torsion

            return xyz

        j = autograd.jacobian(_internal_to_cartesian)
        atom_xyz = _internal_to_cartesian(rtp)
        jacobian_det = np.linalg.det(j(rtp))
        logging.debug("detJ is %f" %(jacobian_det))
        detj_spherical = r**2*np.sin(theta)
        logging.debug("The spherical detJ is %f" % detj_spherical)
        return units.Quantity(atom_xyz, unit=units.nanometers), np.abs(jacobian_det)
Example #47
    w_err = 1. - np.square(w)

    return (reproj_err, w_err)

########## derivative extras #############


def compute_w_err(w):
    return 1. - w*w

compute_w_err_d = value_and_grad(compute_w_err)

def compute_reproj_err_wrapper(params,feat):
    X_off = BA_NCAMPARAMS
    return compute_reproj_err(params[0:X_off],params[X_off:X_off+3],params[-1],feat)
compute_reproj_err_d = jacobian(compute_reproj_err_wrapper)

def compute_ba_J(cams, X, w, obs, feats):
    p = obs.shape[0]
    reproj_err_d = []
    for i in range(p):
        params = np.hstack((cams[obs[i,0]],X[obs[i,1]],w[i]))
        reproj_err_d.append(compute_reproj_err_d(params,feats[i]))

    w_err_d = []
    for curr_w in w:
        w_err_d.append(compute_w_err_d(curr_w))

    return (reproj_err_d, w_err_d)

Example #48
def test_matrix_jacobian_product():
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5, 4)
    V = npr.randn(5, 4)
    J = jacobian(fun)(a)
    check_equivalent(np.tensordot(V, J), vector_jacobian_product(fun)(a, V))
Example #49
def test_tensor_jacobian_product():
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4)
    J = jacobian(fun)(a)
    check_equivalent(np.tensordot(V, J, axes=np.ndim(V)), vector_jacobian_product(fun)(a, V))