Example #1
    def _calculate_derivs(self):
        """
        Compute derivatives of the lensing image wrt the mass of each subhalo, evaluated at the given mass
        """

        # Save the original subhalo properties so they can be restored after the loop
        self.theta_xs_0 = self.theta_xs.copy()
        self.theta_ys_0 = self.theta_ys.copy()
        self.m_subs_0 = self.m_subs.copy()

        # Gradient image array
        self.grad_msub_image = np.zeros((self.n_sub_roi, self.n_xy, self.n_xy))

        # Loop over subhalos: swap each subhalo into the first slot of the property arrays,
        # then compute the derivative via a Jacobian-vector product (hacky, but currently this
        # seems to be the fastest approach with autograd's forward-mode JVP implementation).
        for i_sub in range(self.n_sub_roi):
            self.theta_xs[[0, i_sub]] = self.theta_xs[[i_sub, 0]]
            self.theta_ys[[0, i_sub]] = self.theta_ys[[i_sub, 0]]
            self.m_subs[[0, i_sub]] = self.m_subs[[i_sub, 0]]

            self.grad_msub_image[i_sub] = make_jvp(self._deriv_helper_function)(self.m_subs)([1.])[1]

        # Reset subhalo properties to the original ones
        self.theta_xs = self.theta_xs_0
        self.theta_ys = self.theta_ys_0
        self.m_subs = self.m_subs_0
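
A minimal, self-contained sketch of the underlying pattern used above: seed make_jvp with one-hot tangent vectors to get the derivative of an image with respect to each parameter. The toy render_image function and the masses array are illustrative assumptions, not part of the original class.

import autograd.numpy as np
from autograd import make_jvp

def render_image(masses):
    # Toy stand-in for the lensing image: a 4x4 "image" whose pixels depend
    # smoothly on every mass.
    grid = np.linspace(0.0, 1.0, 4)
    base = np.exp(-grid[:, None] ** 2 - grid[None, :] ** 2)
    return base * np.sum(np.sqrt(masses))

masses = np.array([1.0, 2.0, 3.0])
jvp_at_masses = make_jvp(render_image)(masses)

# One forward pass per parameter: the JVP with the i-th one-hot vector is
# d(image)/d(masses[i]), analogous to grad_msub_image above.
grad_mass_image = np.stack([jvp_at_masses(one_hot)[1] for one_hot in np.eye(len(masses))])
# grad_mass_image.shape == (3, 4, 4)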
Example #2
import autograd.numpy as np
from autograd import make_jvp


def jacobian(F, N, x):
    # Build the Jacobian of F at x column by column: the forward-mode JVP with
    # the standard basis vector e_i gives the i-th column dF/dx_i.
    identity = np.eye(N)
    Jcols = []
    jvp = make_jvp(F)(x)
    for i in range(N):
        Jcols.append(jvp(identity[i])[1])
    # Stack the columns so that the result has the usual orientation, J[i, j] = dF_i / dx_j.
    return np.stack(Jcols, axis=1)
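
A small usage sketch for the helper above (reusing the imports and jacobian defined there). The toy matrix A, the function F, and the comparison against autograd's built-in reverse-mode jacobian are additions for illustration, not part of the original snippet.

from autograd import jacobian as autograd_jacobian

A = np.array([[1.0, 2.0, 0.0],
              [0.0, 1.0, 3.0],
              [4.0, 0.0, 1.0]])
F = lambda x: np.tanh(np.dot(A, x))
x0 = np.array([0.1, -0.2, 0.3])

J_forward = jacobian(F, 3, x0)        # one forward-mode JVP per basis vector
J_reverse = autograd_jacobian(F)(x0)  # reverse mode, for comparison
assert np.allclose(J_forward, J_reverse)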
Example #3
def test_make_jvp():
    A = npr.randn(3, 5)
    x = npr.randn(5)
    v = npr.randn(5)
    fun = lambda x: np.tanh(np.dot(A, x))

    jvp_explicit = lambda x: lambda v: np.dot(jacobian(fun)(x), v)
    jvp = make_jvp(fun)

    check_equivalent(jvp_explicit(x)(v), jvp(x)(v)[1])
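
The [1] index above reflects that make_jvp(fun)(x)(v) returns a pair whose element [0] is fun(x) and whose element [1] is the Jacobian-vector product J(x) v (compare the printed output in Example #5 below). A tiny sketch of that structure; the function f and the inputs are illustrative.

import autograd.numpy as np
from autograd import make_jvp

f = lambda x: np.tanh(x)
value, jvp_v = make_jvp(f)(np.array([0.0, 1.0]))(np.array([1.0, 0.0]))
# value == f(x);  jvp_v == J(x) @ v, here the first column of the Jacobian of f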
Example #4
def test_make_jvp():
    A = npr.randn(3, 5)
    x = npr.randn(5)
    v = npr.randn(5)
    fun = lambda x: np.tanh(np.dot(A, x))

    jvp_explicit = lambda x: lambda v: np.dot(jacobian(fun)(x), v)
    jvp = make_jvp(fun)

    check_equivalent(jvp_explicit(x)(v), jvp(x)(v))
Example #5
import autograd.numpy as np
from autograd import make_jvp


def jvp():
    A = np.random.randn(2, 2)

    def f(x):
        return np.dot(A, x)

    x = np.zeros(2)
    jvp_f_x = make_jvp(f)(x)
    print(jvp_f_x(np.array([1, 0])))  # f(0) and the first column of f's Jacobian at 0
    print(jvp_f_x(np.array([0, 1])))  # f(0) and the second column of f's Jacobian at 0
Example #6
    def test_slogdet_3d(self):
        fun = lambda x: np.sum(np.linalg.slogdet(x)[1])
        mat = np.concatenate(
            [(rand_psd(5) + 5 * np.eye(5))[None,...] for _ in range(3)])
        # At this time, this is not supported.
        #check_grads(fun)(mat)

        # Check that it raises an error.
        fwd_grad = autograd.make_jvp(fun, argnum=0)
        def error_fun():
            return fwd_grad(mat)(mat)
        self.assertRaises(ValueError, error_fun)
Example #7
import autograd.numpy as np
from autograd import make_jvp


def forward_grad_vector(fun, arg_no, n_derivs, *args):
    # Example call:
    # forward_grad_vector(
    # ard_rbf_kernel_efficient, 3, lscales.shape[0], X, X, var, lscales)
    # TODO: Maybe make syntax agree even more with the current autograd.

    grad = make_jvp(fun, arg_no)
    all_indices = np.eye(n_derivs)

    all_grads = []
    for cur_index in all_indices:
        all_grads.append(grad(*args)(cur_index)[1])

    return np.stack(all_grads, -1)
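
A usage sketch for forward_grad_vector with a toy function; the function g, its arguments, and the shapes noted below are illustrative assumptions (not the ARD RBF kernel mentioned in the docstring).

import autograd.numpy as np

def g(scale, weights):
    # simple vector-valued function of two arguments
    return scale * np.tanh(weights)

weights = np.array([0.5, -1.0, 2.0])

# Derivatives of g with respect to each entry of `weights` (argument index 1),
# stacked along the last axis; the result has shape (3, 3) and equals the
# Jacobian d g_i / d weights_j.
dg_dweights = forward_grad_vector(g, 1, weights.shape[0], 2.0, weights)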
Example #8
    def _make_jacobian(V):
        if 'hessian' in self.params:
            # Forward-over-forward: differentiate the JVP of the form once more, so the
            # returned callable evaluates second directional derivatives at u0.
            F = make_jvp(lambda U: self.form(*U, w))
            return make_jvp(lambda W: F(W)(V)[1])(u0)
        # First derivative only: JVP of the form at u0, with V passed as fixed arguments.
        return make_jvp(lambda U: self.form(*U, *V, w))(u0)
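
The 'hessian' branch above nests make_jvp to obtain second directional derivatives (forward-over-forward), following the F(W)(V)[1] pattern. A minimal sketch of that pattern on a toy scalar function; f, x0, and the directions v and w are illustrative assumptions.

import autograd.numpy as np
from autograd import make_jvp

f = lambda x: np.sum(np.sin(x) * x)

x0 = np.array([0.3, -1.2])
v = np.array([1.0, 0.0])
w = np.array([0.0, 1.0])

first = make_jvp(f)                              # u -> tangent -> (f(u), df(u)[tangent])
second = make_jvp(lambda u: first(u)(v)[1])(x0)  # differentiate the JVP once more, at x0
deriv_v, second_deriv_vw = second(w)
# deriv_v         == df(x0)[v], the first directional derivative along v
# second_deriv_vw == v . H(x0) w, with H the Hessian of f at x0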