Example No. 1
def wrap_namespace(old, new):
    """
    Wraps namespace of array library.
    """
    unchanged_types = {float, int, type(None), type}
    int_types = {
        _cp.int,
        _cp.int8,
        _cp.int16,
        _cp.int32,
        _cp.int64,
        _cp.integer,
    }
    function_types = {_cp.ufunc, types.FunctionType, types.BuiltinFunctionType}
    for name, obj in old.items():
        if obj in notrace_functions:
            new[name] = notrace_primitive(obj)

        # Note: type(obj) == _cp.ufunc doesn't work! Should use:
        #
        #     isinstance(obj, _cp.ufunc)
        #
        elif (type(obj) in function_types or isinstance(obj, _cp.ufunc)
              # or isinstance(obj, _cp.core.fusion.reduction)
              ):
            new[name] = primitive(obj)
        elif type(obj) is type and obj in int_types:
            new[name] = wrap_intdtype(obj)
        elif type(obj) in unchanged_types:
            new[name] = obj
Example No. 2
def autograd_register(f, s_f):
    """Register a function and its sensitivity for AutoGrad.

    Args:
        f (function): Function to register.
        s_f (function): Sensitivity of `f`.

    Returns:
        function: AutoGrad primitive.
    """
    # Create a primitive for `f`.
    f_primitive = primitive(f)

    # Register the sensitivity.
    def vjp_argnums(nums, y, args, kw_args):
        def vjp(s_y):
            grads = as_tuple(s_f(s_y, y, *args, **kw_args))
            return tuple([grads[i] for i in nums])

        return vjp

    defvjp_argnums(f_primitive, vjp_argnums)

    # Return the AutoGrad primitive.
    return f_primitive
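A minimal usage sketch (an editorial addition, not from the source project): the hypothetical `mul`/`s_mul` pair below registers multiplication with a hand-written sensitivity, assuming `autograd_register` and its helpers (`as_tuple`, `defvjp_argnums`) are in scope as above.

from autograd import grad

def mul(x, y):
    return x * y

def s_mul(s_y, y, x_, y_):
    # Receives the output sensitivity `s_y`, the output `y`, and the original
    # arguments; returns one gradient per positional argument of `mul`.
    return s_y * y_, s_y * x_

mul_ag = autograd_register(mul, s_mul)
print(grad(mul_ag)(2.0, 3.0))  # d(x * y)/dx at (2, 3) -> 3.0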
Example No. 3
def wrap_namespace(old, new):
    unchanged_types = {float, int, type(None), type}
    int_types = {_np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
    for name, obj in old.items():
        if obj in notrace_functions:
            new[name] = notrace_primitive(obj)
        elif callable(obj) and type(obj) is not type:
            new[name] = primitive(obj)
        elif type(obj) is type and obj in int_types:
            new[name] = wrap_intdtype(obj)
        elif type(obj) in unchanged_types:
            new[name] = obj
Example No. 4
def wrap_namespace(old, new):
    unchanged_types = {float, int, type(None), type}
    int_types = {_np.int, _np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
    function_types = {_np.ufunc, types.FunctionType, types.BuiltinFunctionType}
    for name, obj in old.items():
        if obj in notrace_functions:
            new[name] = notrace_primitive(obj)
        elif type(obj) in function_types:
            new[name] = primitive(obj)
        elif type(obj) is type and obj in int_types:
            new[name] = wrap_intdtype(obj)
        elif type(obj) in unchanged_types:
            new[name] = obj
Example No. 5
    def decorator(func):
        """Decorate a function to define its custome gradient(s).

        Parameters
        ----------
        func : callable
            Function whose gradients will be assigned by grad_funcs.

        Returns
        -------
        wrapped_function : callable
            Function func with gradients specified by grad_funcs.
        """
        wrapped_function = primitive(func)

        def wrapped_grad_func(i, ans, *args, **kwargs):
            grads = grad_funcs[i](*args, **kwargs)
            if isinstance(grads, float):
                return lambda g: g * grads
            if grads.ndim == 2:
                return lambda g: g[..., None] * grads
            if grads.ndim == 3:
                return lambda g: g[..., None, None] * grads
            return lambda g: g * grads

        if len(grad_funcs) == 1:
            defvjp(
                wrapped_function,
                lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs),
            )
        elif len(grad_funcs) == 2:
            defvjp(
                wrapped_function,
                lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs),
                lambda ans, *args, **kwargs: wrapped_grad_func(1, ans, *args, **kwargs),
            )
        elif len(grad_funcs) == 3:
            defvjp(
                wrapped_function,
                lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs),
                lambda ans, *args, **kwargs: wrapped_grad_func(1, ans, *args, **kwargs),
                lambda ans, *args, **kwargs: wrapped_grad_func(2, ans, *args, **kwargs),
            )
        else:
            raise NotImplementedError(
                "custom_gradient is not yet implemented " "for more than 3 gradients."
            )

        return wrapped_function
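A hypothetical usage sketch (the enclosing factory is not shown in this fragment; this assumes it looks like `custom_gradient(*grad_funcs)` and returns the decorator above):

from autograd import grad

@custom_gradient(lambda x: 2.0 * x)  # hand-supplied gradient of x ** 2
def square(x):
    return x ** 2

print(grad(square)(3.0))  # 6.0, via the float branch of wrapped_grad_func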
Example No. 6
def wrap_namespace(old, new):
    unchanged_types = {float, int, type(None), type}
    int_types = {
        _np.int, _np.int8, _np.int16, _np.int32, _np.int64, _np.integer
    }
    function_types = {_np.ufunc, types.FunctionType, types.BuiltinFunctionType}
    for name, obj in old.items():
        if obj in notrace_functions:
            # NOTE(brendan): notrace_primitive has to marshal out all of the
            # values from potentially boxed obj's.
            new[name] = notrace_primitive(obj)
        elif type(obj) in function_types:
            new[name] = primitive(obj)
        elif type(obj) is type and obj in int_types:
            new[name] = wrap_intdtype(obj)
        elif type(obj) in unchanged_types:
            new[name] = obj
Example No. 7
    def decorator(func):
        wrapped_function = primitive(func)

        def wrapped_grad_func(i, ans, *args, **kwargs):
            grads = grad_funcs[i](*args, **kwargs)
            if isinstance(grads, float):
                return lambda g: g * grads
            if grads.ndim == 2:
                return lambda g: g[..., None] * grads
            if grads.ndim == 3:
                return lambda g: g[..., None, None] * grads
            return lambda g: g * grads

        if len(grad_funcs) == 1:
            defvjp(
                wrapped_function,
                lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs),
            )
        elif len(grad_funcs) == 2:

            defvjp(
                wrapped_function,
                lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs),
                lambda ans, *args, **kwargs: wrapped_grad_func(1, ans, *args, **kwargs),
            )
        elif len(grad_funcs) == 3:
            defvjp(
                wrapped_function,
                lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs),
                lambda ans, *args, **kwargs: wrapped_grad_func(1, ans, *args, **kwargs),
                lambda ans, *args, **kwargs: wrapped_grad_func(2, ans, *args, **kwargs),
            )
        else:
            raise NotImplementedError(
                "custom_gradient is not yet implemented " "for more than 3 gradients."
            )

        return wrapped_function
Example No. 8
from __future__ import absolute_import
import scipy.special
import autograd.numpy as np
from autograd.extend import primitive, defvjp, defjvp
from autograd.numpy.numpy_vjps import unbroadcast_f, repeat_to_match_shape

### Beta function ###
beta    = primitive(scipy.special.beta)
betainc = primitive(scipy.special.betainc)
betaln  = primitive(scipy.special.betaln)

defvjp(beta,
       lambda ans, a, b: unbroadcast_f(a, lambda g: g * ans * (psi(a) - psi(a + b))),
       lambda ans, a, b: unbroadcast_f(b, lambda g: g * ans * (psi(b) - psi(a + b))))
defvjp(betainc,
       lambda ans, a, b, x: unbroadcast_f(x, lambda g: g * np.power(x, a - 1) * np.power(1 - x, b - 1) / beta(a, b)),
       argnums=[2])
defvjp(betaln,
       lambda ans, a, b: unbroadcast_f(a, lambda g: g * (psi(a) - psi(a + b))),
       lambda ans, a, b: unbroadcast_f(b, lambda g: g * (psi(b) - psi(a + b))))

### Gamma functions ###
polygamma    = primitive(scipy.special.polygamma)
psi          = primitive(scipy.special.psi)        # psi(x) is just polygamma(0, x)
digamma      = primitive(scipy.special.digamma)    # digamma is another name for psi.
gamma        = primitive(scipy.special.gamma)
gammaln      = primitive(scipy.special.gammaln)
gammainc     = primitive(scipy.special.gammainc)
gammaincc    = primitive(scipy.special.gammaincc)
gammasgn     = primitive(scipy.special.gammasgn)
rgamma       = primitive(scipy.special.rgamma)
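A quick sanity check of the `betaln` VJP above (an editorial sketch, assuming the module is imported as-is): d/da betaln(a, b) = psi(a) - psi(a + b).

from autograd import grad

print(grad(betaln)(3.0, 2.0))     # autograd result
print(psi(3.0) - psi(3.0 + 2.0))  # closed form, should match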
Example No. 9
# -*- coding: utf-8 -*-
from __future__ import division
from scipy.stats import norm as _scipy_norm
import autograd.numpy as np
from autograd.scipy.stats import norm
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f

# TODO: next release of autograd will have this built in.

logsf = primitive(_scipy_norm.logsf)

defvjp(
    logsf,
    lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
        x, lambda g: -g * np.exp(
            norm.logpdf(x, loc, scale) - logsf(x, loc, scale))),
    lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
        loc, lambda g: g * np.exp(
            norm.logpdf(x, loc, scale) - logsf(x, loc, scale))),
    lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
        scale, lambda g: g * np.exp(
            norm.logpdf(x, loc, scale) - logsf(x, loc, scale)) *
        (x - loc) / scale),
)
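An editorial sketch of a check (assumes the module above): the registered VJP gives d/dx logsf(x) = -pdf(x)/sf(x) = -exp(logpdf(x) - logsf(x)).

from autograd import grad

x = 0.3
print(grad(logsf)(x))
print(-np.exp(norm.logpdf(x) - logsf(x)))  # closed form, should match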
Example No. 10
from __future__ import absolute_import
from builtins import range

import scipy.integrate

import autograd.numpy as np
from autograd.extend import primitive, defvjp_argnums
from autograd import make_vjp
from autograd.misc import flatten
from autograd.builtins import tuple

odeint = primitive(scipy.integrate.odeint)


def grad_odeint(yt, func, y0, t, func_args, **kwargs):
    # Extended from "Scalable Inference of Ordinary Differential
    # Equation Models of Biochemical Processes", Sec. 2.4.2
    # Fabian Froehlich, Carolin Loos, Jan Hasenauer, 2017
    # https://arxiv.org/abs/1711.08079
    
    T, D = np.shape(yt)
    flat_args, unflatten = flatten(func_args)
    
    def flat_func(y, t, flat_args):
        return func(y, t, *unflatten(flat_args))

    def unpack(x):
        #      y,      vjp_y,      vjp_t,    vjp_args
        return x[0:D], x[D:2 * D], x[2 * D], x[2 * D + 1:]

    def augmented_dynamics(augmented_state, t, flat_args):
Example No. 11
        return -transpose(tri(anp.matmul(anp.reshape(v, ans.shape), T(ans))))

    return solve_triangular_grad
defvjp(solve_triangular,
       make_grad_solve_triangular,
       lambda ans, a, b, trans=0, lower=False, **kwargs:
         lambda g: solve_triangular(a, g, trans=_flip(a, trans), lower=lower),
       None)

### cholesky

solve_trans = lambda L, X: solve_triangular(L, X, lower=True, trans='T')
solve_conj = lambda L, X: solve_trans(L, T(solve_trans(L, T(X))))
phi = lambda X: anp.tril(X) / (1. + anp.eye(X.shape[-1]))

cholesky = primitive(np.linalg.cholesky)
defvjp(cholesky, lambda L, A: lambda g: symm(solve_conj(L, phi(anp.matmul(T(L), g)))))


### operations on cholesky factors

solve_tri = partial(solve_triangular, lower=True)
solve_posdef_from_cholesky = lambda L, x: solve_tri(L, solve_tri(L, x), trans='T')

@primitive
def inv_posdef_from_cholesky(L, lower=True):
    flat_L = np.reshape(L, (-1,) + L.shape[-2:])
    return np.reshape(cyla.inv_posdef_from_cholesky(C(flat_L), lower), L.shape)

square_grad = lambda X: lambda g: anp.matmul(g, X) + anp.matmul(T(g), X)
sym_inv_grad = lambda Xinv: lambda g: -anp.matmul(Xinv, anp.matmul(g, Xinv))
Example No. 12
import numpy as np
from autograd.extend import primitive, defvjp, vspace

''' Define here various primitives needed for the main code.
To use with both numpy and autograd backends, define the autograd primitive of
a numpy function fnc as fnc_ag, and then define the vjp. '''

def T(x): return np.swapaxes(x, -1, -2)

'''=========== NP.SQRT STABLE AROUND 0 =========== '''
sqrt_ag = primitive(np.sqrt)

def vjp_maker_sqrt(ans, x):
    def vjp(g):
        return g * 0.5 * (x + 1e-10)**0.5/(x + 1e-10)
        # return np.where(np.abs(x) > 1e-10, g * 0.5 * x**-0.5, 0.)
    return vjp

defvjp(sqrt_ag, vjp_maker_sqrt)

def vjp_maker_meshgridx(ans, x):
    def vjp(g):
        return np.sum(g,axis=1)
    return vjp

'''=========== inv =========== '''

inv_ag = primitive(np.linalg.inv)

def vjp_maker_inv(ans, x):
    return lambda g: -np.dot(np.dot(T(ans), g), T(ans))
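The fragment ends before `inv_ag` is registered; presumably this mirrors `defvjp(sqrt_ag, vjp_maker_sqrt)` above (Example No. 15 registers it exactly that way). A quick check of the stabilized square root, as a sketch:

from autograd import grad

print(grad(sqrt_ag)(4.0))  # ~0.25, i.e. 0.5 / sqrt(4), from vjp_maker_sqrt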
Example No. 13
    betainc as _scipy_betainc,
)

__all__ = [
    "gammainc",  # regularized lower incomplete gamma function
    "gammaincc",  # regularized upper incomplete gamma function
    "gamma",  # gamma function
    "betainc",
    "betaincln",
    "beta",
]

LOG_EPISILON = 1e-35
MACHINE_EPISLON_POWER = np.finfo(float).eps**(1 / 2)

gammainc = primitive(_scipy_gammainc)
gammaincc = primitive(_scipy_gammaincc)
betainc = primitive(_scipy_betainc)


@primitive
def gammainccln(a, x):
    return np.log(np.clip(gammaincc(a, x), LOG_EPISILON, 1 - LOG_EPISILON))


@primitive
def gammaincln(a, x):
    return np.log(np.clip(gammainc(a, x), LOG_EPISILON, 1 - LOG_EPISILON))


@primitive
Example No. 14
from __future__ import absolute_import
import scipy.stats

import autograd.numpy as np
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.extend import primitive, defvjp

pdf = primitive(scipy.stats.multivariate_normal.pdf)
logpdf = primitive(scipy.stats.multivariate_normal.logpdf)
entropy = primitive(scipy.stats.multivariate_normal.entropy)

# With thanks to Eric Bresch.
# Some formulas are from
# "An extended collection of matrix derivative results
#  for forward and reverse mode algorithmic differentiation"
# by Mike Giles
# https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf


def generalized_outer_product(x):
    if np.ndim(x) == 1:
        return np.outer(x, x)
    return np.matmul(x, np.swapaxes(x, -1, -2))


def covgrad(x, mean, cov, allow_singular=False):
    if allow_singular:
        raise NotImplementedError(
            "The multivariate normal pdf is not "
            "differentiable w.r.t. a singular covariance matix")
    J = np.linalg.inv(cov)
Example No. 15
if use_autograd.use == 0:
    # autograd turned off
    import numpy as np
    eig = np.linalg.eig
    inv = np.linalg.inv
    sqrt = np.sqrt
else:
    # autograd turned on
    import autograd.numpy as np
    from autograd import grad
    from autograd.extend import primitive, defvjp
    from primitives import vjp_maker_eig, vjp_maker_sqrt, vjp_maker_inv
    from primitives_fix import grad_eig
    import numpy as npf

    eig = primitive(npf.linalg.eig)
    sqrt = primitive(npf.sqrt)
    inv = primitive(npf.linalg.inv)
    #defvjp(eig, vjp_maker_eig)
    defvjp(eig, grad_eig)
    defvjp(sqrt, vjp_maker_sqrt)
    defvjp(inv, vjp_maker_inv)


class RCWA_obj:
    def __init__(self, nG, L1, L2, freq, theta, phi, verbose=1):
        '''The time harmonic convention is exp(-i omega t), speed of light = 1
        The first and last layer must be uniform

        Two kinds of layers are currently supported: uniform layer,
        patterned layer from grids. Interface for patterned layer by
Example No. 16
def test_no_jvp_def():
    fun = primitive(lambda x: 2. * x)
    deriv(fun)(1.)
Example No. 17
def test_no_vjp_def():
    fun = primitive(lambda x: 2. * x)
    grad(fun)(1.)
Example No. 18
_diag = lambda a: anp.eye(a.shape[-1]) * a


# batched diagonal, similar to matrix_diag in tensorflow
def _matrix_diag(a):
    reps = anp.array(a.shape)
    reps[:-1] = 1
    reps[-1] = a.shape[-1]
    newshape = list(a.shape) + [a.shape[-1]]
    return _diag(anp.tile(a, reps).reshape(newshape))


# https://arxiv.org/pdf/1701.00392.pdf Eq(4.77)
# Note the formula from Sec3.1 in https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete

inv = primitive(anp.linalg.inv)


def grad_inv(ans, x):
    return lambda g: -_dot(_dot(T(ans), g), T(ans))


defvjp(inv, grad_inv)

eig = primitive(anp.linalg.eig)


def grad_eig(ans, x):
    """Gradient of a general square (complex valued) matrix"""
    e, u = ans  # eigenvalues as 1d array, eigenvectors in columns
    n = e.shape[-1]
Example No. 19
from __future__ import absolute_import

import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi

cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)


def grad_beta_logpdf_arg0(x, a, b):
    return (1 + a * (x - 1) + x * (b - 2)) / (x * (x - 1))


def grad_beta_logpdf_arg1(x, a, b):
    return np.log(x) - psi(a) + psi(a + b)


def grad_beta_logpdf_arg2(x, a, b):
    return np.log1p(-x) - psi(b) + psi(a + b)


defvjp(cdf,
       lambda ans, x, a, b: unbroadcast_f(
           x, lambda g: g * np.power(x, a - 1) * np.power(1 - x, b - 1) / beta(
               a, b)),
       argnums=[0])
defvjp(
Example No. 20
from __future__ import absolute_import
import scipy.stats

import autograd.numpy as np
from autograd.scipy.special import digamma
from autograd.extend import primitive, defvjp

rvs = primitive(scipy.stats.dirichlet.rvs)
pdf = primitive(scipy.stats.dirichlet.pdf)
logpdf = primitive(scipy.stats.dirichlet.logpdf)

defvjp(
    logpdf, lambda ans, x, alpha: lambda g: g * (alpha - 1) / x,
    lambda ans, x, alpha: lambda g: g *
    (digamma(np.sum(alpha)) - digamma(alpha) + np.log(x)))

# Same as log pdf, but multiplied by the pdf (ans).
defvjp(
    pdf, lambda ans, x, alpha: lambda g: g * ans * (alpha - 1) / x,
    lambda ans, x, alpha: lambda g: g * ans *
    (digamma(np.sum(alpha)) - digamma(alpha) + np.log(x)))
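A check of the x-gradient, as an editorial sketch (assumes the module above): the registered VJP gives d/dx logpdf(x, alpha) = (alpha - 1) / x, ignoring the simplex constraint.

from autograd import grad

x = np.array([0.2, 0.3, 0.5])        # a point on the simplex
alpha = np.array([1.5, 2.0, 2.5])
print(grad(logpdf)(x, alpha))        # autograd result
print((alpha - 1) / x)               # closed form, should match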
Example No. 21
File: t.py Project: abfarr/moo2020
"""Gradients of the univariate t distribution."""
from __future__ import absolute_import
import scipy.stats
import autograd.numpy as np
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import psi

pdf = primitive(scipy.stats.t.pdf)
cdf = primitive(scipy.stats.t.cdf)
logpdf = primitive(scipy.stats.t.logpdf)
logcdf = primitive(scipy.stats.t.logcdf)

def grad_tlogpdf_diff(diff, df):
    return -diff * (1.0 + df) / (diff**2 + df)
def grad_tlogpdf_x(x, df, loc, scale):
    return grad_tlogpdf_diff((x - loc) / scale, df) / scale
def grad_tlogpdf_loc(x, df, loc, scale):
    return -grad_tlogpdf_diff((x - loc) / scale, df) / scale
def grad_tlogpdf_scale(x, df, loc, scale):
    diff = x - loc
    return -(df * (scale**2 - diff**2))/(scale * (df * scale**2 + diff**2))
def grad_tlogpdf_df(x, df, loc, scale):
    y = (x - loc)/scale
    return 0.5 * ((y**2 * (df+1))/(df * (y**2 + df)) - np.log(y**2 / df + 1) - 1.0/df -psi(df/2.0) + psi((df + 1)/2.0))

defvjp(pdf, lambda ans, x, df, loc=0.0, scale=1.0:
       unbroadcast_f(x, lambda g: g * ans * grad_tlogpdf_x(    x, df, loc, scale)),
       lambda ans, x, df, loc=0.0, scale=1.0:
       unbroadcast_f(df, lambda g: g * ans * grad_tlogpdf_df(   x, df, loc, scale)),
       lambda ans, x, df, loc=0.0, scale=1.0:
Example No. 22
from __future__ import absolute_import, division

import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma

cdf = primitive(scipy.stats.chi2.cdf)
logpdf = primitive(scipy.stats.chi2.logpdf)
pdf = primitive(scipy.stats.chi2.pdf)

def grad_chi2_logpdf(x, df):
    return np.where(df % 1 == 0, (df - x - 2) / (2 * x), 0)

defvjp(cdf, lambda ans, x, df: unbroadcast_f(x, lambda g: g * np.power(2., -df/2) * np.exp(-x/2) * np.power(x, df/2 - 1) / gamma(df/2)), argnums=[0])
defvjp(logpdf, lambda ans, x, df: unbroadcast_f(x, lambda g: g * grad_chi2_logpdf(x, df)), argnums=[0])
defvjp(pdf, lambda ans, x, df: unbroadcast_f(x, lambda g: g * ans * grad_chi2_logpdf(x, df)), argnums=[0])
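A check of the registered x-gradient, as an editorial sketch (assumes the module above): for integer df, d/dx logpdf(x, df) = (df - x - 2) / (2 x).

from autograd import grad

print(grad(logpdf)(5.0, 4.0))       # autograd, w.r.t. x
print((4.0 - 5.0 - 2) / (2 * 5.0))  # closed form: -0.3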
Example No. 23
from __future__ import absolute_import
import scipy.misc
from autograd.extend import primitive, defvjp
import autograd.numpy as anp
from autograd.numpy.numpy_vjps import repeat_to_match_shape

logsumexp = primitive(scipy.misc.logsumexp)

def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
    shape, dtype = anp.shape(x), anp.result_type(x)
    def vjp(g):
        g_repeated,   _ = repeat_to_match_shape(g,   shape, dtype, axis, keepdims)
        ans_repeated, _ = repeat_to_match_shape(ans, shape, dtype, axis, keepdims)
        return g_repeated * b * anp.exp(x - ans_repeated)
    return vjp

defvjp(logsumexp, make_grad_logsumexp)
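A sanity-check sketch (editorial; note `scipy.misc.logsumexp` only exists in older SciPy releases, as this snippet assumes): with b = 1, the gradient of logsumexp is the softmax of its input.

from autograd import grad

x = anp.array([1.0, 2.0, 3.0])
print(grad(logsumexp)(x))                # softmax(x)
print(anp.exp(x) / anp.sum(anp.exp(x)))  # should match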
Example No. 24
from __future__ import absolute_import

import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f

cdf = primitive(scipy.stats.poisson.cdf)
logpmf = primitive(scipy.stats.poisson.logpmf)
pmf = primitive(scipy.stats.poisson.pmf)

def grad_poisson_logpmf(k, mu):
    return np.where(k % 1 == 0, k / mu - 1, 0)

defvjp(cdf, lambda ans, k, mu: unbroadcast_f(mu, lambda g: g * -pmf(np.floor(k), mu)), argnums=[1])
defvjp(logpmf, lambda ans, k, mu: unbroadcast_f(mu, lambda g: g * grad_poisson_logpmf(k, mu)), argnums=[1])
defvjp(pmf, lambda ans, k, mu: unbroadcast_f(mu, lambda g: g * ans * grad_poisson_logpmf(k, mu)), argnums=[1])
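A check of the mu-gradient, as an editorial sketch (only the gradient w.r.t. mu, argnum 1, is registered above):

from autograd import grad

print(grad(logpmf, 1)(3.0, 2.0))  # d/dmu log pmf(k=3; mu=2)
print(3.0 / 2.0 - 1.0)            # closed form k/mu - 1 = 0.5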
Example No. 25
from .util import memoize, check_psd
from .convolution import convolve_sum_axes, transposed_convolve_sum_axes, sum_trailing_antidiagonals, add_trailing_axis, roll_trailing_axes, unroll_trailing_axes
from .einsum2 import einsum1, einsum2


def par_einsum(*args):
    return einsum2(*args)


def convolve_trailing_axes(A, B):
    A = np.reshape(A, list(A.shape) + [1])
    B = np.reshape(B, list(B.shape) + [1])
    return convolve_sum_axes(A, B)


convolve_sum_axes = primitive(convolve_sum_axes)
transposed_convolve_sum_axes = primitive(transposed_convolve_sum_axes)

defvjp(
    convolve_sum_axes,
    lambda ans, A, B: lambda g: transposed_convolve_sum_axes(g, B),
    lambda ans, A, B: lambda g: transposed_convolve_sum_axes(
        np.transpose(g, (0, 2, 1, 3)), A))
#convolve_sum_axes.defgrad(lambda ans, A, B: lambda g: transposed_convolve_sum_axes(g, B))
#convolve_sum_axes.defgrad(lambda ans, A, B: lambda g: transposed_convolve_sum_axes(np.transpose(g, (0, 2, 1, 3)), A), argnum=1)
#convolve_sum_axes.defvjp(
#    lambda g, ans, vs, gvs, A, B: transposed_convolve_sum_axes(g, B))
#convolve_sum_axes.defvjp(
#    lambda g, ans, vs, gvs, A, B: transposed_convolve_sum_axes(
#        np.transpose(g, (0, 2, 1, 3)), A),
#    argnum=1)
Example No. 26
    return _scipy_gammainc(k, x)


delta = 1e-6
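# The derivative of gammainc w.r.t. `a` has no simple closed form, so the VJP
# below approximates it with a fourth-order central difference in `a`:
#   (-f(a + 2*delta) + 8*f(a + delta) - 8*f(a - delta) + f(a - 2*delta)) / (12*delta)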

defvjp(
    gammainc,
    lambda ans, a, x: unbroadcast_f(
        a,
        lambda g: g *
        (-gammainc(a + 2 * delta, x) + 8 * gammainc(a + delta, x) - 8 *
         gammainc(a - delta, x) + gammainc(a - 2 * delta, x)) / (12 * delta),
    ),
    lambda ans, a, x: unbroadcast_f(
        x, lambda g: g * np.exp(-x) * np.power(x, a - 1) / gamma(a)),
)

gammaincc = primitive(_scipy_gammaincc)

defvjp(
    gammaincc,
    lambda ans, a, x: unbroadcast_f(
        a,
        lambda g: g *
        (-gammaincc(a + 2 * delta, x) + 8 * gammaincc(a + delta, x) - 8 *
         gammaincc(a - delta, x) + gammaincc(a - 2 * delta, x)) / (12 * delta),
    ),
    lambda ans, a, x: unbroadcast_f(
        x, lambda g: -g * np.exp(-x) * np.power(x, a - 1) / gamma(a)),
)
Example No. 27
from __future__ import absolute_import, division

import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma

cdf = primitive(scipy.stats.chi2.cdf)
logpdf = primitive(scipy.stats.chi2.logpdf)
pdf = primitive(scipy.stats.chi2.pdf)


def grad_chi2_logpdf(x, df):
    return np.where(df % 1 == 0, (df - x - 2) / (2 * x), 0)


defvjp(cdf,
       lambda ans, x, df: unbroadcast_f(
           x, lambda g: g * np.power(2., -df / 2) * np.exp(-x / 2) * np.power(
               x, df / 2 - 1) / gamma(df / 2)),
       argnums=[0])
defvjp(
    logpdf,
    lambda ans, x, df: unbroadcast_f(x, lambda g: g * grad_chi2_logpdf(x, df)),
    argnums=[0])
defvjp(pdf,
       lambda ans, x, df: unbroadcast_f(
           x, lambda g: g * ans * grad_chi2_logpdf(x, df)),
       argnums=[0])
Example No. 28
from __future__ import absolute_import
import scipy.special
import autograd.numpy as np
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f

### Beta function ###
beta    = primitive(scipy.special.beta)
betainc = primitive(scipy.special.betainc)
betaln  = primitive(scipy.special.betaln)

defvjp(beta,
       lambda ans, a, b: unbroadcast_f(a, lambda g: g * ans * (psi(a) - psi(a + b))),
       lambda ans, a, b: unbroadcast_f(b, lambda g: g * ans * (psi(b) - psi(a + b))))
defvjp(betainc,
       lambda ans, a, b, x: unbroadcast_f(x, lambda g: g * np.power(x, a - 1) * np.power(1 - x, b - 1) / beta(a, b)),
       argnums=[2])
defvjp(betaln,
       lambda ans, a, b: unbroadcast_f(a, lambda g: g * (psi(a) - psi(a + b))),
       lambda ans, a, b: unbroadcast_f(b, lambda g: g * (psi(b) - psi(a + b))))

### Gamma functions ###
polygamma    = primitive(scipy.special.polygamma)
psi          = primitive(scipy.special.psi)        # psi(x) is just polygamma(0, x)
digamma      = primitive(scipy.special.digamma)    # digamma is another name for psi.
gamma        = primitive(scipy.special.gamma)
gammaln      = primitive(scipy.special.gammaln)
gammainc     = primitive(scipy.special.gammainc)
gammaincc    = primitive(scipy.special.gammaincc)
gammasgn     = primitive(scipy.special.gammasgn)
rgamma       = primitive(scipy.special.rgamma)
Example No. 29
def test_no_jvp_def():
    fun = primitive(lambda x: 2. * x)
    with pytest.raises(NotImplementedError):
        deriv(fun)(1.)
Example No. 30
from __future__ import absolute_import
import scipy.misc, scipy.special
from autograd.extend import primitive, defvjp
import autograd.numpy as anp
from autograd.numpy.numpy_vjps import repeat_to_match_shape

logsumexp = primitive(scipy.special.logsumexp)

def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
    shape, dtype = anp.shape(x), anp.result_type(x)
    def vjp(g):
        g_repeated,   _ = repeat_to_match_shape(g,   shape, dtype, axis, keepdims)
        ans_repeated, _ = repeat_to_match_shape(ans, shape, dtype, axis, keepdims)
        return g_repeated * b * anp.exp(x - ans_repeated)
    return vjp

defvjp(logsumexp, make_grad_logsumexp)
Example No. 31
from __future__ import absolute_import

import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f

cdf = primitive(scipy.stats.poisson.cdf)
logpmf = primitive(scipy.stats.poisson.logpmf)
pmf = primitive(scipy.stats.poisson.pmf)


def grad_poisson_logpmf(k, mu):
    return np.where(k % 1 == 0, k / mu - 1, 0)


defvjp(
    cdf,
    lambda ans, k, mu: unbroadcast_f(mu, lambda g: g * -pmf(np.floor(k), mu)),
    argnums=[1])
defvjp(logpmf,
       lambda ans, k, mu: unbroadcast_f(
           mu, lambda g: g * grad_poisson_logpmf(k, mu)),
       argnums=[1])
defvjp(pmf,
       lambda ans, k, mu: unbroadcast_f(
           mu, lambda g: g * ans * grad_poisson_logpmf(k, mu)),
       argnums=[1])
Example No. 32
from __future__ import absolute_import

import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import gamma, psi

cdf = primitive(scipy.stats.gamma.cdf)
logpdf = primitive(scipy.stats.gamma.logpdf)
pdf = primitive(scipy.stats.gamma.pdf)

def grad_gamma_logpdf_arg0(x, a):
    return (a - x - 1) / x

def grad_gamma_logpdf_arg1(x, a):
    return np.log(x) - psi(a)

defvjp(cdf, lambda ans, x, a: unbroadcast_f(x, lambda g: g * np.exp(-x) * np.power(x, a-1) / gamma(a)), argnums=[0])
defvjp(logpdf,
       lambda ans, x, a: unbroadcast_f(x, lambda g: g * grad_gamma_logpdf_arg0(x, a)),
       lambda ans, x, a: unbroadcast_f(a, lambda g: g * grad_gamma_logpdf_arg1(x, a)))
defvjp(pdf,
       lambda ans, x, a: unbroadcast_f(x, lambda g: g * ans * grad_gamma_logpdf_arg0(x, a)),
       lambda ans, x, a: unbroadcast_f(a, lambda g: g * ans * grad_gamma_logpdf_arg1(x, a)))
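A check of the shape-parameter gradient, as an editorial sketch (assumes the module above): d/da logpdf(x, a) = log(x) - psi(a).

from autograd import grad

print(grad(logpdf, 1)(2.0, 3.0))  # gradient w.r.t. a
print(np.log(2.0) - psi(3.0))     # closed form, should match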
Example No. 33
from autograd.extend import primitive, defvjp, vspace
from autograd import grad, vector_jacobian_product
import numpy as np  # plain numpy is needed below: `T` and the primitives wrap raw numpy functions
import autograd.numpy as npa

""" Define here various primitives needed for the main code 
To use with both numpy and autograd backends, define the autograd primitive of 
a numpy function fnc as fnc_ag, and then define the vjp"""

def T(x): return np.swapaxes(x, -1, -2)

"""=========== EXPAND ARRAY TO A GIVEN SHAPE =========== """

# extend(vals, inds, shape) makes an array of shape `shape` where indices 
# `inds` have values `vals` 
extend_ag = primitive(extend)

def vjp_maker_extend(ans, vals, inds, shape):
    def vjp(g):
        return g[inds]
    return vjp

defvjp(extend_ag, vjp_maker_extend, None, None)

"""=========== NP.SQRT STABLE AROUND 0 =========== """
sqrt_ag = primitive(np.sqrt)

def vjp_maker_sqrt(ans, x):
    def vjp(g):
        return g * 0.5 * (x + 1e-10)**0.5/(x + 1e-10)
        # return np.where(np.abs(x) > 1e-10, g * 0.5 * x**-0.5, 0.)
Example No. 34
"""Gradients of the normal distribution."""
from __future__ import absolute_import
import scipy.stats
import autograd.numpy as anp
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f

pdf = primitive(scipy.stats.norm.pdf)
cdf = primitive(scipy.stats.norm.cdf)
sf = primitive(scipy.stats.norm.sf)
logpdf = primitive(scipy.stats.norm.logpdf)
logcdf = primitive(scipy.stats.norm.logcdf)
logsf = primitive(scipy.stats.norm.logsf)

defvjp(pdf,
       lambda ans, x, loc=0.0, scale=1.0:
       unbroadcast_f(x, lambda g: -g * ans * (x - loc) / scale**2),
       lambda ans, x, loc=0.0, scale=1.0:
       unbroadcast_f(loc, lambda g: g * ans * (x - loc) / scale**2),
       lambda ans, x, loc=0.0, scale=1.0:
       unbroadcast_f(scale, lambda g: g * ans * (((x - loc)/scale)**2 - 1.0)/scale))

defvjp(cdf,
       lambda ans, x, loc=0.0, scale=1.0:
       unbroadcast_f(x, lambda g: g * pdf(x, loc, scale)) ,
       lambda ans, x, loc=0.0, scale=1.0:
       unbroadcast_f(loc, lambda g: -g * pdf(x, loc, scale)),
       lambda ans, x, loc=0.0, scale=1.0:
       unbroadcast_f(scale, lambda g: -g * pdf(x, loc, scale)*(x-loc)/scale))

defvjp(logpdf,
Example No. 35
import use_autograd

if use_autograd.use == 0:
    import numpy as np
    import numpy as npf
    inv = np.linalg.inv
else:
    import autograd.numpy as np
    from autograd import grad
    from autograd.extend import primitive, defvjp
    from primitives import vjp_maker_inv
    import numpy as npf

    inv = primitive(npf.linalg.inv)
    defvjp(inv, vjp_maker_inv)


def GetEpsilon_FFT(dN, eps_grid, G):
    '''dN = 1/Nx/Ny
    eps_grid is a numpy 2d array in the format of (Nx,Ny)
    
    For now, assume epsilon is isotropic
    if epsilon has xz,yz component, just simply add them to off-diagonal eps2
    '''

    eps_fft = get_conv(dN, eps_grid, G)
    epsinv = inv(eps_fft)
    # somehow np.block doesn't work with autograd
    # eps2 = np.block([[eps_fft,np.zeros_like(eps_fft)],
    #                  [np.zeros_like(eps_fft),eps_fft]])
"""
From Appendix B in the paper
Implementation of autograd
"""

import scipy.integrate

import autograd.numpy as np
from autograd.extend import primitive, defvjp_argnums
from autograd import make_vjp
from autograd.misc import flatten
from autograd.builtins import tuple

odeint = primitive(scipy.integrate.odeint)


def grad_odeint_all(yt, func, y0, t, func_args, **kwargs):
	"""
	Extended from "Scalable Inference of Ordinary Differential
	Equation Models of Biochemical Processes", Sec. 2.4.2,
	Fabian Froehlich, Carolin Loos, Jan Hasenauer, 2017
	https://arxiv.org/pdf/1711.08079.pdf
	"""

	T, D = np.shape(yt)
	flat_args, unflatten = flatten(func_args)

	def flat_func(y, t, flat_args):
		return func(y, t, *unflatten(flat_args))

Example No. 37
from __future__ import absolute_import
import scipy.special
import autograd.numpy as np
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f

### Beta function ###
beta    = primitive(scipy.special.beta)
betainc = primitive(scipy.special.betainc)
betaln  = primitive(scipy.special.betaln)

defvjp(beta,
       lambda ans, a, b: unbroadcast_f(a, lambda g: g * ans * (psi(a) - psi(a + b))),
       lambda ans, a, b: unbroadcast_f(b, lambda g: g * ans * (psi(b) - psi(a + b))))
defvjp(betainc,
       lambda ans, a, b, x: unbroadcast_f(x, lambda g: g * np.power(x, a - 1) * np.power(1 - x, b - 1) / beta(a, b)),
       argnums=[2])
defvjp(betaln,
       lambda ans, a, b: unbroadcast_f(a, lambda g: g * (psi(a) - psi(a + b))),
       lambda ans, a, b: unbroadcast_f(b, lambda g: g * (psi(b) - psi(a + b))))

### Gamma functions ###
polygamma    = primitive(scipy.special.polygamma)
psi          = primitive(scipy.special.psi)        # psi(x) is just polygamma(0, x)
digamma      = primitive(scipy.special.digamma)    # digamma is another name for psi.
gamma        = primitive(scipy.special.gamma)
gammaln      = primitive(scipy.special.gammaln)
gammainc     = primitive(scipy.special.gammainc)
gammaincc    = primitive(scipy.special.gammaincc)
gammasgn     = primitive(scipy.special.gammasgn)
rgamma       = primitive(scipy.special.rgamma)
Example No. 38
"""Gradients of the normal distribution."""
from __future__ import absolute_import
import scipy.stats
import autograd.numpy as anp
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f

pdf = primitive(scipy.stats.norm.pdf)
cdf = primitive(scipy.stats.norm.cdf)
sf = primitive(scipy.stats.norm.sf)
logpdf = primitive(scipy.stats.norm.logpdf)
logcdf = primitive(scipy.stats.norm.logcdf)
logsf = primitive(scipy.stats.norm.logsf)

defvjp(pdf,
       lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
           x, lambda g: -g * ans * (x - loc) / scale**2),
       lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
           loc, lambda g: g * ans * (x - loc) / scale**2),
       lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
           scale, lambda g: g * ans * (((x - loc) / scale)**2 - 1.0) / scale))

defvjp(cdf,
       lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
           x, lambda g: g * pdf(x, loc, scale)),
       lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
           loc, lambda g: -g * pdf(x, loc, scale)),
       lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
           scale, lambda g: -g * pdf(x, loc, scale) * (x - loc) / scale))

defvjp(logpdf,
Example No. 39
from __future__ import absolute_import
import scipy.stats

import autograd.numpy as np
from autograd.scipy.special import digamma
from autograd.extend import primitive, defvjp

rvs    = primitive(scipy.stats.dirichlet.rvs)
pdf    = primitive(scipy.stats.dirichlet.pdf)
logpdf = primitive(scipy.stats.dirichlet.logpdf)

defvjp(logpdf,lambda ans, x, alpha: lambda g:
              g * (alpha - 1) / x,
              lambda ans, x, alpha: lambda g:
              g * (digamma(np.sum(alpha)) - digamma(alpha) + np.log(x)))

# Same as log pdf, but multiplied by the pdf (ans).
defvjp(pdf,lambda ans, x, alpha: lambda g:
           g * ans * (alpha - 1) / x,
           lambda ans, x, alpha: lambda g:
           g * ans * (digamma(np.sum(alpha)) - digamma(alpha) + np.log(x)))
Example No. 40
from __future__ import absolute_import
import scipy.stats

import autograd.numpy as np
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.extend import primitive, defvjp


pdf    =  primitive(scipy.stats.multivariate_normal.pdf)
logpdf =  primitive(scipy.stats.multivariate_normal.logpdf)
entropy = primitive(scipy.stats.multivariate_normal.entropy)

# With thanks to Eric Bresch.
# Some formulas are from
# "An extended collection of matrix derivative results
#  for forward and reverse mode algorithmic differentiation"
# by Mike Giles
# https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf

def generalized_outer_product(x):
    if np.ndim(x) == 1:
        return np.outer(x, x)
    return np.matmul(x, np.swapaxes(x, -1, -2))

def covgrad(x, mean, cov, allow_singular=False):
    if allow_singular:
        raise NotImplementedError("The multivariate normal pdf is not "
                "differentiable w.r.t. a singular covariance matix")
    J = np.linalg.inv(cov)
    solved = np.matmul(J, np.expand_dims(x - mean, -1))
    return 1./2 * (generalized_outer_product(solved) - J)