Code Example #1
File: _core.py  Project: vishalbelsare/udiff
def translate_vjp(vjpfun, fun, argnum):
    # Normalize a VJP spec: None means the selected argument receives a
    # zero gradient; a callable is used as-is.
    if vjpfun is None:
        return lambda ans, *args, **kwargs: lambda g: np.zeros_like(args[argnum])
    elif callable(vjpfun):
        return vjpfun
    else:
        raise Exception("Bad VJP '{}' for '{}'".format(vjpfun, fun.__name__))
Code Example #2
File: _core.py  Project: vishalbelsare/udiff
def translate_jvp(jvpfun, fun, argnum):
    # Normalize a JVP spec: None means a zero tangent; "same" means fun
    # is linear in the argument, so applying fun with the tangent g
    # substituted for that argument yields the JVP; a callable is as-is.
    if jvpfun is None:
        return lambda ans, *a, **k: lambda g: np.zeros_like(ans)
    elif jvpfun == "same":
        return lambda ans, *args, **kwargs: lambda g: fun(
            *subval(args, argnum, g), **kwargs
        )
    elif callable(jvpfun):
        return jvpfun
    else:
        raise Exception("Bad JVP '{}' for '{}'".format(jvpfun, fun.__name__))
Code Example #3
File: _jvp_diffs.py  Project: vishalbelsare/udiff
    def jvp(g):
        # Number of elements reduced over, as determined by the axis spec.
        if axis is None:
            num_reps = np.size(g)
        elif isinstance(axis, int):
            num_reps = np.shape(g)[axis]
        elif isinstance(axis, tuple):
            num_reps = np.prod(np.array(np.shape(g))[list(axis)])

        if num_reps <= 1:
            return np.zeros_like(ans)
        x_minus_mean = np.conj(x - np.mean(x, axis=axis, keepdims=True))
        return np.sum(np.real(g * x_minus_mean), axis=axis,
                      keepdims=keepdims) / ((num_reps - ddof) * ans)
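The ((num_reps - ddof) * ans) denominator suggests this closure is the JVP of np.std (differentiating the square root of the variance). A hedged, standalone finite-difference check in plain NumPy, restricted to real inputs:

import numpy as np

def std_jvp(x, g, axis=None, ddof=0, keepdims=False):
    # Standalone version of the closure above for real x.
    ans = np.std(x, axis=axis, ddof=ddof, keepdims=keepdims)
    num_reps = np.size(g) if axis is None else np.shape(g)[axis]
    x_minus_mean = x - np.mean(x, axis=axis, keepdims=True)
    return np.sum(g * x_minus_mean, axis=axis,
                  keepdims=keepdims) / ((num_reps - ddof) * ans)

x = np.array([1.0, 2.0, 4.0, 7.0])
g = np.array([0.1, -0.2, 0.3, 0.4])    # tangent direction
eps = 1e-6
fd = (np.std(x + eps * g) - np.std(x - eps * g)) / (2 * eps)
print(np.allclose(std_jvp(x, g), fd))  # -> True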
Code Example #4
    def to(self, x, grad_variables=None, jacobian=False):
        """
        Calculate the VJP or Jacobian matrix of self to x.

        Parameters
        ----------
        x : VJPDiffArray
            The denominator in derivative.
        grad_variables : VJPDiffArray
            Gradient of the numerator in derivative.
        jacobian : bool
            Flag identifies whether to calculate the jacobian logo.
            If set ``True``, it will return jacobian matrix instead of vjp.

        Examples
        --------
        >>> with ua.set_backend(udiff.DiffArrayBackend(numpy_backend), coerce=True):
        ...
        ...    x1 = np.array([2])
        ...    x2 = np.array([5])
        ...    y = np.log(x1) + x1 * x2 - np.sin(x2)
        ...    x1_diff = y.to(x1)
        ...    print(np.allclose(x1_diff.value, [5.5]))
        True
        """
        if jacobian:
            if x._jacobian is None or self not in x._jacobian:
                # Build the Jacobian row by row: seed the backward pass
                # with a one-hot cotangent for every output position.
                for position in itertools.product(
                        *[range(i) for i in np.shape(self)]):
                    grad_variables = np.zeros_like(self.value)
                    grad_variables.value[position] = 1
                    self._backward_jacobian(grad_variables, self, position, x)

            # Stack the per-position rows into shape(self) + shape(x).
            x._jacobian[self] = np.reshape(
                np.stack(x._jacobian[self].values()),
                np.shape(self) + np.shape(x))
            return x._jacobian[self]
        else:
            if x._diff is None or self not in x._diff:
                self._backward(grad_variables, self, x)
            return x._diff[self]
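Extrapolating from the doctest above, a hedged sketch of the jacobian=True path (same backend setup as the docstring; elementwise-power support and the exact .value attribute access are assumptions, but the expected diagonal follows from differentiating an elementwise square):

>>> with ua.set_backend(udiff.DiffArrayBackend(numpy_backend), coerce=True):
...
...    x = np.array([1.0, 2.0, 3.0])
...    y = x ** 2
...    jac = y.to(x, jacobian=True)
...    # Elementwise square: the Jacobian should be diag(2 * x).
...    print(np.allclose(jac.value, np.diag([2.0, 4.0, 6.0])))
True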
Code Example #5
File: _vjp_diffs.py  Project: vishalbelsare/udiff
defvjp(
    np.moveaxis,
    lambda ans, a, source, destination: lambda g: np.moveaxis(g, destination, source),
)
defvjp(np.real_if_close, lambda ans, x: lambda g: match_complex(x, g))
defvjp(np.real, lambda ans, x: lambda g: match_complex(x, g))
defvjp(np.imag, lambda ans, x: lambda g: match_complex(x, -1j * g))
defvjp(np.conj, lambda ans, x: lambda g: np.conj(g))
defvjp(np.conjugate, lambda ans, x: lambda g: np.conj(g))
defvjp(
    np.angle,
    lambda ans, x: lambda g: match_complex(x, g * np.conj(x * 1j) / np.abs(x) ** 2),
)
defvjp(
    np.where,
    None,
    lambda ans, c, x=None, y=None: lambda g: np.where(c, g, np.zeros_like(g)),
    lambda ans, c, x=None, y=None: lambda g: np.where(c, np.zeros_like(g), g),
)
defvjp(
    np.cross,
    lambda ans, a, b, axisa=-1, axisb=-1, axisc=-1, axis=None: lambda g: np.cross(
        b, g, axisb, axisc, axisa, axis
    ),
    lambda ans, a, b, axisa=-1, axisb=-1, axisc=-1, axis=None: lambda g: np.cross(
        g, a, axisc, axisa, axisb, axis
    ),
)
defvjp(
    np.linspace,
    lambda ans, start, stop, num: lambda g: np.dot(np.linspace(1.0, 0.0, num), g),
    lambda ans, start, stop, num: lambda g: np.dot(np.linspace(0.0, 1.0, num), g),
)
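For comparison, the same registration pattern with autograd's public extend API, which these udiff tables mirror; my_tanh here is a hypothetical primitive used only for illustration, not part of udiff:

import autograd.numpy as anp
from autograd.extend import primitive, defvjp
from autograd import grad

@primitive
def my_tanh(x):              # hypothetical custom primitive
    return anp.tanh(x)

# VJP factory: given the forward answer and inputs, return g -> g * dtanh/dx.
# Reusing ans avoids recomputing tanh(x), since dtanh/dx = 1 - tanh(x)**2.
defvjp(my_tanh, lambda ans, x: lambda g: g * (1 - ans ** 2))

print(grad(my_tanh)(0.5))    # -> 1 - tanh(0.5)**2 ≈ 0.7864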
Code Example #6
    def to(self, x, grad_variables=None, jacobian=False):
        """
        Calculate the JVP or Jacobian matrix of self to x.

        Parameters
        ----------
        x : JVPDiffArray
            The denominator in derivative.
        grad_variables : JVPDiffArray
            Gradient assigned to the x.
        jacobian : bool
            Flag identifies whether to calculate the jacobian logo.
            If set ``True``, it will return jacobian matrix instead of jvp.

        Examples
        --------
        >>> with ua.set_backend(udiff.DiffArrayBackend(numpy_backend, mode="jvp"), coerce=True):
        ...
        ...    x1 = np.array([2])
        ...    x2 = np.array([5])
        ...    y = np.log(x1) + x1 * x2 - np.sin(x2)
        ...    x1_diff = y.to(x1)
        ...    print(np.allclose(x1_diff, [5.5]))
        True
        """
        if self._jvp and x not in self._jvp:
            raise ValueError(
                "self does not depend on x; check that the base variable is correct.")

        if jacobian:
            if self._jacobian is None:
                self._jacobian = {}

            if x not in self._jacobian:
                self._jacobian[x] = {}
                # Build the Jacobian column by column: seed the forward
                # pass with a one-hot tangent for every input position.
                for position in itertools.product(
                        *[range(i) for i in np.shape(x)]):
                    grad_variables = np.zeros_like(x)
                    grad_variables.value[position] = 1
                    self._jacobian[x][position] = self._forward(
                        x, grad_variables)

            # Stacking yields shape(x) + shape(self); transpose so the
            # output axes come first, matching the VJP-mode layout.
            old_axes = tuple(range(np.ndim(self) + np.ndim(x)))
            new_axes = old_axes[np.ndim(x):] + old_axes[:np.ndim(x)]
            self._jacobian[x] = np.transpose(
                np.reshape(
                    np.stack(self._jacobian[x].values()),
                    np.shape(x) + np.shape(self),
                ),
                new_axes,
            )
            return self._jacobian[x]
        else:
            if self._diff is None:
                self._diff = {}

            if x not in self._diff:
                if grad_variables is None:
                    # Default to an all-ones seed shaped like self.
                    grad_variables = np.ones_like(self)

                self._diff[x] = self._forward(x, grad_variables)

            return self._diff[x]
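Mirroring the doctest above, a hedged sketch of the jacobian=True path in forward mode (same backend setup as the docstring; elementwise-power support and the exact result layout are assumptions). The transpose in the code is why the result lands in the same output-axes-first layout as the VJP-mode Jacobian:

>>> with ua.set_backend(udiff.DiffArrayBackend(numpy_backend, mode="jvp"), coerce=True):
...
...    x = np.array([1.0, 2.0, 3.0])
...    y = x ** 2
...    jac = y.to(x, jacobian=True)
...    print(np.allclose(jac, np.diag([2.0, 4.0, 6.0])))
True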