Example 1
                grad_outputs=gyfcn,
                create_graph=torch.is_grad_enabled())
            grad_nontensor_params = [
                None for _ in range(param_sep.nnontensors())
            ]
            grad_params = param_sep.reconstruct_params(grad_tensor_params,
                                                       grad_nontensor_params)

        return (None, None, None, None, None, *grad_params)


def _get_rootfinder_default_method(method):
    if method is None:
        return "broyden1"
    else:
        return method


def _get_minimizer_default_method(method):
    if method is None:
        return "broyden1"
    else:
        return method


# docstring completion
rf_methods: Sequence[Callable] = [broyden1, broyden2, linearmixing]
rootfinder.__doc__ = get_methods_docstr(rootfinder, rf_methods)
equilibrium.__doc__ = get_methods_docstr(equilibrium, rf_methods)
minimize.__doc__ = get_methods_docstr(minimize, rf_methods)
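
The last three lines are the docstring-completion idiom shared by all of these snippets: get_methods_docstr takes the public function and the callables that back its method= option, and appends each method's documented keyword arguments to the public docstring. Below is a minimal sketch of the same idiom with hypothetical functions (my_solver and newton are illustrations, not library code; the import of get_methods_docstr is assumed to be the same one used by the snippets above):

def my_solver(fcn, x0, method=None, **config):
    """Find x such that fcn(x) == 0, starting from x0."""
    ...

def newton(fcn, x0, tol=1e-8, maxiter=100):
    """
    Keyword arguments
    -----------------
    tol: float
        Stopping tolerance on the residual norm.
    maxiter: int
        Maximum number of iterations.
    """
    ...

# append newton's keyword documentation to my_solver's docstring
my_solver.__doc__ = get_methods_docstr(my_solver, [newton])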
Example 2
                             **ctx.bck_config)
            # only take the output for the earliest time
            states = [out[-1] for out in outs]
            states[y_index] = yt[t_flip_idx]
            # gyt is the contribution from the input grad_y
            # gy0 is the gradient propagated from the later time step
            states[dLdy_index] = grad_yt[t_flip_idx] + states[dLdy_index]

        if ts_requires_grad:
            grad_ts[0] = states[dLdt_index].reshape(-1)

        grad_y0 = states[dLdy_index]  # dL/dy0, (*ny)
        if ts_requires_grad:
            grad_ts = torch.cat(grad_ts).reshape(*ts.shape)
        grad_tensor_params = states[dLdp_slice]
        grad_ntensor_params = [
            None for _ in range(len(allparams) - ntensor_params)
        ]
        grad_params = param_sep.reconstruct_params(grad_tensor_params,
                                                   grad_ntensor_params)
        return (None, grad_ts, None, None, None, grad_y0, *grad_params)


# docstring completion
ivp_methods: Dict[str, Callable] = {
    "rk45": rk45_adaptive,
    "rk23": rk23_adaptive,
    "rk4": rk4_ivp,
}
solve_ivp.__doc__ = get_methods_docstr(solve_ivp, ivp_methods)
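
The backward above follows the usual torch.autograd.Function convention: it returns one entry per forward input, with None for inputs that are non-tensors or need no gradient (here everything except ts, y0 and the tensor parameters). A small self-contained illustration of that convention, unrelated to solve_ivp itself:

import torch

class ScaleFn(torch.autograd.Function):
    # forward inputs: (y0, scale, label); label is a non-tensor argument
    @staticmethod
    def forward(ctx, y0, scale, label):
        ctx.scale = scale
        return y0 * scale

    @staticmethod
    def backward(ctx, grad_out):
        # one gradient per forward input: y0 gets a tensor, the others get None
        return grad_out * ctx.scale, None, None

y0 = torch.randn(3, requires_grad=True)
ScaleFn.apply(y0, 2.0, "demo").sum().backward()
print(y0.grad)  # tensor([2., 2., 2.])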
Example 3
        Returns
        -------
        torch.Tensor
            The integrated values.
        """
        swapaxes = dim != -1
        if swapaxes:
            y = y.transpose(dim, -1)
        if y.shape[-1] != self.nx:
            raise RuntimeError("The length of integrated dimension does not match with x")
        res = self.obj.integrate(y)
        if keepdim:
            res = res.unsqueeze(-1)
        if swapaxes:
            res = res.transpose(dim, -1)
        return res

    def getparamnames(self, methodname: str, prefix: str = "") -> List[str]:
        """"""
        return self.obj.getparamnames(methodname, prefix=prefix + "obj.")


# docstring completion
_squad_methods = {
    "cspline": CubicSplineSQuad,
    # "simpson": SimpsonSQuad,
    "trapz": TrapzSQuad,
}
SQuad.__doc__ = get_methods_docstr(SQuad, _squad_methods)
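
The transpose-and-restore logic in integrate above is a generic way to let a routine that only operates on the last axis accept an arbitrary dim argument. A standalone sketch of the same trick, with a plain sum standing in for self.obj.integrate:

import torch

def reduce_last_dim(y):
    # stand-in for an operation that only works along the last axis
    return y.sum(dim=-1)

def reduce_along(y, dim=-1, keepdim=False):
    # move the requested axis to the end, apply the op, then move it back
    swapaxes = dim != -1
    if swapaxes:
        y = y.transpose(dim, -1)
    res = reduce_last_dim(y)
    if keepdim:
        res = res.unsqueeze(-1)
    if swapaxes:
        res = res.transpose(dim, -1)
    return res

y = torch.arange(24.0).reshape(2, 3, 4)
assert torch.allclose(reduce_along(y, dim=1), y.sum(dim=1))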
Example 4
            The values at the given position with shape ``(*BY, nr)``.
            If ``y`` has been specified during ``__init__`` and also
            specified here, the value of ``y`` given here will be ignored.
            If ``y`` is never specified, an error will be raised.

        Returns
        -------
        torch.Tensor
            The interpolated values with shape ``(*BY, nrq)``.
    """
    def __init__(self, x, y=None, method=None, **fwd_options):
        if method is None:
            method = "cspline"
        if method == "cspline":
            self.obj = CubicSpline1D(x, y, **fwd_options)
        else:
            raise RuntimeError("Unknown interp1d method: %s" % method)

    def __call__(self, xq, y=None):
        return self.obj(xq, y)

    def getparamnames(self, methodname, prefix=""):
        return [prefix + "obj." + c for c in self.obj.getparamnames()]


# docstring completion
interp1d_methods = {
    "cspline": CubicSpline1D,
}
Interp1D.__doc__ = get_methods_docstr(Interp1D, interp1d_methods)
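
A short usage sketch of the dispatcher above, based only on the signature and shapes shown in the snippet (the interpolated values have shape (*BY, nrq)); the sample data is purely illustrative:

import torch

x = torch.linspace(0, 1, 10)      # nr = 10 sample positions
y = torch.sin(2 * x)              # values at x, shape (nr,)
xq = torch.linspace(0, 1, 100)    # nrq = 100 query positions

interp = Interp1D(x, y, method="cspline")
yq = interp(xq)                   # interpolated values, shape (nrq,)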
Example 5
            grad_mparams = torch.autograd.grad(
                (mloss, ),
                mparams,
                grad_outputs=(v, ),
                create_graph=torch.is_grad_enabled(),
                allow_unused=True)

        return (None, grad_B, grad_E, None, None, None, None, None,
                *grad_params, *grad_mparams)


def custom_exactsolve(A, B, E=None, M=None, **options):
    # A: (*BA, na, na)
    # B: (*BB, na, ncols)
    # E: (*BE, ncols)
    # M: (*BM, na, na)
    return exactsolve(A, B, E, M)


# docstring completion
_solve_methods = {
    "cg": cg,
    "bicgstab": bicgstab,
    "exactsolve": exactsolve,
    "broyden1": broyden1_solve,
    "scipy_gmres": wrap_gmres,
}
ignore_kwargs = ["E", "M", "mparams"]
solve.__doc__ = get_methods_docstr(solve, _solve_methods, ignore_kwargs)
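
The torch.autograd.grad call above passes grad_outputs because the output is not a scalar, and allow_unused=True so that parameters which do not affect the output come back as None instead of raising. A minimal standalone illustration of those two options:

import torch

a = torch.randn(3, requires_grad=True)
b = torch.randn(3, requires_grad=True)   # deliberately unused below
out = a * 2.0                            # non-scalar output
v = torch.ones_like(out)                 # vector for the vector-Jacobian product

ga, gb = torch.autograd.grad(
    (out, ), (a, b),
    grad_outputs=(v, ),
    create_graph=torch.is_grad_enabled(),
    allow_unused=True)
print(ga)  # tensor of 2.0's
print(gb)  # None, because b does not contribute to out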
Example 6
                tensor_params_copy,
                grad_outputs=gyfcn,
                create_graph=torch.is_grad_enabled())
            grad_nontensor_params = [
                None for _ in range(param_sep.nnontensors())
            ]
            grad_params = param_sep.reconstruct_params(grad_tensor_params,
                                                       grad_nontensor_params)

        return (None, None, None, None, None, *grad_params)


def _get_rootfinder_default_method(method):
    if method is None:
        return "broyden1"
    else:
        return method


def _get_minimizer_default_method(method):
    if method is None:
        return "broyden1"
    else:
        return method


# docstring completion
rootfinder.__doc__ = get_methods_docstr(rootfinder, [broyden1])
equilibrium.__doc__ = get_methods_docstr(equilibrium, [broyden1])
minimize.__doc__ = get_methods_docstr(minimize, [broyden1])
Example 7
            log_pfcn,
            x0=xsamples[0],  # unused because xsamples is set
            xsamples=xsamples,
            wsamples=wsamples,
            fparams=(grad_epf, epf, *fptensor_params_copy),
            pparams=pparams,
            fwd_options=ctx.bck_config,
            bck_options=ctx.bck_config)
        dLdthetaf = aug_epfs[:nftensorparams]
        dLdthetap = aug_epfs[nftensorparams:]

        # combine the tensor and non-tensor gradients for fparams and pparams
        dLdfnontensor = [None for _ in range(ctx.fparam_sep.nnontensors())]
        dLdpnontensor = [None for _ in range(ctx.pparam_sep.nnontensors())]
        dLdtf = ctx.fparam_sep.reconstruct_params(dLdthetaf, dLdfnontensor)
        dLdtp = ctx.pparam_sep.reconstruct_params(dLdthetap, dLdpnontensor)
        return (None, None, None, None, None, None, None, None, None, None,
                *dLdtf, *dLdtp)


def _integrate(ffcn, xsamples, wsamples, fparams):
    nsamples = len(xsamples)
    res = 0.0
    for x, w in zip(xsamples, wsamples):
        res = res + ffcn(x, *fparams) * w
    return res


# docstring completion
mcquad.__doc__ = get_methods_docstr(mcquad, [mh, mhcustom])
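
_integrate above is simply a weighted sum of the integrand over the given samples. With midpoint samples on [0, 1] and equal weights 1/n it reduces to the midpoint rule; the integrand f below is only an illustration:

import torch

def f(x, scale):
    return scale * x * x

n = 100
xsamples = (torch.arange(n, dtype=torch.float64) + 0.5) / n   # bin midpoints on [0, 1]
wsamples = torch.full((n, ), 1.0 / n, dtype=torch.float64)    # equal weights summing to 1

est = _integrate(f, xsamples, wsamples, fparams=(3.0, ))
# midpoint-rule estimate of the integral of 3 * x**2 on [0, 1]; est is close to 1.0
print(float(est))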
Example 8
            # the contribution from the parallel elements
            gevecsM_par = (-0.5 *
                           torch.einsum("...ae,...ae->...e", grad_evecs, evecs)
                           ).unsqueeze(-2) * evecs  # (*BAM, na, neig)

            gaccumM = gevalsM + gevecsM + gevecsM_par
            grad_mparams = torch.autograd.grad(
                outputs=(mloss, ),
                inputs=mparams,
                grad_outputs=(gaccumM, ),
                create_graph=torch.is_grad_enabled(),
            )

        return (None, None, None, None, None, None, None, *grad_params,
                *grad_mparams)


def custom_exacteig(A, neig, mode, M=None, **options):
    return exacteig(A, neig, mode, M)


# docstring completion
_symeig_methods = {
    "exacteig": exacteig,
    "davidson": davidson,
}
ignore_kwargs = ["M", "mparams"]
symeig.__doc__ = get_methods_docstr(symeig, _symeig_methods, ignore_kwargs)
svd.__doc__ = get_methods_docstr(svd, _symeig_methods)
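
The einsum pattern "...ae,...ae->...e" used above contracts the row index a for each eigenvector column e, i.e. it is the per-column inner product between grad_evecs and evecs. A small standalone check of just that contraction:

import torch

grad_evecs = torch.randn(2, 5, 3)   # (*BAM, na, neig) with na=5, neig=3
evecs = torch.randn(2, 5, 3)

dots = torch.einsum("...ae,...ae->...e", grad_evecs, evecs)   # (*BAM, neig)
assert torch.allclose(dots, (grad_evecs * evecs).sum(dim=-2))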
Example 9

class _BaseInfTransform(object):
    @abstractmethod
    def forward(self, t):
        pass

    @abstractmethod
    def dxdt(self, t):
        pass

    @abstractmethod
    def x2t(self, x):
        pass


class _TanInfTransform(_BaseInfTransform):
    def forward(self, t):
        return torch.tan(t)

    def dxdt(self, t):
        sec = 1. / torch.cos(t)
        return sec * sec

    def x2t(self, x):
        return torch.atan(x)


# docstring completion
quad.__doc__ = get_methods_docstr(quad, [leggauss])
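
_TanInfTransform implements the substitution x = tan(t), which maps the infinite range (-inf, inf) onto the finite interval (-pi/2, pi/2) with dx = sec^2(t) dt, so that the integral of f(x) over the real line equals the integral of f(tan t) * sec^2(t) over (-pi/2, pi/2). A small numerical check of that substitution using NumPy's Gauss-Legendre nodes (not the library's own leggauss rule):

import numpy as np
import torch

tr = _TanInfTransform()

# Gauss-Legendre nodes/weights on [-1, 1], rescaled to t in (-pi/2, pi/2)
nodes, weights = np.polynomial.legendre.leggauss(80)
t = torch.as_tensor(nodes) * (np.pi / 2)
w = torch.as_tensor(weights) * (np.pi / 2)

def f(x):
    return torch.exp(-x * x)   # integrates to sqrt(pi) over the whole real line

val = torch.sum(f(tr.forward(t)) * tr.dxdt(t) * w)
print(float(val), np.sqrt(np.pi))   # the two values should agree closely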