Code Example #1
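These snippets omit their module-level imports; all of them assume roughly the following (a sketch: the helpers get_cache_version, get_compile_args, get_header_dirs, get_librebound_name, get_librebound_path, and the StarryBaseOp base class come from the surrounding package and are not shown on this page):

import numpy as np
import pkg_resources

import theano
import theano.tensor as tt
from theano import gof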
class ContactPointsOp(gof.COp):
    num_inputs = 7
    params_type = gof.ParamsType(tol=theano.scalar.float64)
    __props__ = ("tol",)
    func_file = "./contact.cc"
    func_name = "APPLY_SPECIFIC(contact)"

    def __init__(self, tol=1e-10, **kwargs):
        self.tol = float(tol)
        super(ContactPointsOp, self).__init__(self.func_file, self.func_name)

    def c_code_cache_version(self):
        return get_cache_version()

    def c_compile_args(self, compiler):
        return get_compile_args(compiler)

    def c_headers(self, compiler):
        return ["theano_helpers.h", "contact_points.h"]

    def c_header_dirs(self, compiler):
        return [pkg_resources.resource_filename(__name__, "include")]

    def make_node(self, *args):
        if len(args) != self.num_inputs:
            raise ValueError("expected {0} inputs".format(self.num_inputs))
        # Upcast the output dtype to the common dtype of every input that can
        # be converted to a tensor; non-tensor arguments pass through as-is.
        dtype = theano.config.floatX
        in_args = []
        for a in args:
            try:
                a = tt.as_tensor_variable(a)
            except tt.AsTensorError:
                pass
            else:
                dtype = theano.scalar.upcast(dtype, a.dtype)
            in_args.append(a)
        ndim = in_args[0].ndim
        out_args = [
            tt.TensorType(dtype=dtype, broadcastable=[False] * ndim)(),
            tt.TensorType(dtype=dtype, broadcastable=[False] * ndim)(),
            tt.TensorType(dtype="int32", broadcastable=[False] * ndim)()
        ]
        return gof.Apply(self, in_args, out_args)

    def infer_shape(self, node, shapes):
        return shapes[0], shapes[0], shapes[0]
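The make_node above upcasts the output dtype to the widest dtype among the convertible inputs, starting from theano.config.floatX. A minimal illustration of theano.scalar.upcast:

import theano

print(theano.scalar.upcast("float32", "float64"))  # float64
print(theano.scalar.upcast("float64", "int32"))    # float64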
Code Example #2
class ContactPointsOp(gof.COp):
    num_inputs = 7
    params_type = gof.ParamsType(tol=theano.scalar.float64)
    __props__ = ("tol", )
    func_file = "./contact.cc"
    func_name = "APPLY_SPECIFIC(contact)"

    def __init__(self, tol=1e-10, **kwargs):
        self.tol = float(tol)
        super(ContactPointsOp, self).__init__(self.func_file, self.func_name)

    def c_code_cache_version(self):
        return get_cache_version()

    def c_compile_args(self, compiler):
        return get_compile_args(compiler)

    def c_headers(self, compiler):
        return ["exoplanet/theano_helpers.h", "exoplanet/contact_points.h"]

    def c_header_dirs(self, compiler):
        return get_header_dirs(eigen=False)

    def make_node(self, *args):
        if len(args) != self.num_inputs:
            raise ValueError("expected {0} inputs".format(self.num_inputs))
        in_args = [tt.as_tensor_variable(a) for a in args]
        out_args = [
            in_args[0].type(),
            in_args[0].type(),
            tt.zeros_like(in_args[0], dtype="int32").type(),
        ]
        return gof.Apply(self, in_args, out_args)

    def infer_shape(self, node, shapes):
        return shapes[0], shapes[0], shapes[0]
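Compared to Example #1, this version requires every input to be convertible with tt.as_tensor_variable and reuses the first input's type for the two float outputs; tt.zeros_like(in_args[0], dtype="int32").type() is an idiom for "same shape and broadcastable pattern, but int32". A minimal illustration:

import theano.tensor as tt

x = tt.dmatrix("x")
print(tt.zeros_like(x, dtype="int32").type)  # TensorType(int32, matrix)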
Code Example #3
File: ops.py  Project: adrn/Theano-PyMC
def params_type(self):
    return gof.ParamsType(i=theano.scalar.basic.int64)
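Because params_type is looked up on the op instance, it can be a property rather than a class attribute, which lets the struct fields depend on per-instance state. A hedged sketch of what the surrounding class presumably looks like (the @property decorator is not visible in the extracted snippet):

class MyOp(gof.Op):
    __props__ = ("i",)

    def __init__(self, i):
        self.i = int(i)

    @property
    def params_type(self):
        return gof.ParamsType(i=theano.scalar.basic.int64)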
Code Example #4
class RegularGridOp(gof.COp):
    params_type = gof.ParamsType(
        ndim=theano.scalar.int64,
        nout=theano.scalar.int64,
        check_sorted=theano.scalar.bool,
        bounds_error=theano.scalar.bool,
    )
    __props__ = ("ndim", "nout", "check_sorted", "bounds_error")
    func_file = "./regular_grid.cc"
    func_name = "APPLY_SPECIFIC(regular_grid)"

    def __init__(self,
                 ndim,
                 nout=-1,
                 check_sorted=True,
                 bounds_error=True,
                 **kwargs):
        self.ndim = int(ndim)
        if not 0 < self.ndim <= 5:
            raise ValueError("ndim must be an integer between 1 and 5")
        self.nout = int(nout)
        self.check_sorted = bool(check_sorted)
        self.bounds_error = bool(bounds_error)
        super(RegularGridOp, self).__init__(self.func_file, self.func_name)

    def c_code_cache_version(self):
        return get_cache_version()

    def c_headers(self, compiler):
        return ["theano_helpers.h"]

    def c_header_dirs(self, compiler):
        return [pkg_resources.resource_filename(__name__, "include")
                ] + get_header_dirs()

    def c_compile_args(self, compiler):
        args = get_compile_args(compiler)
        args.append("-DREGULAR_GRID_NDIM={0}".format(self.ndim))
        if self.ndim == 1:
            args.append("-DREGULAR_GRID_NDIM_ORDER=Eigen::ColMajor")
        else:
            args.append("-DREGULAR_GRID_NDIM_ORDER=Eigen::RowMajor")

        if 0 < self.nout <= 16:
            args.append("-DREGULAR_GRID_NOUT={0}".format(self.nout))
        if self.nout == 1:
            args.append("-DREGULAR_GRID_NOUT_ORDER=Eigen::ColMajor")
        else:
            args.append("-DREGULAR_GRID_NOUT_ORDER=Eigen::RowMajor")

        ndim_nout = self.ndim * self.nout
        if 0 < ndim_nout <= 16:
            args.append("-DREGULAR_GRID_NDIM_NOUT={0}".format(ndim_nout))
        if ndim_nout == 1:
            args.append("-DREGULAR_GRID_NDIM_NOUT_ORDER=Eigen::ColMajor")
        else:
            args.append("-DREGULAR_GRID_NDIM_NOUT_ORDER=Eigen::RowMajor")

        for i in range(self.ndim):
            args.append("-DREGULAR_GRID_{0}".format(i))

        return args

    def make_node(self, *args):
        ndim = len(args) - 2
        if ndim != self.ndim:
            raise ValueError("number of input grids does not match ndim")

        dtype = theano.config.floatX
        in_args = []
        for a in args:
            try:
                a = tt.as_tensor_variable(a)
            except tt.AsTensorError:
                pass
            else:
                dtype = theano.scalar.upcast(dtype, a.dtype)
            in_args.append(a)
        out_args = [
            tt.TensorType(dtype=dtype, broadcastable=[False, False])(),
            tt.TensorType(dtype=dtype, broadcastable=[False, False, False])(),
        ]
        return gof.Apply(self, in_args, out_args)

    def grad(self, inputs, gradients):
        xi = inputs[0]
        zi, dz = self(*inputs)
        bz = gradients[0]

        # Chain rule: contract the output gradient with the Jacobian dz that
        # the op returns as its second output; only xi gets a gradient.
        bx = tt.sum(
            tt.reshape(bz, (xi.shape[0], 1, zi.shape[1])) * dz, axis=-1
        )
        return tuple([bx] + [tt.zeros_like(i) for i in inputs[1:]])

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)
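The point of the -D flags in c_compile_args is to bake the grid dimensions into the C++ translation unit, presumably so the Eigen types in regular_grid.cc can use compile-time sizes and storage orders instead of dynamic ones. A minimal, self-contained sketch of the flag scheme (hypothetical values; the package helpers are omitted):

ndim, nout = 3, 2
flags = ["-DREGULAR_GRID_NDIM={0}".format(ndim)]
flags.append("-DREGULAR_GRID_NDIM_ORDER=Eigen::{0}".format(
    "ColMajor" if ndim == 1 else "RowMajor"))
if 0 < nout <= 16:
    flags.append("-DREGULAR_GRID_NOUT={0}".format(nout))
print(flags)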
Code Example #5
class IntegratedLimbDarkOp(StarryBaseOp):

    params_type = gof.ParamsType(
        tol=theano.scalar.float64,
        min_depth=theano.scalar.int32,
        max_depth=theano.scalar.int32,
        Nc=theano.scalar.int32,
        circular=theano.scalar.bool,
    )

    __props__ = ()
    func_file = "./integrated_limbdark.cc"
    func_name = "APPLY_SPECIFIC(integrated_limbdark)"

    def __init__(
        self,
        tol=1e-5,
        min_depth=0,
        max_depth=50,
        Nc=-1,
        circular=False,
        **kwargs
    ):
        self.tol = float(tol)
        self.min_depth = max(0, int(min_depth))
        self.max_depth = max(self.min_depth + 1, int(max_depth))
        self.Nc = int(Nc)
        self.circular = bool(circular)
        super(IntegratedLimbDarkOp, self).__init__()

    def make_node(self, *args):
        if len(args) != 11:
            raise ValueError("wrong number of inputs")
        in_args = [tt.as_tensor_variable(a) for a in args]
        out_args = [
            in_args[1].type(),
            tt.lscalar(),
            tt.TensorType(
                dtype=theano.config.floatX,
                broadcastable=[False] * (in_args[0].ndim + in_args[1].ndim),
            )(),
            in_args[1].type(),
            in_args[1].type(),
            in_args[1].type(),
            in_args[1].type(),
            in_args[1].type(),
        ]
        if self.circular:
            out_args += [
                tt.lscalar(),
                tt.lscalar(),
                tt.lscalar(),
            ]
        else:
            out_args += [
                in_args[1].type(),
                in_args[1].type(),
                in_args[1].type(),
            ]
        return gof.Apply(self, in_args, out_args)

    def infer_shape(self, node, shapes):
        shape = shapes[1]
        out_shapes = [
            shape,
            (),
            list(shapes[0]) + list(shapes[1]),
            shape,
            shape,
            shape,
            shape,
            shape,
        ]
        if self.circular:
            out_shapes += [(), (), ()]
        else:
            out_shapes += [shape, shape, shape]
        return tuple(out_shapes)

    def c_compile_args(self, compiler):
        args = super(IntegratedLimbDarkOp, self).c_compile_args(compiler)
        args.append("-DLIMBDARK_NC={0}".format(self.Nc))
        if self.circular:
            args.append("-DLIMBDARK_CIRCULAR")
        return args

    def grad(self, inputs, gradients):
        c = inputs[0]
        f, neval, dcl, dt, dr, dn, daome2, dcosi, de, dsinw, dcosw = self(
            *inputs
        )
        bf = gradients[0]
        for i, g in enumerate(gradients[1:]):
            if not isinstance(g.type, theano.gradient.DisconnectedType):
                raise ValueError(
                    "can't propagate gradients wrt parameter {0}".format(i + 1)
                )
        bc = tt.sum(
            tt.reshape(bf, (1, bf.size)) * tt.reshape(dcl, (c.size, bf.size)),
            axis=-1,
        )
        results = [
            tt.reshape(bc, inputs[0].shape),
            tt.zeros_like(inputs[1]),
            tt.reshape(bf * dt, inputs[2].shape),
            tt.reshape(bf * dr, inputs[3].shape),
            tt.reshape(bf * dn, inputs[4].shape),
            tt.reshape(bf * daome2, inputs[5].shape),
            tt.zeros_like(inputs[6]),
            tt.reshape(bf * dcosi, inputs[7].shape),
        ]

        if self.circular:
            results += [
                tt.zeros_like(inputs[8]),
                tt.zeros_like(inputs[9]),
                tt.zeros_like(inputs[10]),
            ]
        else:
            results += [
                tt.reshape(bf * de, inputs[8].shape),
                tt.reshape(bf * dsinw, inputs[9].shape),
                tt.reshape(bf * dcosw, inputs[10].shape),
            ]

        return tuple(results)

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)
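Note how the circular flag changes the op's signature: with circular=True the last three outputs become long-integer scalars (placeholders) and grad returns zeros for inputs 8 through 10 (the eccentricity and argument-of-periastron terms), whereas in the eccentric case those outputs carry the partials de, dsinw, and dcosw and the corresponding gradients propagate.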
Code Example #6
class KeplerOp(gof.COp):
    params_type = gof.ParamsType(tol=theano.scalar.float64)
    __props__ = ("tol",)
    func_file = "./solver.cc"
    func_name = "APPLY_SPECIFIC(solver)"

    def __init__(self, tol=1e-12, **kwargs):
        self.tol = float(tol)
        super(KeplerOp, self).__init__(self.func_file, self.func_name)

    def c_code_cache_version(self):
        return get_cache_version()

    def c_headers(self, compiler):
        return ["theano_helpers.h", "solver.h"]

    def c_header_dirs(self, compiler):
        return [pkg_resources.resource_filename(__name__, "include")]

    def make_node(self, mean_anom, eccen):
        in_args = [
            tt.as_tensor_variable(mean_anom),
            tt.as_tensor_variable(eccen)
        ]
        return gof.Apply(self, in_args, [in_args[0].type(), in_args[0].type()])

    def infer_shape(self, node, shapes):
        return shapes[0], shapes[0]

    def grad(self, inputs, gradients):
        M, e = inputs
        E, f = self(M, e)

        bM = tt.zeros_like(M)
        be = tt.zeros_like(M)
        ecosE = e * tt.cos(E)

        if not isinstance(gradients[0].type, theano.gradient.DisconnectedType):
            # Backpropagate E_bar
            bM = gradients[0] / (1 - ecosE)
            be = tt.sin(E) * bM

        if not isinstance(gradients[1].type, theano.gradient.DisconnectedType):
            # Backpropagate f_bar
            sinf2 = tt.sin(0.5 * f)
            cosf2 = tt.cos(0.5 * f)
            tanf2 = sinf2 / cosf2
            e2 = e**2
            ome2 = 1 - e2
            ome = 1 - e
            ope = 1 + e
            cosf22 = cosf2**2
            twoecosf22 = 2 * e * cosf22
            factor = tt.sqrt(ope / ome)
            inner = (twoecosf22 + ome) * tt.as_tensor_variable(gradients[1])

            bM += (factor * (ome * tanf2**2 + ope) * inner * cosf22
                   / (ope * ome2))
            be += -2 * cosf22 * tanf2 / ome2**2 * inner * (ecosE - 2 + e2)

        return [bM, be]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)
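A usage sketch (hedged: assumes KeplerOp is importable from the package that ships solver.cc). The op solves Kepler's equation M = E - e*sin(E) and, per the names in grad above, returns the eccentric anomaly E and the true anomaly f:

import numpy as np
import theano
import theano.tensor as tt

M = tt.dvector("M")  # mean anomaly
e = tt.dvector("e")  # eccentricity
E, f = KeplerOp()(M, e)
solve = theano.function([M, e], [E, f])
E_val, f_val = solve(np.linspace(0.0, 2 * np.pi, 10), np.full(10, 0.3))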
Code Example #7
class IntegratedLimbDarkOp(StarryBaseOp):

    params_type = gof.ParamsType(
        tol=theano.scalar.float64,
        min_depth=theano.scalar.int32,
        max_depth=theano.scalar.int32,
        Nc=theano.scalar.int32,
        include_contacts=theano.scalar.bool,
    )

    __props__ = ()
    func_file = "./integrated_limbdark.cc"
    func_name = "APPLY_SPECIFIC(integrated_limbdark)"

    def __init__(self,
                 tol=1e-6,
                 min_depth=0,
                 max_depth=50,
                 Nc=-1,
                 include_contacts=False,
                 **kwargs):
        self.tol = float(tol)
        self.min_depth = max(0, int(min_depth))
        self.max_depth = max(self.min_depth + 1, int(max_depth))
        self.Nc = int(Nc)
        self.include_contacts = bool(include_contacts)
        super(IntegratedLimbDarkOp, self).__init__()

    def make_node(self, *args):
        if len(args) != 11:
            raise ValueError("wrong number of inputs")
        in_args = [tt.as_tensor_variable(a) for a in args]
        out_args = [
            in_args[1].type(),
            tt.TensorType(dtype=theano.config.floatX,
                          broadcastable=[False] * (in_args[1].ndim + 1))(),
            in_args[1].type(),
            in_args[2].type(),
            in_args[3].type(),
            in_args[4].type(),
            in_args[5].type(),
            in_args[6].type(),
            in_args[7].type(),
            tt.lscalar(),
        ]
        return gof.Apply(self, in_args, out_args)

    def infer_shape(self, node, shapes):
        return (shapes[1], list(shapes[0]) + list(shapes[1]), shapes[1],
                shapes[2], shapes[3], shapes[4], shapes[5], shapes[6],
                shapes[7], ())

    def grad(self, inputs, gradients):
        c, r, x, xt, xtt, y, yt, ytt, z, zt, dt = inputs
        f, dfdcl, dfdr, dfdx, dfdxt, dfdxtt, dfdy, dfdyt, dfdytt, neval \
            = self(*inputs)
        bf = gradients[0]
        for i, g in enumerate(gradients[1:]):
            if not isinstance(g.type, theano.gradient.DisconnectedType):
                raise ValueError(
                    "can't propagate gradients wrt parameter {0}".format(i + 1)
                )
        bc = tt.sum(tt.reshape(bf, (1, bf.size)) *
                    tt.reshape(dfdcl, (c.size, bf.size)),
                    axis=-1)
        br = bf * dfdr
        bx = bf * dfdx
        bxt = bf * dfdxt
        bxtt = bf * dfdxtt
        by = bf * dfdy
        byt = bf * dfdyt
        bytt = bf * dfdytt
        return (bc, br, bx, bxt, bxtt, by, byt, bytt, tt.zeros_like(z),
                tt.zeros_like(zt), tt.zeros_like(dt))

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)
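This is an older variant of the op in Example #5: instead of orbital-element inputs, it takes the Cartesian position series x, y, z together with their first and second time derivatives, and returns the corresponding partials (dfdr, dfdx, dfdxt, ...) as extra outputs that grad then contracts against the output gradient; z, zt, and dt receive zero gradients.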
Code Example #8
class IntegrateOp(gof.COp):
    params_type = gof.ParamsType(
        t=theano.scalar.float64,
        dt=theano.scalar.float64,
        integrator=theano.scalar.int32,
    )
    __props__ = ("t", "dt", "integrator")
    func_file = "./integrate.cc"
    func_name = "APPLY_SPECIFIC(integrate)"
    # Integer codes understood by the C side's integrator selection
    # (values match REBOUND's integrator enum in rebound.h).
    _INTEGRATORS = {
        "ias15": 0,
        "whfast": 1,
        "sei": 2,
        "leapfrog": 4,
        "none": 7,
        "janus": 8,
        "mercurius": 9,
    }

    def __init__(self, t=0.0, dt=0.1, integrator="ias15", **kwargs):
        self.t = float(t)
        self.dt = float(dt)
        self.integrator = self._INTEGRATORS.get(integrator.lower(), None)
        if self.integrator is None:
            raise ValueError("unknown integrator {0}".format(integrator))
        self.integrator = np.int32(self.integrator)

        super(IntegrateOp, self).__init__(self.func_file, self.func_name)

    def c_code_cache_version(self):
        return get_cache_version()

    def c_headers(self, compiler):
        return [
            "theano_helpers.h",
            "rebound.h",
            "vector",
            "array",
            "numeric",
            "algorithm",
        ]

    def c_header_dirs(self, compiler):
        return [pkg_resources.resource_filename(__name__, "")
                ] + get_header_dirs()

    def c_compile_args(self, compiler):
        return get_compile_args(compiler)

    def c_libraries(self, compiler):
        return [get_librebound_name()]

    def c_lib_dirs(self, compiler):
        return [get_librebound_path()]

    def make_node(self, masses, initial_coords, times):
        in_args = [
            tt.as_tensor_variable(masses),
            tt.as_tensor_variable(initial_coords),
            tt.as_tensor_variable(times),
        ]
        dtype = theano.config.floatX
        out_args = [
            tt.TensorType(dtype=dtype, broadcastable=[False] * 3)(),
            tt.TensorType(dtype=dtype, broadcastable=[False] * 5)(),
        ]
        return gof.Apply(self, in_args, out_args)

    def infer_shape(self, node, shapes):
        return (
            list(shapes[2]) + list(shapes[0]) + [6],
            list(shapes[2]) + list(shapes[0]) + [7] + list(shapes[0]) + [6],
        )

    def grad(self, inputs, gradients):
        masses, initial_coords, times = inputs
        coords, jac = self(*inputs)
        bcoords = gradients[0]
        if not isinstance(gradients[1].type, theano.gradient.DisconnectedType):
            raise ValueError(
                "can't propagate gradients with respect to Jacobian")

        # (time, num, 6) * (time, num, 7, num, 6) -> (num, 7)
        grad = tt.sum(bcoords[:, None, None, :, :] * jac, axis=(0, 3, 4))
        return grad[:, 0], grad[:, 1:], tt.zeros_like(times)

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)
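A usage sketch (hedged: assumes IntegrateOp and a compiled librebound are available, and that initial_coords has the hypothetical layout (nbodies, 6) with columns x, y, z, vx, vy, vz). Following infer_shape above, the outputs are the coordinates with shape (ntimes, nbodies, 6) and the Jacobian with shape (ntimes, nbodies, 7, nbodies, 6) with respect to each body's mass and initial state:

import numpy as np
import theano
import theano.tensor as tt

masses = tt.dvector("masses")    # (nbodies,)
coords0 = tt.dmatrix("coords0")  # (nbodies, 6), layout assumed above
times = tt.dvector("times")      # (ntimes,)
coords, jac = IntegrateOp(dt=0.05, integrator="whfast")(masses, coords0, times)
func = theano.function([masses, coords0, times], coords)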