Example #1
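Compares the log-probability of a vector-transformed free RV across parametrizations: logpt evaluated on the transformed value with jacobian=False should match logpt evaluated on the back-transformed value with transformed=False. The dimensionality assertions distinguish univariate from multivariate base distributions via ndim_supp. The close_to helper and tol constant are presumably defined in the surrounding test module.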
    def check_vectortransform_elementwise_logp(self, model):
        x = model.free_RVs[0]
        x_val_transf = x.tag.value_var

        pt = model.recompute_initial_point(0)
        test_array_transf = floatX(
            np.random.randn(*pt[x_val_transf.name].shape))
        transform = x_val_transf.tag.transform
        test_array_untransf = transform.backward(test_array_transf,
                                                 *x.owner.inputs).eval()

        # Create input variable with same dimensionality as untransformed test_array
        x_val_untransf = at.constant(test_array_untransf).type()

        jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs)
        # Original distribution is univariate
        if x.owner.op.ndim_supp == 0:
            assert logpt(x, sum=False).ndim == x.ndim == (jacob_det.ndim + 1)
        # Original distribution is multivariate
        else:
            assert logpt(x, sum=False).ndim == (x.ndim - 1) == jacob_det.ndim

        a = logpt(x, x_val_transf,
                  jacobian=False).eval({x_val_transf: test_array_transf})
        b = logpt(x, x_val_untransf, transformed=False).eval(
            {x_val_untransf: test_array_untransf})
        # Hack to get relative tolerance
        close_to(a, b, np.abs(0.5 * (a + b) * tol))
Example #2
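Regression test for https://github.com/pymc-devs/pymc/issues/3139: a shared Aesara scalar used as a distribution parameter must be honored when logpt is evaluated, so the standard-normal log-density at the (updated) mean stays at log(1/sqrt(2*pi)) ≈ -0.91893853 both before and after set_value.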
    def test_shared_scalar_as_rv_input(self):
        # See https://github.com/pymc-devs/pymc/issues/3139
        with pm.Model() as m:
            shared_var = shared(5.0)
            v = pm.Normal("v", mu=shared_var, size=1)

        np.testing.assert_allclose(
            logpt(v, np.r_[5.0]).eval(),
            -0.91893853,
            rtol=1e-5,
        )

        shared_var.set_value(10.0)

        np.testing.assert_allclose(
            logpt(v, np.r_[10.0]).eval(),
            -0.91893853,
            rtol=1e-5,
        )
Example #3
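Element-wise transform check: the log-probability of the transformed value without the Jacobian term should equal the untransformed log-probability of the back-transformed test array, and the log-Jacobian determinant should have the same dimensionality as the element-wise logpt.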
    def check_transform_elementwise_logp(self, model):
        x = model.free_RVs[0]
        x0 = x.tag.value_var
        assert x.ndim == logpt(x, sum=False).ndim

        pt = model.initial_point
        array = np.random.randn(*pt[x0.name].shape)
        transform = x0.tag.transform
        logp_notrans = logpt(x,
                             transform.backward(array, *x.owner.inputs),
                             transformed=False)

        jacob_det = transform.log_jac_det(aesara.shared(array),
                                          *x.owner.inputs)
        assert logpt(x, sum=False).ndim == jacob_det.ndim

        v1 = logpt(x, array, jacobian=False).eval()
        v2 = logp_notrans.eval()
        close_to(v1, v2, tol)
Example #4
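Helper that computes pointwise log-likelihoods for every observed RV by building the logpt graph, optimizing it, compiling it to JAX with jax_funcify, and mapping it over posterior samples with a doubly nested jax.vmap (one level for chains, one for draws).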
def _get_log_likelihood(model, samples):
    "Compute log-likelihood for all observations"
    data = {}
    for v in model.observed_RVs:
        logp_v = replace_shared_variables([logpt(v)])
        fgraph = FunctionGraph(model.value_vars, logp_v, clone=False)
        optimize_graph(fgraph,
                       include=["fast_run"],
                       exclude=["cxx_only", "BlasOpt"])
        jax_fn = jax_funcify(fgraph)
        result = jax.jit(jax.vmap(jax.vmap(jax_fn)))(*samples)[0]
        data[v.name] = result
    return data
Example #5
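Same comparison as Example #1, but for element-wise transforms, where the log-probability, the value variable, and the log-Jacobian determinant all have the same number of dimensions.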
    def check_transform_elementwise_logp(self, model):
        x = model.free_RVs[0]
        x_val_transf = x.tag.value_var

        pt = model.recompute_initial_point(0)
        test_array_transf = floatX(
            np.random.randn(*pt[x_val_transf.name].shape))
        transform = x_val_transf.tag.transform
        test_array_untransf = transform.backward(test_array_transf,
                                                 *x.owner.inputs).eval()

        # Create input variable with same dimensionality as untransformed test_array
        x_val_untransf = at.constant(test_array_untransf).type()

        jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs)
        assert logpt(x, sum=False).ndim == x.ndim == jacob_det.ndim

        v1 = logpt(x, x_val_transf,
                   jacobian=False).eval({x_val_transf: test_array_transf})
        v2 = logpt(x, x_val_untransf, transformed=False).eval(
            {x_val_untransf: test_array_untransf})
        close_to(v1, v2, tol)
Example #6
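Older variant of the vector-transform check in which the dimensionality assertions are disabled (see the TODO comment); only the final log-probability values are compared, using a relative tolerance derived from the mean of the two results.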
    def check_vectortransform_elementwise_logp(self, model, vect_opt=0):
        x = model.free_RVs[0]
        x0 = x.tag.value_var
        # TODO: For some reason the ndim relations
        # don't hold up here, but the final log-probability
        # values are what we expect.
        # assert (x.ndim - 1) == logpt(x, sum=False).ndim

        pt = model.initial_point
        array = np.random.randn(*pt[x0.name].shape)
        transform = x0.tag.transform
        logp_nojac = logpt(x,
                           transform.backward(array, *x.owner.inputs),
                           transformed=False)

        jacob_det = transform.log_jac_det(aesara.shared(array),
                                          *x.owner.inputs)
        # assert logpt(x).ndim == jacob_det.ndim

        # Hack to get relative tolerance
        a = logpt(x, array.astype(aesara.config.floatX), jacobian=False).eval()
        b = logp_nojac.eval()
        close_to(a, b, np.abs(0.5 * (a + b) * tol))
Example #7
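Extracts per-observation log-likelihoods from a trace: compiles logpt for each observed RV (optionally filtered by the names in self.log_likelihood), evaluates it at every point of every chain, and stacks the results per variable.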
    def _extract_log_likelihood(self, trace):
        """Compute log likelihood of each observation."""
        if self.trace is None:
            return None
        if self.model is None:
            return None

        if self.log_likelihood is True:
            cached = [(var, self.model.fn(logpt(var)))
                      for var in self.model.observed_RVs]
        else:
            cached = [(var, self.model.fn(logpt(var)))
                      for var in self.model.observed_RVs
                      if var.name in self.log_likelihood]
        log_likelihood_dict = _DefaultTrace(len(trace.chains))
        for var, log_like_fun in cached:
            for k, chain in enumerate(trace.chains):
                log_like_chain = [
                    self.log_likelihood_vals_point(point, var, log_like_fun)
                    for point in trace.points([chain])
                ]
                log_likelihood_dict.insert(var.name, np.stack(log_like_chain),
                                           k)
        return log_likelihood_dict.trace_dict