Example #1
    def jet_loglikelihood(self, pars_list, neg=False):

        # Iterate over individual lobe region log-likelihoods and sum
        jet_res = 0
        for i, item in enumerate(self.jet_data):

            rmf = self.jet_data[i].get_rmf()
            erange = np.array(
                rmf.e_min
            )  # convert to a NumPy array so the two range masks can be combined
            bounds = (erange > self.e_min) & (erange < self.e_max)

            model = self.jet_models[i]
            data = self.jet_data[i]
            pars = pars_list[i, :]

            model._set_thawed_pars(pars)
            mean_model = data.eval_model(model)

            # add a tiny offset so log(mean_model) stays finite in bins where the model is zero
            mean_model += np.exp(-20.)
            res = np.nansum(-mean_model[bounds] +
                            data.counts[bounds] * np.log(mean_model[bounds]) -
                            scipy_gammaln(data.counts[bounds] + 1.))

            if not np.isfinite(res):
                res = -logmin
            jet_res += res

        if neg:
            return -jet_res
        else:
            return jet_res
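
Every example in this listing evaluates the same per-bin Poisson log-likelihood, -mu + k*log(mu) - gammaln(k + 1), where scipy_gammaln supplies the log-factorial term log(k!). A minimal stand-alone sketch of that computation (the count and model values below are illustrative only, not taken from any of the projects above):

import numpy as np
from scipy.special import gammaln as scipy_gammaln

def poisson_loglikelihood(counts, mean_model):
    # sum of per-bin Poisson log-probabilities: -mu + k*log(mu) - log(k!)
    counts = np.asarray(counts, dtype=float)
    mean_model = np.asarray(mean_model, dtype=float)
    return np.sum(-mean_model
                  + counts * np.log(mean_model)
                  - scipy_gammaln(counts + 1.0))

# illustrative values only
print(poisson_loglikelihood(counts=[3, 0, 7, 2], mean_model=[2.5, 0.8, 6.0, 3.1]))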
Example #2
    def test_negative_loglikelihood(self):
        t0 = [10.0]
        self.model.amplitude = t0[0]
        mean_model = self.model(self.x)

        loglike = -np.sum(-mean_model + self.y*np.log(mean_model) - scipy_gammaln(self.y+1))

        lpost = PoissonPosterior(self.x, self.y, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        loglike_test = lpost.loglikelihood(t0, neg=True)

        assert np.isclose(loglike, loglike_test)
Example #3
    def testGammalnExecution(self):
        raw = np.random.rand(10, 8, 6)
        a = tensor(raw, chunk_size=3)

        r = gammaln(a)

        result = self.executor.execute_tensor(r, concat=True)[0]
        expected = scipy_gammaln(raw)

        np.testing.assert_array_equal(result, expected)

        # test sparse
        raw = sps.csr_matrix(np.array([0, 1.0, 1.01, np.nan]))
        a = tensor(raw, chunk_size=3)

        r = gammaln(a)

        result = self.executor.execute_tensor(r, concat=True)[0]

        data = scipy_gammaln(raw.data)
        expected = sps.csr_matrix((data, raw.indices, raw.indptr), raw.shape)

        np.testing.assert_array_equal(result.toarray(), expected.toarray())
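
Both branches of the test above reduce to element-wise scipy.special.gammaln; the sparse case applies it to the stored .data and rebuilds the CSR matrix. The reason gammaln keeps appearing next to Poisson likelihoods in the other examples is the identity gammaln(n + 1) = log(n!), which a short stand-alone check confirms (the range of n is arbitrary):

import math
import numpy as np
from scipy.special import gammaln as scipy_gammaln

n = np.arange(0, 10)

# gammaln(n + 1) equals log(n!) for non-negative integers n
log_factorials = np.log([math.factorial(k) for k in n])
assert np.allclose(scipy_gammaln(n + 1), log_factorials)

# it also matches the standard library's log-gamma
assert np.allclose(scipy_gammaln(n + 1), [math.lgamma(k + 1) for k in n])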
Example #4
    def test_negative_posterior(self):
        t0 = [10.0]
        self.model.amplitude = t0[0]
        mean_model = self.model(self.x)

        lpost = PoissonPosterior(self.x, self.y, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        post_test = lpost(t0, neg=True)

        loglike = np.sum(-mean_model + self.y*np.log(mean_model) - scipy_gammaln(self.y+1))
        logprior = np.log(scipy.stats.norm(self.countrate, self.countrate).pdf(t0))

        post = -loglike - logprior

        assert np.isclose(post_test, post, atol=1.e-10)
Example #5
def test_gammaln():
    raw = np.random.rand(10, 8, 5)
    t = tensor(raw, chunk_size=3)

    r = gammaln(t)
    expect = scipy_gammaln(raw)

    assert r.shape == raw.shape
    assert r.dtype == expect.dtype

    t, r = tile(t, r)

    assert r.nsplits == t.nsplits
    for c in r.chunks:
        assert isinstance(c.op, TensorGammaln)
        assert c.index == c.inputs[0].index
        assert c.shape == c.inputs[0].shape
Example #6
File: test_special.py  Project: wdkwyf/mars
    def testGammaln(self):
        raw = np.random.rand(10, 8, 5)
        t = tensor(raw, chunk_size=3)

        r = gammaln(t)
        expect = scipy_gammaln(raw)

        self.assertEqual(r.shape, raw.shape)
        self.assertEqual(r.dtype, expect.dtype)

        r.tiles()

        self.assertEqual(r.nsplits, t.nsplits)
        for c in r.chunks:
            self.assertIsInstance(c.op, TensorGammaln)
            self.assertEqual(c.index, c.inputs[0].index)
            self.assertEqual(c.shape, c.inputs[0].shape)
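
The two Mars tests above verify that gammaln applied chunk by chunk agrees with gammaln applied to the whole array at once. The property they rely on, that an element-wise function commutes with chunking, can be checked directly with NumPy and SciPy (chunk count and array shape below are illustrative):

import numpy as np
from scipy.special import gammaln as scipy_gammaln

raw = np.random.rand(10, 8, 5)

# apply gammaln per chunk along the first axis, then reassemble
chunks = np.array_split(raw, 4, axis=0)
chunked = np.concatenate([scipy_gammaln(c) for c in chunks], axis=0)

# element-wise functions commute with chunking, so the results match exactly
np.testing.assert_array_equal(chunked, scipy_gammaln(raw))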
Example #7
    def evaluate(self, pars, neg=False):
        """
        Evaluate the log-likelihood for a given set of parameters.

        Parameters
        ----------
        pars : numpy.ndarray
            An array of parameters at which to evaluate the model
            and subsequently the log-likelihood. Note that the
            length of this array must match the number of free
            parameters in ``model``, i.e. ``npar``.

        neg : bool, optional, default ``False``
            If ``True``, return the *negative* log-likelihood, i.e.
            ``-loglike``, rather than ``loglike``. This is useful e.g.
            for optimization routines, which generally minimize
            functions.

        Returns
        -------
        loglike : float
            The log(likelihood) value for the data and model.

        """
        if np.size(pars) != self.npar:
            raise IncorrectParameterError("Input parameters must" +
                                          " match model parameters!")

        _fitter_to_model_params(self.model, pars)

        mean_model = self.model(self.x)

        loglike = np.sum(-mean_model + self.y*np.log(mean_model)
                         - scipy_gammaln(self.y + 1.))

        if not np.isfinite(loglike):
            loglike = logmin

        if neg:
            return -loglike
        else:
            return loglike
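
As the docstring notes, neg=True lets the log-likelihood be passed straight to a minimizer. A hedged sketch of that pattern, using a minimal stand-in class with the same evaluate(pars, neg=False) interface (the class, data, and starting value here are all illustrative, not part of the original code):

import numpy as np
from scipy.optimize import minimize
from scipy.special import gammaln as scipy_gammaln

class ConstantRateLogLikelihood:
    # toy stand-in exposing the same evaluate(pars, neg=False) interface as above
    def __init__(self, y):
        self.y = np.asarray(y, dtype=float)
        self.npar = 1

    def evaluate(self, pars, neg=False):
        mean_model = np.full_like(self.y, pars[0])  # constant-rate model
        loglike = np.sum(-mean_model + self.y * np.log(mean_model)
                         - scipy_gammaln(self.y + 1.))
        return -loglike if neg else loglike

loglike = ConstantRateLogLikelihood(y=[3, 5, 4, 6, 2])  # toy counts
res = minimize(lambda p: loglike.evaluate(p, neg=True), x0=[10.0],
               method="L-BFGS-B", bounds=[(1e-6, None)])
print(res.x, -res.fun)  # maximum-likelihood rate and the log-likelihood there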
Example #8
    def evaluate(self, pars, neg=False):

        if np.size(pars) != self.npar:
            raise IncorrectParameterError("Input parameters must" +
                                          " match model parameters!")

        _fitter_to_model_params(self.model, pars)

        mean_model = self.model(self.x)

        loglike = np.sum(-mean_model + self.y*np.log(mean_model)
                         - scipy_gammaln(self.y + 1.))

        if not np.isfinite(loglike):
            loglike = logmin

        if neg:
            return -loglike
        else:
            return loglike