Example #1
    def _evaluate(self):
        """

        calculate the best-fit or mean value of the new function or
        quantity

        :return:
        """
        # if there are independent variables
        if self._independent_variable_range:

            variates = []

            # loop over the independent variables
            n_iterations = np.prod(self._out_shape)

            with progress_bar(n_iterations, title="Propagating errors") as p:

                with use_astromodels_memoization(False):

                    for variables in itertools.product(*self._independent_variable_range):
                        variates.append(self._propagated_function(*variables))

                        p.increase()


        # otherwise just evaluate
        else:

            variates = self._propagated_function()

        # create a variates container

        self._propagated_variates = VariatesContainer(variates, self._out_shape, self._cl, self._transform, self._equal_tailed)
Example #2
    def _evaluate(self):
        """

        calculate the best-fit or mean value of the new function or
        quantity

        :return:
        """
        # if there are independent variables
        if self._independent_variable_range:

            variates = []

            # loop over the independent variables
            n_iterations = np.prod(self._out_shape)

            with use_astromodels_memoization(False):

                variables = list(
                    itertools.product(*self._independent_variable_range))

                if len(variables) > 1:

                    for v in tqdm(variables, desc="Propagating errors"):

                        variates.append(self._propagated_function(*v))

                else:

                    for v in variables:

                        variates.append(self._propagated_function(*v))

        # otherwise just evaluate
        else:

            variates = self._propagated_function()

        # create a variates container

        self._propagated_variates = VariatesContainer(variates,
                                                      self._out_shape,
                                                      self._cl,
                                                      self._transform,
                                                      self._equal_tailed)
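All of the examples on this page share one pattern: many model evaluations are wrapped in `use_astromodels_memoization(False)` so that astromodels does not cache each distinct parameter set (every call uses new parameters, so a cache would only cost RAM without ever producing a hit). A minimal sketch of the pattern, assuming `use_astromodels_memoization` is importable from the astromodels top level and `my_function` is a hypothetical propagated function:

    import itertools

    from astromodels import use_astromodels_memoization

    def evaluate_grid(my_function, independent_variable_ranges):
        # Every point of the Cartesian product is a new parameter set, so
        # memoization would be 100% cache misses; disable it for the loop
        variates = []

        with use_astromodels_memoization(False):

            for point in itertools.product(*independent_variable_ranges):
                variates.append(my_function(*point))

        return variates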
Example #3
    def fit(self, *args, **kwargs):

        self.likelihood_model.test.spectrum.main.shape.reset_tracking()
        self.likelihood_model.test.spectrum.main.shape.start_tracking()

        with use_astromodels_memoization(False):

            try:

                super(JointLikelihoodWrap, self).fit(*args, **kwargs)

            finally:

                self.likelihood_model.test.spectrum.main.shape.stop_tracking()
Example #4
    def sample(self, *args, **kwargs):

        self.likelihood_model.test.spectrum.main.shape.reset_tracking()
        self.likelihood_model.test.spectrum.main.shape.start_tracking()

        with use_astromodels_memoization(False):

            try:

                super(BayesianAnalysisWrap, self).sample(*args, **kwargs)

            finally:

                self.likelihood_model.test.spectrum.main.shape.stop_tracking()
Example #7
    def sample(self, quiet=False):
        """
        sample the posterior of the model with the selected algorithm

        If no algorithm has been set, then the configured default algorithm
        with default parameters will be run
        
        :param quiet: if True, then no output is displayed
        :type quiet: 
        :returns: 

        """
        if self._sampler is None:

            # assuming the default sampler
            self.set_sampler()

        with use_astromodels_memoization(False):

            self._sampler.sample(quiet=quiet)
Example #9
    def sample(self, quiet=False):
        """
        sample using the dynesty nested sampling method
        :rtype: 

        :returns: 

        """
        if not self._is_setup:

            log.info("You forgot to set up the sampler!")
            return

        loud = not quiet

        self._update_free_parameters()

        param_names = list(self._free_parameters.keys())

        ndim = len(param_names)

        self._kwargs["ndim"] = ndim

        loglike, dynesty_prior = self._construct_unitcube_posterior(return_copy=True)

        # check if we are going to do things in parallel

        if threeML_config["parallel"]["use_parallel"]:

            c = ParallelClient()
            view = c[:]

            self._kwargs["pool"] = view
            self._kwargs["queue_size"] = len(view)

        sampler = DynamicNestedSampler(loglike, dynesty_prior, **self._kwargs)

        self._sampler_kwargs["print_progress"] = loud

        with use_astromodels_memoization(False):
            log.debug("Start dynestsy run")
            sampler.run_nested(**self._sampler_kwargs)
            log.debug("Dynesty run done")

        self._sampler = sampler

        results = self._sampler.results

        # draw posterior samples
        weights = np.exp(results["logwt"] - results["logz"][-1])

        SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))

        rstate = np.random

        if abs(np.sum(weights) - 1.0) > SQRTEPS:  # same tol as in np.random.choice.
            raise ValueError("Weights do not sum to 1.")

        # Make N subdivisions and choose positions with a consistent random offset.
        nsamples = len(weights)
        positions = (rstate.random() + np.arange(nsamples)) / nsamples

        # Resample the data.
        idx = np.zeros(nsamples, dtype=int)
        cumulative_sum = np.cumsum(weights)
        i, j = 0, 0
        while i < nsamples:
            if positions[i] < cumulative_sum[j]:
                idx[i] = j
                i += 1
            else:
                j += 1

        samples_dynesty = results["samples"][idx]

        self._raw_samples = samples_dynesty

        # now do the same for the log likes

        logl_dynesty = results["logl"][idx]

        self._log_like_values = logl_dynesty

        self._log_probability_values = self._log_like_values + np.array(
            [self._log_prior(samples) for samples in self._raw_samples]
        )

        self._marginal_likelihood = self._sampler.results["logz"][-1] / np.log(10.0)

        self._build_results()

        # Display results
        if loud:
            self._results.display()

        return self.samples
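The resampling block above converts dynesty's weighted posterior samples into an equally weighted set by systematic resampling: N evenly spaced positions sharing one random offset are walked through the cumulative weight distribution, so each sample is drawn a number of times proportional to its weight. The same logic as a standalone helper (a sketch for clarity, not part of the 3ML API):

    import numpy as np

    def systematic_resample(weights, rng=None):
        # Return indices that turn weighted samples into equally weighted ones
        rng = np.random.default_rng() if rng is None else rng

        weights = np.asarray(weights)
        nsamples = len(weights)

        # N evenly spaced positions sharing a single random offset in [0, 1/N)
        positions = (rng.random() + np.arange(nsamples)) / nsamples

        cumulative_sum = np.cumsum(weights)
        cumulative_sum[-1] = 1.0  # guard against floating-point shortfall

        idx = np.zeros(nsamples, dtype=int)
        i, j = 0, 0
        while i < nsamples:
            if positions[i] < cumulative_sum[j]:
                idx[i] = j
                i += 1
            else:
                j += 1

        return idx

With this helper, the example's resampling step would read `samples_dynesty = results["samples"][systematic_resample(weights)]`.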
Example #10
    def sample(self, quiet=False):

        if not self._is_setup:

            log.info("You forgot to set up the sampler!")
            return

        loud = not quiet

        self._update_free_parameters()

        n_dim = len(list(self._free_parameters.keys()))

        # Get starting point

        p0 = emcee.State(self._get_starting_points(self._n_walkers))

        # Deactivate memoization in astromodels, which is useless in this case since we will never use
        # the same set of parameters twice
        with use_astromodels_memoization(False):

            if threeML_config["parallel"]["use_parallel"]:

                c = ParallelClient()
                view = c[:]

                sampler = emcee.EnsembleSampler(self._n_walkers,
                                                n_dim,
                                                self.get_posterior,
                                                pool=view)

            else:

                sampler = emcee.EnsembleSampler(self._n_walkers, n_dim,
                                                self.get_posterior)

            # If a seed is provided, set the random number seed
            if self._seed is not None:

                sampler._random.seed(self._seed)

            log.debug("Start emcee run")
            # Sample the burn-in

            if threeML_config.interface.progress_bars:

                if is_inside_notebook():

                    progress = "notebook"

                else:
                    progress = True

            else:

                progress = False

            pos, prob, state = sampler.run_mcmc(initial_state=p0,
                                                nsteps=self._n_burn_in,
                                                progress=progress)
            log.debug("Emcee run done")

            # Reset sampler

            sampler.reset()

            state = emcee.State(pos, prob, random_state=state)

            # Run the true sampling

            _ = sampler.run_mcmc(initial_state=state,
                                 nsteps=self._n_iterations,
                                 progress=progress)

        acc = np.mean(sampler.acceptance_fraction)

        log.info(f"Mean acceptance fraction: {acc}")

        self._sampler = sampler
        self._raw_samples = sampler.get_chain(flat=True)

        # Compute the corresponding values of the likelihood

        # First we need the prior
        log_prior = [self._log_prior(x) for x in self._raw_samples]

        # Now we get the log posterior and we remove the log prior

        self._log_like_values = sampler.get_log_prob(flat=True) - log_prior

        # we also want to store the log probability

        self._log_probability_values = sampler.get_log_prob(flat=True)

        self._marginal_likelihood = None

        self._build_samples_dictionary()

        self._build_results()

        # Display results
        if loud:
            self._results.display()

        return self.samples
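emcee stores only the log posterior for each sample, so the example recovers the log likelihood by subtracting the log prior evaluated at every point, using log P(theta|d) = log L(theta) + log pi(theta) up to a constant. A small sketch of that decomposition, with `log_prior_fn` as a hypothetical stand-in for the model's log prior:

    import numpy as np

    def split_log_posterior(raw_samples, log_prob_values, log_prior_fn):
        # Evaluate the log prior at every sample drawn by the sampler
        log_prior = np.array([log_prior_fn(x) for x in raw_samples])

        # log likelihood = log posterior - log prior (constant evidence dropped)
        log_like = np.asarray(log_prob_values) - log_prior

        return log_like, log_prior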
Example #11
    def sample(self, quiet=False):

        if not self._is_setup:

            log.info("You forgot to set up the sampler!")
            return

        loud = not quiet

        self._update_free_parameters()

        n_dim = len(list(self._free_parameters.keys()))

        # Get starting point

        p0 = self._get_starting_points(self._n_walkers)

        # Deactivate memoization in astromodels, which is useless in this case since we will never use
        # the same set of parameters twice
        with use_astromodels_memoization(False):

            if using_mpi:

                with MPIPoolExecutor() as executor:

                    sampler = zeus.sampler(
                        logprob_fn=self.get_posterior,
                        nwalkers=self._n_walkers,
                        ndim=n_dim,
                        pool=executor,
                    )

                    # if self._seed is not None:

                    #     sampler._random.seed(self._seed)

                    # Run the true sampling
                    log.debug("Start zeus run")
                    _ = sampler.run(
                        p0,
                        self._n_iterations + self._n_burn_in,
                        progress=loud,
                    )
                    log.debug("Zeus run done")

            elif threeML_config["parallel"]["use_parallel"]:

                c = ParallelClient()
                view = c[:]

                sampler = zeus.sampler(
                    logprob_fn=self.get_posterior,
                    nwalkers=self._n_walkers,
                    ndim=n_dim,
                    pool=view,
                )

            else:

                sampler = zeus.sampler(logprob_fn=self.get_posterior,
                                       nwalkers=self._n_walkers,
                                       ndim=n_dim)

            # If a seed is provided, set the random number seed
            # if self._seed is not None:

            #     sampler._random.seed(self._seed)

            # Sample the burn-in
            if not using_mpi:
                log.debug("Start zeus run")
                _ = sampler.run(p0,
                                self._n_iterations + self._n_burn_in,
                                progress=loud)
                log.debug("Zeus run done")

        self._sampler = sampler
        self._raw_samples = sampler.get_chain(flat=True,
                                              discard=self._n_burn_in)

        # Compute the corresponding values of the likelihood

        # First we need the prior
        log_prior = np.array([self._log_prior(x) for x in self._raw_samples])
        self._log_probability_values = sampler.get_log_prob(
            flat=True, discard=self._n_burn_in)

        # np.array(
        #     [self.get_posterior(x) for x in self._raw_samples]
        # )

        # Now we get the log posterior and we remove the log prior

        self._log_like_values = self._log_probability_values - log_prior

        # we also want to store the log probability

        self._marginal_likelihood = None

        self._build_samples_dictionary()

        self._build_results()

        # Display results
        if loud:
            print(self._sampler.summary)
            self._results.display()

        return self.samples
Example #12
    def sample(self, quiet=False):

        assert self._is_setup, "You forgot to set up the sampler!"

        loud = not quiet

        self._update_free_parameters()

        n_dim = len(list(self._free_parameters.keys()))

        # Get starting point

        p0 = self._get_starting_points(1)[0]

        # Deactivate memoization in astromodels, which is useless in this case since we will never use
        # the same set of parameters twice
        with use_astromodels_memoization(False):

            if threeML_config["parallel"]["use-parallel"]:

                c = ParallelClient()
                view = c[:]
                pool = view

            else:

                pool = None

            # NOTE: the pool is not used yet; the NUTS run below is serial

            def grad(theta):

                return numerical_grad(theta, self.get_posterior)

            nuts_fn = nuts.NutsSampler_fn_wrapper(self.get_posterior, grad)

            samples, lnprob, epsilon = nuts.nuts6(nuts_fn, self._n_iterations,
                                                  self._n_adapt, p0)

        self._sampler = None
        self._raw_samples = samples

        self._test = lnprob

        # Compute the corresponding values of the likelihood
        
        # First we need the prior
        log_prior = np.array([self._log_prior(x) for x in self._raw_samples])

        # Now we get the log posterior and we remove the log prior

        self._log_like_values = np.array([self._log_like(x) for x in self._raw_samples])

        # we also want to store the log probability

        self._log_probability_values = log_prior + self._log_like_values

        self._marginal_likelihood = None

        self._build_samples_dictionary()

        self._build_results()

        # Display results
        if loud:
            self._results.display()

        return self.samples
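The NUTS example needs the gradient of the log posterior and calls a `numerical_grad` helper that is not shown. A central-difference sketch of what such a helper could look like (a hypothetical implementation, not the one shipped with 3ML):

    import numpy as np

    def numerical_grad(theta, log_posterior, eps=1e-5):
        # Central-difference gradient of a scalar function of a vector
        theta = np.asarray(theta, dtype=float)
        grad = np.zeros_like(theta)

        for k in range(theta.size):
            step = np.zeros_like(theta)
            step[k] = eps

            # (f(theta + h) - f(theta - h)) / (2 h) along coordinate k
            grad[k] = (log_posterior(theta + step)
                       - log_posterior(theta - step)) / (2.0 * eps)

        return grad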
Example #13
    def sample(self, quiet=False):
        """
        sample using the MultiNest numerical integration method

        :returns: 
        :rtype: 

        """
        if not self._is_setup:

            print("You forgot to setup the sampler!")
            return

        assert (
            has_pymultinest
        ), "You don't have pymultinest installed, so you cannot run the Multinest sampler"

        loud = not quiet

        self._update_free_parameters()

        n_dim = len(list(self._free_parameters.keys()))

        # MULTINEST uses a different call signature for
        # sampling, so we construct callbacks
        loglike, multinest_prior = self._construct_unitcube_posterior()

        # We need to check if the MCMC
        # chains will have a place on
        # the disk to write and if not,
        # create one

        chain_name = self._kwargs.pop("chain_name")

        mcmc_chains_out_dir = ""
        tmp = chain_name.split("/")
        for s in tmp[:-1]:
            mcmc_chains_out_dir += s + "/"

        if using_mpi:

            # if we are running in parallel and this is not the
            # first engine, then we want to wait and let everything finish

            if rank != 0:

                # let these guys take a break
                time.sleep(1)

            else:

                # create mcmc chains directory only on first engine

                if not os.path.exists(mcmc_chains_out_dir):
                    os.makedirs(mcmc_chains_out_dir)

        else:

            if not os.path.exists(mcmc_chains_out_dir):
                os.makedirs(mcmc_chains_out_dir)

        # MultiNest must be run in parallel via an external method;
        # see the demo in the examples folder!

        if threeML_config["parallel"]["use-parallel"]:

            raise RuntimeError(
                "If you want to run multinest in parallel you need to use an ad-hoc method"
            )

        else:

            with use_astromodels_memoization(False):

                sampler = pymultinest.run(loglike, multinest_prior, n_dim,
                                          n_dim, **self._kwargs)

        # Use PyMULTINEST analyzer to gather parameter info

        process_fit = False

        if using_mpi:

            # if we are running in parallel and this is not the
            # first engine, then we want to wait and let everything finish

            if rank != 0:

                # let these guys take a break
                time.sleep(5)

                # these engines do not need to read
                process_fit = False

            else:

                # wait for a moment to allow it all to turn off
                time.sleep(5)

                process_fit = True

        else:

            process_fit = True

        if process_fit:

            multinest_analyzer = pymultinest.analyse.Analyzer(
                n_params=n_dim, outputfiles_basename=chain_name)

            # Get the log-likelihood values from the chain
            self._log_like_values = multinest_analyzer.get_equal_weighted_posterior(
            )[:, -1]

            self._sampler = sampler

            self._raw_samples = multinest_analyzer.get_equal_weighted_posterior(
            )[:, :-1]

            # now get the log probability

            self._log_probability_values = self._log_like_values + np.array(
                [self._log_prior(samples) for samples in self._raw_samples])

            self._build_samples_dictionary()

            self._marginal_likelihood = multinest_analyzer.get_stats(
            )["global evidence"] / np.log(10.0)

            self._build_results()

            # Display results
            if loud:
                self._results.display()

            return self.samples
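The `chain_name.split("/")` loop above rebuilds the parent directory of the chain basename by hand. The standard library can do the same thing, and `exist_ok` also avoids the race between the existence check and `makedirs` when several MPI engines start at once (a sketch, not necessarily how 3ML does it):

    import os

    def ensure_chain_directory(chain_name):
        # chain_name is a basename such as "chains/fit-"; the directory part
        # may be empty if chains are written to the current working directory
        mcmc_chains_out_dir = os.path.dirname(chain_name)

        if mcmc_chains_out_dir:
            # exist_ok avoids the check-then-create race between engines
            os.makedirs(mcmc_chains_out_dir, exist_ok=True)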
Example #14
    def sample(self, quiet=False):
        """
        sample using the UltraNest numerical integration method
        :rtype: 

        :returns: 

        """
        if not self._is_setup:

            print("You forgot to setup the sampler!")
            return

        loud = not quiet

        self._update_free_parameters()

        param_names = list(self._free_parameters.keys())

        n_dim = len(param_names)

        loglike, ultranest_prior = self._construct_unitcube_posterior(
            return_copy=True)

        # We need to check if the MCMC
        # chains will have a place on
        # the disk to write and if not,
        # create one

        chain_name = self._kwargs.pop("chain_name")
        if chain_name is not None:
            mcmc_chains_out_dir = ""
            tmp = chain_name.split("/")
            for s in tmp[:-1]:
                mcmc_chains_out_dir += s + "/"

            if using_mpi:

                # if we are running in parallel and this is not the
                # first engine, then we want to wait and let everything finish

                if rank != 0:

                    # let these guys take a break
                    time.sleep(1)

                else:

                    # create mcmc chains directory only on first engine

                    if not os.path.exists(mcmc_chains_out_dir):
                        os.makedirs(mcmc_chains_out_dir)

            else:

                if not os.path.exists(mcmc_chains_out_dir):
                    os.makedirs(mcmc_chains_out_dir)

        # UltraNest must be run in parallel via an external method;
        # see the demo in the examples folder!

        if threeML_config["parallel"]["use-parallel"]:

            raise RuntimeError(
                "If you want to run ultranest in parallel you need to use an ad-hoc method"
            )

        else:

            sampler = ultranest.ReactiveNestedSampler(
                param_names,
                loglike,
                transform=ultranest_prior,
                log_dir=chain_name,
                vectorized=False,
                wrapped_params=self._wrapped_params,
            )

            with use_astromodels_memoization(False):
                sampler.run(show_status=loud, **self._kwargs)

        process_fit = False

        if using_mpi:

            # if we are running in parallel and this is not the
            # first engine, then we want to wait and let everything finish

            if rank != 0:

                # let these guys take a break
                time.sleep(5)

                # these engines do not need to read
                process_fit = False

            else:

                # wait for a moment to allow it all to turn off
                time.sleep(5)

                process_fit = True

        else:

            process_fit = True

        if process_fit:

            results = sampler.results

            self._sampler = sampler

            ws = results["weighted_samples"]

            weights = ws["weights"]

            # Get the log-likelihood values from the chain

            SQRTEPS = float(np.finfo(np.float64).eps) ** 0.5

            if abs(np.sum(weights) - 1.0) > SQRTEPS:  # same tol as in np.random.choice.
                raise ValueError("weights do not sum to 1")

            rstate = np.random

            N = len(weights)

            # make N subdivisions, and choose positions with a consistent random offset
            positions = (rstate.random() + np.arange(N)) / N

            idx = np.zeros(N, dtype=int)
            cumulative_sum = np.cumsum(weights)
            i, j = 0, 0
            while i < N:
                if positions[i] < cumulative_sum[j]:
                    idx[i] = j
                    i += 1
                else:
                    j += 1

            self._log_like_values = ws["logl"][idx]

            self._raw_samples = ws["points"][idx]

            # now get the log probability

            self._log_probability_values = self._log_like_values + np.array(
                [self._log_prior(samples) for samples in self._raw_samples])

            self._build_samples_dictionary()

            self._marginal_likelihood = sampler.results["logz"] / np.log(10.0)

            self._build_results()

            # Display results
            if loud:
                self._results.display()

            return self.samples
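Several of these samplers synchronize MPI engines with fixed `time.sleep` calls so that only rank 0 touches the filesystem or reads the fit results. An explicit barrier expresses the same intent deterministically; a hedged sketch using mpi4py (an assumption on my part — the examples only expose the `using_mpi` and `rank` globals):

    from mpi4py import MPI

    def run_on_rank_zero(task):
        # Run task() on rank 0 while the other ranks wait at the barrier
        comm = MPI.COMM_WORLD

        result = None

        if comm.Get_rank() == 0:
            result = task()

        # No rank proceeds until every rank, including rank 0, arrives here
        comm.Barrier()

        return result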
Example #15
    def sample(self, quiet=False):
        """
        sample using the autoemcee affine-invariant ensemble method
        :rtype: 

        :returns: 

        """
        if not self._is_setup:

            log.error("You forgot to set up the sampler!")

            raise RuntimeError()

        loud = not quiet

        self._update_free_parameters()

        param_names = list(self._free_parameters.keys())

        n_dim = len(param_names)

        loglike, autoemcee_prior = self._construct_unitcube_posterior(
            return_copy=True)

        if threeML_config["parallel"]["use_parallel"]:

            log.error(
                "If you want to run autoemcee in parallel you need to use an ad-hoc method")

            raise RuntimeError()

        else:

            sampler = autoemcee.ReactiveAffineInvariantSampler(
                param_names,
                loglike,
                transform=autoemcee_prior,
                vectorized=False,
                sampler="goodman-weare"
            )

            with use_astromodels_memoization(False):
                log.debug("Start autoemcee run")
                sampler.run(self._num_global_samples,
                            self._num_chains,
                            self._num_walkers,
                            self._max_ncalls,
                            self._max_improvement_loops,
                            self._num_initial_steps,
                            self._min_autocorr_times,
                            progress=threeML_config.interface.progress_bars)
                log.debug("autoemcee run done")

        process_fit = False

        if using_mpi:

            # if we are running in parallel and this is not the
            # first engine, then we want to wait and let everything finish

            if rank != 0:

                # let these guys take a break
                time.sleep(1)

                # these engines do not need to read
                process_fit = False

            else:

                # wait for a moment to allow it all to turn off
                time.sleep(1)

                process_fit = True

        else:

            process_fit = True

        if process_fit:

            results = sampler.results

            self._sampler = sampler

            self._raw_samples = np.concatenate(
                [sampler.transform(s.get_chain(flat=True)) for s in self._sampler.samplers])

            # First we need the prior
            log_prior = [self._log_prior(x) for x in self._raw_samples]

            self._log_probability_values = np.concatenate(
                [s.get_log_prob(flat=True) for s in self._sampler.samplers])

            self._log_like_values = self._log_probability_values - log_prior

            self._marginal_likelihood = None
            
            self._build_samples_dictionary()

            self._build_results()

            # Display results
            if loud:
                self._results.display()

            return self.samples
Example #16
    def sample(self, n_walkers, burn_in, n_samples, quiet=False, seed=None):
        """
        Sample the posterior with Goodman & Weare's affine-invariant Markov chain Monte Carlo
        :param n_walkers:
        :param burn_in:
        :param n_samples:
        :param quiet: if True, do not print results
        :param seed: if provided, it is used to seed the random numbers generator before the MCMC

        :return: MCMC samples

        """

        self._update_free_parameters()

        n_dim = len(self._free_parameters.keys())

        # Get starting point

        p0 = self._get_starting_points(n_walkers)

        sampling_procedure = sample_with_progress

        # Deactivate memoization in astromodels, which is useless in this case since we will never use
        # the same set of parameters twice
        with use_astromodels_memoization(False):

            if threeML_config['parallel']['use-parallel']:

                c = ParallelClient()
                view = c[:]

                sampler = emcee.EnsembleSampler(n_walkers,
                                                n_dim,
                                                self.get_posterior,
                                                pool=view)

                # Sampling with progress in parallel is super-slow, so let's
                # use the non-interactive one
                sampling_procedure = sample_without_progress

            else:

                sampler = emcee.EnsembleSampler(n_walkers, n_dim,
                                                self.get_posterior)

            # If a seed is provided, set the random number seed
            if seed is not None:

                sampler._random.seed(seed)

            # Sample the burn-in
            pos, prob, state = sampling_procedure(title="Burn-in",
                                                  p0=p0,
                                                  sampler=sampler,
                                                  n_samples=burn_in)

            # Reset sampler

            sampler.reset()

            # Run the true sampling

            _ = sampling_procedure(title="Sampling",
                                   p0=pos,
                                   sampler=sampler,
                                   n_samples=n_samples,
                                   rstate0=state)

        acc = np.mean(sampler.acceptance_fraction)

        print("\nMean acceptance fraction: %s\n" % acc)

        self._sampler = sampler
        self._raw_samples = sampler.flatchain

        # Compute the corresponding values of the likelihood

        # First we need the prior
        log_prior = np.array([self._log_prior(x) for x in self._raw_samples])

        # Now we get the log posterior and we remove the log prior

        self._log_like_values = sampler.flatlnprobability - log_prior

        # we also want to store the log probability

        self._log_probability_values = sampler.flatlnprobability

        self._marginal_likelihood = None

        self._build_samples_dictionary()

        self._build_results()

        # Display results
        if not quiet:
            self._results.display()

        return self.samples
Example #18
    def get_source_map(self, energy_bin_id, tag=None):

        # We do not use the memoization in astromodels because we are doing caching by ourselves,
        # so the astromodels memoization would turn into 100% cache miss and use a lot of RAM for nothing,
        # given that we are evaluating the function on many points and many energies
        with use_astromodels_memoization(False):

            # If we need to recompute the flux, let's do it
            if self._recompute_flux:

                # print("recomputing %s" % self._name)

                # Recompute the fluxes for the pixels that are covered by this extended source
                self._all_fluxes[self._active_flat_sky_mask, :] = self._source(
                    self._flat_sky_projection.ras[self._active_flat_sky_mask],
                    self._flat_sky_projection.decs[self._active_flat_sky_mask],
                    self._energy_centers_keV)  # 1 / (keV cm^2 s rad^2)

                # We don't need to recompute the function anymore until a parameter changes
                self._recompute_flux = False

            # Now compute the expected signal

            pixel_area_rad2 = self._flat_sky_projection.project_plane_pixel_area * deg2_to_rad2

            this_model_image = np.zeros(self._all_fluxes.shape[0])

            # Loop over the Dec bins that cover this source and compute the expected flux, interpolating between
            # two dec bins for each point

            for dec_bin1, dec_bin2 in zip(self._dec_bins_to_consider[:-1],
                                          self._dec_bins_to_consider[1:]):

                # Get the two response bins to consider
                this_response_bin1 = dec_bin1[energy_bin_id]
                this_response_bin2 = dec_bin2[energy_bin_id]

                # Figure out which pixels are between the centers of the dec bins we are considering
                c1, c2 = this_response_bin1.declination_center, this_response_bin2.declination_center

                idx = (self._flat_sky_projection.decs >= c1) & (self._flat_sky_projection.decs < c2) & \
                      self._active_flat_sky_mask

                # Reweight the spectrum separately for the two bins
                # NOTE: the scale is the same because the sim_differential_photon_fluxes are the same (the simulation
                # used to make the response used the same spectrum for each bin). What changes between the two bins
                # is the observed signal per bin (the .sim_signal_events_per_bin member)
                scale = (self._all_fluxes[idx, :] * pixel_area_rad2
                         ) / this_response_bin1.sim_differential_photon_fluxes

                # Compute the interpolation weights for the two responses
                w1 = (self._flat_sky_projection.decs[idx] - c2) / (c1 - c2)
                w2 = (self._flat_sky_projection.decs[idx] - c1) / (c2 - c1)

                this_model_image[idx] = (w1 * np.sum(scale * this_response_bin1.sim_signal_events_per_bin, axis=1) +
                                         w2 * np.sum(scale * this_response_bin2.sim_signal_events_per_bin, axis=1)) * \
                                        1e9

            # Reshape the flux array into an image
            this_model_image = this_model_image.reshape(
                (self._flat_sky_projection.npix_height,
                 self._flat_sky_projection.npix_width)).T

            return this_model_image
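The declination loop above linearly interpolates each pixel's expected signal between the two response bins that bracket it: `w1` and `w2` are ordinary linear interpolation weights and always sum to one. A standalone sketch of the weighting (illustrative only):

    import numpy as np

    def dec_interpolation_weights(decs, c1, c2):
        # Weights for points with c1 <= dec < c2: w1 -> 1 at c1, w2 -> 1 at c2
        decs = np.asarray(decs, dtype=float)

        w1 = (decs - c2) / (c1 - c2)
        w2 = (decs - c1) / (c2 - c1)

        return w1, w2

    # For example, a quarter of the way between centers at 20 and 22 degrees:
    # dec_interpolation_weights([20.5], 20.0, 22.0) -> (array([0.75]), array([0.25]))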
Example #19
    def sample(self, quiet=False):

        with use_astromodels_memoization(False):

            self._sampler.sample(quiet=quiet)