コード例 #1
0
ファイル: test_dnest4.py プロジェクト: blakeaw/Gleipnir
def test_initialization():
    """Smoke test: build a DNest4NestedSampling instance from the module-level
    settings and cache it in ``shared`` for the subsequent tests to reuse."""
    ns_settings = {
        'sampled_parameters': sampled_parameters,
        'loglikelihood': loglikelihood,
        'population_size': population_size,
        'n_diffusive_levels': n_levels,
        'num_steps': num_steps,
        'num_per_step': num_per_step,
    }
    # Stash the sampler so later tests operate on the same instance.
    shared['DNS'] = DNest4NestedSampling(**ns_settings)
コード例 #2
0
ファイル: nestedsample_it.py プロジェクト: blakeaw/Gleipnir
    def __call__(self,
                 ns_version='gleipnir-classic',
                 ns_population_size=1000,
                 ns_kwargs=None,
                 log_likelihood_type='logpdf'):
        """Call the NestedSampleIt instance to construct an instance of the NestedSampling object.

        Args:
                ns_version (str): Defines which version of Nested Sampling to use.
                    Options are 'gleipnir-classic'=>Gleipnir's built-in implementation
                    of the classic Nested Sampling algorithm, 'multinest'=>Use the
                    MultiNest code via Gleipnir, 'polychord'=>Use the PolyChord code
                    via Gleipnir, or 'dnest4'=>Use the DNest4 program via Gleipnir.
                    Defaults to 'gleipnir-classic'.
                ns_population_size (int): Set the size of the active population
                    of sample points to use during Nested Sampling runs.
                    Defaults to 1000.
                ns_kwargs (dict): Dictionary of any additional optional keyword
                    arguments to pass to the NestedSampling object constructor.
                    Defaults to dict().
                log_likelihood_type (str): Define the type of loglikelihood estimator
                    to use. Options are 'logpdf'=>Compute the loglikelihood using
                    the normal distribution estimator, 'mse'=>Compute the
                    loglikelihood using the negative mean squared error estimator,
                    'sse'=>Compute the loglikelihood using the negative sum of
                    squared errors estimator. Defaults to 'logpdf'.

        Returns:
            The constructed nested sampler; the concrete class depends on
            ns_version (NestedSampling, MultiNestNestedSampling,
            PolyChordNestedSampling, or DNest4NestedSampling).

        Raises:
            ValueError: If ns_version is not one of the recognized options.
        """
        if ns_kwargs is None:
            ns_kwargs = dict()
        self._ns_kwargs = ns_kwargs
        population_size = ns_population_size
        # Choose which loglikelihood estimator the sampler will call back into.
        if log_likelihood_type == 'mse':
            loglikelihood = self.mse_loglikelihood
        elif log_likelihood_type == 'sse':
            loglikelihood = self.sse_loglikelihood
        else:
            # Default: normal-distribution ('logpdf') estimator.
            loglikelihood = self.logpdf_loglikelihood
        if ns_version == 'gleipnir-classic':
            from gleipnir.nestedsampling import NestedSampling
            from gleipnir.nestedsampling.samplers import MetropolisComponentWiseHardNSRejection
            from gleipnir.nestedsampling.stopping_criterion import NumberOfIterations
            sampler = MetropolisComponentWiseHardNSRejection(iterations=10,
                                                             burn_in=10,
                                                             tuning_cycles=1)
            # Stop the NS run after a fixed number of iterations:
            # 10 * population_size.
            stopping_criterion = NumberOfIterations(10 * population_size)
            nested_sampler = NestedSampling(
                sampled_parameters=self._sampled_parameters,
                loglikelihood=loglikelihood,
                sampler=sampler,
                population_size=population_size,
                stopping_criterion=stopping_criterion)
        elif ns_version == 'multinest':
            from gleipnir.multinest import MultiNestNestedSampling
            nested_sampler = MultiNestNestedSampling(
                sampled_parameters=self._sampled_parameters,
                loglikelihood=loglikelihood,
                population_size=population_size,
                **self._ns_kwargs)
        elif ns_version == 'polychord':
            from gleipnir.polychord import PolyChordNestedSampling
            nested_sampler = PolyChordNestedSampling(
                sampled_parameters=self._sampled_parameters,
                loglikelihood=loglikelihood,
                population_size=population_size)
        elif ns_version == 'dnest4':
            from gleipnir.dnest4 import DNest4NestedSampling
            # DNest4 needs an explicit chain length; default to 100 Monte
            # Carlo steps per active point unless the caller supplied one.
            self._ns_kwargs.setdefault('num_steps', 100 * population_size)
            nested_sampler = DNest4NestedSampling(
                # Bug fix: this branch previously passed the undefined name
                # 'sampled_parameters' (NameError at runtime); every other
                # branch uses self._sampled_parameters.
                sampled_parameters=self._sampled_parameters,
                loglikelihood=loglikelihood,
                population_size=population_size,
                **self._ns_kwargs)
        else:
            # Previously an unrecognized version fell through and raised an
            # opaque UnboundLocalError at the return; fail loudly instead.
            raise ValueError(
                "Unknown ns_version: {!r}. Options are 'gleipnir-classic', "
                "'multinest', 'polychord', or 'dnest4'.".format(ns_version))

        return nested_sampler
コード例 #3
0
ファイル: run_jarm_dnest4.py プロジェクト: blakeaw/Gleipnir
        logp_total = -np.inf

    return logp_total


if __name__ == '__main__':

    # Setup the Nested Sampling run
    n_params = len(sampled_parameters)
    print("Sampling a total of {} parameters".format(n_params))
    population_size = 100
    print("Will use NS population size of {}".format(population_size))
    # Construct the Nested Sampler
    DNS = DNest4NestedSampling(sampled_parameters=sampled_parameters,
                               loglikelihood=loglikelihood,
                               population_size=population_size,
                               n_diffusive_levels=10,
                               num_steps=1000,
                               num_per_step=100)

    # Launch the Nested Sampling run.
    log_evidence, log_evidence_error = DNS.run()
    # Print the output
    print("log_evidence: {} +- {} ".format(log_evidence, log_evidence_error))
    # Get the estimates of the posterior probability distributions of the
    # parameters.
    posteriors = DNS.posteriors()
    # Save the posterior estimates.
    # Bug fix: output files were previously prefixed 'post_multinest_' — a
    # copy/paste slip from the MultiNest script; this run uses DNest4.
    for parm in posteriors.keys():
        marginal, edges, centers = posteriors[parm]
        np.save("post_dnest4_marginal_weights_parm_{}.npy".format(parm), marginal, allow_pickle=False)
        np.save("post_dnest4_marginal_edges_parm_{}.npy".format(parm), edges, allow_pickle=False)
コード例 #4
0
ファイル: NS_3pline_dnest4.py プロジェクト: blakeaw/Gleipnir
    # Set up the list of sampled parameters: the prior is Uniform(-5:5) --
    # we are using a fixed uniform prior from scipy.stats
    # (uniform(loc=-5, scale=10) covers the interval [-5, 5]).
    parm_names = list(['m', 'b'])
    sampled_parameters = [SampledParameter(name=p, prior=uniform(loc=-5.0,scale=10.0)) for p in parm_names]

    # Set the active point population size
    population_size = 100
    # Setup the Nested Sampling run
    n_params = len(sampled_parameters)
    print("Sampling a total of {} parameters".format(n_params))
    #population_size = 10
    print("Will use NS population size of {}".format(population_size))
    # Construct the Nested Sampler -- positional args are the sampled
    # parameters, the loglikelihood callback, and the population size.
    DNS = DNest4NestedSampling(sampled_parameters,
                               loglikelihood,
                               population_size,
                               num_steps=1000)
    #print(PCNS.likelihood(np.array([1.0])))
    #quit()
    # Launch the Nested Sampling run (verbose prints progress).
    log_evidence, log_evidence_error = DNS.run(verbose=True)
    # Print the output: the log-evidence estimate and its error.
    print("log_evidence: {} +- {} ".format(log_evidence, log_evidence_error))
    best_fit_l = DNS.best_fit_likelihood()
    print("Max likelihood parms: ", best_fit_l)
    best_fit_p, fit_error = DNS.best_fit_posterior()
    print("Max posterior weight parms ", best_fit_p)
    print("Max posterior weight parms error ", fit_error)
    # Information criteria
    # Akaike information criterion from the NS results.
    aic = DNS.akaike_ic()
コード例 #5
0
 num_steps = 500
 # Number of monte carlo trial moves per iteration -- num_per_step
 num_per_step = 2000
 # Number of diffusive levels
 n_levels = 20
 # Setup the Nested Sampling run
 n_params = len(sampled_parameters)
 print("Sampling a total of {} parameters".format(n_params))
 #population_size = 10
 print("Will use NS population size of {}".format(population_size))
 # Construct the Nested Sampler, passing the DNest4 tuning knobs explicitly
 # (level spawning interval, per-thread steps, and the lam/beta parameters).
 DNS = DNest4NestedSampling(sampled_parameters=sampled_parameters,
                            loglikelihood=loglikelihood,
                            population_size=population_size,
                            n_diffusive_levels=n_levels,
                            num_steps=num_steps,
                            num_per_step=num_per_step,
                            new_level_interval=10000,
                            thread_steps=100,
                            lam=5,
                            beta=100)
 #print(PCNS.likelihood(np.array([1.0])))
 #quit()
 # Launch the Nested Sampling run (verbose prints progress).
 log_evidence, log_evidence_error = DNS.run(verbose=True)
 # Print the output and compare against the analytic evidence value.
 print("log_evidence: {} +- {} ".format(log_evidence, log_evidence_error))
 print("analytic log_evidence: {}".format(analytic_log_evidence(
     ndim, width)))
 best_fit_l = DNS.best_fit_likelihood()
 print("Max likelihood parms: ", best_fit_l)
 best_fit_p, fit_error = DNS.best_fit_posterior()
コード例 #6
0
# Define the loglikelihood function
def loglikelihood(sampled_parameter_vector):
    """Log-likelihood of the first sampled parameter under an equal-weight
    Gaussian mixture centered at the module-level ``positions`` with common
    scale ``width``; the result is floored at -1000 to avoid -inf."""
    residuals = sampled_parameter_vector[0] - positions
    z = residuals / width
    # Normal-density value at each mixture component.
    component_density = np.exp(-0.5 * z**2) / (2.0 * np.pi * width**2)**0.5
    log_like = np.log(component_density.mean())
    # Clamp extremely small likelihoods so the sampler never sees -inf.
    return log_like if log_like >= -1000.0 else -1000.0


# Construct the Nested Sampler -- the in-memory DNest4 backend keeps samples
# in RAM instead of writing them to disk.
DNS = DNest4NestedSampling(sampled_parameters=sampled_parameters,
                           loglikelihood=loglikelihood,
                           population_size=500,
                           n_diffusive_levels=10,
                           dnest4_backend="memory",
                           num_steps=1000,
                           num_per_step=100)
#print(PCNS.likelihood(np.array([1.0])))
#quit()
# Launch the Nested Sampling run (verbose prints progress).
log_evidence, log_evidence_error = DNS.run(verbose=True)
# Print the output
#print(PCNS.output)
# Evidence should be 1/2
print("log_evidence: ", log_evidence)
print("evidence: ", DNS.evidence)

#try plotting a marginal distribution
try: