Example #1
0
def test_run_compat():
    """Smoke-test ultranest's PyMultiNest-compatible solve() wrapper."""
    from ultranest.solvecompat import pymultinest_solve_compat as solve

    ndim = 2
    sigma = 0.01
    centers = 0.5
    paramnames = ['a', 'b']

    def loglike(theta):
        # Gaussian log-likelihood centred on `centers` in every dimension.
        # NOTE(review): relies on a module-level `np` (numpy) import.
        chi2 = (((theta - centers) / sigma)**2).sum()
        norm = 0.5 * np.log(2 * np.pi * sigma**2) * ndim
        return -0.5 * chi2 - norm

    def transform(x):
        # Map unit-cube samples into the physical parameter space.
        params = x.copy()
        params[0] = 10 * x[0] - 5.
        params[1] = 10**(x[1] - 1)
        return params

    result = solve(LogLikelihood=loglike, Prior=transform,
        n_dims=ndim, outputfiles_basename=None,
        verbose=True, resume='resume', importance_nested_sampling=False)

    print()
    print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
    print()
    print('parameter values:')
    for name, col in zip(paramnames, result['samples'].transpose()):
        print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
Example #2
0
	def run(
		self, evidence_tolerance=0.5, n_live_points=400,
		wrapped_params=None, **kwargs
	):
		"""
		Run nested sampling with ultranest.

		:param n_live_points: number of live points (400 to 1000 is recommended).
		:param evidence_tolerance: uncertainty on the evidence to achieve
		:param resume: whether to resume a previous run (popped from kwargs; default False).
		:param Lepsilon: numerical model inaccuracies in the statistic (default: 0.1).
			Increase if run is not finishing because it is trying too hard to resolve
			unimportant details caused e.g., by atable interpolations.
		:param frac_remain: fraction of the integration remainder allowed in the live points.
			Setting to 0.5 in mono-modal problems can be acceptable and faster.
			The default is 0.01 (safer).

		These are ultranest parameters (see ultranest.solve documentation!)
		"""

		def log_likelihood(params):
			# Push the proposed parameters into the XSPEC model, then turn
			# the fit statistic into a log-likelihood.
			set_parameters(transformations=self.transformations, values=params)
			like = -0.5 * Fit.statistic
			if not numpy.isfinite(like):
				# guard against NaN/inf statistics crashing the sampler
				return -1e100
			return like

		# refuse to run with a statistic that is not a Poisson likelihood
		if Fit.statMethod.lower() not in BXASolver.allowed_stats:
			raise RuntimeError('ERROR: not using cash (Poisson likelihood) for Poisson data! set Fit.statMethod to cash before analysing (currently: %s)!' % Fit.statMethod)

		n_dims = len(self.paramnames)
		resume = kwargs.pop('resume', False)
		Lepsilon = kwargs.pop('Lepsilon', 0.1)

		with XSilence():
			self.results = solve(
				log_likelihood, self.prior_function, n_dims,
				paramnames=self.paramnames,
				outputfiles_basename=self.outputfiles_basename,
				resume=resume, Lepsilon=Lepsilon,
				n_live_points=n_live_points, evidence_tolerance=evidence_tolerance,
				seed=-1, max_iter=0, wrapped_params=wrapped_params, **kwargs
			)
			# Recover the log-likelihood of each equally-weighted posterior
			# sample by locating its row among the weighted samples.
			# BUGFIX: require the whole row to match (.all(axis=1)); the bare
			# `points == sample` comparison yields a 2-D boolean mask, so
			# numpy.where(...)[0][0] previously picked the first row where ANY
			# single coordinate coincided, not the matching sample.
			points = self.results['weighted_samples']['points']
			weighted_logl = self.results['weighted_samples']['logl']
			logls = [
				weighted_logl[numpy.where((points == sample).all(axis=1))[0][0]]
				for sample in self.results['samples']]
			self.posterior = self.results['samples']

			chainfilename = '%schain.fits' % self.outputfiles_basename
			# BUGFIX: `-2 * logls` on a Python list is list repetition with a
			# negative count and silently produces []; convert to an array so
			# the multiplication is element-wise.
			store_chain(chainfilename, self.transformations, self.posterior,
				-2 * numpy.asarray(logls))
			xspec.AllChains.clear()
			xspec.AllChains += chainfilename

			# set current parameters to best fit
			self.set_best_fit()

		return self.results
Example #3
0
	def run(self,
		evidence_tolerance=0.5,
		n_live_points=400,
		wrapped_params=None, **kwargs):
		"""
		Run the analysis with MultiNest.

		n_live_points: 400 are often enough.

		For quick results, use sampling_efficiency = 0.8, n_live_points = 50,
		evidence_tolerance = 5.
		The real results must be estimated with sampling_efficiency = 0.3
		and without using const_efficiency_mode, otherwise it is not reliable.

		The remainder are multinest arguments (see PyMultiNest and MultiNest
		documentation!)
		"""

		def log_likelihood(values):
			# evaluate the XSPEC fit statistic at the proposed parameters
			set_parameters(transformations=self.transformations, values=values)
			loglike = -0.5 * Fit.statistic
			if numpy.isfinite(loglike):
				return loglike
			# protect the sampler from NaN/inf statistics
			return -1e100

		# only Poisson-likelihood statistics are valid here
		if Fit.statMethod.lower() not in BXASolver.allowed_stats:
			raise RuntimeError('ERROR: not using cash (Poisson likelihood) for Poisson data! set Fit.statMethod to cash before analysing (currently: %s)!' % Fit.statMethod)

		n_dims = len(self.paramnames)
		resume = kwargs.pop('resume', False)
		Lepsilon = kwargs.pop('Lepsilon', 0.1)

		with XSilence():
			self.results = solve(
				log_likelihood, self.prior_function, n_dims,
				paramnames=self.paramnames,
				outputfiles_basename=self.outputfiles_basename,
				resume=resume, Lepsilon=Lepsilon,
				n_live_points=n_live_points,
				evidence_tolerance=evidence_tolerance,
				seed=-1, max_iter=0, wrapped_params=wrapped_params,
				**kwargs)
			self.posterior = self.results['samples']
			chainfilename = '%schain.fits' % self.outputfiles_basename
			store_chain(chainfilename, self.transformations, self.posterior)
			xspec.AllChains.clear()
			xspec.AllChains += chainfilename
			# leave the model at the best-fit parameters
			self.set_best_fit()

		return self.results
Example #4
0
    def run(self,
            evidence_tolerance=0.5,
            n_live_points=400,
            wrapped_params=None,
            **kwargs):
        """
        Run nested sampling with ultranest.

        :param n_live_points: number of live points (400 to 1000 is recommended).
        :param evidence_tolerance: uncertainty on the evidence to achieve
        :param resume: whether to resume a previous run (popped from kwargs; default False)
        :param Lepsilon: numerical model inaccuracies in the statistic (default: 0.1).
            Increase if run is not finishing because it is trying too hard to resolve
            unimportant details caused e.g., by atable interpolations.
        :param frac_remain: fraction of the integration remainder allowed in the live points.
            Setting to 0.5 in mono-modal problems can be acceptable and faster.
            The default is 0.01 (safer).

        These are ultranest parameters (see ultranest.solve documentation!)
        """

        fit = self.fit
        # NOTE(review): this fit-statistic sanity check is deliberately
        # disabled by the `False and` prefix; drop `False and` to re-enable.
        if False and not isinstance(fit.stat, self.allowed_stats):
            raise RuntimeError("Fit statistic must be cash or cstat, not %s" %
                               fit.stat.name)

        # sampler options consumed here rather than passed through in kwargs
        resume = kwargs.pop('resume', False)
        Lepsilon = kwargs.pop('Lepsilon', 0.1)

        self.results = solve(self.log_likelihood,
                             self.prior_transform,
                             self.ndims,
                             paramnames=self.paramnames,
                             outputfiles_basename=self.outputfiles_basename,
                             resume=resume,
                             Lepsilon=Lepsilon,
                             n_live_points=n_live_points,
                             evidence_tolerance=evidence_tolerance,
                             seed=-1,
                             max_iter=0,
                             wrapped_params=wrapped_params,
                             **kwargs)
        # load the best-fit parameters into the model
        self.set_best_fit()
        return self.results
Example #5
0
    def run(self,
            evidence_tolerance=0.5,
            n_live_points=400,
            wrapped_params=None,
            **kwargs):
        """
        Run nested sampling with ultranest.

        :param n_live_points: number of live points (400 to 1000 is recommended).
        :param evidence_tolerance: uncertainty on the evidence to achieve
        :param resume: whether to resume a previous run (popped from kwargs; default False).
        :param Lepsilon: numerical model inaccuracies in the statistic (default: 0.1).
            Increase if run is not finishing because it is trying too hard to resolve
            unimportant details caused e.g., by atable interpolations.
        :param frac_remain: fraction of the integration remainder allowed in the live points.
            Setting to 0.5 in mono-modal problems can be acceptable and faster.
            The default is 0.01 (safer).

        These are ultranest parameters (see ultranest.solve documentation!)
        """

        fit = self.get_fit()
        # NOTE(review): statistic check intentionally disabled via `False and`.
        if False and not isinstance(fit.stat, self.allowed_stats):
            raise RuntimeError("Fit statistic must be cash or cstat, not %s" %
                               fit.stat.name)

        def prior_transform(cube):
            # map a unit-cube sample to parameter space via the user prior
            params = cube.copy()
            self.prior(params, n_dims, n_dims)
            return params

        def log_likelihood(cube):
            try:
                for i, p in enumerate(self.parameters):
                    # NOTE(review): assert is stripped under `python -O`;
                    # this is a debugging aid, not input validation.
                    assert not isnan(
                        cube[i]
                    ), 'ERROR: parameter %d (%s) to be set to %f' % (
                        i, p.fullname, cube[i])
                    p.val = cube[i]
                return -0.5 * fit.calc_stat()
            except Exception as e:
                # dump the offending parameter vector before propagating
                print('Exception in log_likelihood function: ', e)
                for i, p in enumerate(self.parameters):
                    print('    Parameter %10s: %f --> %f [%f..%f]' %
                          (p.fullname, p.val, cube[i], p.min, p.max))
                # bare raise keeps the original traceback intact
                # (fixes `raise e`, which re-raises with an extended traceback)
                raise

        n_dims = len(self.parameters)
        resume = kwargs.pop('resume', False)
        Lepsilon = kwargs.pop('Lepsilon', 0.1)

        self.results = solve(log_likelihood,
                             prior_transform,
                             n_dims,
                             paramnames=self.paramnames,
                             outputfiles_basename=self.outputfiles_basename,
                             resume=resume,
                             Lepsilon=Lepsilon,
                             n_live_points=n_live_points,
                             evidence_tolerance=evidence_tolerance,
                             seed=-1,
                             max_iter=0,
                             wrapped_params=wrapped_params,
                             **kwargs)
        # load the best-fit parameters into the model
        self.set_best_fit()
        return self.results
Example #6
0
    def run(self,
            evidence_tolerance=0.5,
            n_live_points=400,
            wrapped_params=None,
            **kwargs):
        """
        Run the Bayesian analysis with specified parameters+transformations.

        :param evidence_tolerance: uncertainty on the evidence to achieve.
        :param n_live_points: number of live points; 400 are often enough.
        :param wrapped_params: passed through to the sampler.

        The remainder are multinest arguments (see PyMultiNest and MultiNest
        documentation!)

        For quick results, use sampling_efficiency = 0.8, n_live_points = 50,
        evidence_tolerance = 5.
        The real results must be estimated with sampling_efficiency = 0.3,
        otherwise it is not reliable.
        """

        fit = self.get_fit()
        # NOTE(review): statistic check intentionally disabled via `False and`.
        if False and not isinstance(fit.stat, self.allowed_stats):
            raise RuntimeError("Fit statistic must be cash or cstat, not %s" %
                               fit.stat.name)

        def prior_transform(cube):
            # map a unit-cube sample to parameter space via the user prior
            params = cube.copy()
            self.prior(params, n_dims, n_dims)
            return params

        def log_likelihood(cube):
            # Assign each proposed value to its model parameter, then return
            # -0.5 * statistic as the log-likelihood.
            try:
                for i, p in enumerate(self.parameters):
                    # NOTE(review): assert is stripped under `python -O`;
                    # this is a debugging aid, not input validation.
                    assert not isnan(
                        cube[i]
                    ), 'ERROR: parameter %d (%s) to be set to %f' % (
                        i, p.fullname, cube[i])
                    p.val = cube[i]
                return -0.5 * fit.calc_stat()
            except Exception as e:
                # dump the offending parameter vector before propagating
                print('Exception in log_likelihood function: ', e)
                for i, p in enumerate(self.parameters):
                    print('    Parameter %10s: %f --> %f [%f..%f]' %
                          (p.fullname, p.val, cube[i], p.min, p.max))
                raise Exception("Model evaluation problem") from e
            # BUGFIX: removed the unreachable `return -1e300` that followed
            # the raise — dead code that suggested a fallback which never ran.

        n_dims = len(self.parameters)
        resume = kwargs.pop('resume', False)
        Lepsilon = kwargs.pop('Lepsilon', 0.1)

        self.results = solve(log_likelihood,
                             prior_transform,
                             n_dims,
                             paramnames=self.paramnames,
                             outputfiles_basename=self.outputfiles_basename,
                             resume=resume,
                             Lepsilon=Lepsilon,
                             n_live_points=n_live_points,
                             evidence_tolerance=evidence_tolerance,
                             seed=-1,
                             max_iter=0,
                             wrapped_params=wrapped_params,
                             **kwargs)
        # load the best-fit parameters into the model
        self.set_best_fit()
        return self.results