Example #1
def nestle_multi2():
    # Run nested sampling
    res = nestle.sample(loglike2,
                        prior_transform2,
                        3,
                        method='multi',
                        npoints=2000)
    print(res.summary())

    # weighted average and covariance:
    pm, covm = nestle.mean_and_cov(res.samples, res.weights)

    # re-scale weights to have a maximum of one
    nweights = res.weights / np.max(res.weights)

    # get the probability of keeping a sample from the weights
    keepidx = np.where(np.random.rand(len(nweights)) < nweights)[0]

    # get the posterior samples
    samples_nestle = res.samples[keepidx, :]

    print(np.mean(samples_nestle[:, 0]))  # mean of A samples
    print(np.std(samples_nestle[:, 0]))  # standard deviation of A samples
    print(np.mean(samples_nestle[:, 1]))  # mean of w samples
    print(np.std(samples_nestle[:, 1]))  # standard deviation of w samples
    print(np.mean(samples_nestle[:, 2]))  # mean of t0 samples
    print(np.std(samples_nestle[:, 2]))  # standard deviation of t0 samples
    print(len(samples_nestle))  # number of posterior samples

    return res.logz
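Note: the weight-rescaling block above is the standard trick for turning weighted nested-sampling output into equally weighted posterior draws; each sample is kept with probability proportional to its weight. `loglike2` and `prior_transform2` are defined elsewhere in the source project; a minimal, hypothetical stand-in for the three parameters named in the comments (amplitude A, width w, center t0), assuming a Gaussian-pulse model, could look like this:

import numpy as np
import nestle

# Hypothetical data and model; the real project defines these elsewhere.
# Assumed model: y(t) = A * exp(-(t - t0)**2 / (2 * w**2)).
t = np.linspace(0., 10., 100)
A_true, w_true, t0_true = 5.0, 1.0, 4.0
sigma = 0.5                                   # assumed measurement noise
y = (A_true * np.exp(-(t - t0_true)**2 / (2. * w_true**2))
     + sigma * np.random.randn(len(t)))

def loglike2(theta):
    # Gaussian log-likelihood (up to an additive constant).
    A, w, t0 = theta
    ymodel = A * np.exp(-(t - t0)**2 / (2. * w**2))
    return -0.5 * np.sum((y - ymodel)**2 / sigma**2)

def prior_transform2(u):
    # Map the unit cube onto flat priors: A in [0, 10], w in [0, 5], t0 in [0, 10].
    return np.array([10., 5., 10.]) * u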
Example #2
    def store_nestle_output(self, result):
        """
        This turns the output from nestle into a dictionary that can
        be output by Taurex

        Parameters
        ----------
        result: :obj:`dict`
            Result from a nestle sample call

        Returns
        -------
        dict
            Formatted dictionary for output

        """

        from tabulate import tabulate

        nestle_output = {}
        nestle_output['Stats'] = {}
        nestle_output['Stats']['Log-Evidence'] = result.logz
        nestle_output['Stats']['Log-Evidence-Error'] = result.logzerr
        nestle_output['Stats']['Peakiness'] = result.h

        fit_param = self.fit_names

        samples = result.samples
        weights = result.weights

        mean, cov = nestle.mean_and_cov(samples, weights)
        nestle_output['solution'] = {}
        nestle_output['solution']['samples'] = samples
        nestle_output['solution']['weights'] = weights
        nestle_output['solution']['covariance'] = cov
        nestle_output['solution']['fitparams'] = {}

        max_weight = weights.argmax()

        table_data = []

        for idx, param_name in enumerate(fit_param):
            param = {}
            param['mean'] = mean[idx]
            param['sigma'] = np.sqrt(cov[idx, idx])  # 1-sigma from the covariance diagonal
            trace = samples[:, idx]
            q_16, q_50, q_84 = quantile_corner(trace, [0.16, 0.5, 0.84],
                                               weights=np.asarray(weights))
            param['value'] = q_50
            param['sigma_m'] = q_50 - q_16
            param['sigma_p'] = q_84 - q_50
            param['trace'] = trace
            param['map'] = trace[max_weight]
            table_data.append((param_name, q_50, q_50 - q_16))

            nestle_output['solution']['fitparams'][param_name] = param

        return nestle_output
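`quantile_corner` is not shown in this snippet. A sketch of a weighted-quantile helper with the same call signature (an assumption; the project's own helper may differ) is:

import numpy as np

def quantile_corner(x, q, weights=None):
    # Weighted quantiles of a 1-d sample, interpolating the weighted CDF.
    if weights is None:
        return np.percentile(x, 100. * np.asarray(q))
    order = np.argsort(x)
    x, weights = np.asarray(x)[order], np.asarray(weights)[order]
    cdf = np.cumsum(weights) / np.sum(weights)
    return np.interp(q, cdf, x)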
Example #3
File: runtests.py Project: bd-j/nestle
def test_mean_and_cov():
    x = np.random.random((10, 3))
    w = np.random.random((10,))

    mean, cov = nestle.mean_and_cov(x, w)

    # check individual elements
    xd = x - np.average(x, weights=w, axis=0)
    prefactor = w.sum() / (w.sum()**2 - (w**2).sum())
    ans00 = prefactor * np.sum(w * xd[:, 0] * xd[:, 0])
    assert_approx_equal(cov[0, 0], ans00)
    ans01 = prefactor * np.sum(w * xd[:, 0] * xd[:, 1])
    assert_approx_equal(cov[0, 1], ans01)

    # If weights are all equal, covariance should come out to simple case
    w = np.repeat(0.2, 10)
    mean, cov = nestle.mean_and_cov(x, w)
    assert_allclose(cov, np.cov(x, rowvar=0))
    assert_allclose(mean, np.average(x, axis=0))
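The equal-weights assertions at the end hold because with w_i = c the prefactor reduces to nc / (n^2 c^2 - n c^2) = 1 / (c (n - 1)), so prefactor * sum_i w_i * xd_i * xd_j = sum_i xd_i * xd_j / (n - 1), which is exactly the unbiased estimator computed by np.cov.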
Example #5
def nested_sampling_results(ns_object, burnin=0.4, bins=None):
    """ Shows the results of the Nested Sampling, summary, parameters with errors,
    walk and corner plots.
    """
    res = ns_object
    nsamples = res.samples.shape[0]
    indburnin = int(np.percentile(np.arange(nsamples), burnin * 100))

    print(res.summary())

    print(
        '\nNatural log of prior volume and Weight corresponding to each sample')
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(res.logvol, '.', alpha=0.5, color='gray')
    plt.xlabel('samples')
    plt.ylabel('logvol')
    plt.vlines(indburnin, res.logvol.min(), res.logvol.max(),
               linestyles='dotted')
    plt.subplot(1, 2, 2)
    plt.plot(res.weights, '.', alpha=0.5, color='gray')
    plt.xlabel('samples')
    plt.ylabel('weights')
    plt.vlines(indburnin, res.weights.min(), res.weights.max(),
               linestyles='dotted')
    plt.show()

    print("\nWalk plots before the burnin")
    show_walk_plot(np.expand_dims(res.samples, axis=0))
    if burnin > 0:
        print("\nWalk plots after the burnin")
        show_walk_plot(np.expand_dims(res.samples[indburnin:], axis=0))

    mean, cov = nestle.mean_and_cov(res.samples[indburnin:],
                                    res.weights[indburnin:])
    print("\nWeighted mean +- sqrt(covariance)")
    print("Radius = {:.3f} +/- {:.3f}".format(mean[0], np.sqrt(cov[0, 0])))
    print("Theta = {:.3f} +/- {:.3f}".format(mean[1], np.sqrt(cov[1, 1])))
    print("Flux = {:.3f} +/- {:.3f}".format(mean[2], np.sqrt(cov[2, 2])))

    if bins is None:
        bins = int(np.sqrt(res.samples[indburnin:].shape[0]))
        print("\nHist bins =", bins)
    ranges = None

    fig = corner.corner(res.samples[indburnin:], bins=bins,
                        labels=["$r$", r"$\theta$", "$f$"],
                        weights=res.weights[indburnin:], range=ranges,
                        plot_contours=True)
    fig.set_size_inches(8., 8.)

    print('\nConfidence intervals')
    _ = confidence(res.samples[indburnin:], cfd=68, bins=bins,
                   weights=res.weights[indburnin:],
                   gaussianFit=True, verbose=True, save=False)
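Note: nestle stores samples in iteration order, so np.percentile(np.arange(nsamples), burnin * 100) is just burnin * (nsamples - 1) under the default linear interpolation; it has to be cast to int before it can serve as a slice index, as the variants of this function in Examples #12 and #16 also do.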
Example #6
File: model.py Project: codybaxley/nsdmd
def show_results(loglike_NFW, prior_transform_NFW, n):
    # n is the dimension of theta; for the NFW model, n = 2
    result = nestle.sample(loglike_NFW, prior_transform_NFW, n)

    print('log evidence')
    print(result.logz)

    print('numerical (sampling) error on logz')
    print(result.logzerr)

    print('array of sample parameters')
    print(result.samples)

    print('array of weights associated with each sample')
    print(result.weights)

    p, cov = nestle.mean_and_cov(result.samples, result.weights)

    print("core radius a = {0:5.2f} +/- {1:5.2f} kpc".format(
        p[0], np.sqrt(cov[0, 0])))
    print("normalization factor = {0:5.2f} +/- {1:5.2f}".format(
        p[1], np.sqrt(cov[1, 1])))
    print(
        "Halo density normalization constant = {0:5.2e} +/- {1:5.2e} Msun/kpc^3"
        .format(2.312E5 * p[1], 2.312E5 * np.sqrt(cov[1, 1])))

    # Note: in order to convert the model to units of Msun/kpc^3 we multiply its value by 2.312E5.
    # See comments in the model definition for details.
    print("Halo density in our solor system = {0:5.2e} Msun/kpc^3.".format(
        2.312E5 * model_NFW(p, 8)))

    # Note: 1 Msun/kpc^3 = 3.817E-2 (GeV/c^2)/m^3 = 3.817E-5 (GeV/c^2)/(dm^3)
    # 1 dm^3 = 1 liter.
    # 3 WIMPS/liter would be 300 GeV/c^2/liter
    print("Halo density in our solor system = {0:5.2e} GeV/c^2/liter.".format(
        3.817E-5 * 2.312E5 * model_NFW(p, 8)))

    plt.figure()
    plt.errorbar(data_x, data_y, data_yerr, data_xerr, fmt='*')
    plt.xlabel("r (kpc)")
    plt.ylabel('V (km/s)')
    plt.title(
        "The measured rotational speed of the interstellar medium as a fucntion of the galactocentric radius"
    )
    plt.plot([5., 200.], model_NFW(p, np.array([5., 200.])))
    plt.show()

    fig = corner.corner(result.samples,
                        weights=result.weights,
                        labels=['a', 'rho0'],
                        range=[0.99999, 0.99999],
                        bins=30)
    plt.show()

    return 0
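model_NFW and the data arrays (data_x, data_y, data_xerr, data_yerr) are defined elsewhere in the project. Note that the snippet uses model_NFW both as a rotation curve (km/s) and, rescaled by 2.312E5, as a density, so its real definition carries unit conventions that cannot be reconstructed here. Purely to illustrate the call signature, a hypothetical sketch based on the standard NFW density profile:

def model_NFW(theta, r):
    # Hypothetical sketch only: the NFW density profile
    # rho(r) = rho0 / ((r/a) * (1 + r/a)**2), in the model's internal units.
    a, rho0 = theta
    x = np.asarray(r) / a
    return rho0 / (x * (1. + x)**2)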
Example #7
def nested_sampling_results(ns_object, burnin=0.4, bins=None):
    """ Shows the results of the Nested Sampling, summary, parameters with errors,
    walk and corner plots.
    """
    res = ns_object
    nsamples = res.samples.shape[0]
    indburnin = int(np.percentile(np.arange(nsamples), burnin * 100))

    print(res.summary())

    print(
        '\nNatural log of prior volume and Weight corresponding to each sample')
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(res.logvol, '.', alpha=0.5, color='gray')
    plt.xlabel('samples')
    plt.ylabel('logvol')
    plt.vlines(indburnin, res.logvol.min(), res.logvol.max(),
               linestyles='dotted')
    plt.subplot(1, 2, 2)
    plt.plot(res.weights, '.', alpha=0.5, color='gray')
    plt.xlabel('samples')
    plt.ylabel('weights')
    plt.vlines(indburnin, res.weights.min(), res.weights.max(),
               linestyles='dotted')
    plt.show()

    print("\nWalk plots before the burnin")
    show_walk_plot(np.expand_dims(res.samples, axis=0))
    if burnin > 0:
        print("\nWalk plots after the burnin")
        show_walk_plot(np.expand_dims(res.samples[indburnin:], axis=0))

    mean, cov = nestle.mean_and_cov(res.samples[indburnin:],
                                    res.weights[indburnin:])
    print("\nWeighted mean +- sqrt(covariance)")
    print("Radius = {:.3f} +/- {:.3f}".format(mean[0], np.sqrt(cov[0, 0])))
    print("Theta = {:.3f} +/- {:.3f}".format(mean[1], np.sqrt(cov[1, 1])))
    print("Flux = {:.3f} +/- {:.3f}".format(mean[2], np.sqrt(cov[2, 2])))

    if bins is None:
        bins = int(np.sqrt(res.samples[indburnin:].shape[0]))
        print("\nHist bins =", bins)
    ranges = None

    fig = corner.corner(res.samples[indburnin:], bins=bins,
                        labels=["$r$", r"$\theta$", "$f$"],
                        weights=res.weights[indburnin:], range=ranges,
                        plot_contours=True)
    fig.set_size_inches(8, 8)

    print('\nConfidence intervals')
    _ = confidence(res.samples[indburnin:], cfd=68, bins=bins,
                   weights=res.weights[indburnin:],
                   gaussian_fit=True, verbose=True, save=False)
Example #8
 def run_nestle(self, outfile='TEST.dat'):
     # initialize output file
     self.outfile = open(outfile, 'w')
     self.outfile.write('ITER Teff Dist Rad NH arfsc1 arfsc2 log(z) \n')
     # Start sampler
     print('Start Nestle')
     result = nestle.sample(self.calllike, self.prior_trans, self.ndim,
                            method='multi', npoints=1000,
                            callback=self.nestle_callback)
     # generate posterior means and covariances
     p, cov = nestle.mean_and_cov(result.samples, result.weights)
     # close output file
     self.outfile.close()
     return result, p, cov
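self.nestle_callback is user code; nestle invokes a callback once per iteration with a dict containing at least the keys 'it' (iteration number) and 'logz' (running evidence estimate), the same keys consumed by the built-in nestle.print_progress. A minimal free-function sketch:

def nestle_callback(info):
    # info is the per-iteration dict nestle passes to callbacks.
    if info['it'] % 100 == 0:
        print("iteration {0}: logz = {1:.3f}".format(info['it'], info['logz']))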
Example #9
def print_results(result, display_params):
    means, cov = nestle.mean_and_cov(result.samples, result.weights)
    std = np.sqrt(np.diag(cov))

    plot_labels = [
        make_plot_label(*display_param) for display_param in display_params
    ]
    unit_fracs = [dp[1] for dp in display_params]
    shifts = [dp[3] for dp in display_params]

    print "\nEstimated parameters :"
    for m, s, label, scale, shift in zip(means, std, plot_labels, unit_fracs,
                                         shifts):
        m1 = m / scale - shift
        s1 = s / scale
        lbl = latex.latex_to_text(label)
        print "%s = %0.6f +/- %0.6f" % (lbl, m1, s1)
Example #10
def nestle_multi2():
    # Run nested sampling
    res = nestle.sample(loglike2,
                        prior_transform2,
                        2,
                        method='multi',
                        npoints=2000)

    # weighted average and covariance:
    pm, covm = nestle.mean_and_cov(res.samples, res.weights)

    # re-scale weights to have a maximum of one
    nweights = res.weights / np.max(res.weights)

    # get the probability of keeping a sample from the weights
    keepidx = np.where(np.random.rand(len(nweights)) < nweights)[0]

    # get the posterior samples
    samples_nestle = res.samples[keepidx, :]

    return res.logz
Example #11
    def get_summary(self):
        self.log.info(f'NestedSamplerStatModel::\tgetting the summary (or at'
                      f"least trying) let's first see if I did run")
        self.check_did_run()
        self.log.info(
            f"NestedSamplerStatModel::\t{utils.now()}\n\tAlright, that's done. Let's get some "
            f"info. I'm not going to print too much here")
        # keep a dictionary of all the results
        resdict = {}

        if self.config["sampler"] == 'multinest':
            # Do the import of multinest inside the class such that the package can be
            # loaded without multinest
            try:
                from pymultinest.solve import run, Analyzer, solve
            except ModuleNotFoundError:
                raise ModuleNotFoundError(
                    'package pymultinest not found. See README for installation')
            self.log.info(
                'NestedSamplerStatModel::\tget_summary::\tstart analyzer of results')
            analyzer = Analyzer(len(self.config['fit_parameters']),
                                outputfiles_basename=self.result_file)
            # Taken from multinest.solve
            self.result = analyzer.get_stats()
            samples = analyzer.get_equal_weighted_posterior()[:, :-1]

            self.log.info('parameter values:')
            for name, col in zip(
                    self.config['fit_parameters'], samples.transpose()):
                self.log.info(
                    '%15s : %.3f +- %.3f' %
                    (name, col.mean(), col.std()))
                resdict[name +
                        '_fit_res'] = ('{0:5.2f} +/- {1:5.2f}'.format(col.mean(), col.std()))
                if 'log_' in name:
                    resdict[name[4:] + '_fit_res'] = '%.3g +/- %.2g' % (
                        10. ** col.mean(), 10. ** (col.mean()) * np.log(10.) * col.std())
                    self.log.info(
                        f'\t {name[4:]}, {resdict[name[4:] + "_fit_res"]}')
            resdict['n_samples'] = len(samples.transpose()[0])
            # Pass the samples to the self.result to be saved.
            self.result['samples'] = samples
        elif self.config["sampler"] == 'nestle':
            # Do the import of nestle inside the class such that the package can be
            # loaded without nestle
            try:
                import nestle
            except ModuleNotFoundError:
                raise ModuleNotFoundError(
                    'package nestle not found. See README for installation')
            # taken from mattpitkin.github.io/samplers-demo/pages/samplers-samplers-everywhere/#Nestle
            # estimate of the statistical uncertainty on logZ
            logZerrnestle = np.sqrt(self.result.h / self.config['nlive'])
            # re-scale weights to have a maximum of one
            nweights = self.result.weights / np.max(self.result.weights)
            # get the probability of keeping a sample from the weights
            keepidx = np.where(np.random.rand(len(nweights)) < nweights)[0]
            # get the posterior samples
            samples_nestle = self.result.samples[keepidx, :]
            # number of posterior samples kept after re-weighting
            resdict['nestle_nposterior'] = len(samples_nestle)
            resdict['nestle_time'] = self.config['fit_time']  # run time
            # log marginalised likelihood
            resdict['nestle_logZ'] = self.result.logz
            resdict['nestle_logZerr'] = logZerrnestle  # uncertainty on log(Z)
            resdict['summary'] = self.result.summary()
            p, cov = nestle.mean_and_cov(
                self.result.samples, self.result.weights)
            for i, key in enumerate(self.config['fit_parameters']):
                resdict[key + '_fit_res'] = (
                    '{0:5.2f} +/- {1:5.2f}'.format(p[i], np.sqrt(cov[i, i])))
                self.log.info(f'\t, {key}, {resdict[key + "_fit_res"]}')
                if 'log_' in key:
                    resdict[key[4:] + '_fit_res'] = '%.3g +/- %.2g' % (
                        10. ** p[i], 10. ** (p[i]) * np.log(10) * np.sqrt(cov[i, i]))
                    self.log.info(
                        f'\t, {key[4:]}, {resdict[key[4:] + "_fit_res"]}')
        self.log.info(
            f'NestedSamplerStatModel::\tAlright we got all the info we need, '
            f"let's return it to whomever asked for it")
        return resdict
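The 10. ** col.mean() * np.log(10.) * col.std() expressions above are first-order error propagation from log10 space to linear space: if q = 10**p then dq/dp = 10**p * ln(10), so sigma_q is approximately 10**p * ln(10) * sigma_p. For example:

import numpy as np

p_mean, p_sigma = 2.0, 0.1                 # a log10 parameter, so q = 10**p = 100
q_mean = 10.0 ** p_mean
q_sigma = q_mean * np.log(10.0) * p_sigma  # ~23.0, the propagated 1-sigma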
Example #12
def nested_sampling_results(ns_object,
                            burnin=0.4,
                            bins=None,
                            save=False,
                            output_dir='/'):
    """ Shows the results of the Nested Sampling, summary, parameters with errors,
    walk and corner plots.
    """
    res = ns_object
    nsamples = res.samples.shape[0]
    indburnin = int(np.percentile(np.array(range(nsamples)), burnin * 100))

    print(res.summary())

    print(
        '\nNatural log of prior volume and Weight corresponding to each sample'
    )
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(res.logvol, '.', alpha=0.5, color='gray')
    plt.xlabel('samples')
    plt.ylabel('logvol')
    plt.vlines(indburnin,
               res.logvol.min(),
               res.logvol.max(),
               linestyles='dotted')
    plt.subplot(1, 2, 2)
    plt.plot(res.weights, '.', alpha=0.5, color='gray')
    plt.xlabel('samples')
    plt.ylabel('weights')
    plt.vlines(indburnin,
               res.weights.min(),
               res.weights.max(),
               linestyles='dotted')
    if save:
        plt.savefig(output_dir + 'Nested_results.pdf')
    plt.show()

    print("\nWalk plots before the burnin")
    show_walk_plot(np.expand_dims(res.samples, axis=0))
    if burnin > 0:
        print("\nWalk plots after the burnin")
        show_walk_plot(np.expand_dims(res.samples[indburnin:], axis=0))
    if save:
        plt.savefig(output_dir + 'Nested_walk_plots.pdf')

    mean, cov = nestle.mean_and_cov(res.samples[indburnin:],
                                    res.weights[indburnin:])
    print("\nWeighted mean +- sqrt(covariance)")
    print("Radius = {:.3f} +/- {:.3f}".format(mean[0], np.sqrt(cov[0, 0])))
    print("Theta = {:.3f} +/- {:.3f}".format(mean[1], np.sqrt(cov[1, 1])))
    print("Flux = {:.3f} +/- {:.3f}".format(mean[2], np.sqrt(cov[2, 2])))

    if save:
        with open(output_dir + 'Nested_sampling.txt', "w") as f:
            f.write('#################################\n')
            f.write('####   CONFIDENCE INTERVALS   ###\n')
            f.write('#################################\n')
            f.write(' \n')
            f.write('Results of the NESTED SAMPLING fit\n')
            f.write('----------------------------------\n ')
            f.write(' \n')
            f.write("\nWeighted mean +- sqrt(covariance)\n")
            f.write("Radius = {:.3f} +/- {:.3f}\n".format(
                mean[0], np.sqrt(cov[0, 0])))
            f.write("Theta = {:.3f} +/- {:.3f}\n".format(
                mean[1], np.sqrt(cov[1, 1])))
            f.write("Flux = {:.3f} +/- {:.3f}\n".format(
                mean[2], np.sqrt(cov[2, 2])))

    if bins is None:
        bins = int(np.sqrt(res.samples[indburnin:].shape[0]))
        print("\nHist bins =", bins)
    ranges = None

    fig = corner.corner(res.samples[indburnin:],
                        bins=bins,
                        labels=["$r$", r"$\theta$", "$f$"],
                        weights=res.weights[indburnin:],
                        range=ranges,
                        plot_contours=True)
    fig.set_size_inches(8, 8)
    if save:
        plt.savefig(output_dir + 'Nested_corner.pdf')

    print('\nConfidence intervals')
    _ = confidence(res.samples[indburnin:],
                   cfd=68,
                   bins=bins,
                   weights=res.weights[indburnin:],
                   gaussian_fit=True,
                   verbose=True,
                   save=False)

    if save:
        plt.savefig(output_dir + 'Nested_confi_hist_flux_r_theta_gaussfit.pdf')

    final_res = np.array([[mean[0], np.sqrt(cov[0, 0])],
                          [mean[1], np.sqrt(cov[1, 1])],
                          [mean[2], np.sqrt(cov[2, 2])]])
    return final_res
Example #13
	def run_nestle(self,samplertype=None,npoints=None,restart=None,weightrestart=True,maxrejcall=None):
		if samplertype is None:
			samplertype = 'multi'
		if npoints is None:
			npoints = 100

		if restart is None:
			# generate initial random sample within Nestle volume
			modind = np.array(range(0,len(self.MODPARS)),dtype=int)
			selind = modind[np.random.choice(len(modind),npoints,replace=False)]
			
			if ('Tycho_V' in self.bfpar.keys()) & ('Tycho_B' in self.bfpar.keys()):
				cond = (
					(self.PHOT['Tycho_V']+self.DM(self.mindist+0.5*self.distran) > self.bfpar['Tycho_V']-1.0) & 
					(self.PHOT['Tycho_V']+self.DM(self.mindist+0.5*self.distran) < self.bfpar['Tycho_V'] + 1.0) &
					(self.PHOT['Tycho_B']+self.DM(self.mindist+0.5*self.distran) > self.bfpar['Tycho_B']-1.0) & 
					(self.PHOT['Tycho_B']+self.DM(self.mindist+0.5*self.distran) < self.bfpar['Tycho_B'] + 1.0)
					)

				addind = modind[cond][np.random.choice(len(modind[cond]),int(npoints*0.25),replace=False)]
				finind = np.hstack([selind,addind])
				finind = np.unique(finind)
			else:
				finind = selind

			initsample = self.MODPARS[finind]
			initsample_v = np.empty((len(initsample), self.ndim), dtype=np.float64)
			initsample_u = np.empty((len(initsample), self.ndim), dtype=np.float64)

			for i in range(len(initsample)):
				initsample_v_i = [float(initsample['EEP'][i]),10.0**(float(initsample['log_age'][i])-9.0),float(initsample['[Fe/H]in'][i])]
				if self.fitphotbool:
					# initsample_v_i.append(self.distran*np.random.rand()+self.mindist)
					# initsample_v_i.append(self.Avran*np.random.rand()+self.minAv)
					if 'Para' in self.priordict.keys():
						distmean = 1000.0/self.priordict['Para'][0]
						parashift = self.priordict['Para'][0]-3.0*self.priordict['Para'][1]
						distsig = (1000.0/parashift)-distmean
						initsample_v_i.append(distsig*np.random.randn()+distmean)
					else:
						initsample_v_i.append(self.distran*np.random.rand()+self.mindist)
					initsample_v_i.append(self.Avran*np.random.rand()+self.minAv)
				initsample_u_i = self.prior_inversetrans(initsample_v_i)

				initsample_v[i,:] = initsample_v_i
				initsample_u[i,:] = initsample_u_i

		else:
			restart_from = Table.read(restart,format='ascii')
			if len(restart_from) > npoints:
				if weightrestart:
					restart_ind = np.random.choice(range(0,len(restart_from)),npoints,replace=False,p=np.exp(restart_from['logwt']-restart_from['logz'][-1]))					
				else:
					restart_ind = np.random.choice(range(0,len(restart_from)),npoints,replace=False)					
				restart_sel = restart_from[restart_ind]
				addind = np.random.choice(len(self.MODPARS),int(0.25*npoints),replace=False)
				addsel = self.MODPARS[addind]
			else:
				restart_sel = restart_from
				numbadd = int(1.25*npoints)-len(restart_sel)
				addind = np.random.choice(len(self.MODPARS),numbadd,replace=False)
				addsel = self.MODPARS[addind]

			initsample_v = np.empty((len(restart_sel)+len(addsel),self.ndim), dtype=np.float64)
			initsample_u = np.empty((len(restart_sel)+len(addsel),self.ndim), dtype=np.float64)

			for i in range(len(restart_sel)):
				initsample_v_i = [float(restart_sel['EEP'][i]),float(restart_sel['Age'][i]),float(restart_sel['[Fe/H]in'][i])]
				if self.fitphotbool:
					initsample_v_i.append(float(restart_sel['Dist'][i]))
					initsample_v_i.append(float(restart_sel['Av'][i]))
				initsample_u_i = self.prior_inversetrans(initsample_v_i)

				initsample_v[i,:] = initsample_v_i
				initsample_u[i,:] = initsample_u_i

			for i in range(len(addsel)):
				initsample_v_i = [float(addsel['EEP'][i]),10.0**(float(addsel['log_age'][i])-9.0),float(addsel['[Fe/H]in'][i])]
				if self.fitphotbool:
					# initsample_v_i.append(self.distran*np.random.rand()+self.mindist)
					# initsample_v_i.append(self.Avran*np.random.rand()+self.minAv)
					distmean = 1000.0/self.priordict['Para'][0]
					parashift = self.priordict['Para'][0]-3.0*self.priordict['Para'][1]
					distsig = (1000.0/parashift)-distmean
					initsample_v_i.append(distsig*np.random.randn()+distmean)
					initsample_v_i.append(self.Avran*np.random.rand()+self.minAv)
				initsample_u_i = self.prior_inversetrans(initsample_v_i)

				initsample_v[i+len(restart_sel),:] = initsample_v_i
				initsample_u[i+len(restart_sel),:] = initsample_u_i


		print('Start Nestle w/ {0} samples'.format(len(initsample_v)))
		self.startmct = datetime.now()
		self.stept = datetime.now()
		self.ncallt = 0
		self.maxcallnum = 0
		sys.stdout.flush()
		result = nestle.sample(
			self.lnp_call_nestle,self.prior_trans,self.ndim,method=samplertype,
			npoints=len(initsample_v),callback=self.nestle_callback,user_sample=initsample_u,
			# dlogz=1.0,
			# update_interval=1,
			maxrejcall=maxrejcall,
			)
		p,cov = nestle.mean_and_cov(result.samples,result.weights)
		return result,p,cov
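Note: user_sample and maxrejcall are not keyword arguments of the released nestle.sample, so this snippet appears to depend on a patched fork of nestle and will not run against the stock package.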
Example #14
def nest_lc(data,
            model,
            vparam_names,
            bounds,
            guess_amplitude_bound=False,
            minsnr=5.,
            priors=None,
            ppfs=None,
            npoints=100,
            method='single',
            maxiter=None,
            maxcall=None,
            modelcov=False,
            rstate=None,
            verbose=False,
            warn=True,
            **kwargs):
    """Run nested sampling algorithm to estimate model parameters and evidence.

    Parameters
    ----------
    data : `~astropy.table.Table` or `~numpy.ndarray` or `dict`
        Table of photometric data. Must include certain columns.
        See the "Photometric Data" section of the documentation for
        required columns.
    model : `~sncosmo.Model`
        The model to fit.
    vparam_names : list
        Model parameters to vary in the fit.
    bounds : `dict`
        Bounded range for each parameter. Bounds must be given for
        each parameter, with the exception of ``t0``: by default, the
        minimum bound is such that the latest phase of the model lines
        up with the earliest data point and the maximum bound is such
        that the earliest phase of the model lines up with the latest
        data point.
    guess_amplitude_bound : bool, optional
        If true, bounds for the model's amplitude parameter are determined
        automatically based on the data and do not need to be included in
        `bounds`. The lower limit is set to zero and the upper limit is 10
        times the amplitude "guess" (which is based on the highest-flux
        data point in any band). Default is False.
    minsnr : float, optional
        Minimum signal-to-noise ratio of data points to use when guessing
        amplitude bound. Default is 5.
    priors : `dict`, optional
        Prior probability distribution function for each parameter. The keys
        should be parameter names and the values should be callables that
        accept a float. If a parameter is not in the dictionary, the prior
        defaults to a flat distribution between the bounds.
    ppfs : `dict`, optional
        Prior percent point function (inverse of the cumulative distribution
        function) for each parameter. If a parameter is in this dictionary,
        the ppf takes precedence over a prior pdf specified in ``priors``.
    npoints : int, optional
        Number of active samples to use. Increasing this value increases
        the accuracy (due to denser sampling) and also the time
        to solution.
    method : {'classic', 'single', 'multi'}, optional
        Method used to select new points. Choices are 'classic',
        single-ellipsoidal ('single'), multi-ellipsoidal ('multi'). Default
        is 'single'.
    maxiter : int, optional
        Maximum number of iterations. Iteration may stop earlier if
        termination condition is reached. Default is no limit.
    maxcall : int, optional
        Maximum number of likelihood evaluations. Iteration may stop earlier
        if termination condition is reached. Default is no limit.
    modelcov : bool, optional
        Include model covariance when calculating chisq. Default is False.
    rstate : `~numpy.random.RandomState`, optional
        RandomState instance. If not given, the global random state of the
        ``numpy.random`` module will be used.
    verbose : bool, optional
        Print running evidence sum on a single line.
    warn : bool, optional
        Issue warning when dropping bands outside the model range. Default is
        True.

        *New in version 1.5.0*

    Returns
    -------
    res : Result
        Attributes are:

        * ``niter``: total number of iterations
        * ``ncall``: total number of likelihood function calls
        * ``time``: time in seconds spent in iteration loop.
        * ``logz``: natural log of the Bayesian evidence Z.
        * ``logzerr``: estimate of uncertainty in logz (due to finite sampling)
        * ``h``: Bayesian information.
        * ``vparam_names``: list of parameter names varied.
        * ``samples``: 2-d `~numpy.ndarray`, shape is (nsamples, nparameters).
          Each row is the parameter values for a single sample. For example,
          ``samples[0, :]`` is the parameter values for the first sample.
        * ``logprior``: 1-d `~numpy.ndarray` (length=nsamples);
          log(prior volume) for each sample.
        * ``logl``: 1-d `~numpy.ndarray` (length=nsamples); log(likelihood)
          for each sample.
        * ``weights``: 1-d `~numpy.ndarray` (length=nsamples);
          Weight corresponding to each sample. The weight is proportional to
          the prior * likelihood for the sample.
        * ``parameters``: 1-d `~numpy.ndarray` of weighted-mean parameter
          values from samples (including fixed parameters). Order corresponds
          to ``model.param_names``.
        * ``covariance``: 2-d `~numpy.ndarray` of parameter covariance;
          indices correspond to order of ``vparam_names``. Calculated from
          ``samples`` and ``weights``.
        * ``errors``: OrderedDict of varied parameter uncertainties.
          Corresponds to square root of diagonal entries in covariance matrix.
        * ``ndof``: Number of degrees of freedom (len(data) -
          len(vparam_names)).
        * ``bounds``: Dictionary of bounds on varied parameters (including
          any automatically determined bounds).
        * ``data_mask``: Boolean array the same length as data specifying
          whether each observation was used.
          *New in version 1.5.0.*

    estimated_model : `~sncosmo.Model`
        A copy of the model with parameters set to the values in
        ``res.parameters``.
    """

    try:
        import nestle
    except ImportError:
        raise ImportError("nest_lc() requires the nestle package.")

    # warnings
    if "nobj" in kwargs:
        warnings.warn("The nobj keyword is deprecated and will be removed in "
                      "sncosmo v2.0. Use `npoints` instead.")
        npoints = kwargs.pop("nobj")

    # experimental parameters
    tied = kwargs.get("tied", None)

    data = photometric_data(data)

    # sort by time
    if not np.all(np.ediff1d(data.time) >= 0.0):
        sortidx = np.argsort(data.time)
        data = data[sortidx]
    else:
        sortidx = None

    model = copy.copy(model)
    bounds = copy.copy(bounds)  # need to copy this b/c we modify it below

    # Order vparam_names the same way it is ordered in the model:
    vparam_names = [s for s in model.param_names if s in vparam_names]

    # Drop data that the model doesn't cover.
    fitdata, data_mask = cut_bands(data,
                                   model,
                                   z_bounds=bounds.get('z', None),
                                   warn=warn)

    if guess_amplitude_bound:
        if model.param_names[2] not in vparam_names:
            raise ValueError("Amplitude bounds guessing enabled but "
                             "amplitude parameter {0!r} is not varied".format(
                                 model.param_names[2]))
        if model.param_names[2] in bounds:
            raise ValueError("cannot supply bounds for parameter {0!r}"
                             " when guess_amplitude_bound=True".format(
                                 model.param_names[2]))

        # If redshift is bounded, set model redshift to midpoint of bounds
        # when doing the guess.
        if 'z' in bounds:
            model.set(z=sum(bounds['z']) / 2.)
        _, amplitude = guess_t0_and_amplitude(fitdata, model, minsnr)
        bounds[model.param_names[2]] = (0., 10. * amplitude)

    # Find t0 bounds to use, if not explicitly given
    if 't0' in vparam_names and 't0' not in bounds:
        bounds['t0'] = t0_bounds(fitdata, model)

    if ppfs is None:
        ppfs = {}
    if tied is None:
        tied = {}

    # Convert bounds/priors combinations into ppfs
    if bounds is not None:
        for key, val in six.iteritems(bounds):
            if key in ppfs:
                continue  # ppfs take priority over bounds/priors
            a, b = val
            if priors is not None and key in priors:
                # solve ppf at discrete points and return interpolating
                # function
                x_samples = np.linspace(0., 1., 101)
                ppf_samples = ppf(priors[key], x_samples, a, b)
                f = Interp1D(0., 1., ppf_samples)
            else:
                f = Interp1D(0., 1., np.array([a, b]))
            ppfs[key] = f

    # NOTE: It is important that iparam_names is in the same order
    # every time, otherwise results will not be reproducible, even
    # with same random seed.  This is because iparam_names[i] is
    # matched to u[i] below and u will be in a reproducible order,
    # so iparam_names must also be.
    iparam_names = [key for key in vparam_names if key in ppfs]
    ppflist = [ppfs[key] for key in iparam_names]
    npdim = len(iparam_names)  # length of u
    ndim = len(vparam_names)  # length of v

    # Check that all param_names either have a direct prior or are tied.
    for name in vparam_names:
        if name in iparam_names:
            continue
        if name in tied:
            continue
        raise ValueError(
            "Must supply ppf or bounds or tied for parameter '{}'".format(
                name))

    def prior_transform(u):
        d = {}
        for i in range(npdim):
            d[iparam_names[i]] = ppflist[i](u[i])
        v = np.empty(ndim, dtype=float)
        for i in range(ndim):
            key = vparam_names[i]
            if key in d:
                v[i] = d[key]
            else:
                v[i] = tied[key](d)
        return v

    # Indices of the model parameters in vparam_names
    idx = np.array([model.param_names.index(name) for name in vparam_names])

    def loglike(parameters):
        model.parameters[idx] = parameters
        return -0.5 * chisq(fitdata, model, modelcov=modelcov)

    t0 = time.time()
    res = nestle.sample(loglike,
                        prior_transform,
                        ndim,
                        npdim=npdim,
                        npoints=npoints,
                        method=method,
                        maxiter=maxiter,
                        maxcall=maxcall,
                        rstate=rstate,
                        callback=(nestle.print_progress if verbose else None))
    elapsed = time.time() - t0

    # estimate parameters and covariance from samples
    vparameters, cov = nestle.mean_and_cov(res.samples, res.weights)

    # update model parameters to estimated ones.
    model.set(**dict(zip(vparam_names, vparameters)))

    # If we need to, unsort the mask so mask applies to input data
    if sortidx is not None:
        unsort_idx = np.argsort(sortidx)  # indicies that will unsort array
        data_mask = data_mask[unsort_idx]

    # `res` is a nestle.Result object. Collect result into a sncosmo.Result
    # object for consistency, and add more fields.
    res = Result(
        niter=res.niter,
        ncall=res.ncall,
        logz=res.logz,
        logzerr=res.logzerr,
        h=res.h,
        samples=res.samples,
        weights=res.weights,
        logvol=res.logvol,
        logl=res.logl,
        vparam_names=copy.copy(vparam_names),
        ndof=len(fitdata) - len(vparam_names),
        bounds=bounds,
        time=elapsed,
        parameters=model.parameters.copy(),
        covariance=cov,
        errors=OrderedDict(zip(vparam_names, np.sqrt(np.diagonal(cov)))),
        param_dict=OrderedDict(zip(model.param_names, model.parameters)),
        data_mask=data_mask)

    # Deprecated result fields.
    depmsg = ("The `param_names` attribute is deprecated in sncosmo v1.0 "
              "and will be removed in sncosmo v2.0."
              "Use `vparam_names` instead.")
    res.__dict__['deprecated']['param_names'] = (res.vparam_names, depmsg)

    depmsg = ("The `logprior` attribute is deprecated in sncosmo v1.2 "
              "and will be changed in sncosmo v2.0."
              "Use `logvol` instead.")
    res.__dict__['deprecated']['logprior'] = (res.logvol, depmsg)

    return res, model
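A hedged usage sketch in the style of sncosmo's fitting examples; the dataset, source name, and bound values below are illustrative assumptions, not part of the function above:

import sncosmo

data = sncosmo.load_example_data()
model = sncosmo.Model(source='salt2')
res, fitted_model = nest_lc(data, model,
                            ['z', 't0', 'x0', 'x1', 'c'],
                            bounds={'z': (0.3, 0.7),
                                    'x1': (-3., 3.),
                                    'c': (-0.3, 0.3)},
                            guess_amplitude_bound=True,
                            npoints=100)
print(res.logz, res.errors)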
Example #15
File: plots.py Project: bamford/gz_mel
def print_nestle(res, par, model, x, y, yerror, outfile=None):
    print(res.summary())
    print(nestle.mean_and_cov(res.samples, res.weights))
    plot_triangle(res.samples, par, model, x, y, yerror, weights=res.weights)
    if outfile is not None:
        plt.savefig(outfile)
Example #16
File: negfc_nested.py Project: avigan/VIP
def nested_sampling_results(ns_object,
                            burnin=0.4,
                            bins=None,
                            cfd=68.27,
                            save=False,
                            output_dir='/',
                            plot=False):
    """ Shows the results of the Nested Sampling, summary, parameters with 
    errors, walk and corner plots.
    
    Parameters
    ----------
    ns_object: nestle.Result
        The nestle result object returned from `nested_spec_sampling`.
    burnin: float, default: 0.4
        The fraction of the samples we want to discard.
    bins: int, optional
        The number of bins used to sample the posterior distributions.
    cfd: float, optional
        The confidence level given in percentage.
    save: boolean, default: False
        If True, a pdf file is created.
    output_dir: str, optional
        The name of the output directory which contains the output files in the 
        case  ``save`` is True. 
    plot: bool, optional
        Whether to show the plots (instead of saving them).
                    
    Returns
    -------
    final_res: numpy ndarray
         Best-fit parameters and uncertainties (corresponding to 68% confidence
         interval). Dimensions: nparams x 2.
         
    """
    res = ns_object
    nsamples = res.samples.shape[0]
    indburnin = int(np.percentile(np.array(range(nsamples)), burnin * 100))

    print(res.summary())

    print(
        '\nNatural log of prior volume and Weight corresponding to each sample'
    )
    if save or plot:
        plt.figure(figsize=(12, 4))
        plt.subplot(1, 2, 1)
        plt.plot(res.logvol, '.', alpha=0.5, color='gray')
        plt.xlabel('samples')
        plt.ylabel('logvol')
        plt.vlines(indburnin,
                   res.logvol.min(),
                   res.logvol.max(),
                   linestyles='dotted')
        plt.subplot(1, 2, 2)
        plt.plot(res.weights, '.', alpha=0.5, color='gray')
        plt.xlabel('samples')
        plt.ylabel('weights')
        plt.vlines(indburnin,
                   res.weights.min(),
                   res.weights.max(),
                   linestyles='dotted')
        if save:
            plt.savefig(output_dir + 'Nested_results.pdf')
        if plot:
            plt.show()

        print("\nWalk plots before the burnin")
        show_walk_plot(np.expand_dims(res.samples, axis=0))
        if burnin > 0:
            print("\nWalk plots after the burnin")
            show_walk_plot(np.expand_dims(res.samples[indburnin:], axis=0))
        if save:
            plt.savefig(output_dir + 'Nested_walk_plots.pdf')

    mean, cov = nestle.mean_and_cov(res.samples[indburnin:],
                                    res.weights[indburnin:])
    print("\nWeighted mean +- sqrt(covariance)")
    print("Radius = {:.3f} +/- {:.3f}".format(mean[0], np.sqrt(cov[0, 0])))
    print("Theta = {:.3f} +/- {:.3f}".format(mean[1], np.sqrt(cov[1, 1])))
    print("Flux = {:.3f} +/- {:.3f}".format(mean[2], np.sqrt(cov[2, 2])))

    if save:
        with open(output_dir + 'Nested_sampling.txt', "w") as f:
            f.write('#################################\n')
            f.write('####   CONFIDENCE INTERVALS   ###\n')
            f.write('#################################\n')
            f.write(' \n')
            f.write('Results of the NESTED SAMPLING fit\n')
            f.write('----------------------------------\n ')
            f.write(' \n')
            f.write("\nWeighted mean +- sqrt(covariance)\n")
            f.write("Radius = {:.3f} +/- {:.3f}\n".format(
                mean[0], np.sqrt(cov[0, 0])))
            f.write("Theta = {:.3f} +/- {:.3f}\n".format(
                mean[1], np.sqrt(cov[1, 1])))
            f.write("Flux = {:.3f} +/- {:.3f}\n".format(
                mean[2], np.sqrt(cov[2, 2])))

    if bins is None:
        bins = int(np.sqrt(res.samples[indburnin:].shape[0]))
        print("\nHist bins =", bins)

    if save or plot:
        ranges = None
        fig = corner.corner(res.samples[indburnin:],
                            bins=bins,
                            labels=["$r$", r"$\theta$", "$f$"],
                            weights=res.weights[indburnin:],
                            range=ranges,
                            plot_contours=True)
        fig.set_size_inches(8, 8)
    if save:
        plt.savefig(output_dir + 'Nested_corner.pdf')

    print('\nConfidence intervals')
    if save or plot:
        _ = confidence(res.samples[indburnin:],
                       cfd=cfd,
                       bins=bins,
                       weights=res.weights[indburnin:],
                       gaussian_fit=True,
                       verbose=True,
                       save=False)

    if save:
        plt.savefig(output_dir + 'Nested_confi_hist_flux_r_theta_gaussfit.pdf')

    final_res = np.array([[mean[0], np.sqrt(cov[0, 0])],
                          [mean[1], np.sqrt(cov[1, 1])],
                          [mean[2], np.sqrt(cov[2, 2])]])
    return final_res
Example #17
File: plot_line.py Project: smutch/nestle
# The likelihood function:
def loglike(theta):
    return -0.5 * (np.sum((y - model(theta, x))**2 / yerr**2))


# Defines a flat prior in 0 < m < 1, 0 < b < 100:
def prior_transform(theta):
    return np.array([1., 100.]) * theta


# Run nested sampling
res = nestle.sample(loglike, prior_transform, 2, method='single', npoints=1000)
print(res.summary())

# weighted average and covariance:
p, cov = nestle.mean_and_cov(res.samples, res.weights)

print("m = {0:5.2f} +/- {1:5.2f}".format(p[0], np.sqrt(cov[0, 0])))
print("b = {0:5.2f} +/- {1:5.2f}".format(p[1], np.sqrt(cov[1, 1])))

plt.figure()
plt.errorbar(x, y, yerr=yerr, capsize=0, fmt='k.', ecolor='.7')
plt.plot([0., 10.], model(p, np.array([0., 10.])), c='k')
plt.show()

###############################################################################
# Plot samples to see the full posterior surface.

fig = corner.corner(res.samples,
                    weights=res.weights,
                    labels=['m', 'b'],
                    range=[0.99999, 0.99999],
                    truths=theta_true,
                    bins=30)
plt.show()
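This script assumes x, y, yerr, model, and theta_true were defined earlier in plot_line.py. A representative setup (the true parameter values here are illustrative assumptions):

import numpy as np
import matplotlib.pyplot as plt
import corner
import nestle

np.random.seed(0)
theta_true = [0.5, 40.0]            # assumed true slope m and intercept b
x = 10. * np.random.rand(50)
yerr = np.full(x.shape, 3.0)

def model(theta, x):
    # Straight line y = m*x + b.
    m, b = theta
    return m * x + b

y = model(theta_true, x) + yerr * np.random.randn(len(x))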
Example #18
 def __load_mean_and_covariance(self, results):
     self.__means, self.__covariance = nestle.mean_and_cov(
         results.samples, results.weights
     )
Example #19
def nest_lc(data, model, vparam_names, bounds, guess_amplitude_bound=False,
            minsnr=5., priors=None, ppfs=None, npoints=100, method='single',
            maxiter=None, maxcall=None, modelcov=False, rstate=None,
            verbose=False, **kwargs):
    """Run nested sampling algorithm to estimate model parameters and evidence.

    Parameters
    ----------
    data : `~astropy.table.Table` or `~numpy.ndarray` or `dict`
        Table of photometric data. Must include certain columns.
        See the "Photometric Data" section of the documentation for
        required columns.
    model : `~sncosmo.Model`
        The model to fit.
    vparam_names : list
        Model parameters to vary in the fit.
    bounds : `dict`
        Bounded range for each parameter. Bounds must be given for
        each parameter, with the exception of ``t0``: by default, the
        minimum bound is such that the latest phase of the model lines
        up with the earliest data point and the maximum bound is such
        that the earliest phase of the model lines up with the latest
        data point.
    guess_amplitude_bound : bool, optional
        If true, bounds for the model's amplitude parameter are determined
        automatically based on the data and do not need to be included in
        `bounds`. The lower limit is set to zero and the upper limit is 10
        times the amplitude "guess" (which is based on the highest-flux
        data point in any band). Default is False.
    minsnr : float, optional
        Minimum signal-to-noise ratio of data points to use when guessing
        amplitude bound. Default is 5.
    priors : `dict`, optional
        Prior probability distribution function for each parameter. The keys
        should be parameter names and the values should be callables that
        accept a float. If a parameter is not in the dictionary, the prior
        defaults to a flat distribution between the bounds.
    ppfs : `dict`, optional
        Prior percent point function (inverse of the cumulative distribution
        function) for each parameter. If a parameter is in this dictionary,
        the ppf takes precedence over a prior pdf specified in ``priors``.
    npoints : int, optional
        Number of active samples to use. Increasing this value increases
        the accuracy (due to denser sampling) and also the time
        to solution.
    method : {'classic', 'single', 'multi'}, optional
        Method used to select new points. Choices are 'classic',
        single-ellipsoidal ('single'), multi-ellipsoidal ('multi'). Default
        is 'single'.
    maxiter : int, optional
        Maximum number of iterations. Iteration may stop earlier if
        termination condition is reached. Default is no limit.
    maxcall : int, optional
        Maximum number of likelihood evaluations. Iteration may stop earlier
        if termination condition is reached. Default is no limit.
    modelcov : bool, optional
        Include model covariance when calculating chisq. Default is False.
    rstate : `~numpy.random.RandomState`, optional
        RandomState instance. If not given, the global random state of the
        ``numpy.random`` module will be used.
    verbose : bool, optional
        Print running evidence sum on a single line.

    Returns
    -------
    res : Result
        Attributes are:

        * ``niter``: total number of iterations
        * ``ncall``: total number of likelihood function calls
        * ``time``: time in seconds spent in iteration loop.
        * ``logz``: natural log of the Bayesian evidence Z.
        * ``logzerr``: estimate of uncertainty in logz (due to finite sampling)
        * ``h``: Bayesian information.
        * ``vparam_names``: list of parameter names varied.
        * ``samples``: 2-d `~numpy.ndarray`, shape is (nsamples, nparameters).
          Each row is the parameter values for a single sample. For example,
          ``samples[0, :]`` is the parameter values for the first sample.
        * ``logprior``: 1-d `~numpy.ndarray` (length=nsamples);
          log(prior volume) for each sample.
        * ``logl``: 1-d `~numpy.ndarray` (length=nsamples); log(likelihood)
          for each sample.
        * ``weights``: 1-d `~numpy.ndarray` (length=nsamples);
          Weight corresponding to each sample. The weight is proportional to
          the prior * likelihood for the sample.
        * ``parameters``: 1-d `~numpy.ndarray` of weighted-mean parameter
          values from samples (including fixed parameters). Order corresponds
          to ``model.param_names``.
        * ``covariance``: 2-d `~numpy.ndarray` of parameter covariance;
          indices correspond to order of ``vparam_names``. Calculated from
          ``samples`` and ``weights``.
        * ``errors``: OrderedDict of varied parameter uncertainties.
          Corresponds to square root of diagonal entries in covariance matrix.
        * ``ndof``: Number of degrees of freedom (len(data) -
          len(vparam_names)).
        * ``bounds``: Dictionary of bounds on varied parameters (including
          any automatically determined bounds).

    estimated_model : `~sncosmo.Model`
        A copy of the model with parameters set to the values in
        ``res.parameters``.
    """

    try:
        import nestle
    except ImportError:
        raise ImportError("nest_lc() requires the nestle package.")

    if "nobj" in kwargs:
        warn("The nobj keyword is deprecated and will be removed in a future "
             "sncosmo release. Use `npoints` instead.")
        npoints = kwargs.pop("nobj")

    # experimental parameters
    tied = kwargs.get("tied", None)

    data = photometric_data(data)
    data.sort_by_time()
    model = copy.copy(model)
    bounds = copy.copy(bounds)  # need to copy this b/c we modify it below

    # Order vparam_names the same way it is ordered in the model:
    vparam_names = [s for s in model.param_names if s in vparam_names]

    # Drop data that the model doesn't cover.
    data = cut_bands(data, model, z_bounds=bounds.get('z', None))

    if guess_amplitude_bound:
        if model.param_names[2] not in vparam_names:
            raise ValueError("Amplitude bounds guessing enabled but "
                             "amplitude parameter {0!r} is not varied"
                             .format(model.param_names[2]))
        if model.param_names[2] in bounds:
            raise ValueError("cannot supply bounds for parameter {0!r}"
                             " when guess_amplitude_bound=True"
                             .format(model.param_names[2]))

        # If redshift is bounded, set model redshift to midpoint of bounds
        # when doing the guess.
        if 'z' in bounds:
            model.set(z=sum(bounds['z']) / 2.)
        _, amplitude = guess_t0_and_amplitude(data, model, minsnr)
        bounds[model.param_names[2]] = (0., 10. * amplitude)

    # Find t0 bounds to use, if not explicitly given
    if 't0' in vparam_names and 't0' not in bounds:
        bounds['t0'] = t0_bounds(data, model)

    if ppfs is None:
        ppfs = {}
    if tied is None:
        tied = {}

    # Convert bounds/priors combinations into ppfs
    if bounds is not None:
        for key, val in six.iteritems(bounds):
            if key in ppfs:
                continue  # ppfs take priority over bounds/priors
            a, b = val
            if priors is not None and key in priors:
                # solve ppf at discrete points and return interpolating
                # function
                x_samples = np.linspace(0., 1., 101)
                ppf_samples = ppf(priors[key], x_samples, a, b)
                f = Interp1D(0., 1., ppf_samples)
            else:
                f = Interp1D(0., 1., np.array([a, b]))
            ppfs[key] = f

    # NOTE: It is important that iparam_names is in the same order
    # every time, otherwise results will not be reproducible, even
    # with same random seed.  This is because iparam_names[i] is
    # matched to u[i] below and u will be in a reproducible order,
    # so iparam_names must also be.
    iparam_names = [key for key in vparam_names if key in ppfs]
    ppflist = [ppfs[key] for key in iparam_names]
    npdim = len(iparam_names)  # length of u
    ndim = len(vparam_names)  # length of v

    # Check that all param_names either have a direct prior or are tied.
    for name in vparam_names:
        if name in iparam_names:
            continue
        if name in tied:
            continue
        raise ValueError("Must supply ppf or bounds or tied for parameter '{}'"
                         .format(name))

    def prior_transform(u):
        d = {}
        for i in range(npdim):
            d[iparam_names[i]] = ppflist[i](u[i])
        v = np.empty(ndim, dtype=float)
        for i in range(ndim):
            key = vparam_names[i]
            if key in d:
                v[i] = d[key]
            else:
                v[i] = tied[key](d)
        return v

    # Indices of the model parameters in vparam_names
    idx = np.array([model.param_names.index(name) for name in vparam_names])

    def loglike(parameters):
        model.parameters[idx] = parameters
        return -0.5 * chisq(data, model, modelcov=modelcov)

    t0 = time.time()
    res = nestle.sample(loglike, prior_transform, ndim, npdim=npdim,
                        npoints=npoints, method=method, maxiter=maxiter,
                        maxcall=maxcall, rstate=rstate,
                        callback=(nestle.print_progress if verbose else None))
    elapsed = time.time() - t0

    # estimate parameters and covariance from samples
    vparameters, cov = nestle.mean_and_cov(res.samples, res.weights)

    # update model parameters to estimated ones.
    model.set(**dict(zip(vparam_names, vparameters)))

    # `res` is a nestle.Result object. Collect result into a sncosmo.Result
    # object for consistency, and add more fields.
    res = Result(niter=res.niter,
                 ncall=res.ncall,
                 logz=res.logz,
                 logzerr=res.logzerr,
                 h=res.h,
                 samples=res.samples,
                 weights=res.weights,
                 logvol=res.logvol,
                 logl=res.logl,
                 vparam_names=copy.copy(vparam_names),
                 ndof=len(data) - len(vparam_names),
                 bounds=bounds,
                 time=elapsed,
                 parameters=model.parameters.copy(),
                 covariance=cov,
                 errors=OrderedDict(zip(vparam_names,
                                        np.sqrt(np.diagonal(cov)))),
                 param_dict=OrderedDict(zip(model.param_names,
                                            model.parameters)))

    # Deprecated result fields.
    depmsg = ("The `param_names` attribute is deprecated in sncosmo v1.0 "
              "and will be removed in a future release. "
              "Use `vparam_names` instead.")
    res.__dict__['deprecated']['param_names'] = (res.vparam_names, depmsg)

    depmsg = ("The `logprior` attribute is deprecated in sncosmo v1.2 "
              "and will be changed in a future release. "
              "Use `logvol` instead.")
    res.__dict__['deprecated']['logprior'] = (res.logvol, depmsg)

    return res, model
Example #21
 def __save_cov_data(save_name, results):
     mean, cov = nestle.mean_and_cov(results.samples, results.weights)
     numpy.save(save_name + ".npy", {"mean": mean, "cov": cov})
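Because a dict is passed to numpy.save, NumPy pickles it into a 0-d object array; reading it back requires allow_pickle=True and .item():

d = numpy.load(save_name + ".npy", allow_pickle=True).item()
mean, cov = d["mean"], d["cov"]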
Example #22
def loglike(theta):
    return -0.5*(np.sum((y-model(theta, x))**2/yerr**2))


# Defines a flat prior in 0 < m < 1, 0 < b < 100:
def prior_transform(theta):
    return np.array([1., 100.]) * theta


# Run nested sampling
res = nestle.sample(loglike, prior_transform, 2, method='single',
                    npoints=1000)
print(res.summary())

# weighted average and covariance:
p, cov = nestle.mean_and_cov(res.samples, res.weights)

print("m = {0:5.2f} +/- {1:5.2f}".format(p[0], np.sqrt(cov[0, 0])))
print("b = {0:5.2f} +/- {1:5.2f}".format(p[1], np.sqrt(cov[1, 1])))

plt.figure()
plt.errorbar(x, y, yerr=yerr, capsize=0, fmt='k.', ecolor='.7')
plt.plot([0., 10.], model(p, np.array([0., 10.])), c='k')
plt.show()

###############################################################################
# Plot samples to see the full posterior surface.

fig = corner.corner(res.samples, weights=res.weights, labels=['m', 'b'],
                    range=[0.99999, 0.99999], truths=theta_true, bins=30)
plt.show()