Example #1
    def load_scan(self, run_tag=None):
        """ Load the details of a scan, if not already loaded
            This sets up the output of the scan in a format useful for Analysis
        """

        if self.already_loaded:
            pass
        else:
            self.run_tag = run_tag
            self.make_dirs_for_run(run_tag)

            # Load the statistics and samples
            self.a = \
                pymultinest.Analyzer(n_params=self.n_params,
                                     outputfiles_basename=self.chains_dir_for_run)
            self.s = self.a.get_stats()
            self.chain_file = self.chains_dir_for_run + '/post_equal_weights.dat'
            self.samples = np.array(np.loadtxt(self.chain_file)[:, :-1])

            # Determine the median value of each parameter
            self.medians = [
                self.s['marginals'][i]['median'] for i in range(self.n_params)
            ]

            # Determine the template normalisations
            self.calculate_norms()

            # Account for fixed parameters if necessary
            if len(self.fixed_params_list) > 0:
                self.fix_samples()
                self.fix_medians()
                self.n_params += len(self.fixed_params_list)
                self.fix_model_decompression()

            self.already_loaded = True
Example #2
    def mnest_analyzer(self):
        """
        PyMultiNest Analyzer object associated with fit.

        See PyMultiNest documentation for more.
        """
        return pymultinest.Analyzer(self.n_params, self.mnest_basename)
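
A minimal usage sketch for the Analyzer returned above (assumptions: `fit` is a hypothetical object exposing mnest_analyzer(), and the MultiNest output files named by mnest_basename already exist on disk):

# Hypothetical usage of the Analyzer returned by mnest_analyzer()
a = fit.mnest_analyzer()
stats = a.get_stats()                                # evidence, marginals, modes
samples = a.get_equal_weighted_posterior()[:, :-1]   # drop trailing log-likelihood column
print('ln Z = %.2f +- %.2f' % (stats['global evidence'],
                               stats['global evidence error']))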
Example #3
def plotMarginal():
    import numpy as np
    import pymultinest
    from matplotlib import pyplot as plt
    outputFiles_rawbase = input('Type path to chains folder: ')
    prefix = '%s/chains/1-'%outputFiles_rawbase
    
    open_params = open('%s/chains/.tmp_myCode/parameters.txt'%outputFiles_rawbase)
    paramNames = open_params.read().split()
    open_params.close()
    
    n_params = len(paramNames)

    a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename = prefix)
    s = a.get_stats()

    p = pymultinest.PlotMarginalModes(a)

    for i in range(1,n_params):
#        plt.subplot(n_params, n_params, n_params * i + i + 1)
        #p.plot_marginal(i, ls='-', color='blue', linewidth=3)
        #p.plot_marginal(i, with_ellipses = True, dim2 = True, with_points = False, grid_points=50)
        
        
        for j in range(i):

            plt.subplot(n_params-1, n_params-1, (n_params-1) * j + i)
            p.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30)
            
        plt.subplot(n_params-1,n_params-1,(n_params-1)*(i-1) + i)
        plt.xlabel(paramNames[i])
        plt.ylabel(paramNames[i-1])

    plt.show()
Example #4
def lfit(xx, yy, error, bounds=[-10., 10., -10., 10.]):
    # linear fit with nested sampling

    # prevents seg fault in MultiNest
    error[error == 0] = np.mean(error[error != 0])

    def model_lin(params):
        return params[0] + xx * params[1]

    def myprior_lin(cube, ndim, n_params):
        '''This transforms a unit cube into the dimensions of your prior space.'''
        cube[0] = (bounds[1] - bounds[0]) * cube[0] + bounds[0]
        cube[1] = (bounds[3] - bounds[2]) * cube[1] + bounds[2]

    def myloglike_lin(cube, ndim, n_params):
        loglike = -np.sum(((yy - model_lin(cube)) / error)**2)
        return loglike / 2.

    pymultinest.run(
        myloglike_lin,
        myprior_lin,
        2,
        resume=False,
        sampling_efficiency=0.5,
        evidence_tolerance=0.1,
    )

    # retrieves the data that has been written to hard drive
    a = pymultinest.Analyzer(n_params=2)
    posteriors = a.get_data()
    stats = a.get_stats()
    stats['marginals'] = get_stats(posteriors)

    return stats, posteriors
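
The get_stats(posteriors) call above refers to a helper defined elsewhere in the same module. A rough, hypothetical stand-in (assuming the standard column layout of Analyzer.get_data(): sample weight, -2 ln L, then the parameters) could look like:

import numpy as np

def summarize_posteriors(posteriors):
    """Hypothetical stand-in for the external get_stats() helper:
    weighted median and 1-sigma interval for each parameter."""
    weights = posteriors[:, 0]      # importance weights
    params = posteriors[:, 2:]      # skip the weight and -2*lnL columns
    marginals = []
    for col in params.T:
        order = np.argsort(col)
        cdf = np.cumsum(weights[order]) / np.sum(weights)
        lo, med, hi = np.interp([0.1587, 0.5, 0.8413], cdf, col[order])
        marginals.append({'median': med, '1sigma': [lo, hi]})
    return marginals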
Example #5
def check_finesse(folder, recover=False):
    data = h5_2_dict(join(folder, 'input_finesse.h5'))
    ix = data['fit_ix']
    Lpost, dpost = read_Ld_results(data['Ld_folder'])

    i = np.random.choice(len(Lpost))
    L = Lpost[i]
    d = dpost[i]
    r = data['r']
    sig = data['sig']

    analyzer = pymultinest.Analyzer(5,
                                    outputfiles_basename=join(
                                        folder, "finesse_"))
    modes = analyzer.get_mode_stats()
    #F, A, Arel, Ti, offset = modes['modes'][0]['mean']
    F, A, Arel, Ti = modes['modes'][0]['mean']
    post = analyzer.get_equal_weighted_posterior()
    w0 = [487.873302, 487.98634]
    mu = [232.03806, 39.948]

    Lpost = Lpost[::30]
    dpost = dpost[::30]

    Fpost = post[::30, 0]
    Apost = post[::30, 1]
    Arelpost = post[::30, 2]
    Tipost = post[::30, 3]
    #offsetpost = post[::30, 4]

    #offset_mean = np.mean(post[:, 4])
    #offset_sd = np.std(post[:, 4])
    print('F: {0:f} +/- {1:f}'.format(np.mean(post[:, 0]), np.std(post[:, 0])))
    print('A: {0:e} +/- {1:e}'.format(np.mean(post[:, 1]), np.std(post[:, 1])))
    print('Arel: {0:f} +/- {1:f}'.format(np.mean(post[:, 2]), np.std(post[:, 2])))
    print('Ti Ar: {0:f} +/- {1:f}'.format(np.mean(post[:, 3]),
                                          np.std(post[:, 3])))
    #print('offset: {0:e} +/- {1:e}'.format(offset_mean, offset_sd))

    if recover:
        try:
            post_dict = h5_2_dict(join(folder, "finesse_solver_model_post.h5"))
            sig_post = post_dict["signal post"]
            new_r = post_dict['new_r']
        except IOError:
            print(
                "Can't recover finesse solver q posterior. Calculating from scratch."
            )
            #sig_post = calculate_signal_post(r[ix], Lpost, dpost, Fpost, Apost, Arelpost, Tipost, offsetpost, w0, mu)
            new_r = np.linspace(0, 900, 1000)
            # sig_post = calculate_signal_post(r[ix], Lpost, dpost, Fpost, Apost, Arelpost, Tipost, w0, mu)
            sig_post = calculate_signal_post(new_r, Lpost, dpost, Fpost, Apost,
                                             Arelpost, Tipost, w0, mu)
            dict_2_h5(join(folder, "finesse_solver_model_post.h5"), {
                'signal post': sig_post,
                'new_r': new_r
            })
Example #6
def get_1sigma(outputpath, param, shortlong):
    """
    outputpath  : path to the MultiNest output files for a star (str)
    param       : name of parameter (str)
    shortlong   : 'short' or 'long'

    Retrieve the posterior mean and 1 sigma errors of the
    parameter of choice for a given star.
    Returns [mean, low 1 sigma error, high 1 sigma error]
    """

    if shortlong == 'short':
        if 'long' in outputpath:
            outputpath = outputpath.replace("/long/", "/short/")
            print(outputpath)
        # a = Analyzer(23, outputfiles_basename = outputpath + "/test-")
        a = pymultinest.Analyzer(23,
                                 outputfiles_basename=outputpath + "/test-")
        params = [
            'C1', 'Trim_min', 'qrim', 'C2', 'Tmid_min', 'Tmid_max', 'qmid',
            'Tatm_min', 'Tatm_max', 'qatm', 'Ol. 0.1', 'Ol. 2.0', 'Ol. 5.0',
            'Py. 0.1', 'Py. 2.0', 'Py. 5.0', 'Fo. 0.1', 'Fo. 2.0', 'En. 0.1',
            'En. 2.0', 'Si. 0.1', 'Si. 2.0', 'Si. 5.0'
        ]

    elif shortlong == 'long':
        # a = Analyzer(20, outputfiles_basename = outputpath + "/test-")
        a = pymultinest.Analyzer(20,
                                 outputfiles_basename=outputpath + "/test-")
        params = [
            'C2', 'Tmid_min', 'Tmid_max', 'qmid', 'Tatm_min', 'Tatm_max',
            'qatm', 'Ol. 0.1', 'Ol. 2.0', 'Ol. 5.0', 'Py. 0.1', 'Py. 2.0',
            'Py. 5.0', 'Fo. 0.1', 'Fo. 2.0', 'En. 0.1', 'En. 2.0', 'Si. 0.1',
            'Si. 2.0', 'Si. 5.0'
        ]
    i = params.index(param)
    stats = a.get_stats()
    # print(stats['modes'][0]['maximum a posterior'][i])
    p = stats['modes'][0]['mean'][i]
    pl, ph = stats['marginals'][i]['1sigma']
    return p, p - pl, ph - p
Example #7
def my_curvfit(function,
               prior,
               parameters,
               x,
               y,
               yerr1,
               yerr2=None,
               savedir=None,
               done=True):

    n_params = len(parameters)
    if yerr2 is not None:
        yerr2 = np.abs(yerr2)
    x = np.array(x)
    y = np.array(y)
    yerr1 = np.abs(yerr1)

    if savedir is None:
        savedir = '/home/laojin/software/my_python/curvfit/'
    if not os.path.exists(savedir):
        os.makedirs(savedir)

    def loglike(cube, ndim, nparams):

        ymodel = function(x, cube)
        if yerr2 is not None:
            err = []
            for index, value in enumerate(ymodel - y):
                if value > 0:
                    err.append(yerr2[index])
                else:
                    err.append(yerr1[index])
            err = np.array(err)
        else:
            err = yerr1

        loglikelihood = (-0.5 * ((ymodel - y) / err)**2).sum()
        return loglikelihood

    if not os.path.exists(savedir + 'spectrum_params.json') or done:
        pymultinest.run(loglike,
                        prior,
                        n_params,
                        outputfiles_basename=savedir + 'spectrum_',
                        resume=False,
                        verbose=True)
        json.dump(parameters, open(savedir + 'spectrum_params.json', 'w'))
    # Use the same basename that was passed to pymultinest.run above
    a = pymultinest.Analyzer(outputfiles_basename=savedir + 'spectrum_',
                             n_params=n_params)
    return a
Example #8
def get_previous_periods(n_planets):
    if n_planets == 0:
        return []
    basename = filename + '.out/%d/' % n_planets
    parameters = json.load(open('%sparams.json' % basename))
    a = pymultinest.Analyzer(n_params=len(parameters),
                             outputfiles_basename=basename)
    s = a.get_stats()
    # For every period parameter (name starting with 'P'), return its 5-sigma
    # interval widened by one interval width on each side.
    return [(m['5sigma'][0] - (m['5sigma'][1] - m['5sigma'][0]),
             m['5sigma'][1] + (m['5sigma'][1] - m['5sigma'][0]))
            for p, m in zip(parameters, s['marginals'])
            if p.startswith('P')]
Example #9
    def analyze(self):

        which = [0, 2, 3, 4, 5, 6]
        pars = [
            r'$v_\mathrm{Doppler}$', r'$B$', r'$\theta_B$', r'$\chi_B$',
            r'$\sigma_I$ [x10$^{-2}$]', r'$\sigma_{QUV}$ [x10$^{-2}$]'
        ]
        self.chains = pymultinest.Analyzer(n_params=self.nParams)
        samples = self.chains.get_equal_weighted_posterior()
        samples[:, -3:] *= 1e2
        fig, ax = pl.subplots(nrows=2, ncols=3, figsize=(16, 8))
        ax = ax.flatten()

        for i in range(self.nParams - 1):
            n, bins, patches = ax[i].hist(samples[:, which[i]], bins=20)
            pl.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
            ax[i].set_xlabel(pars[i])
        pl.savefig("bayesHazel.png")
Example #10
def analyze(inputDirectory, inputPrefix, outputDirectory, outputPrefix):
    '''
	Helper function for analyzing distributions from PyMultiNest. 

	The arguments are:

		inputDirectory	-	The directory containing the PyMultiNest output files to process.
							This should be the 'outputDirectory' that was used to generate those files.
		inputPrefix		-	The prefix for all input file names.
							This should be the 'outputPrefix' that was used to generate those files.
		outputDirectory	-	The directory in which to place output files.
		outputPrefix	-	The prefix for all output file names.

	The return value is a PyMultiNest Analyzer object.
	'''
    prefix = inputDirectory + '/' + inputPrefix
    outputPrefix = outputDirectory + '/' + outputPrefix
    parameters = json.load(open(prefix + 'params.json'))
    n_params = len(parameters)

    a = pymultinest.Analyzer(n_params=n_params, outputfiles_basename=prefix)
    s = a.get_stats()

    json.dump(s, open(outputPrefix + 'stats.json', 'w'), indent=4)

    print('  marginal likelihood:')
    print('    ln Z = %.1f +- %.1f' %
          (s['global evidence'], s['global evidence error']))
    print('  parameters:')
    meds = []
    for p, m in zip(parameters, s['marginals']):
        lo, hi = m['1sigma']
        med = m['median']
        meds.append(med)
        sigma = (hi - lo) / 2
        print(sigma)
        i = max(0, int(-np.floor(np.log10(sigma))) + 1)
        fmt = '%%.%df' % i
        fmts = '\t'.join(['    %-15s' + fmt + " +- " + fmt])
        print(fmts % (p, med, sigma))

    return a, meds, s['global evidence']
Example #11
    def get_system(self, path):
        sys = System(m_star=1, i=90)

        try:
            sys.info = json.load(open(path + 'info.json'))
        except IOError:
            warnings.warn('{}info.json does not exist'.format(path), UserWarning)

        try:
            sys.lightcurve = pd.read_json(path+'lightcurve.json')
        except ValueError:
            warnings.warn('{}lightcurve.json does not exist'.format(path), UserWarning)

        for i, planet in enumerate(sys.info):
            sys.add_planet(m=sys.info[planet]['m'], a = sys.info[planet]['a'], e=sys.info[planet]['e'],
                omega=sys.info[planet]['omega'], t0=sys.info[planet]['t0'])

            try:
                params = json.load(open(path + 'ps{}_params.json'.format(i+1)))
            except IOError:
                warnings.warn('{}ps{}_params.json does not exist'.format(path, i+1), UserWarning)
            try:
                sys.analyzer = pmn.Analyzer(len(sys.info) * 3,
                    outputfiles_basename = path + 'ps{}_'.format(i+1))
                bf_params = [sys.analyzer.get_stats()['modes'][0]['maximum']][0]
                # model_RVs = [
                #     sum([sys.calculate_RV(date, bf_params[3*i], bf_params[3*i+1],
                #     0, 0, bf_params[3*i+2])
                #     for i in range(len(sys.info))])[0]
                #     for date in sys.lightcurve.JD ]
                model_RVs = [
                    sum([sys.calculate_RV(date, 10**bf_params[3*i], 10**bf_params[3*i+1],
                    0, 0, bf_params[3*i+2])
                    for i in range(len(sys.info))])[0]
                    for date in sys.lightcurve.JD ]
                sys.lightcurve[planet] = model_RVs
            # except (IOError, IndexError, UnicodeDecodeError):
            except Exception:
                sys.analyzer = None
                warnings.warn('{}ps{}_.txt does not exist'.format(path, i+1), UserWarning)

        return sys
Example #12
def run_nested(spec, model, basename='run/test_run'):
    pymultinest.run(
        model.loglikelihood,
        model.prior_transform,
        model.n_params,
        outputfiles_basename=basename,
        resume=False,
        verbose=True,
        evidence_tolerance=0.3,
        n_live_points=400,
        sampling_efficiency=0.3,
        n_iter_before_update=2000,
    )
    analyzer = pymultinest.Analyzer(
        outputfiles_basename=basename,
        n_params=model.n_params,
    )
    lnZ = analyzer.get_stats()['global evidence']
    print(':: Evidence Z:', lnZ / np.log(10))
    return analyzer
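
run_nested only assumes that model exposes loglikelihood, prior_transform and n_params with the usual PyMultiNest callback signatures; a toy model satisfying that interface (hypothetical, not part of the original code) might look like:

class GaussianToyModel:
    """Hypothetical model object with the interface run_nested() expects."""
    n_params = 2

    def prior_transform(self, cube, ndim, nparams):
        # map the unit cube to uniform priors on [-5, 5]
        for i in range(ndim):
            cube[i] = 10.0 * cube[i] - 5.0

    def loglikelihood(self, cube, ndim, nparams):
        # two independent unit Gaussians centred on the origin
        return -0.5 * sum(cube[i] ** 2 for i in range(ndim))

# analyzer = run_nested(spec=None, model=GaussianToyModel(), basename='run/toy_')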
Example #13
def test_multinest():

    model_name = "LuminosityLikelihood"
    redshifts = [6, 7, 8, 10]
    F_STAR10 = [-1.3, -3, 0, 1.0]
    ALPHA_STAR = [0.5, -0.5, 1.0, 1.0]
    M_TURN = [8.69897, 8, 10, 1.0]
    t_STAR = [0.5, 0.01, 1, 0.3]

    mcmc_options = {
        "n_live_points": 10,
        "max_iter": 10,
    }
    mcmc.run_mcmc(
        [
            mcmc.CoreLuminosityFunction(redshift=z, sigma=0, name="lfz%d" % z)
            for z in redshifts
        ],
        [
            mcmc.LikelihoodLuminosityFunction(name="lfz%d" % z)
            for z in redshifts
        ],
        model_name=model_name,
        params={
            "F_STAR10": F_STAR10,
            "ALPHA_STAR": ALPHA_STAR,
            "M_TURN": M_TURN,
            "t_STAR": t_STAR,
        },
        use_multinest=True,
        **mcmc_options,
    )

    import pymultinest

    nest = pymultinest.Analyzer(4,
                                outputfiles_basename="./MultiNest/%s" %
                                model_name)
    data = nest.get_data()

    assert data.shape[1] == 6
Example #14
def run_pymultinest(likelihood_function, model, label, prior, n_params):
    # run MultiNest
    pymultinest.run(likelihood_function,
                    prior,
                    n_params,
                    outputfiles_basename=datafile + label,
                    resume=False)
    json.dump(parameters, open(datafile + label + 'params.json',
                               'w'))  # save parameter names

    run = pymultinest.Analyzer(outputfiles_basename=datafile + label,
                               n_params=n_params)

    a_lnZ = run.get_stats()['global evidence']
    print()
    print('************************')
    print('MAIN RESULT: Evidence Z ')
    print('************************')
    print(f'log Z for model {label} = {a_lnZ:.1f}\n')

    return a_lnZ
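
Since run_pymultinest returns the global log-evidence, it lends itself to model comparison; a hedged usage sketch (the likelihood, prior and model placeholders below are hypothetical, and datafile/parameters are assumed to be defined as in the snippet):

# Hypothetical comparison of two competing models via their log-evidences
lnZ_0 = run_pymultinest(loglike_null, null_model, 'null_', prior_null, n_params=2)
lnZ_1 = run_pymultinest(loglike_alt, alt_model, 'alt_', prior_alt, n_params=4)
lnB_10 = lnZ_1 - lnZ_0  # log Bayes factor of model 1 over model 0
print(f'ln B(1/0) = {lnB_10:.1f}')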
Example #15
def plotMarginal(n_params):
    import numpy as np
    import pymultinest
    from matplotlib import pyplot as plt
    outputFiles_rawbase = input('Type path to chains folder: ')
    prefix = '%s/chains/1-' % outputFiles_rawbase

    if n_params == 8:
        paramNames = [
            r'$\epsilon_e$', 'E0', 'n', r'$\Gamma_0$', '$\epsilon_B$', 'p',
            r'$\theta_0$', r'$\alpha$'
        ]

    a = pymultinest.Analyzer(n_params=n_params, outputfiles_basename=prefix)
    s = a.get_stats()

    p = pymultinest.PlotMarginalModes(a)

    for i in range(1, n_params):
        #        plt.subplot(n_params, n_params, n_params * i + i + 1)
        #p.plot_marginal(i, ls='-', color='blue', linewidth=3)
        #p.plot_marginal(i, with_ellipses = True, dim2 = True, with_points = False, grid_points=50)

        for j in range(i):

            plt.subplot(n_params - 1, n_params - 1, (n_params - 1) * j + i)
            p.plot_conditional(i,
                               j,
                               with_ellipses=False,
                               with_points=True,
                               grid_points=30)

        plt.subplot(n_params - 1, n_params - 1, (n_params - 1) * (i - 1) + i)
        plt.xlabel(paramNames[i])
        plt.ylabel(paramNames[i - 1])

    plt.show()
Example #16
def pyorbit_multinest(config_in, input_datasets=None, return_output=None):

    output_directory = './' + config_in['output'] + '/multinest/'

    mc = ModelContainerMultiNest()
    pars_input(config_in, mc, input_datasets)

    if mc.nested_sampling_parameters['shutdown_jitter']:
        for dataset_name, dataset in mc.dataset_dict.items():
            dataset.shutdown_jitter()

    mc.model_setup()
    mc.create_variables_bounds()
    mc.initialize_logchi2()

    mc.create_starting_point()

    results_analysis.results_resumen(mc, None, skip_theta=True)

    mc.output_directory = output_directory

    os.system("mkdir -p " + output_directory + mc.nested_sampling_parameters['base_dir'] + "/clusters")
    #os.system("mkdir -p " +polychord_dir_output + "chains/clusters")

    print()
    print('Reference Time Tref: ', mc.Tref)
    print()
    print('*************************************************************')
    print()

    '''
        On Linux system (BASH):
        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/lib
        export LD_PRELOAD=/usr/lib/openmpi/lib/libmpi.so:$LD_PRELOAD
        export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libgfortran.so.3
        mpirun -np 4 python run_PyPolyChord.py
    
        on Mac:
        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/lib
        export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libgfortran.so.3
        export LD_PRELOAD=/opt/local/lib/openmpi/lib/libmpi.so:$LD_PRELOAD
        mpirun -np 4 python run_PyPolyChord.py
    
    '''

    # parameters = mc.get_theta_dictionary()

    if 'nlive' in mc.nested_sampling_parameters:
        nlive = mc.nested_sampling_parameters['nlive']
    elif 'nlive_mult' in mc.nested_sampling_parameters:
        nlive = mc.ndim * mc.nested_sampling_parameters['nlive_mult']

    print(' Sampling efficiency: ', mc.nested_sampling_parameters['sampling_efficiency'])
    print(' N live points:', nlive)

    import pymultinest

    mnest_kwargs = dict(n_live_points=nlive, outputfiles_basename=output_directory + './')

    for key_name, key_value in mc.nested_sampling_parameters.items():
        if key_name in mc.pymultinest_signature:
            mnest_kwargs[key_name] = key_value

    print('Including priors to log-likelihood calculation (must be False):', mc.include_priors)

    pymultinest.run(LogLikelihood=mc.multinest_call, Prior=mc.multinest_priors, n_dims=mc.ndim, **mnest_kwargs)

    nested_sampling_save_to_cpickle(mc)

    analyzer = pymultinest.Analyzer(mc.ndim, outputfiles_basename=output_directory)
    stats = analyzer.get_stats()
    samples = analyzer.get_equal_weighted_posterior()[:, :-1]

    result = dict(logZ=stats['nested sampling global log-evidence'],
         logZerr=stats['nested sampling global log-evidence error'],
         samples=samples,
         )

    nested_sampling_save_to_cpickle(mc, 'result')

    print()
    print('MultiNest COMPLETED')
    print()

    #result = pymultinest.solve(LogLikelihood=mc.multinest_call, Prior=mc.multinest_priors,
    #                           n_dims=mc.ndim, outputfiles_basename=output_directory + './',
    #                           n_live_points=1000, sampling_efficiency=0.3, multimodal=True,
    #                           verbose=True, resume=True)

    print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
    print(result['logZ'] / np.log(10.00), result['logZerr'] / np.log(10.00))  # ln Z converted to log10

    if return_output:
        return mc
    else:
        return
Example #17
def run_qufit(dataFile,
              modelNum,
              outDir="",
              polyOrd=3,
              nBits=32,
              noStokesI=False,
              showPlots=False,
              debug=False,
              verbose=False):
    """Function controlling the fitting procedure."""

    # Get the processing environment
    if mpiSwitch:
        mpiComm = MPI.COMM_WORLD
        mpiSize = mpiComm.Get_size()
        mpiRank = mpiComm.Get_rank()
    else:
        mpiSize = 1
        mpiRank = 0

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)
    nestOut = prefixOut + "_nest/"
    if mpiRank == 0:
        if os.path.exists(nestOut):
            shutil.rmtree(nestOut, True)
        os.mkdir(nestOut)
    if mpiSwitch:
        mpiComm.Barrier()

    # Read the data file in the root process
    if mpiRank == 0:
        dataArr = np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)

    # Parse the data array
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = dataArr
        if mpiRank == 0:
            print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]")
    except Exception:
        # freq_Hz, Q, U, dQ, dU
        try:
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = dataArr
            if mpiRank == 0:
                print("\nFormat [freq_Hz, Q, U,  dQ, dU]")
            noStokesI = True
        except Exception:
            print("\nError: Failed to parse data file!")
            if debug:
                print(traceback.format_exc())
            if mpiSwitch:
                MPI.Finalize()
            return

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if mpiRank == 0:
            print("Note: no Stokes I data - assuming fractional polarisation.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    if mpiRank == 0:
        dataArr = create_frac_spectra(freqArr=freqArr_GHz,
                                      IArr=IArr,
                                      QArr=QArr,
                                      UArr=UArr,
                                      dIArr=dIArr,
                                      dQArr=dQArr,
                                      dUArr=dUArr,
                                      polyOrd=polyOrd,
                                      verbose=True)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)
    (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr

    # Plot the data and the Stokes I model fit
    if mpiRank == 0:
        print("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(10, 6))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModHirArr,
                              fig=specFig)

        # Use the custom navigation toolbar
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        if showPlots:
            specFig.canvas.draw()
            specFig.show()

    #-------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:
        print("\nLoading the model from 'models_ns/m%d.py' ..." % modelNum)
    mod = imp.load_source("m%d" % modelNum, "models_ns/m%d.py" % modelNum)
    global model
    model = mod.model

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Unpack the inParms structure
    parNames = [x["parname"] for x in mod.inParms]
    labels = [x["label"] for x in mod.inParms]
    values = [x["value"] for x in mod.inParms]
    bounds = [x["bounds"] for x in mod.inParms]
    priorTypes = [x["priortype"] for x in mod.inParms]
    wraps = [x["wrap"] for x in mod.inParms]
    nDim = len(priorTypes)
    fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    prior = prior_call(priorTypes, bounds, values)

    # Set the likelihood function given the data
    lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr)

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Run nested sampling using PyMultiNest
    nestArgsDict = merge_two_dicts(init_mnest(), mod.nestArgsDict)
    nestArgsDict["n_params"] = nDim
    nestArgsDict["n_dims"] = nDim
    nestArgsDict["outputfiles_basename"] = nestOut
    nestArgsDict["LogLikelihood"] = lnlike
    nestArgsDict["Prior"] = prior
    pmn.run(**nestArgsDict)

    # Do the post-processing on one processor
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:

        # Query the analyser object for results
        aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut)
        statDict = aObj.get_stats()
        fitDict = aObj.get_best_fit()
        endTime = time.time()

        # NOTE: The Analyser methods do not work well for parameters with
        # posteriors that overlap the wrap value. Use np.percentile instead.
        pMed = [None] * nDim
        for i in range(nDim):
            pMed[i] = statDict["marginals"][i]['median']
        lnLike = fitDict["log_likelihood"]
        lnEvidence = statDict["nested sampling global log-evidence"]
        dLnEvidence = statDict["nested sampling global log-evidence error"]

        # Get the best-fitting values & uncertainties directly from chains
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, pMed)
        p = [None] * nDim
        errPlus = [None] * nDim
        errMinus = [None] * nDim
        g = lambda v: (v[1], v[2] - v[1], v[1] - v[0])
        for i in range(nDim):
            p[i], errPlus[i], errMinus[i] = \
                        g(np.percentile(chains[:, i], [15.72, 50, 84.27]))

        # Calculate goodness-of-fit parameters
        nData = 2.0 * len(lamSqArr_m2)
        dof = nData - nFree - 1
        chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr)
        chiSqRed = chiSq / dof
        AIC = 2.0 * nFree - 2.0 * lnLike
        AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike
        BIC = nFree * np.log(nData) - 2.0 * lnLike

        # Summary of run
        print("")
        print("-" * 80)
        print("SUMMARY OF SAMPLING RUN:")
        print("#-PROCESSORS  = %d" % mpiSize)
        print("RUN-TIME      = %.2f" % (endTime - startTime))
        print("DOF           = %d" % dof)
        print("CHISQ:        = %.3g" % chiSq)
        print("CHISQ RED     = %.3g" % chiSqRed)
        print("AIC:          = %.3g" % AIC)
        print("AICc          = %.3g" % AICc)
        print("BIC           = %.3g" % BIC)
        print("ln(EVIDENCE)  = %.3g" % lnEvidence)
        print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
        print("")
        print("-" * 80)
        print("RESULTS:\n")
        for i in range(len(p)):
            print("%s = %.4g (+%3g, -%3g)" % \
                  (parNames[i], p[i], errPlus[i], errMinus[i]))
        print("-" * 80)
        print("")

        # Create a save dictionary and store final p in values
        outFile = prefixOut + "_m%d_nest.json" % modelNum
        IfitDict["p"] = toscalar(IfitDict["p"].tolist())
        saveDict = {
            "parNames": toscalar(parNames),
            "labels": toscalar(labels),
            "values": toscalar(p),
            "errPlus": toscalar(errPlus),
            "errMinus": toscalar(errMinus),
            "bounds": toscalar(bounds),
            "priorTypes": toscalar(priorTypes),
            "wraps": toscalar(wraps),
            "dof": toscalar(dof),
            "chiSq": toscalar(chiSq),
            "chiSqRed": toscalar(chiSqRed),
            "AIC": toscalar(AIC),
            "AICc": toscalar(AICc),
            "BIC": toscalar(BIC),
            "IfitDict": IfitDict
        }
        json.dump(saveDict, open(outFile, "w"))
        print("Results saved in JSON format to:\n '%s'\n" % outFile)

        # Plot the data and best-fitting model
        lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
        freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
        IModArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        pDict = {k: v for k, v in zip(parNames, p)}
        quModArr = model(pDict, lamSqHirArr_m2)
        specFig.clf()
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModArr,
                              qModArr=quModArr.real,
                              uModArr=quModArr.imag,
                              fig=specFig)
        specFig.canvas.draw()

        # Plot the posterior samples in a corner plot
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
        iFixed = [i for i, e in enumerate(fixedMsk) if e == 0]
        chains = np.delete(chains, iFixed, 1)
        for i in sorted(iFixed, reverse=True):
            del (labels[i])
            del (p[i])
        cornerFig = corner.corner(xs=chains,
                                  labels=labels,
                                  range=[0.99999] * nFree,
                                  truths=p,
                                  quantiles=[0.1572, 0.8427],
                                  bins=30)

        # Save the figures
        outFile = nestOut + "fig_m%d_specfit.pdf" % modelNum
        specFig.savefig(outFile)
        print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
        outFile = nestOut + "fig_m%d_corner.pdf" % modelNum
        cornerFig.savefig(outFile)
        print("Plot of posterior samples saved to \n '%s'\n" % outFile)

        # Display the figures
        if showPlots:
            specFig.show()
            cornerFig.show()
            print("> Press <RETURN> to exit ...", end="")
            sys.stdout.flush()
            input()

        # Clean up
        plt.close(specFig)
        plt.close(cornerFig)

    # Clean up MPI environment
    if mpiSwitch:
        MPI.Finalize()
Example #18
    def MN_analysis(self, basename):
        '''
        Analysis of MultiNest output.
        '''

        try:
            import pymultinest
        except ImportError:
            print("********************")
            print("Could not import pyMultiNest! Make sure that it is in your PYTHONPATH.")
            print("MultiNest must also be on your LD_LIBRARY_PATH")
            raise ValueError("Abort BSFC fit")

        ####
        ##Only works for MultiNest v3.11+, but we use MultiNest 3.10
        ## Read output:
        #try:
        #    import nestcheck.data_processing
        #    import nestcheck.estimators as estim
        #except:
        #    print("********************")
        #    print("Could not import nestcheck! Make sure that this is in your PYTHONPATH.")
        #    raise ValueError("Abort BSFC analysis")
        # 
        ## for testing
        #run = nestcheck.data_processing.process_multinest_run(
        #    basename.split('/')[-1],
        #    os.path.dirname(basename)+'/')
        
        ####
        
        # after MultiNest run, read results
        a = pymultinest.Analyzer(
            n_params= self.lineModel.thetaLength(),
            outputfiles_basename=basename
        )

        # get chains and weights
        data = a.get_data()

        self.samples = data[:,2:]
        self.sample_weights = data[:,0]
        # Used for sampling
        self.cum_sample_weights = np.cumsum(self.sample_weights)
        self.sample_n2ll = data[:,1]

        # save statistics
        stats = a.get_stats()
        self.multinest_stats = stats

        self.modes=stats['modes'][0]
        self.maximum= self.modes['maximum']
        self.maximum_a_posterior= self.modes['maximum a posterior']
        self.mean=np.asarray(self.modes['mean'])
        self.sigma=np.asarray(self.modes['sigma'])

        # get log-evidence estimate and uncertainty (from INS, if this is used)
        self.lnev = (stats['global evidence'], stats['global evidence error'])


        g=gptools.summarize_sampler(data[:, 2:],
                                    weights=data[:, 0],
                                    burn=0,
                                    ci=0.95, chain_mask=None)
        self.params_mean = np.asarray(g[0])
        self.params_ci_l = np.asarray(g[1])
        self.params_ci_u = np.asarray(g[2])

        # summarize results
        #self.m_map = self.lineModel.modelMoments(self.maximum_a_posterior)
        #self.m_map_mean = self.lineModel.modelMoments(self.mean)
        #m1 = self.lineModel.modelMoments(self.mean+self.sigma)
        #m2 = self.lineModel.modelMoments(self.mean - self.sigma)
        #self.m_map_std = (m1 - m2)/2.0   #temporary

        # marginalized (fully-Bayesian) results:
        self.m_bayes_marg = self.lineModel.modelMoments(self.params_mean)
        self.m_bayes_marg_low = self.lineModel.modelMoments(self.params_ci_l)
        self.m_bayes_marg_up = self.lineModel.modelMoments(self.params_ci_u)

        # temporary, for compatibility with MCMC methods:
        self.theta_avg = self.params_mean
        self.m_avg = self.m_bayes_marg
        #self.m_std = self.m_map_std

        return True
Example #19
    def optimize(self, num: int,
                 args_nld: Iterable,
                 guess: Dict[str, float]) -> Tuple[Dict[str, Tuple[float, float]], Dict[str, List[float]]]:  # noqa
        """Find parameters given model constraints and an initial guess

        Employs Multinest.

        Args:
            num (int): Loop number
            args_nld (Iterable): Additional arguments for the nld lnlike
            guess (Dict[str, float]): The initial guess of the parameters

        Returns:
            Tuple:
            - popt (Dict[str, Tuple[float, float]]): Median and 1sigma of the
                parameters
            - samples (Dict[str, List[float]]): Multinest samples.
                Note: They are still importance weighted, not random draws
                from the posterior.

        Raises:
            ValueError: Invalid parameters for automatic prior

        Note:
            You might want to adjust the priors for your specific case! Here
            we just propose a general solution that might often work out of
            the box.
        """
        if guess['alpha'] < 0:
            raise NotImplementedError("Prior selection not implemented for "
                                      "α < 0")
        alpha_exponent = np.log10(guess['alpha'])

        if guess['T'] < 0:
            raise ValueError("Prior selection not implemented for T < 0; "
                             "negative temperature is unphysical")
        T_exponent = np.log10(guess['T'])

        A = guess['A']
        B = guess["B"]

        # truncations from absolute values
        lower_A, upper_A = 0., np.inf
        mu_A, sigma_A = A, 10*A
        a_A = (lower_A - mu_A) / sigma_A
        b_A = (upper_A - mu_A) / sigma_A

        lower_Eshift, upper_Eshift = -5., 5
        mu_Eshift, sigma_Eshift = 0, 5
        a_Eshift = (lower_Eshift - mu_Eshift) / sigma_Eshift
        b_Eshift = (upper_Eshift - mu_Eshift) / sigma_Eshift

        lower_B, upper_B = 0., np.inf
        mu_B, sigma_B = B, 10*B
        a_B = (lower_B - mu_B) / sigma_B
        b_B = (upper_B - mu_B) / sigma_B

        def prior(cube, ndim, nparams):
            # NOTE: You may want to adjust this for your case!
            # truncated normal prior
            cube[0] = truncnorm_ppf(cube[0], a_A, b_A)*sigma_A + mu_A

            # log-uniform prior
            # if alpha = 1e2, it's between 1e1 and 1e3
            cube[1] = 10**(cube[1]*2 + (alpha_exponent-1))
            # log-uniform prior
            # if T = 1e2, it's between 1e1 and 1e3
            cube[2] = 10**(cube[2]*2 + (T_exponent-1))
            # truncated normal prior
            cube[3] = truncnorm_ppf(cube[3], a_Eshift,
                                    b_Eshift)*sigma_Eshift + mu_Eshift
            # truncated normal prior
            cube[4] = truncnorm_ppf(cube[4], a_B, b_B)*sigma_B + mu_B

            if np.isinf(cube[3]):
                self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3])

        def loglike(cube, ndim, nparams):
            return self.lnlike(cube, args_nld=args_nld)

        # parameters are changed in the lnlike
        norm_pars_org = copy.deepcopy(self.normalizer_gsf.norm_pars)

        self.multinest_path.mkdir(exist_ok=True)
        path = self.multinest_path / f"sim_norm_{num}_"
        assert len(str(path)) < 60, "Total path length too long for multinest"

        self.LOG.info("Starting multinest: ")
        self.LOG.debug("with following keywords %s:", self.multinest_kwargs)
        #  Hack where stdout from Multinest is redirected as info messages
        self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n'
                                      else None)

        with redirect_stdout(self.LOG):
            pymultinest.run(loglike, prior, len(guess),
                            outputfiles_basename=str(path),
                            **self.multinest_kwargs)

        # Save parameters for analyzer
        names = list(guess.keys())
        json.dump(names, open(str(path) + 'params.json', 'w'))
        analyzer = pymultinest.Analyzer(len(guess),
                                        outputfiles_basename=str(path))

        stats = analyzer.get_stats()

        samples = analyzer.get_equal_weighted_posterior()[:, :-1]
        samples = dict(zip(names, samples.T))

        # Format the output
        popt = dict()
        vals = []
        for name, m in zip(names, stats['marginals']):
            lo, hi = m['1sigma']
            med = m['median']
            sigma = (hi - lo) / 2
            popt[name] = (med, sigma)
            i = max(0, int(-np.floor(np.log10(sigma))) + 1)
            fmt = '%%.%df' % i
            fmts = '\t'.join([fmt + " ± " + fmt])
            vals.append(fmts % (med, sigma))

        self.LOG.info("Multinest results:\n%s", tt.to_string([vals],
                      header=['A', 'α [MeV⁻¹]', 'T [MeV]',
                              'Eshift [MeV]', 'B']))

        # reset state
        self.normalizer_gsf.norm_pars = norm_pars_org

        return popt, samples
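
truncnorm_ppf used in the prior above is a helper from the surrounding package; an equivalent sketch built on scipy (an assumption about its behaviour, not the package's own implementation) would be:

from scipy.stats import truncnorm

def truncnorm_ppf(q, a, b):
    # quantile of a standard truncated normal on [a, b]; the caller rescales
    # the result with *sigma + mu, as in prior() above
    return truncnorm.ppf(q, a, b)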
Example #20
def localize_frb(beam_data, out_name, max_baseline=12., out_dir="./", save=True, verbose=False):

    xoff  = beam_data.loc[beam_data['flux'].idxmax()].xpos
    yoff  = beam_data.loc[beam_data['flux'].idxmax()].ypos

    # The offset, (x,y), is related to a true sky coordinate, (RA,DEC), by
    #     x = (RA - RA0) * cos(DEC0)
    #     y = DEC - DEC0
    # correct beam positions to relative positions accounting for cos(dec)
    beam_data['xpos_new'] = (beam_data.xpos - xoff)*np.cos(yoff*np.pi/180.)
    beam_data['ypos_new'] = (beam_data.ypos - yoff)

    fwhm    = beam_fwhm(beam_data.freq, max_baseline=max_baseline)[0]
    # select beams surrounding highest detection SNR
    print("fwhm: {}".format(fwhm))
    beam_mapfig, mask = beam_select(beam_data, radius=fwhm/2)

    frb_data = beam_data[~mask]
    frb_data.reset_index(drop=True, inplace=True)
    nbeam    = len(frb_data)

    xpos  = frb_data.xpos_new
    ypos  = frb_data.ypos_new

    beam_name = frb_data.beam
    flux_data = frb_data.flux
    ferr_data = frb_data.ferr
    freq_beam = frb_data.freq
    print(beam_name)

    # normalize sensitivity
    sens  = 2000./frb_data.sefd

    print(freq_beam)
    fwhm    = beam_fwhm(freq_beam, max_baseline=max_baseline)
    rad     = fwhm/2.
    rad1400 = rad*(freq_beam/1.4)
    print(fwhm)

    #########################################################################################
    def beam_gain(x, y, fwhm):
        w   = fwhm / (2.0*sqrt(2*log(2.)))
        arg = x*x +y*y
        val = exp(-arg/2./w/w)
        return val

    def prior(cube, ndim, nparams):
        cube[0] = -2 + 4*cube[0]                   # uniform prior between -2:2
        cube[1] = -2 + 4*cube[1]                   # uniform prior between -2:2
        cube[2] = 10**(4*cube[2] - 1)              # log-uniform prior between 10^-1 and 10^3
        #nbeam = (ndim-3) / 4
        for ibeam in range(nbeam):
            mean = 1.0 ; sigma = 0.1               # Gaussian prior: mean=1, sigma=0.1
            cube[3+4*ibeam+0] = mean + (2**0.5)*sigma*erfinv(2*cube[3+4*ibeam+0] - 1)
            cube[3+4*ibeam+1] = mean + (2**0.5)*sigma*erfinv(2*cube[3+4*ibeam+1] - 1)

            mean = 0.0 ; sigma = 1./60.            # Gaussian prior: mean=0, sigma=1.0 arcminute
            cube[3+4*ibeam+2] = (2**0.5)*sigma*erfinv(2*cube[3+4*ibeam+2]-1)
            cube[3+4*ibeam+3] = (2**0.5)*sigma*erfinv(2*cube[3+4*ibeam+3]-1)

    def loglike(cube, ndim, nparams):
        loglike = 0

        x, y, flux = cube[0], cube[1], cube[2]
        #nbeam = (ndim-3) / 4
        for ibeam in range(nbeam):
            gain_err  = cube[3+4*ibeam+0]
            width_err = cube[3+4*ibeam+1]
            xpos_err  = cube[3+4*ibeam+2]
            ypos_err  = cube[3+4*ibeam+3]

            gain  = beam_gain(x-(xpos[ibeam]+xpos_err), y-(ypos[ibeam]+ypos_err), \
                              width_err*fwhm[ibeam])
            gain *= sens[ibeam]

            arg      = gain*gain_err*flux - flux_data[ibeam]
            loglike += -arg*arg/(2.0*ferr_data[ibeam]**2.0)

            cube[3+4*nbeam+ibeam] = gain

        return loglike

    ############################################################################

    # number of dimensions our problem has
    parameters = ["x_pos", "y_pos", "SNR"]
    for ibeam in range(nbeam):
        parameters.append("gain_err_" + beam_name[ibeam])
        parameters.append("width_err_" + beam_name[ibeam])
        parameters.append("xpos_err_" + beam_name[ibeam])
        parameters.append("ypos_err_" + beam_name[ibeam])

    n_dims  = len(parameters)           # dimensionality (no. of free parameters)
    for ibeam in range(nbeam):
        parameters.append("gain_" + beam_name[ibeam])

    n_params = len(parameters)           # total parameters (free + derived)
    rootbase = "chains/%s-" % (out_name)  # root for output files
    chaindir = os.path.join(os.path.abspath(out_dir), "chains")
    root     = os.path.join(os.path.abspath(out_dir), rootbase)

    os.system("mkdir -p -v " + chaindir)  # create temprory sub-dir

    # Tunable MultiNest Parameters
    mmodal   = False                     # do mode separation
    nlive    = 1000                      # number of live points
    tol      = 0.1                       # defines the stopping criteria (0.5, good enough)
    efr      = 1.0                       # sampling efficiency. 0.8 and 0.3 are recommended
    updInt   = 1000                      # after # iterations feedback & the posterior files update
    resume   = False                     # resume from a previous job
    maxiter  = 0                         # max no. of iteration. 0 is unlimited
    initMPI  = True                     # initialize MPI routines? Set False if the main program handles MPI init

    # run MultiNest
    pymultinest.run(loglike, prior, n_dims, n_params=n_params, multimodal=mmodal, n_live_points=nlive, \
                 evidence_tolerance=tol, sampling_efficiency=efr, n_iter_before_update=updInt, \
                 outputfiles_basename=root, verbose = verbose, resume=resume, max_iter=maxiter, \
                 init_MPI=initMPI)

    # other default inputs
    # n_clustering_params=None, wrapped_params=None, importance_nested_sampling=True,
    # const_efficiency_mode=False,  null_log_evidence=-1e+90, max_modes=100, seed=-1,
    # mode_tolerance=-1e+90, context=0, write_output=True, log_zero=-1e+100, dump_callback=None

    #########################################################################################

    check_files(root)
    a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename = root)
    s = a.get_stats()

    # store name of parameters, always useful
    with open('%sparams.json' % root, 'w') as f:
        json.dump(parameters, f, indent=2)
    # store derived stats
    with open('%sstats.json' % root, mode='w') as f:
        json.dump(s, f, indent=2)

    print("  marginal likelihood:")
    print("    ln Z = %.1f +- %.1f" % (s['global evidence'], s['global evidence error']))
    print("  parameters:")
    for p, m in zip(parameters, s['marginals']):
        lo, hi = m['1sigma']
        med    = m['median']
        sigma  = (hi - lo) / 2
        if sigma == 0:
            i = 3
        else:
            i = max(0, int(-np.floor(np.log10(sigma))) + 1)
        fmt  = '%%.%df' % i
        fmts = '\t'.join(['    %-15s' + fmt + " +- " + fmt])
        print(fmts % (p, med, sigma))

    #########################################################################################
    print("creating marginal plot ...")
    bins    = 50

    postdata = a.get_equal_weighted_posterior()[:,:3]         # only x_pos, y_pos, and SNR

    cornerdata = postdata.copy()
    cornerdata[:,2] = np.log10(cornerdata[:,2])
    cornerfig     = corner.corner(cornerdata, labels=["x_pos", "y_pos", "log SNR"], show_titles=True, \
                                  bins=50, smooth=True, smooth1d=True)

    beam_posfig = plot_beam_pos(postdata, frb_data, rad1400, bins, color="blue")


    outfile = os.path.join(os.path.abspath(out_dir), out_name)
    if save:
        beam_mapfig.savefig(outfile+'_beammap.pdf', bbox_inches='tight', papertype='letter', \
                                           orientation='landscape')
        cornerfig.savefig(outfile+'_cornerplot.pdf', bbox_inches='tight', papertype='letter', \
                                           orientation='landscape')
        beam_posfig.savefig(outfile+'_beampos.pdf', bbox_inches='tight', papertype='letter', \
                                           orientation='landscape')
        plt.close("all")

    else:
        plt.show()

    # write posterior image in fits file
    write_fits(postdata, bins, outfile, xoff, yoff, weights=None)

    return frb_data, postdata, rad1400
Example #21
ssfr2d = np.tile(ssfr[np.newaxis, :], (b.size, 1))

sam_ssfr2d = np.tile(sam_ssfr[np.newaxis, :], (b.size, 1))

#Run MultiNest
pymultinest.run(myloglike,
                myprior,
                n_params,
                importance_nested_sampling=False,
                resume=False,
                verbose=True,
                sampling_efficiency='model',
                n_live_points=500,
                outputfiles_basename='chains2/q_PL_twogal_redd_ssfr-')
#Analyse the results and plot the marginals
ana = pymultinest.Analyzer(
    n_params=n_params, outputfiles_basename='chains2/q_PL_twogal_redd_ssfr-')

plt.clf()

# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.

# Copy and edit this file, and play with it.

p = pymultinest.PlotMarginalModes(ana)
plt.figure(figsize=(5 * n_params, 5 * n_params))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params):
    plt.subplot(n_params, n_params, n_params * i + i + 1)
    p.plot_marginal(i, with_ellipses=True, with_points=False, grid_points=50)
Example #22
n_params = len(parameters)

pymultinest.run(myloglike,
                myprior,
                n_params,
                importance_nested_sampling=False,
                resume=True,
                verbose=True,
                sampling_efficiency='parameter',
                n_live_points=1000,
                outputfiles_basename='%s/2-' % plotDir,
                evidence_tolerance=0.001,
                multimodal=False)

# lets analyse the results
a = pymultinest.Analyzer(n_params=n_params,
                         outputfiles_basename='%s/2-' % plotDir)
s = a.get_stats()

import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
    json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
    json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" %
      (s['nested sampling global log-evidence'],
       s['nested sampling global log-evidence error']))
Example #23
def multinest(nu, D, Ninv, beam_mat, ndim, models_fit, label):

    import pymultinest
    import json

    if not os.path.exists("chains"): os.mkdir("chains")
    parameters = ["dust_beta", "dust_Td", "sync_beta"]
    n_params = len(parameters)

    def prior_multi(cube, ndim, nparams):
        cube[0] = 0 + 3 * cube[0]
        cube[1] = 5 + 95 * cube[1]
        cube[2] = -5 + 5 * cube[2]

    def loglike_multi(cube, ndim, nparams):
        dust_beta, dust_Td, sync_beta = cube[0], cube[1], cube[2]
        dust_params = np.array([dust_beta, dust_Td])
        sync_params = np.array([sync_beta])
        cmb_params = np.array([])
        (F_fg, F_cmb, F) = F_matrix(nu, dust_params, sync_params, cmb_params,
                                    models_fit)
        H = F_fg.T * Ninv * F_fg

        x_mat = np.linalg.inv(F.T * beam_mat.T * Ninv * beam_mat *
                              F) * F.T * beam_mat.T * Ninv * D  # Equation A3

        chi_square = (D - beam_mat * F * x_mat).T * Ninv * (
            D - beam_mat * F * x_mat)  # Equation A4

        return -chi_square - 0.5 * np.log(np.linalg.det(H))

    pymultinest.run(loglike_multi,
                    prior_multi,
                    n_params,
                    outputfiles_basename='chains/single_pixel_',
                    resume=False,
                    verbose=True,
                    n_live_points=1000,
                    importance_nested_sampling=False)
    a = pymultinest.Analyzer(n_params=n_params,
                             outputfiles_basename='chains/single_pixel_')
    s = a.get_stats()

    output = a.get_equal_weighted_posterior()
    outfile = 'test.out'
    pickle.dump(output, open(outfile, "wb"))

    # store name of parameters, always useful
    with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
        json.dump(parameters, f, indent=2)
    # store derived stats
    with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
        json.dump(s, f, indent=2)
    print()
    print("-" * 30, 'ANALYSIS', "-" * 30)
    print("Global Evidence:\n\t%.15e +- %.15e" %
          (s['nested sampling global log-evidence'],
           s['nested sampling global log-evidence error']))

    # Plots
    p = pymultinest.PlotMarginalModes(a)
    plt.figure(figsize=(5 * n_params, 5 * n_params))

    for i in range(n_params):
        plt.subplot(n_params, n_params, n_params * i + i + 1)
        p.plot_marginal(i,
                        with_ellipses=True,
                        with_points=False,
                        grid_points=50)
        plt.ylabel("Probability")
        plt.xlabel(parameters[i])

        for j in range(i):
            plt.subplot(n_params, n_params, n_params * j + i + 1)
            p.plot_conditional(i,
                               j,
                               with_ellipses=False,
                               with_points=True,
                               grid_points=30)
            plt.xlabel(parameters[i])
            plt.ylabel(parameters[j])

    plt.savefig(
        "chains/single_pixel_marginals_multinest.pdf")  #, bbox_inches='tight')

    for i in range(n_params):
        outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename, i)
        p.plot_modes_marginal(i, with_ellipses=True, with_points=False)
        plt.ylabel("Probability")
        plt.xlabel(parameters[i])
        plt.savefig(outfile, format='pdf', bbox_inches='tight')
        plt.close()

        outfile = '%s-mode-marginal-cumulative-%d.pdf' % (
            a.outputfiles_basename, i)
        p.plot_modes_marginal(i,
                              cumulative=True,
                              with_ellipses=True,
                              with_points=False)
        plt.ylabel("Cumulative probability")
        plt.xlabel(parameters[i])
        plt.savefig(outfile, format='pdf', bbox_inches='tight')
        plt.close()
Example #24
if n_comp == 2:
    n_sec = [6, 3]
    n_sled = 2 * sled_to_j * n_mol
else:
    n_sec = [3]
    n_sled = sled_to_j * n_mol
n_params = n_dims + np.sum(n_sec) + n_sled

meas = pickle.load(open("measdata.pkl", "rb"))
lw = np.log10(meas['head']['lw'])
# If meas doesn't include tbg, just the old default, 2.73 K
if 'tbg' not in meas: meas['tbg'] = 2.73
# If not calculated using multimol, won't have secmol.
if 'secmol' not in meas['head']: meas['head']['secmol'] = []

a = pymultinest.Analyzer(n_params=n_params)
s = a.get_stats()
data = a.get_data()
# Check if a.post_file exists; this separates the data by mode.
#### TEMPORARY FIX, in case old version of pymultinest with typo is being used.
if a.post_file == u'chains/1-post_seperate.dat':
    a.post_file = u'chains/1-post_separate.dat'
if os.path.isfile(a.post_file):
    datsep = post_sep(a.post_file)  # Divide the "data" up by mode.
else:
    datsep = {}
    datsep[0] = data
datsep['all'] = data
bestfit = a.get_best_fit()
cube = bestfit[
    'parameters']  # The best fit is not the same as the mode, cube=s['modes'][0]['maximum']
Example #25
    os.makedirs('out')

parameters = ["beta1", "beta2", "p_F", "p_b", "F_0", r"$nu_{b_0}$"]
n_params = len(parameters)

pymultinest.run(loglike,
                prior,
                n_params,
                outputfiles_basename='out/' + GRB + '_fit_',
                resume=False,
                verbose=True)
json.dump(parameters, open('out/' + GRB + '_fit_' + 'params.json', 'w'))

# plot the distribution of a posteriori possible models
plt.figure()
# plt.plot(x, ydata, '+ ', color='red', label='data')
a = pymultinest.Analyzer(outputfiles_basename='out/' + GRB + '_fit_',
                         n_params=n_params)
for (beta1, beta2, p_F, p_b, F_0,
     nu_b0) in a.get_equal_weighted_posterior()[::100, :-1]:
    plt.plot(t_data,
             model_ph(t_data, nu, beta1, beta2, p_F, p_b, F_0, nu_b0),
             '-',
             color='blue',
             alpha=0.3,
             label='posterior draw')

plt.xscale('log')
plt.savefig('out/' + GRB + '_fit_posterior.png')

a_lnZ = a.get_stats()
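a_lnZ above is the full stats dictionary, so the global log-evidence can be read off directly. A hedged sketch of a model comparison follows; a_alt is a hypothetical Analyzer built the same way for a competing fit:

lnZ = a_lnZ['nested sampling global log-evidence']
lnZ_err = a_lnZ['nested sampling global log-evidence error']
lnZ_alt = a_alt.get_stats()['nested sampling global log-evidence']  # hypothetical alternative fit
print('ln Z = %.2f +- %.2f, ln(Bayes factor) vs. alternative = %.2f'
      % (lnZ, lnZ_err, lnZ - lnZ_alt))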
Example #26
0

n_params = 3
out_file = 'out_multinest'

if not os.path.exists('fit.pkl'):
    # Run MultiNest:
    pymultinest.run(loglike,
                    prior,
                    n_params,
                    n_live_points=500,
                    outputfiles_basename=out_file,
                    resume=False,
                    verbose=True)
    # Get output:
    output = pymultinest.Analyzer(outputfiles_basename=out_file,
                                  n_params=n_params)
    # Get output parameters: this matrix has shape (n_samples, n_params + 1):
    posterior_samples = output.get_equal_weighted_posterior()[:, :-1]
    # Save matrix:
    pickle.dump(posterior_samples, open('fit.pkl', 'wb'))
    print('Done! Run again to plot')
    sys.exit()
else:
    posterior_samples = pickle.load(open('fit.pkl', 'rb'))

xi = posterior_samples[:, 0]
T_n = posterior_samples[:, 1]
delta_T = posterior_samples[:, 2]

for i in range(5):
    t, f = all_t[i], all_f[i]
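The snippet is cut off at the plotting loop. As a hedged aside, the posterior matrix recovered from fit.pkl can be summarized into medians and 68% intervals; the parameter names follow the unpacking above:

import numpy as np

names = ['xi', 'T_n', 'delta_T']
lo, med, hi = np.percentile(posterior_samples, [16, 50, 84], axis=0)
for name, l, m, h in zip(names, lo, med, hi):
    print('%8s = %.4g (+%.4g / -%.4g)' % (name, m, h - m, m - l))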
Example #27
0
b = ns_setup.Priors(1, n_params)
pymultinest.run(
    b.loglike,
    b.prior,
    n_params,
    loglike_args=[len_x, x_full, bin_indices, opacity_grid, ydata, yerr],
    outputfiles_basename=output_directory + planet_name + '_',
    resume=False,
    verbose=True,
    n_live_points=live)

json.dump(parameters, open(output_directory + planet_name + '_params.json',
                           'w'))  # save parameter names

a = pymultinest.Analyzer(outputfiles_basename=output_directory + planet_name +
                         '_',
                         n_params=n_params)
samples = a.get_equal_weighted_posterior()[:, :-1]
bestfit_params = a.get_best_fit()
stats = a.get_stats()

## set up results ##
retrieved_results = list(
    map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
        zip(*np.percentile(samples, [16, 50, 84], axis=0))))

plot_percentiles = []
new_param_dict = parameter_dict
retrieved_parameters_full = {}
retrieved_parameters_list = []
for i in range(n_params):
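This example is truncated at the parameter loop. The map/lambda expression that builds retrieved_results above can be hard to read; a hedged, equivalent rewrite using the same samples array:

lo, med, hi = np.percentile(samples, [16, 50, 84], axis=0)
# (median, +1 sigma, -1 sigma) per parameter, same content as retrieved_results
retrieved_explicit = [(m, h - m, m - l) for l, m, h in zip(lo, med, hi)]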
Example #28
0
def check_solution(folder,
                   Ld_dir,
                   finesse_dir,
                   recover=False,
                   w0=487.98634,
                   mu=39.948):
    print("I'm here!")
    data = h5_2_dict(join(folder, 'input_plasma_data.h5'))

    ix = data['fit_ix']
    r = data['r']
    sig = data['sig']

    Lpost, dpost = read_Ld_results(Ld_dir)
    Fpost, _, _, _ = read_finesse_results(finesse_dir)
    nL = len(Lpost)
    nF = len(Fpost)
    i = np.random.choice(nL)
    j = np.random.choice(nF)

    L = Lpost[i]
    d = dpost[i]
    F = Fpost[j]

    Lstep = 100
    Fstep = 100
    Tistep = 100
    analyzer = pymultinest.Analyzer(3,
                                    outputfiles_basename=join(folder, "Ti_"))
    modes = analyzer.get_mode_stats()
    Ti, V, A = modes['modes'][0]['mean']
    print(modes['modes'][0]['sigma'])
    print(Ti, V, A)
    post = analyzer.get_equal_weighted_posterior()

    Tipost = post[:, 0]
    Vpost = post[:, 1]
    Apost = post[:, 2]

    if recover:
        try:
            post_dict = h5_2_dict(join(folder, "Ti_solver_model_post.h5"))
            sig_post = post_dict["signal post"]
        except:
            print(
                "Can't recover Ti solver posterior. Calculating from scratch."
            )
            sig_post = calculate_signal_post(r[ix],
                                             Lpost[::Lstep],
                                             dpost[::Lstep],
                                             Fpost[::Fstep],
                                             Tipost[::Tistep],
                                             Vpost[::Tistep],
                                             Apost[::Tistep],
                                             w0,
                                             mu,
                                             nprocs=32)
            dict_2_h5(join(folder, "Ti_solver_model_post.h5"),
                      {"signal post": sig_post})
    else:
        sig_post = calculate_signal_post(r[ix],
                                         Lpost[::Lstep],
                                         dpost[::Lstep],
                                         Fpost[::Fstep],
                                         Tipost[::Tistep],
                                         Vpost[::Tistep],
                                         Apost[::Tistep],
                                         w0,
                                         mu,
                                         nprocs=32)
        dict_2_h5(join(folder, "Ti_solver_model_post.h5"),
                  {"signal post": sig_post})

    sig_mean = np.mean(sig_post, axis=1)
    percentiles = calculate_percentile_ranges(sig_post)
    #vals = forward_model(r[ix], L, d, F, w0, mu, A, Ti, V, sm_ang=False, nlambda=1024)

    fig, ax = plt.subplots(figsize=(3.5, 3.5 / 1.61))
    # ax.plot(r[ix], sig[ix], 'C1', alpha=0.5, label='Data')
    ax.plot(r[ix], (sig[ix] - 3000.0) / 100.0, 'C1', alpha=0.5, label='Data')
    #ax.plot(r[ix], vals, 'r')
    alphas = [0.8, 0.5, 0.2]
    keys = [68, 95, 99]
    for alpha, per in zip(alphas, keys):
        if per == 99:
            # ax.fill_between(r[ix], percentiles[per][0], percentiles[per][1], color='C3', alpha=alpha, label='Fit')
            ax.fill_between(r[ix],
                            percentiles[per][0] / 100.0,
                            percentiles[per][1] / 100.0,
                            color='C3',
                            alpha=alpha,
                            label='Fit')
        else:
            # ax.fill_between(r[ix], percentiles[per][0], percentiles[per][1], color='C3', alpha=alpha)
            ax.fill_between(r[ix],
                            percentiles[per][0] / 100.0,
                            percentiles[per][1] / 100.0,
                            color='C3',
                            alpha=alpha)
    fig.legend(frameon=False,
               fontsize=8,
               loc='upper right',
               bbox_to_anchor=(0.5, 0.5))
    ax.set_xlabel("R (px)", fontsize=8, labelpad=-1)
    ax.set_ylabel("Counts (Hundreds)", fontsize=8, labelpad=-1)
    ax.tick_params(labelsize=8)
    #fig.tight_layout()
    fig.savefig(join(folder, "Ti_Ar_fit.png"), dpi=400)
    plt.show(block=False)

    axis_labels = ["Ti (eV)", "V (m/s)", "A (Counts)"]
    ylabels = ["P(Ti)", "P(V)", "P(A)"]
    # fig, ax = plt.subplots(3, figsize=(6, 15))
    fig, ax = plt.subplots(2, figsize=(3.5, 2 * 3.5 / 1.61))
    # for n in range(3):
    for n in range(2):
        my_hist(ax[n], post[:, n])
        ax[n].set_xlabel(axis_labels[n], fontsize=8, labelpad=-1)
        ax[n].set_ylabel(ylabels[n], fontsize=8, labelpad=-1)
        ax[n].tick_params(labelsize=8)
    #fig.tight_layout()
    fig.savefig(join(folder, "Ti_solver_histograms.png"), dpi=400)
    plt.show()
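calculate_percentile_ranges is a helper from this code base and is not shown here. A hedged sketch of how such envelopes could be built with NumPy, assuming the posterior model draws run along axis 1 of sig_post (consistent with the np.mean(sig_post, axis=1) call above); the real helper may differ:

import numpy as np

def percentile_bands(sig_post, levels=(68, 95, 99)):
    """Return {level: (lower, upper)} envelopes over the posterior model draws."""
    bands = {}
    for level in levels:
        lo = np.percentile(sig_post, 50.0 - level / 2.0, axis=1)
        hi = np.percentile(sig_post, 50.0 + level / 2.0, axis=1)
        bands[level] = (lo, hi)
    return bands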
Example #29
0
# run MultiNest
result = solve(LogLikelihood=myloglike,
               Prior=myprior,
               n_dims=n_params,
               outputfiles_basename=prefix,
               verbose=True)

print()
#print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
for name, col in zip(parameters, result['samples'].transpose()):
    print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

# lets analyse the results
a = pymultinest.Analyzer(n_params=n_params, outputfiles_basename=prefix)
s = a.get_stats()

# make marginal plots by running:
# $ python multinest_marginals.py chains/3-
# For that, we need to store the parameter names:
import json
with open('%sparams.json' % prefix, 'w') as f:
    json.dump(parameters, f, indent=2)

with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
    json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" %
      (s['nested sampling global log-evidence'],
Example #30
0
def nlfit(xx, yy, yerr, objects, bounds, myloss='linear'):
    # have bounds that follow same format as objects

    # format bounds list
    blist = [bounds[0][0], bounds[0][1]]  # tmid
    for i in range(1, len(bounds)):
        for k in bounds[i].keys():
            blist.append(bounds[i][k][0])
            blist.append(bounds[i][k][1])

    # compute integrations for as long as the max epoch
    ndays = np.round(1.5 * (max(xx) + 1) * objects[1]['P']).astype(int)

    # prevents seg fault in MultiNest
    yerr[yerr == 0] = np.mean(yerr[yerr != 0])

    def model_sim(params):
        c = 1
        for i in range(1, len(bounds)):
            for k in bounds[i].keys():
                objects[i][k] = params[c]
                c += 1
        # create REBOUND simulation
        return get_ttv(objects, ndays)

    def myprior(cube, ndim, n_params):
        for i in range(int(len(blist) / 2)):  # for only the free params
            cube[i] = (blist[2 * i + 1] - blist[2 * i]) * cube[i] + blist[2 * i]

    loss = {
        'linear': lambda z: z,
        'soft_l1': lambda z: 2 * ((1 + z)**0.5 - 1),
    }

    omegas = []

    def myloglike(cube, ndim, n_params):
        epochs, ttv, tt = model_sim(cube)
        ttv_data = yy - (cube[1] * xx + cube[0])
        try:
            loglike = -np.sum(loss[myloss](
                ((ttv_data - ttv[xx.astype(int)]) / yerr)**2)) * 0.5
            return loglike
        except:
            loglike = 0
            for i in range(len(ttv)):
                loglike += loss[myloss](((ttv_data[i] - ttv[i]) / yerr[i])**2)
            return -10 * np.sum(loglike)  # penalize for unstable orbits

        # period usually changes ~30 seconds to a minute after N-body integrations
        # therefore the method below subtracts the wrong period from the data
        #ttvm = tt[xx.astype(int)] - (cube[1]*xx+cube[0])
        #return -np.sum( ((ttv_data-ttvm)/yerr)**2 )*0.5

    pymultinest.run(myloglike,
                    myprior,
                    int(len(blist) / 2),
                    resume=False,
                    evidence_tolerance=0.5,
                    sampling_efficiency=0.5,
                    n_live_points=200,
                    verbose=True)

    a = pymultinest.Analyzer(n_params=int(len(blist) / 2))

    # gets the marginalized posterior probability distributions
    posteriors = a.get_data()
    stats = a.get_stats()
    stats['marginals'] = get_stats(posteriors)

    # map posteriors back to object dict
    obj = copy.deepcopy(objects)
    mask = posteriors[:, 1] < np.percentile(posteriors[:, 1], 50)

    c = 1
    for i in range(1, len(bounds)):
        for k in bounds[i].keys():
            obj[i][k] = np.median(posteriors[mask, 2 + c])
            c += 1

    return obj, posteriors, stats
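a.get_data() returns MultiNest's raw posterior file: column 0 is the sample weight, column 1 is -2 ln(likelihood), and the parameter values follow, which is why the loop above reads posteriors[mask, 2 + c] and why the mask keeps the better-likelihood half of the samples. A hedged sketch of weighted posterior means computed from the same array:

weights = posteriors[:, 0]   # column 0: sample weight
params = posteriors[:, 2:]   # columns 2..: parameter values
weighted_means = np.average(params, axis=0, weights=weights)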