Example #1
import numpy as np
from PTMCMCSampler.PTMCMCSampler import PTSampler


def get_pulsar_noise(pta, ret):

    ndim = len(pta.params)

    # Generate jump groups: pair up consecutive parameters, and add one group
    # covering all parameters.

    groups0 = [[i, i + 1] for i in range(0, ndim - 1, 2)]
    groups0.extend([list(range(ndim))])

    groups1 = [list(range(ndim))]

    # This grouping was found to give fast convergence in practice.

    # Start the first run. Notice the choice of starting points.

    x0 = np.zeros(ndim)
    x0[0:-4:2] = 1.
    x0[1:-4:2] = -7.
    x0[-4:] = [2, -13.5, 2, -13.5]
    cov0 = np.diag(np.ones(ndim) * 0.05)
    outDir0 = ('/home/sdb/xuexiao/M80PTAchains/NoiseFixing/FirstRun/' +
               pta.pulsars[0])
    sampler = PTSampler(ndim,
                        pta.get_lnlikelihood,
                        pta.get_lnprior,
                        cov0,
                        groups=groups0,
                        outDir=outDir0,
                        verbose=False)
    sampler.sample(x0, 250000, isave=1000)
    chain0 = np.loadtxt(outDir0 + '/chain_1.txt')

    # End of the first run.

    # Start the second run.

    outDir1 = ('/home/sdb/xuexiao/M80PTAchains/NoiseFixing/SecondRun/' +
               pta.pulsars[0])
    # Restart from the maximum-likelihood sample of the first run; column -3 of
    # the chain file is the ln-likelihood and the last four columns are
    # sampler diagnostics.
    x1 = chain0[np.argmax(chain0[:, -3]), :-4]
    # Reuse the adapted proposal covariance from the first run.
    cov1 = np.load(outDir0 + '/cov.npy')
    sampler = PTSampler(ndim,
                        pta.get_lnlikelihood,
                        pta.get_lnprior,
                        cov1,
                        groups=groups1,
                        outDir=outDir1,
                        verbose=False)
    sampler.sample(x1, 500000, isave=1000)
    chain1 = np.loadtxt(outDir1 + '/chain_1.txt')

    # End of the second run.

    # Return the best-fit (maximum-likelihood) parameters together with their
    # ln-likelihood and ln-prior values.

    MLHselect = chain1[np.argmax(chain1[:, -3]), :]
    Dict = {pta.params[i].name: MLHselect[i] for i in range(ndim)}
    ret.value = (Dict, pta.get_lnlikelihood(Dict), pta.get_lnprior(Dict))
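
The function publishes its result through ret instead of returning it, which suggests it is launched in a separate process. Below is a minimal driver sketch for that usage; it assumes ret is a multiprocessing.Manager().Namespace() proxy and that ptas is a list of single-pulsar enterprise PTA objects, neither of which appears in the original snippet.

import multiprocessing as mp

def run_all_pulsars(ptas):
    # Run the noise-fixing procedure for each pulsar in its own process.
    manager = mp.Manager()
    jobs = []
    for pta in ptas:
        ret = manager.Namespace()
        proc = mp.Process(target=get_pulsar_noise, args=(pta, ret))
        proc.start()
        jobs.append((pta, ret, proc))

    results = {}
    for pta, ret, proc in jobs:
        proc.join()
        # ret.value holds (best-fit parameter dict, ln-likelihood, ln-prior).
        results[pta.pulsars[0]] = ret.value
    return results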
Example #2
    # Combine the signal components into the per-pulsar model.
    model = tm + dp + wnb + dmn + spn + eph
    nparams = []  # Get the number of parameters of each pulsar
    signals = []
    for psr in psrs:
        signal = model(psr)
        nparams.append(len(signal.params) - 5)  # subtract the 5 common DPDM params
        signals.append(signal)
    PTA = signal_base.PTA(signals)
    ndim = len(PTA.params) + 5

    # Use the best fit noise parameters!
    PTA.set_default_params(Dict)

    # Set the starting point to the already-known best-fit values!
    xs = {par.name: par.sample() for par in PTA.params}
    for parname in Dict.keys():
        if parname in xs.keys():
            xs[parname] = Dict[parname]
    x0 = np.hstack([xs[key] for key in sorted(xs.keys())])

    sampler = PTSampler(
        ndim,
        PTA.get_lnlikelihood,
        PTA.get_lnprior,
        cov=np.diag(np.ones(ndim) * 0.25),
        groups=None,
        outDir='/home/sdb/xuexiao/EPHPTAchains/FixNoiseBayesian/')
    sampler.sample(x0, 10000000, isave=1000)
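
Once the run finishes, the chain can be inspected straight from the output directory. A minimal post-processing sketch, assuming the chain layout already relied on above (parameter columns followed by four diagnostic columns, with the ln-likelihood at index -3) and an arbitrary 25% burn-in:

import numpy as np

chain = np.loadtxt('/home/sdb/xuexiao/EPHPTAchains/FixNoiseBayesian/chain_1.txt')
burn = chain.shape[0] // 4        # discard the first quarter as burn-in
samples = chain[burn:, :-4]       # keep only the parameter columns

# Posterior median and 68% credible interval for every parameter column.
lo, med, hi = np.percentile(samples, [16, 50, 84], axis=0)
for i in range(samples.shape[1]):
    print(f'par {i}: {med[i]:.3f} (+{hi[i] - med[i]:.3f} / -{med[i] - lo[i]:.3f})')

# Maximum-likelihood sample, as in Example #1.
best = chain[np.argmax(chain[:, -3]), :-4]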
Example #3
    # Load the starting points and constant parameter values saved by earlier runs.
    save1 = np.load('noisepars.npy')
    save2 = np.load('noisepardict.npy')
    save3 = np.load('dpdmpars-maxposprob.npy')
    save4 = np.load('dpdmpardict.npy')
    Dict = {save2[i]: save1[i] for i in range(len(save2))}
    Dict.update({save4[i]: save3[i] for i in range(len(save4))})

    # Use the best fit noise parameters!
    PTA.set_default_params(Dict)

    # Set the starting point to the already-known best-fit values!
    xs = {par.name: par.sample() for par in PTA.params}
    for parname in Dict.keys():
        if parname in xs.keys():
            xs[parname] = Dict[parname]
    x0 = np.hstack([xs[key] for key in sorted(xs.keys())])

    # Set jump groups: one over all parameters, plus the last five dimensions
    # as their own group.
    groups = [list(range(ndim)), list(range(ndim - 5, ndim))]

    # First sampling run (preparation)

    sampler = PTSampler(ndim,
                        PTA.get_lnlikelihood,
                        PTA.get_lnprior,
                        cov=np.diag(np.ones(ndim) * 0.025),
                        groups=groups,
                        outDir='/home/sdb/xuexiao/PTAchains/openBayesian/')
    sampler.sample(x0, 10000000, isave=1000)
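
The comment above marks this as a preparatory run, so a second run presumably follows it. A hedged sketch of that follow-up, mirroring the two-stage pattern of Example #1 (restart from the maximum-likelihood sample and reuse the adapted proposal covariance); the second output directory is a placeholder, not a path from the original code.

    import numpy as np
    from PTMCMCSampler.PTMCMCSampler import PTSampler

    outDir0 = '/home/sdb/xuexiao/PTAchains/openBayesian/'
    chain0 = np.loadtxt(outDir0 + 'chain_1.txt')
    x1 = chain0[np.argmax(chain0[:, -3]), :-4]   # best parameters found so far
    cov1 = np.load(outDir0 + 'cov.npy')          # adapted proposal covariance

    sampler = PTSampler(ndim,
                        PTA.get_lnlikelihood,
                        PTA.get_lnprior,
                        cov=cov1,
                        groups=groups,
                        outDir='/home/sdb/xuexiao/PTAchains/openBayesianSecond/')  # placeholder
    sampler.sample(x1, 10000000, isave=1000)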