Code example #1
def get_toas(evtfile, flags, tcoords=None, minweight=0, minMJD=0, maxMJD=100000):
    if evtfile[-3:] == 'tim':
        usepickle = False
        if 'usepickle' in flags:
            usepickle = flags['usepickle']
        ts = toa.get_TOAs(evtfile, usepickle=usepickle)
        #Prune out of range MJDs
        mask = np.logical_or(ts.get_mjds() < minMJD * u.day,
                             ts.get_mjds() > maxMJD * u.day)
        ts.table.remove_rows(mask)
        ts.table = ts.table.group_by('obs')
    else:
        if 'usepickle' in flags and flags['usepickle']:
            try:
                picklefile = toa._check_pickle(evtfile)
                if not picklefile:
                    picklefile = evtfile
                ts = toa.TOAs(picklefile)
                return ts
            except:
                pass
        weightcol = flags['weightcol'] if 'weightcol' in flags else None
        target = tcoords if weightcol == 'CALC' else None
        tl = fermi.load_Fermi_TOAs(evtfile, weightcolumn=weightcol,
                                   targetcoord=target, minweight=minweight)
        tl = list(filter(lambda t: (t.mjd.value > minMJD) and (t.mjd.value < maxMJD), tl))
        ts = toa.TOAs(toalist=tl)
        ts.filename = evtfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    log.info("There are %d events we will use" % len(ts.table))
    return ts
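A minimal usage sketch for the helper above; the event file name, flag values, and target coordinates are hypothetical:

from astropy.coordinates import SkyCoord

# Hypothetical Fermi FT1 event file, with weights computed from the target position
target = SkyCoord("00h30m27.4s", "+04d51m39.7s", frame="icrs")
flags = {"weightcol": "CALC", "usepickle": True}
ts = get_toas("J0030_ft1.fits", flags, tcoords=target,
              minweight=0.05, minMJD=54680, maxMJD=57250)
print(ts.get_summary())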
Code example #2
def _load_and_prepare_TOAs(mjds, ephem="DE405"):
    toalist = [None] * len(mjds)
    for i, m in enumerate(mjds):
        toalist[i] = toa.TOA(m, obs='Barycenter', scale='tdb')

    toalist = toa.TOAs(toalist=toalist)
    if 'tdb' not in toalist.table.colnames:
        toalist.compute_TDBs()
    if 'ssb_obs_pos' not in toalist.table.colnames:
        toalist.compute_posvels(ephem, False)
    return toalist
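A quick sketch of calling this helper on a grid of barycentric MJDs; the MJD values are placeholders:

import numpy as np

# Placeholder grid of barycentric MJDs (TDB scale)
mjds = np.linspace(55000.0, 55010.0, 11)
ts = _load_and_prepare_TOAs(mjds, ephem="DE421")
print(ts.table.colnames)  # should now include 'tdb' and 'ssb_obs_pos'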
Code example #3
 def test_process_raw_LAT_times(self):
     # This just checks that the process of reading the FT2 file and
     # computing positions doesn't crash. It doesn't validate any results.
     # Should probably run comparison with GEO or BARY phases.
     modelin = pint.models.get_model(parfile)
     FermiObs(name="Fermi", ft2name=ft2file)
     tl = load_Fermi_TOAs(eventfileraw, weightcolumn="PSRJ0030+0451")
     ts = toa.TOAs(toalist=tl)
     ts.filename = eventfileraw
     ts.compute_TDBs(ephem="DE405")
     ts.compute_posvels(ephem="DE405", planets=False)
     modelin.phase(ts)[1]
Code example #4
def get_orbital_correction_from_ephemeris_file(mjdstart, mjdstop, parfile,
                                               ntimes=1000, ephem="DE405"):
    """Get a correction for orbital motion from pulsar parameter file.

    Parameters
    ----------
    mjdstart, mjdstop : float
        Start and end of the time interval where we want the orbital solution
    parfile : str
        Any parameter file understood by PINT (Tempo or Tempo2 format)

    Other parameters
    ----------------
    ntimes : int
        Number of time intervals to use for interpolation. Default 1000

    Returns
    -------
    correction_sec : function
        Function that accepts in input an array of times in seconds and a
        floating-point MJDref value, and returns the deorbited times
    correction_mjd : function
        Function that accepts times in MJDs and returns the deorbited times.
    """
    from scipy.interpolate import interp1d
    simon("Assuming events are already referred to the solar system "
          "barycenter (timescale is TDB)")
    if not HAS_PINT:
        raise ImportError("You need the optional dependency PINT to use this "
                          "functionality: github.com/nanograv/pint")

    mjds = np.linspace(mjdstart, mjdstop, ntimes)
    toalist = [None] * len(mjds)
    for i, m in enumerate(mjds):
        toalist[i] = toa.TOA(m, obs='Barycenter', scale='tdb')

    toalist = toa.TOAs(toalist = toalist)
    if 'tdb' not in toalist.table.colnames:
        toalist.compute_TDBs()
    if 'ssb_obs_pos' not in toalist.table.colnames:
        toalist.compute_posvels(ephem, False)

    m = pint.models.get_model(parfile)
    tdbtimes = m.get_barycentric_toas(toalist.table)

    correction_mjd = interp1d(mjds, tdbtimes)

    def correction_sec(times, mjdref):
        deorb_mjds = correction_mjd(times / 86400 + mjdref)
        return np.array((deorb_mjds - mjdref) * 86400)

    return correction_sec, correction_mjd
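Assuming a PINT-readable par file is available (the file name, MJD range, and event times below are placeholders), the two returned callables described in the docstring might be used like this:

import numpy as np

correction_sec, correction_mjd = get_orbital_correction_from_ephemeris_file(
    55000.0, 55100.0, "psr.par", ntimes=1000, ephem="DE405")

mjdref = 55000.0
times = np.array([100.0, 200.0, 300.0])    # event times in seconds since MJDREF
deorb_sec = correction_sec(times, mjdref)  # deorbited times, seconds since MJDREF
deorb_mjd = correction_mjd(55050.0)        # deorbited time for a single MJD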
Code example #5
 def test_process_raw_LAT_times(self):
     # This just checks that the process of reading the FT2 file and
     # computing positions doesn't crash. It doesn't validate any results.
     # Should probably run comparison with GEO or BARY phases.
     modelin = pint.models.get_model(parfile)
     FermiObs(name='Fermi',ft2name=ft2file,tt2tdb_mode='none')
     tl  = load_Fermi_TOAs(eventfileraw, weightcolumn='PSRJ0030+0451')
     ts = toa.TOAs(toalist=tl)
     ts.filename = eventfileraw
     ts.compute_TDBs()
     ts.compute_posvels(ephem='DE405',planets=False)
     phss = modelin.phase(ts.table)[1]
     phases = np.where(phss < 0.0 * u.cycle, phss + 1.0 * u.cycle, phss)
Code example #6
File: test_fitter.py  Project: hellohancheng/PINT
def test_fitter():
    # Get model

    m = tm.get_model(os.path.join(datadir, "NGC6440E.par"))

    # Get TOAs
    t = toa.TOAs(os.path.join(datadir, "NGC6440E.tim"))
    t.apply_clock_corrections(include_bipm=False)
    t.compute_TDBs()
    try:
        planet_ephems = m.PLANET_SHAPIRO.value
    except AttributeError:
        planet_ephems = False
    t.compute_posvels(planets=planet_ephems)

    f = fitter.WLSFitter(toas=t, model=m)

    # Print initial chi2
    print("chi^2 is initially %0.2f" % f.resids.chi2)

    # Plot initial residuals
    xt = f.resids.toas.get_mjds().value
    yerr = t.get_errors() * 1e-6
    plt.close()
    p1 = plt.errorbar(xt, f.resids.time_resids.value, yerr.value, fmt="bo")

    # Do a 4-parameter fit
    f.set_fitparams("F0", "F1", "RAJ", "DECJ")
    f.fit_toas()
    print("chi^2 is %0.2f after 4-param fit" % f.resids.chi2)
    p2 = plt.errorbar(xt, f.resids.time_resids.value, yerr.value, fmt="go")

    # Now perturb F1 and fit only that. This doesn't work, though tempo2 easily fits
    # it.
    f.model.F1.value = 1.1 * f.model.F1.value
    f.fit_toas()
    print("chi^2 is %0.2f after perturbing F1" % f.resids.chi2)
    p3 = plt.errorbar(xt, f.resids.time_resids.value, yerr.value, fmt="ms")

    f.set_fitparams("F1")
    f.fit_toas()
    print('chi^2 is %0.2f after fitting just F1 with default method="Powell"' %
          f.resids.chi2)
    p4 = plt.errorbar(xt, f.resids.time_resids.value, yerr.value, fmt="k^")
Code example #7
def _load_and_prepare_TOAs(mjds, errs_us=None, ephem="DE405"):
    errs_us = assign_value_if_none(errs_us, np.zeros_like(mjds))

    toalist = [None] * len(mjds)
    for i, m in enumerate(mjds):
        toalist[i] = \
            toa.TOA(m, error=errs_us[i], obs='Barycenter', scale='tdb')

    toalist = toa.TOAs(toalist=toalist)
    if 'tdb' not in toalist.table.colnames:
        toalist.compute_TDBs()
    if 'ssb_obs_pos' not in toalist.table.colnames:
        toalist.compute_posvels(ephem, False)
    return toalist
Code example #8
    def make_TOAs(self):

        # Get the toa object
        self.toas = toa.TOAs(toalist=self.data_fermi + self.data_NICER +
                             self.data_NuSTAR)

        # Get the toa list
        #self.toas_list = toa.get_TOAs_list(self.data_fermi + self.data_NICER +
        #                                   self.data_NuSTAR, ephem=self.args.ephem)

        # Combine weights from individual observatories
        weights = np.concatenate(
            (self.weights_fermi, self.weights_NICER, self.weights_NuSTAR))

        # add the weights to the TOAs object
        for ii in range(len(weights)):
            self.toas.table['flags'][ii]['weights'] = weights[ii]
Code example #9
import os

import pint.models as tm
from pint import toa
from pint import fitter

#import matplotlib
#matplotlib.use('TKAgg')
import matplotlib.pyplot as plt

import numpy

from pinttestdata import testdir, datadir

# Get model

m = tm.get_model(os.path.join(datadir, 'NGC6440E.par'))

# Get TOAs
t = toa.TOAs(os.path.join(datadir, 'NGC6440E.tim'))
t.apply_clock_corrections(include_bipm=False)
t.compute_TDBs()
try:
    planet_ephems = m.PLANET_SHAPIRO.value
except AttributeError:
    planet_ephems = False
t.compute_posvels(planets=planet_ephems)

f = fitter.WLSFitter(toas=t, model=m)

# Print initial chi2
print('chi^2 is initially %0.2f' % f.resids.chi2)

# Plot initial residuals
xt = f.resids.toas.get_mjds().value
Code example #10
import time, sys, os
import pint.models as tm
from pint.phase import Phase
from pint import toa
from pint import fitter
import matplotlib.pyplot as plt
import numpy

from pinttestdata import testdir, datadir

# Get model
m = tm.StandardTimingModel()
m.read_parfile(os.path.join(datadir, 'NGC6440E.par'))

# Get TOAs
t = toa.TOAs(os.path.join(datadir, 'NGC6440E.tim'), usepickle=False)
t.apply_clock_corrections()
t.compute_TDBs()
try:
    planet_ephems = m.PLANET_SHAPIRO.value
except AttributeError:
    planet_ephems = False
t.compute_posvels(planets=planet_ephems)

f = fitter.fitter(toas=t, model=m)

# Print initial chi2
print('chi^2 is initially %0.2f' % f.resids.chi2)

# Plot initial residuals
xt = [x for x in f.resids.toas.get_mjds()]
Code example #11
File: test_fitter.py  Project: rossjjennings/PINT
def test_fitter():
    # Get model

    m = tm.get_model(os.path.join(datadir, "NGC6440E.par"))

    # Get TOAs
    t = toa.TOAs(os.path.join(datadir, "NGC6440E.tim"))
    t.apply_clock_corrections(include_bipm=False)
    t.compute_TDBs()
    try:
        planet_ephems = m.PLANET_SHAPIRO.value
    except AttributeError:
        planet_ephems = False
    t.compute_posvels(planets=planet_ephems)

    f = fitter.WLSFitter(toas=t, model=m)

    # Print initial chi2
    print("chi^2 is initially %0.2f" % f.resids.chi2)

    # Plot initial residuals
    xt = f.resids.toas.get_mjds().value
    yerr = t.get_errors() * 1e-6
    plt.close()
    p1 = plt.errorbar(xt, f.resids.time_resids.value, yerr.value, fmt="bo")

    # Do a 4-parameter fit
    f.model.free_params = ("F0", "F1", "RAJ", "DECJ")
    f.fit_toas()

    # Check the number of degrees of freedom in the fit.
    # Fitting the 4 params above, plus the 1 implicit global offset = 5 free parameters.
    # NTOA = 62, so DOF = 62 - 5 = 57
    assert f.resids.dof == 57

    print("chi^2 is %0.2f after 4-param fit" % f.resids.chi2)
    p2 = plt.errorbar(xt, f.resids.time_resids.value, yerr.value, fmt="go")

    # Make sure the summary printing works
    f.print_summary()

    # Try a few utils

    # Now perturb F1 and fit only that. This doesn't work, though tempo2 easily fits
    # it.
    f.model.F1.value = 1.1 * f.model.F1.value
    f.fit_toas()
    print("chi^2 is %0.2f after perturbing F1" % f.resids.chi2)
    p3 = plt.errorbar(xt, f.resids.time_resids.value, yerr.value, fmt="ms")

    f.model.free_params = ["F1"]
    f.fit_toas()
    print('chi^2 is %0.2f after fitting just F1 with default method="Powell"' %
          f.resids.chi2)
    p4 = plt.errorbar(xt, f.resids.time_resids.value, yerr.value, fmt="k^")
Code example #12
File: event_optimize.py  Project: tcromartie/PINT
def main(argv=None):

    parser = argparse.ArgumentParser(
        description="PINT tool for MCMC optimization of timing models using event data.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument("eventfile", help="event file to use")
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("gaussianfile", help="gaussian file that defines template")
    parser.add_argument("--ft2", help="Path to FT2 file.", default=None)
    parser.add_argument(
        "--weightcol",
        help="name of weight column (or 'CALC' to have them computed)",
        default=None,
    )
    parser.add_argument(
        "--nwalkers", help="Number of MCMC walkers", type=int, default=200
    )
    parser.add_argument(
        "--burnin",
        help="Number of MCMC steps for burn in",
        type=int,
        default=100,
    )
    parser.add_argument(
        "--nsteps",
        help="Number of MCMC steps to compute",
        type=int,
        default=1000,
    )
    parser.add_argument(
        "--minMJD", help="Earliest MJD to use", type=float, default=54680.0
    )
    parser.add_argument(
        "--maxMJD", help="Latest MJD to use", type=float, default=57250.0
    )
    parser.add_argument(
        "--phs", help="Starting phase offset [0-1] (def is to measure)", type=float
    )
    parser.add_argument(
        "--phserr", help="Error on starting phase", type=float, default=0.03
    )
    parser.add_argument(
        "--minWeight",
        help="Minimum weight to include",
        type=float,
        default=0.05,
    )
    parser.add_argument(
        "--wgtexp",
        help="Raise computed weights to this power (or 0.0 to disable any rescaling of weights)",
        type=float,
        default=0.0,
    )
    parser.add_argument(
        "--testWeights",
        help="Make plots to evalute weight cuts?",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--doOpt",
        help="Run initial scipy opt before MCMC?",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--initerrfact",
        help="Multiply par file errors by this factor when initializing walker starting values",
        type=float,
        default=0.1,
    )
    parser.add_argument(
        "--priorerrfact",
        help="Multiple par file errors by this factor when setting gaussian prior widths",
        type=float,
        default=10.0,
    )
    parser.add_argument(
        "--usepickle",
        help="Read events from pickle file, if available?",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--log-level",
        type=str,
        choices=("TRACE", "DEBUG", "INFO", "WARNING", "ERROR"),
        default=pint.logging.script_level,
        help="Logging level",
        dest="loglevel",
    )

    global nwalkers, nsteps, ftr

    args = parser.parse_args(argv)
    log.remove()
    log.add(
        sys.stderr,
        level=args.loglevel,
        colorize=True,
        format=pint.logging.format,
        filter=pint.logging.LogFilter(),
    )

    eventfile = args.eventfile
    parfile = args.parfile
    gaussianfile = args.gaussianfile
    weightcol = args.weightcol

    if args.ft2 is not None:
        # Instantiate Fermi observatory once so it gets added to the observatory registry
        get_satellite_observatory("Fermi", args.ft2)

    nwalkers = args.nwalkers
    burnin = args.burnin
    nsteps = args.nsteps
    if burnin >= nsteps:
        log.error("burnin must be < nsteps")
        sys.exit(1)
    nbins = 256  # For likelihood calculation based on gaussians file
    outprof_nbins = 256  # in the text file, for pygaussfit.py, for instance
    minMJD = args.minMJD
    maxMJD = args.maxMJD  # Usually set by coverage of IERS file

    minWeight = args.minWeight
    do_opt_first = args.doOpt
    wgtexp = args.wgtexp

    # Read in initial model
    modelin = pint.models.get_model(parfile)

    # The custom_timing version below is to manually construct the TimingModel
    # class, which allows it to be pickled. This is needed for parallelizing
    # the emcee call over a number of threads.  So far, it isn't quite working
    # so it is disabled.  The code above constructs the TimingModel class
    # dynamically, as usual.
    # modelin = custom_timing(parfile)

    # Remove the dispersion delay as it is unnecessary
    # modelin.delay_funcs['L1'].remove(modelin.dispersion_delay)
    # Set the target coords for automatic weighting if necessary
    if "ELONG" in modelin.params:
        tc = SkyCoord(
            modelin.ELONG.quantity,
            modelin.ELAT.quantity,
            frame="barycentrictrueecliptic",
        )
    else:
        tc = SkyCoord(modelin.RAJ.quantity, modelin.DECJ.quantity, frame="icrs")

    target = tc if weightcol == "CALC" else None

    # TODO: make this properly handle long double
    ts = None
    if args.usepickle:
        try:
            ts = toa.load_pickle(eventfile)
        except IOError:
            pass
    if ts is None:
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(
            eventfile, weightcolumn=weightcol, targetcoord=target, minweight=minWeight
        )
        # Limit the TOAs to ones in selected MJD range and above minWeight
        tl = [
            tl[ii]
            for ii in range(len(tl))
            if (
                tl[ii].mjd.value > minMJD
                and tl[ii].mjd.value < maxMJD
                and (weightcol is None or float(tl[ii].flags["weight"]) > minWeight)
            )
        ]
        log.info("There are %d events we will use" % len(tl))
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        toa.save_pickle(ts)

    if weightcol is not None:
        if weightcol == "CALC":
            weights = np.asarray([float(x["weight"]) for x in ts.table["flags"]])
            log.info(
                "Original weights have min / max weights %.3f / %.3f"
                % (weights.min(), weights.max())
            )
            # Rescale the weights, if requested (by having wgtexp != 0.0)
            if wgtexp != 0.0:
                weights **= wgtexp
                wmx, wmn = weights.max(), weights.min()
                # make the highest weight = 1, but keep min weight the same
                weights = wmn + ((weights - wmn) * (1.0 - wmn) / (wmx - wmn))
            for ii, x in enumerate(ts.table["flags"]):
                x["weight"] = str(weights[ii])
        weights = np.asarray([float(x["weight"]) for x in ts.table["flags"]])
        log.info(
            "There are %d events, with min / max weights %.3f / %.3f"
            % (len(weights), weights.min(), weights.max())
        )
    else:
        weights = None
        log.info("There are %d events, no weights are being used." % ts.ntoas)

    # Now load in the gaussian template and normalize it
    gtemplate = read_gaussfitfile(gaussianfile, nbins)
    gtemplate /= gtemplate.mean()

    # Set the priors on the parameters in the model, before
    # instantiating the emcee_fitter
    # Currently, this adds a gaussian prior on each parameter
    # with width equal to the par file uncertainty * priorerrfact,
    # and then puts in some special cases.
    # *** This should be replaced/supplemented with a way to specify
    # more general priors on parameters that need certain bounds
    phs = 0.0 if args.phs is None else args.phs
    fitkeys, fitvals, fiterrs = get_fit_keyvals(modelin, phs=phs, phserr=args.phserr)

    for key, v, e in zip(fitkeys[:-1], fitvals[:-1], fiterrs[:-1]):
        if key == "SINI" or key == "E" or key == "ECC":
            getattr(modelin, key).prior = Prior(uniform(0.0, 1.0))
        elif key == "PX":
            getattr(modelin, key).prior = Prior(uniform(0.0, 10.0))
        elif key.startswith("GLPH"):
            getattr(modelin, key).prior = Prior(uniform(-0.5, 1.0))
        else:
            getattr(modelin, key).prior = Prior(
                norm(loc=float(v), scale=float(e * args.priorerrfact))
            )

    # Now define the requirements for emcee
    ftr = emcee_fitter(ts, modelin, gtemplate, weights, phs, args.phserr)

    # Use this if you want to see the effect of setting minWeight
    if args.testWeights:
        log.info("Checking H-test vs weights")
        ftr.prof_vs_weights(use_weights=True)
        ftr.prof_vs_weights(use_weights=False)
        sys.exit()

    # Now compute the photon phases and see if we see a pulse
    phss = ftr.get_event_phases()
    maxbin, like_start = marginalize_over_phase(
        phss, gtemplate, weights=ftr.weights, minimize=True, showplot=False
    )
    log.info("Starting pulse likelihood: %f" % like_start)
    if args.phs is None:
        fitvals[-1] = 1.0 - maxbin[0] / float(len(gtemplate))
        if fitvals[-1] > 1.0:
            fitvals[-1] -= 1.0
        if fitvals[-1] < 0.0:
            fitvals[-1] += 1.0
        log.info("Starting pulse phase: %f" % fitvals[-1])
    else:
        log.warning(
            "Measured starting pulse phase is %f, but using %f"
            % (1.0 - maxbin / float(len(gtemplate)), args.phs)
        )
        fitvals[-1] = args.phs
    ftr.fitvals[-1] = fitvals[-1]
    ftr.phaseogram(plotfile=ftr.model.PSR.value + "_pre.png")
    plt.close()
    # ftr.phaseogram()

    # Write out the starting pulse profile
    vs, xs = np.histogram(
        ftr.get_event_phases(), outprof_nbins, range=[0, 1], weights=ftr.weights
    )
    f = open(ftr.model.PSR.value + "_prof_pre.txt", "w")
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Try normal optimization first to see how it goes
    if do_opt_first:
        result = op.minimize(ftr.minimize_func, np.zeros_like(ftr.fitvals))
        newfitvals = np.asarray(result["x"]) * ftr.fiterrs + ftr.fitvals
        like_optmin = -result["fun"]
        log.info("Optimization likelihood: %f" % like_optmin)
        ftr.set_params(dict(zip(ftr.fitkeys, newfitvals)))
        ftr.phaseogram()
    else:
        like_optmin = -np.inf

    # Set up the initial conditions for the emcee walkers.  Use the
    # scipy.optimize newfitvals instead if they are better
    ndim = ftr.n_fit_params
    if like_start > like_optmin:
        # Keep the starting deviations small...
        pos = [
            ftr.fitvals + ftr.fiterrs * args.initerrfact * np.random.randn(ndim)
            for ii in range(nwalkers)
        ]
        # Set starting params
        for param in ["GLPH_1", "GLEP_1", "SINI", "M2", "E", "ECC", "PX", "A1"]:
            if param in ftr.fitkeys:
                idx = ftr.fitkeys.index(param)
                if param == "GLPH_1":
                    svals = np.random.uniform(-0.5, 0.5, nwalkers)
                elif param == "GLEP_1":
                    svals = np.random.uniform(minMJD + 100, maxMJD - 100, nwalkers)
                    # svals = 55422.0 + np.random.randn(nwalkers)
                elif param == "SINI":
                    svals = np.random.uniform(0.0, 1.0, nwalkers)
                elif param == "M2":
                    svals = np.random.uniform(0.1, 0.6, nwalkers)
                elif param in ["E", "ECC", "PX", "A1"]:
                    # Ensure all positive
                    svals = np.fabs(
                        ftr.fitvals[idx] + ftr.fiterrs[idx] * np.random.randn(nwalkers)
                    )
                    if param in ["E", "ECC"]:
                        svals[svals > 1.0] = 1.0 - (svals[svals > 1.0] - 1.0)
                for ii in range(nwalkers):
                    pos[ii][idx] = svals[ii]
    else:
        pos = [
            newfitvals + ftr.fiterrs * args.initerrfact * np.random.randn(ndim)
            for i in range(nwalkers)
        ]
    # Set the 0th walker to have the initial pre-fit solution
    # This way, one walker should always be in a good position
    pos[0] = ftr.fitvals

    import emcee

    # Following are for parallel processing tests...
    if 0:

        def unwrapped_lnpost(theta, ftr=ftr):
            return ftr.lnposterior(theta)

        import pathos.multiprocessing as mp

        pool = mp.ProcessPool(nodes=8)
        sampler = emcee.EnsembleSampler(
            nwalkers, ndim, unwrapped_lnpost, pool=pool, args=[ftr]
        )
    else:
        sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior)
    # The number is the number of points in the chain
    sampler.run_mcmc(pos, nsteps)

    def chains_to_dict(names, sampler):
        chains = [sampler.chain[:, :, ii].T for ii in range(len(names))]
        return dict(zip(names, chains))

    def plot_chains(chain_dict, file=False):
        npts = len(chain_dict)
        fig, axes = plt.subplots(npts, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
            axes[ii].set_ylabel(name)
        axes[npts - 1].set_xlabel("Step Number")
        fig.tight_layout()
        if file:
            fig.savefig(file)
            plt.close()
        else:
            plt.show()
            plt.close()

    chains = chains_to_dict(ftr.fitkeys, sampler)
    plot_chains(chains, file=ftr.model.PSR.value + "_chains.png")

    # Make the triangle plot.
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    try:
        import corner

        fig = corner.corner(
            samples,
            labels=ftr.fitkeys,
            bins=50,
            truths=ftr.maxpost_fitvals,
            plot_contours=True,
        )
        fig.savefig(ftr.model.PSR.value + "_triangle.png")
        plt.close()
    except ImportError:
        pass

    # Plot the scaled prior probability alongside the initial gaussian probability distribution and the histogrammed samples
    ftr.plot_priors(chains, burnin, scale=True)
    plt.savefig(ftr.model.PSR.value + "_priors.png")
    plt.close()

    # Make a phaseogram with the 50th percentile values
    # ftr.set_params(dict(zip(ftr.fitkeys, np.percentile(samples, 50, axis=0))))
    # Make a phaseogram with the best MCMC result
    ftr.set_params(dict(zip(ftr.fitkeys[:-1], ftr.maxpost_fitvals[:-1])))
    ftr.phaseogram(plotfile=ftr.model.PSR.value + "_post.png")
    plt.close()

    # Write out the output pulse profile
    vs, xs = np.histogram(
        ftr.get_event_phases(), outprof_nbins, range=[0, 1], weights=ftr.weights
    )
    f = open(ftr.model.PSR.value + "_prof_post.txt", "w")
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Write out the par file for the best MCMC parameter est
    f = open(ftr.model.PSR.value + "_post.par", "w")
    f.write(ftr.model.as_parfile())
    f.close()

    # Print the best MCMC values and ranges
    # Use a list comprehension (not a lazy map) so the ranges can be iterated
    # twice below: once for the log and once for the results file
    ranges = [
        (v[1], v[2] - v[1], v[1] - v[0])
        for v in zip(*np.percentile(samples, [16, 50, 84], axis=0))
    ]
    log.info("Post-MCMC values (50th percentile +/- (16th/84th percentile):")
    for name, vals in zip(ftr.fitkeys, ranges):
        log.info("%8s:" % name + "%25.15g (+ %12.5g  / - %12.5g)" % vals)

    # Put the same stuff in a file
    f = open(ftr.model.PSR.value + "_results.txt", "w")

    f.write("Post-MCMC values (50th percentile +/- (16th/84th percentile):\n")
    for name, vals in zip(ftr.fitkeys, ranges):
        f.write("%8s:" % name + " %25.15g (+ %12.5g  / - %12.5g)\n" % vals)

    f.write("\nMaximum likelihood par file:\n")
    f.write(ftr.model.as_parfile())
    f.close()

    import pickle

    pickle.dump(samples, open(ftr.model.PSR.value + "_samples.pickle", "wb"))
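For orientation, a hedged example of invoking this entry point programmatically; the event, par, and gaussian-template file names are placeholders:

main(argv=["J0030_ft1.fits", "J0030.par", "J0030.gaussians",
           "--weightcol", "CALC", "--nwalkers", "100", "--nsteps", "500"])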
Code example #13
File: htest_optimize.py  Project: cdeil/PINT
    # Set the target coords for automatic weighting if necessary
    target = SkyCoord(modelin.RAJ.value, modelin.DECJ.value, \
        frame='icrs') if weightcol=='CALC' else None

    # TODO: make this properly handle long double
    if not (os.path.isfile(eventfile+".pickle") or
        os.path.isfile(eventfile+".pickle.gz")):
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(eventfile, weightcolumn=weightcol,
                                   targetcoord=target, minweight=minWeight)
        # Limit the TOAs to ones where we have IERS corrections for
        tl = [tl[ii] for ii in range(len(tl)) if (tl[ii].mjd.value < maxMJD
            and (weightcol is None or tl[ii].flags['weight'] > minWeight))]
        print "There are %d events we will use" % len(tl)
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    else:  # read the events in as a pickle file
        picklefile = toa._check_pickle(eventfile)
        if not picklefile:
            picklefile = eventfile
        ts = toa.TOAs(picklefile)

    if weightcol is not None:
        if weightcol=='CALC':
            weights = np.asarray([x['weight'] for x in ts.table['flags']])
            print "Original weights have min / max weights %.3f / %.3f" % \
                (weights.min(), weights.max())
Code example #14
 def setUp(self):
     os.chdir(datadir)
     self.x = toa.TOAs("test1.tim")
     self.x.apply_clock_corrections()
     self.x.compute_TDBs()
     self.x.table.sort("index")
Code example #15
def main(argv=None):
    import argparse
    parser = argparse.ArgumentParser(description="Use PINT to compute event phases and make plots of photon event files.")
    parser.add_argument("eventfile",help="Photon event FITS file name (e.g. from NICER, RXTE, XMM, Chandra).")
    parser.add_argument("parfile",help="par file to construct model from")
    parser.add_argument("--orbfile",help="Name of orbit file", default=None)
    parser.add_argument("--maxMJD",help="Maximum MJD to include in analysis", default=None)
    parser.add_argument("--plotfile",help="Output figure file name (default=None)", default=None)
    parser.add_argument("--addphase",help="Write FITS file with added phase column",default=False,action='store_true')
    parser.add_argument("--absphase",help="Write FITS file with integral portion of pulse phase (ABS_PHASE)",default=False,action='store_true')
    parser.add_argument("--barytime",help="Write FITS file with a column containing the barycentric time as double precision MJD.",default=False,action='store_true')
    parser.add_argument("--outfile",help="Output FITS file name (default=same as eventfile)", default=None)
    parser.add_argument("--ephem",help="Planetary ephemeris to use (default=DE421)", default="DE421")
    parser.add_argument("--plot",help="Show phaseogram plot.", action='store_true', default=False)
    parser.add_argument("--fix",help="Apply 1.0 second offset for NICER", action='store_true', default=False)
    args = parser.parse_args(argv)

    # If outfile is specified, that implies addphase
    if args.outfile is not None:
        args.addphase = True

    # If plotfile is specified, that implies plot
    if args.plotfile is not None:
        args.plot = True

    # Read event file header to figure out what instrument it is from
    hdr = pyfits.getheader(args.eventfile,ext=1)

    log.info('Event file TELESCOPE = {0}, INSTRUMENT = {1}'.format(hdr['TELESCOP'],
        hdr['INSTRUME']))
    if hdr['TELESCOP'] == 'NICER':
        # Instantiate NICERObs once so it gets added to the observatory registry
        if args.orbfile is not None:
            log.info('Setting up NICER observatory')
            NICERObs(name='NICER',FPorbname=args.orbfile,tt2tdb_mode='none')
        # Read event file and return list of TOA objects
        try:
            tl  = load_NICER_TOAs(args.eventfile)
        except KeyError:
            log.error("Observatory not recognized.  This probably means you need to provide an orbit file or barycenter the event file.")
            sys.exit(1)
    elif hdr['TELESCOP'] == 'XTE':
        # Instantiate RXTEObs once so it gets added to the observatory registry
        if args.orbfile is not None:
            # Determine what observatory type is.
            log.info('Setting up RXTE observatory')
            RXTEObs(name='RXTE',FPorbname=args.orbfile,tt2tdb_mode='none')
        # Read event file and return list of TOA objects
        tl  = load_RXTE_TOAs(args.eventfile)
    elif hdr['TELESCOP'].startswith('XMM'):
        # Not loading orbit file here, since that is not yet supported.
        tl  = load_XMM_TOAs(args.eventfile)
    else:
        log.error("FITS file not recognized, TELESCOPE = {0}, INSTRUMENT = {1}".format(
            hdr['TELESCOP'], hdr['INSTRUME']))
        sys.exit(1)

    # Read in model
    modelin = pint.models.get_model(args.parfile)

    # Discard events outside of MJD range
    if args.maxMJD is not None:
        tlnew = []
        print("pre len : ",len(tl))
        maxT = Time(float(args.maxMJD),format='mjd')
        print("maxT : ",maxT)
        for tt in tl:
            if tt.mjd < maxT:
                tlnew.append(tt)
        tl=tlnew
        print("post len : ",len(tlnew))

    # Now convert to TOAs object and compute TDBs and posvels
    if len(tl) == 0:
        log.error("No TOAs, exiting!")
        sys.exit(0)
    ts = toa.TOAs(toalist=tl)
    ts.filename = args.eventfile
    if args.fix:
        ts.adjust_TOAs(TimeDelta(np.ones(len(ts.table))*-1.0*u.s,scale='tt'))
    ts.compute_TDBs()
    # Could add a check here to only compute planet positions if PLANET_SHAPIRO is true.
    # For now, just being lazy and always computing planet positions.
    ts.compute_posvels(ephem=args.ephem,planets=True)

    print(ts.get_summary())
    mjds = ts.get_mjds()
    print(mjds.min(),mjds.max())

    # Compute model phase for each TOA
    iphss,phss = modelin.phase(ts.table)
    # ensure all positive
    negmask = phss < 0.0 * u.cycle
    phases = np.where(negmask, phss + 1.0 * u.cycle, phss)
    h = float(hm(phases))
    print("Htest : {0:.2f} ({1:.2f} sigma)".format(h,h2sig(h)))
    if args.plot:
        phaseogram_binned(mjds,phases,bins=100,plotfile = args.plotfile)

    if args.addphase:
        # Read input FITS file (again).
        # If overwriting, open in 'update' mode
        if args.outfile is None:
            hdulist = pyfits.open(args.eventfile,mode='update')
        else:
            hdulist = pyfits.open(args.eventfile)
        if len(hdulist[1].data) != len(phases):
            raise RuntimeError('Mismatch between length of FITS table ({0}) and length of phase array ({1})!'.format(len(hdulist[1].data),len(phases)))
        data_to_add = {'PULSE_PHASE':[phases,'D']}
        if args.absphase:
            data_to_add['ABS_PHASE'] = [iphss-negmask*u.cycle,'K']
        if args.barytime:
            tdbs = np.asarray([t.mjd for t in ts.table['tdb']])
            data_to_add['BARY_TIME'] = [tdbs,'D']
        for key in data_to_add.keys():
            if key in hdulist[1].columns.names:
                log.info('Found existing %s column, overwriting...'%key)
                # Overwrite values in existing Column
                hdulist[1].data[key] = data_to_add[key][0]
            else:
                # Construct and append new column, preserving HDU header and name
                log.info('Adding new %s column.'%key)
                datacol = pyfits.ColDefs([pyfits.Column(name=key,
                    format=data_to_add[key][1], array=data_to_add[key][0])])
                bt = pyfits.BinTableHDU.from_columns(
                    hdulist[1].columns + datacol, header=hdulist[1].header,
                    name=hdulist[1].name)
                hdulist[1] = bt
        if args.outfile is None:
            # Overwrite the existing file
            log.info('Overwriting existing FITS file '+args.eventfile)
            hdulist.flush(verbose=True, output_verify='warn')
        else:
            # Write to new output file
            log.info('Writing output FITS file '+args.outfile)
            hdulist.writeto(args.outfile,overwrite=True, checksum=True, output_verify='warn')
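A hedged example invocation of this script's entry point; the NICER event, orbit, and par file names are placeholders:

main(argv=["nicer_events.fits", "psr.par",
           "--orbfile", "nicer_orbit.fits", "--plot"])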
Code example #16
    def __init__(self, args):

        # Store the input arguments
        self.args = args

        # Initialize weights and TOAs data
        # These need to be initialized to empty so that make_toas doesn't
        # throw an error if one of these datasets is not provided and it tries
        # to concatenate an unset variable with a list.
        self.weights_fermi = []
        self.weights_NICER = []
        self.weights_NuSTAR = []
        self.data_fermi = []
        self.data_NICER = []
        self.data_NuSTAR = []

        # If only a string is given, turn it into a list
        if type(self.args.ft1) is str:
            self.args.ft1 = [self.args.ft1]

        # If only a string is given, turn it into a list
        #if type(self.args.weightcol) is str:
        #self.args.weightcol = [self.args.weightcol]

        if self.args.load_pick is False:
            # Determine if multiple observations should be merged
            for ii in self.args.ft1:

                # Get the name of the telescope
                tele = self.check_tele(ii)

                # Use the appropriate function to load data

                # If the telescope is Fermi (formerly GLAST)...
                # Note: should there be an additional check that the data comes
                # from the LAT instrument?
                if tele == 'GLAST':

                    # Load the fermi data
                    self.read_fermi(ii, self.args.weightcol)

                # If the telescope is NICER...
                elif tele == 'NICER':

                    # Load the NICER data
                    self.read_NICER(ii)

                # If the telescope is NuSTAR
                elif tele == 'NuSTAR':

                    # Load the NuSTAR data
                    self.read_NuSTAR(ii)

            # Perform the merging
            self.make_TOAs()

            # Add errors to the data (for residual minimization)
            self.add_errors()

        # If load_pick is not False, the user has specified a file
        # Try loading it
        elif self.args.load_pick is not False:
            # load the model
            self.modelin = pint.models.get_model(self.args.par)

            # Read the saved pickle file
            self.toas = toa.TOAs(self.args.load_pick + '.pickle')

            # Create a toas_list from the loaded file
            #self.toas_list = toa.get_TOAs_list([self.toas.toas[:]])

            # This is a bit janky. TOA objects do not include a way to store
            # photon weights. Thus, I am forced to save the weights as a
            # separate file. The other option would be to... Somehow pickle it
            # all together, load that output file, then split it up again in
            # here. However, I don't want to figure that out right now. I also
            # don't want to end up with a proprietary file type.

            # Thus, I simply append 'weights_' and move on.

            # self.weights = np.load('weights_' + self.args.load_pick + '.npy')

        if self.args.save_pick is not False:
            self.toas.pickle(filename=self.args.save_pick + '.pickle')
            # np.save('weights_' + self.args.save_pick + '.npy' , self.weights)

        self.mid_save()

        # Check for minMJD
        if self.args.minMJD is not None:
            self.args.minMJD = self.args.minMJD * u.day
            self.cut_minMJD()

        # Check for maxMJD
        if self.args.maxMJD is not None:
            self.args.maxMJD = self.args.maxMJD * u.day
            self.cut_maxMJD()

        # setup the MCMC to run
        self.init_MCMC()
Code example #17
File: pintbary.py  Project: spacefan/PINT
def main(argv=None):
    parser = argparse.ArgumentParser(
        description="PINT tool for command-line barycentering calculations.")

    parser.add_argument("time", help="MJD (UTC, by default)")
    parser.add_argument(
        "--timescale",
        default="utc",
        help="Time scale for MJD argument ('utc', 'tt', 'tdb'), default=utc")
    parser.add_argument(
        "--format",
        help=
        "Format for time argument ('mjd' or any astropy.Time format (e.g. 'isot'), see <http://docs.astropy.org/en/stable/time/#time-format>)",
        default="mjd")
    parser.add_argument("--freq",
                        type=float,
                        default=np.inf,
                        help="Frequency to use, MHz")
    parser.add_argument("--obs",
                        default="Geocenter",
                        help="Observatory code (default = Geocenter)")
    parser.add_argument("--parfile",
                        help="par file to read model from",
                        default=None)
    parser.add_argument(
        "--ra",
        help="RA to use (e.g. '12h22m33.2s' if not read from par file)")
    parser.add_argument(
        "--dec",
        help="Decl. to use (e.g. '19d21m44.2s' if not read from par file)")
    parser.add_argument("--dm",
                        help="DM to use (if not read from par file)",
                        type=float,
                        default=0.0)
    parser.add_argument("--ephem", default="DE421", help="Ephemeris to use")

    args = parser.parse_args(argv)

    if args.format in ("mjd", "jd", "unix"):
        # These formats require conversion from string to longdouble first
        fmt = args.format
        # Never allow format == 'mjd' because it fails when scale is 'utc'
        # Change 'mjd' to 'pulsar_mjd' to deal with this.
        if fmt == "mjd":
            fmt = "pulsar_mjd"
        t = Time(np.longdouble(args.time),
                 scale=args.timescale,
                 format=fmt,
                 precision=9)
    else:
        t = Time(args.time,
                 scale=args.timescale,
                 format=args.format,
                 precision=9)
    log.debug(t.iso)

    t = toa.TOA(t, freq=args.freq, obs=args.obs)
    # Build TOAs and compute TDBs and positions from ephemeris
    ts = toa.TOAs(toalist=[t])
    ts.compute_TDBs()
    ts.compute_posvels(ephem=args.ephem)

    if args.parfile is not None:
        m = pint.models.get_model(args.parfile)
    else:
        # Construct model by hand
        m = pint.models.StandardTimingModel
        # Should check if 12:13:14.2 syntax is used and support that as well!
        m.RAJ.quantity = Angle(args.ra)
        m.DECJ.quantity = Angle(args.dec)
        m.DM.quantity = args.dm * u.parsec / u.cm**3

    tdbtimes = m.get_barycentric_toas(ts.table)

    print("{0:.14f}".format(tdbtimes[0].value))
    return
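For reference, a hedged sketch of driving this entry point from Python; the MJD and coordinates are placeholders:

# Barycenter a single UTC MJD at the geocenter using hand-entered coordinates
main(argv=["56000.0", "--ra", "12h22m33.2s", "--dec", "19d21m44.2s",
           "--obs", "Geocenter", "--ephem", "DE421"])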
Code example #18
File: test_toa_reader.py  Project: zpleunis/PINT
 def setUp(self):
     self.x = toa.TOAs("test1.tim")
     self.x.apply_clock_corrections()
     self.x.compute_TDBs()
     self.x.table.sort('index')
Code example #19
File: fermiphase.py  Project: zpleunis/PINT
def main(argv=None):

    parser = argparse.ArgumentParser(
        description=
        "Use PINT to compute H-test and plot Phaseogram from a Fermi FT1 event file."
    )
    parser.add_argument("eventfile", help="Fermi event FITS file name.")
    parser.add_argument("parfile", help="par file to construct model from")
    parser.add_argument(
        "weightcol",
        help="Column name for event weights (or 'CALC' to compute them)")
    parser.add_argument("--ft2", help="Path to FT2 file.", default=None)
    parser.add_argument("--addphase",
                        help="Write FT1 file with added phase column",
                        default=False,
                        action='store_true')
    parser.add_argument("--plot",
                        help="Show phaseogram plot.",
                        action='store_true',
                        default=False)
    parser.add_argument("--plotfile",
                        help="Output figure file name (default=None)",
                        default=None)
    parser.add_argument("--maxMJD",
                        help="Maximum MJD to include in analysis",
                        default=None)
    parser.add_argument(
        "--outfile",
        help="Output figure file name (default is to overwrite input file)",
        default=None)
    parser.add_argument(
        "--planets",
        help="Use planetary Shapiro delay in calculations (default=False)",
        default=False,
        action="store_true")
    parser.add_argument("--ephem",
                        help="Planetary ephemeris to use (default=DE421)",
                        default="DE421")
    args = parser.parse_args(argv)

    # If outfile is specified, that implies addphase
    if args.outfile is not None:
        args.addphase = True

    # Read in model
    modelin = pint.models.get_model(args.parfile)
    if 'ELONG' in modelin.params:
        tc = SkyCoord(modelin.ELONG.quantity,
                      modelin.ELAT.quantity,
                      frame='barycentrictrueecliptic')
    else:
        tc = SkyCoord(modelin.RAJ.quantity,
                      modelin.DECJ.quantity,
                      frame='icrs')

    if args.ft2 is not None:
        # Instantiate FermiObs once so it gets added to the observatory registry
        FermiObs(name='Fermi', ft2name=args.ft2)

    # Read event file and return list of TOA objects
    tl = load_Fermi_TOAs(args.eventfile,
                         weightcolumn=args.weightcol,
                         targetcoord=tc)

    # Discard events outside of MJD range
    if args.maxMJD is not None:
        tlnew = []
        print("pre len : ", len(tl))
        maxT = Time(float(args.maxMJD), format='mjd')
        print("maxT : ", maxT)
        for tt in tl:
            if tt.mjd < maxT:
                tlnew.append(tt)
        tl = tlnew
        print("post len : ", len(tlnew))

    # Now convert to TOAs object and compute TDBs and posvels
    ts = toa.TOAs(toalist=tl)
    ts.filename = args.eventfile
    ts.compute_TDBs()
    ts.compute_posvels(ephem=args.ephem, planets=args.planets)

    print(ts.get_summary())
    mjds = ts.get_mjds()
    print(mjds.min(), mjds.max())

    # Compute model phase for each TOA
    phss = modelin.phase(ts.table)[1]
    # ensure all positive
    phases = np.where(phss < 0.0 * u.cycle, phss + 1.0 * u.cycle, phss)
    mjds = ts.get_mjds()
    weights = np.array([w['weight'] for w in ts.table['flags']])
    h = float(hmw(phases, weights))
    print("Htest : {0:.2f} ({1:.2f} sigma)".format(h, h2sig(h)))
    if args.plot:
        log.info("Making phaseogram plot with {0} photons".format(len(mjds)))
        phaseogram(mjds, phases, weights, bins=100, plotfile=args.plotfile)

    if args.addphase:
        # Read input FITS file (again).
        # If overwriting, open in 'update' mode
        if args.outfile is None:
            hdulist = pyfits.open(args.eventfile, mode='update')
        else:
            hdulist = pyfits.open(args.eventfile)
        event_hdu = hdulist[1]
        event_hdr = event_hdu.header
        event_dat = event_hdu.data
        if len(event_dat) != len(phases):
            raise RuntimeError(
                'Mismatch between length of FITS table ({0}) and length of phase array ({1})!'
                .format(len(event_dat), len(phases)))
        if 'PULSE_PHASE' in event_hdu.columns.names:
            log.info('Found existing PULSE_PHASE column, overwriting...')
            # Overwrite values in existing Column
            event_dat['PULSE_PHASE'] = phases
        else:
            # Construct and append new column, preserving HDU header and name
            log.info('Adding new PULSE_PHASE column.')
            phasecol = pyfits.ColDefs(
                [pyfits.Column(name='PULSE_PHASE', format='D', array=phases)])
            bt = pyfits.BinTableHDU.from_columns(event_hdu.columns + phasecol,
                                                 header=event_hdr,
                                                 name=event_hdu.name)
            hdulist[1] = bt
        if args.outfile is None:
            # Overwrite the existing file
            log.info('Overwriting existing FITS file ' + args.eventfile)
            hdulist.flush(verbose=True, output_verify='warn')
        else:
            # Write to new output file
            log.info('Writing output FITS file ' + args.outfile)
            hdulist.writeto(args.outfile,
                            overwrite=True,
                            checksum=True,
                            output_verify='warn')

    return 0
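A hedged example invocation; the FT1/FT2 and par file names are placeholders:

main(argv=["J0030_ft1.fits", "J0030.par", "CALC",
           "--ft2", "J0030_ft2.fits", "--plot"])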
Code example #20
def main(argv=None):

    parser = argparse.ArgumentParser(description="PINT tool for MCMC optimization of timing models using event data.")

    parser.add_argument("eventfile",help="event file to use")
    parser.add_argument("parfile",help="par file to read model from")
    parser.add_argument("gaussianfile",help="gaussian file that defines template")
    parser.add_argument("--ft2",help="Path to FT2 file.",default=None)
    parser.add_argument("--weightcol",help="name of weight column (or 'CALC' to have them computed",default=None)
    parser.add_argument("--nwalkers",help="Number of MCMC walkers (def 200)",type=int,
        default=200)
    parser.add_argument("--burnin",help="Number of MCMC steps for burn in (def 100)",
        type=int, default=100)
    parser.add_argument("--nsteps",help="Number of MCMC steps to compute (def 1000)",
        type=int, default=1000)
    parser.add_argument("--minMJD",help="Earliest MJD to use (def 54680)",type=float,
        default=54680.0)
    parser.add_argument("--maxMJD",help="Latest MJD to use (def 57250)",type=float,
        default=57250.0)
    parser.add_argument("--phs",help="Starting phase offset [0-1] (def is to measure)",type=float)
    parser.add_argument("--phserr",help="Error on starting phase",type=float,
        default=0.03)
    parser.add_argument("--minWeight",help="Minimum weight to include (def 0.05)",
        type=float,default=0.05)
    parser.add_argument("--wgtexp",
        help="Raise computed weights to this power (or 0.0 to disable any rescaling of weights)",
        type=float, default=0.0)
    parser.add_argument("--testWeights",help="Make plots to evalute weight cuts?",
        default=False,action="store_true")
    parser.add_argument("--doOpt",help="Run initial scipy opt before MCMC?",
        default=False,action="store_true")
    parser.add_argument("--initerrfact",help="Multiply par file errors by this factor when initializing walker starting values",type=float,default=0.1)
    parser.add_argument("--priorerrfact",help="Multiple par file errors by this factor when setting gaussian prior widths",type=float,default=10.0)
    parser.add_argument("--usepickle",help="Read events from pickle file, if available?",
        default=False,action="store_true")

    global nwalkers, nsteps, ftr

    args = parser.parse_args(argv)

    eventfile = args.eventfile
    parfile = args.parfile
    gaussianfile = args.gaussianfile
    weightcol = args.weightcol

    if args.ft2 is not None:
        # Instantiate FermiObs once so it gets added to the observatory registry
        FermiObs(name='Fermi',ft2name=args.ft2)

    nwalkers = args.nwalkers
    burnin = args.burnin
    nsteps = args.nsteps
    if burnin >= nsteps:
        log.error('burnin must be < nsteps')
        sys.exit(1)
    nbins = 256 # For likelihood calculation based on gaussians file
    outprof_nbins = 256 # in the text file, for pygaussfit.py, for instance
    minMJD = args.minMJD
    maxMJD = args.maxMJD # Usually set by coverage of IERS file

    minWeight = args.minWeight
    do_opt_first = args.doOpt
    wgtexp = args.wgtexp


    # Read in initial model
    modelin = pint.models.get_model(parfile)

    # The custom_timing version below is to manually construct the TimingModel
    # class, which allows it to be pickled. This is needed for parallelizing
    # the emcee call over a number of threads.  So far, it isn't quite working
    # so it is disabled.  The code above constructs the TimingModel class
    # dynamically, as usual.
    #modelin = custom_timing(parfile)

    # Remove the dispersion delay as it is unnecessary
    #modelin.delay_funcs['L1'].remove(modelin.dispersion_delay)
    # Set the target coords for automatic weighting if necessary
    if 'ELONG' in modelin.params:
        tc = SkyCoord(modelin.ELONG.quantity,modelin.ELAT.quantity,
            frame='barycentrictrueecliptic')
    else:
        tc = SkyCoord(modelin.RAJ.quantity,modelin.DECJ.quantity,frame='icrs')

    target = tc if weightcol=='CALC' else None

    # TODO: make this properly handle long double
    if not args.usepickle or (not (os.path.isfile(eventfile+".pickle") or
        os.path.isfile(eventfile+".pickle.gz"))):
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(eventfile, weightcolumn=weightcol,
                                   targetcoord=target, minweight=minWeight)
        # Limit the TOAs to ones in selected MJD range and above minWeight
        tl = [tl[ii] for ii in range(len(tl)) if (tl[ii].mjd.value > minMJD and tl[ii].mjd.value < maxMJD
            and (weightcol is None or tl[ii].flags['weight'] > minWeight))]
        log.info("There are %d events we will use" % len(tl))
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    else:  # read the events in as a pickle file
        picklefile = toa._check_pickle(eventfile)
        if not picklefile:
            picklefile = eventfile
        ts = toa.TOAs(picklefile)

    if weightcol is not None:
        if weightcol=='CALC':
            weights = np.asarray([x['weight'] for x in ts.table['flags']])
            log.info("Original weights have min / max weights %.3f / %.3f" % \
                (weights.min(), weights.max()))
            # Rescale the weights, if requested (by having wgtexp != 0.0)
            if wgtexp != 0.0:
                weights **= wgtexp
                wmx, wmn = weights.max(), weights.min()
                # make the highest weight = 1, but keep min weight the same
                weights = wmn + ((weights - wmn) * (1.0 - wmn) / (wmx - wmn))
            for ii, x in enumerate(ts.table['flags']):
                x['weight'] = weights[ii]
        weights = np.asarray([x['weight'] for x in ts.table['flags']])
        log.info("There are %d events, with min / max weights %.3f / %.3f" % \
            (len(weights), weights.min(), weights.max()))
    else:
        weights = None
        log.info("There are %d events, no weights are being used." % ts.ntoas)

    # Now load in the gaussian template and normalize it
    gtemplate = read_gaussfitfile(gaussianfile, nbins)
    gtemplate /= gtemplate.mean()

    # Set the priors on the parameters in the model, before
    # instantiating the emcee_fitter
    # Currently, this adds a gaussian prior on each parameter
    # with width equal to the par file uncertainty * priorerrfact,
    # and then puts in some special cases.
    # *** This should be replaced/supplemented with a way to specify
    # more general priors on parameters that need certain bounds
    phs = 0.0 if args.phs is None else args.phs
    
    sampler = EmceeSampler(nwalkers)
    ftr = MCMCFitterBinnedTemplate(ts, modelin, sampler, template=gtemplate, \
        weights=weights, phs=phs, phserr=args.phserr, minMJD=minMJD, maxMJD=maxMJD)

    fitkeys, fitvals, fiterrs = ftr.get_fit_keyvals()

    # Use this if you want to see the effect of setting minWeight
    if args.testWeights:
        log.info("Checking H-test vs weights")
        ftr.prof_vs_weights(use_weights=True)
        ftr.prof_vs_weights(use_weights=False)
        sys.exit()

    # Now compute the photon phases and see if we see a pulse
    phss = ftr.get_event_phases()
    maxbin, like_start = marginalize_over_phase(phss, gtemplate,
        weights=ftr.weights, minimize=True, showplot=False)
    log.info("Starting pulse likelihood: %f" % like_start)

    if args.phs is None:
        fitvals[-1] = 1.0 - maxbin[0] / float(len(gtemplate))
        if fitvals[-1] > 1.0: fitvals[-1] -= 1.0
        if fitvals[-1] < 0.0: fitvals[-1] += 1.0
        log.info("Starting pulse phase: %f" % fitvals[-1])
    else:
        log.info("Measured starting pulse phase is %f, but using %f" % \
            (1.0 - maxbin / float(len(gtemplate)), args.phs))
        fitvals[-1] = args.phs
    ftr.fitvals[-1] = fitvals[-1]
    ftr.phaseogram(plotfile=ftr.model.PSR.value+"_pre.png")
    plt.close()

    # Write out the starting pulse profile
    vs, xs = np.histogram(ftr.get_event_phases(), outprof_nbins, \
        range=[0,1], weights=ftr.weights)
    f = open(ftr.model.PSR.value+"_prof_pre.txt", 'w')
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Try normal optimization first to see how it goes
    if do_opt_first:
        result = op.minimize(ftr.minimize_func, np.zeros_like(ftr.fitvals))
        newfitvals = np.asarray(result['x']) * ftr.fiterrs + ftr.fitvals
        like_optmin = -result['fun']
        log.info("Optimization likelihood: %f" % like_optmin)
        ftr.set_params(dict(zip(ftr.fitkeys, newfitvals)))
        ftr.phaseogram()
    else:
        like_optmin = -np.inf

    # Set up the initial conditions for the emcee walkers.  Use the
    # scipy.optimize newfitvals instead if they are better
    ndim = ftr.n_fit_params
    if like_start > like_optmin:
        pos = None
    else:
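        # Start the walkers in a Gaussian ball around the optimizer solution,
        # scaled by fiterrs * initerrfact, and pin walker 0 to the pre-fit values
        # so at least one walker begins at a known-good solution.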
        pos = [newfitvals + ftr.fiterrs*args.initerrfact*np.random.randn(ndim)
            for i in range(nwalkers)]
        pos[0] = ftr.fitvals
    
    ftr.fit_toas(maxiter=nsteps, pos=pos, 
        priorerrfact=args.priorerrfact, errfact=args.initerrfact)

    def plot_chains(chain_dict, file=False):
        npts = len(chain_dict)
        fig, axes = plt.subplots(npts, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
            axes[ii].set_ylabel(name)
        axes[npts-1].set_xlabel("Step Number")
        fig.tight_layout()
        if file:
            fig.savefig(file)
            plt.close()
        else:
            plt.show()
            plt.close()

    chains = sampler.chains_to_dict(ftr.fitkeys)
    plot_chains(chains, file=ftr.model.PSR.value+"_chains.png")

    # Make the triangle plot.
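    # sampler.sampler.chain has shape (nwalkers, nsteps, ndim); dropping the
    # burn-in steps and flattening gives one row per retained posterior sample.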
    samples = sampler.sampler.chain[:, burnin:, :].reshape((-1, ftr.n_fit_params))
    try:
        import corner
        fig = corner.corner(samples, labels=ftr.fitkeys, bins=50,
            truths=ftr.maxpost_fitvals, plot_contours=True)
        fig.savefig(ftr.model.PSR.value+"_triangle.png")
        plt.close()
    except ImportError:
        pass

    # Make a phaseogram with the 50th percentile values
    #ftr.set_params(dict(zip(ftr.fitkeys, np.percentile(samples, 50, axis=0))))
    # Make a phaseogram with the best MCMC result
    ftr.set_params(dict(zip(ftr.fitkeys[:-1], ftr.maxpost_fitvals[:-1])))
    ftr.phaseogram(plotfile=ftr.model.PSR.value+"_post.png")
    plt.close()

    # Write out the output pulse profile
    vs, xs = np.histogram(ftr.get_event_phases(), outprof_nbins, \
        range=[0,1], weights=ftr.weights)
    f = open(ftr.model.PSR.value+"_prof_post.txt", 'w')
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Write out the par file for the best MCMC parameter est
    f = open(ftr.model.PSR.value+"_post.par", 'w')
    f.write(ftr.model.as_parfile())
    f.close()

    # Print the best MCMC values and ranges
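    # Each entry of ranges is (median, upper error, lower error): the 50th
    # percentile with its distances to the 84th and 16th percentiles.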
    ranges = list(map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
        zip(*np.percentile(samples, [16, 50, 84], axis=0))))
    log.info("Post-MCMC values (50th percentile +/- 16th/84th percentile):")
    for name, vals in zip(ftr.fitkeys, ranges):
        log.info("%8s:"%name + "%25.15g (+ %12.5g  / - %12.5g)"%vals)

    # Put the same stuff in a file
    f = open(ftr.model.PSR.value+"_results.txt", 'w')

    f.write("Post-MCMC values (50th percentile +/- (16th/84th percentile):\n")
    for name, vals in zip(ftr.fitkeys, ranges):
        f.write("%8s:"%name + " %25.15g (+ %12.5g  / - %12.5g)\n"%vals)

    f.write("\nMaximum likelihood par file:\n")
    f.write(ftr.model.as_parfile())
    f.close()

    import pickle
    pickle.dump(samples, open(ftr.model.PSR.value+"_samples.pickle", "wb"))
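
A minimal follow-up sketch (not part of the snippet above), assuming the samples
pickle was written with the standard pickle module as shown: reload it and
recompute the same percentile summary. The file name depends on the PSR value in
the par file and is hypothetical here.

import pickle
import numpy as np

# Hypothetical file name; the script names it <PSR>_samples.pickle
with open("PSRJ0030+0451_samples.pickle", "rb") as fh:
    samples = pickle.load(fh)

# 16th/50th/84th percentiles per fitted parameter
meds = np.percentile(samples, 50, axis=0)
los = meds - np.percentile(samples, 16, axis=0)
his = np.percentile(samples, 84, axis=0) - meds
for med, hi, lo in zip(meds, his, los):
    print("%25.15g (+ %12.5g / - %12.5g)" % (med, hi, lo))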
Code example #21
def main(argv=None):
    import argparse

    parser = argparse.ArgumentParser(description="PINT tool for simulating TOAs")
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("timfile", help="Output TOA file name")
    parser.add_argument(
        "--inputtim",
        help="Input tim file for fake TOA sampling",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--startMJD",
        help="MJD of first fake TOA (default=56000.0)",
        type=float,
        default=56000.0,
    )
    parser.add_argument(
        "--ntoa", help="Number of fake TOAs to generate", type=int, default=100
    )
    parser.add_argument(
        "--duration", help="Span of TOAs to generate (days)", type=float, default=400.0
    )
    parser.add_argument("--obs", help="Observatory code (default: GBT)", default="GBT")
    parser.add_argument(
        "--freq",
        help="Frequency for TOAs (MHz) (default: 1400)",
        nargs="+",
        type=float,
        default=1400.0,
    )
    parser.add_argument(
        "--error",
        help="Random error to apply to each TOA (us, default=1.0)",
        type=float,
        default=1.0,
    )
    parser.add_argument(
        "--fuzzdays",
        help="Standard deviation of 'fuzz' distribution (jd) (default: 0.0)",
        type=float,
        default=0.0,
    )
    parser.add_argument(
        "--plot", help="Plot residuals", action="store_true", default=False
    )
    parser.add_argument("--ephem", help="Ephemeris to use", default="DE421")
    parser.add_argument(
        "--planets",
        help="Use planetary Shapiro delay",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--format", help="The format of out put .tim file.", default="TEMPO2"
    )
    args = parser.parse_args(argv)

    log.info("Reading model from {0}".format(args.parfile))
    m = pint.models.get_model(args.parfile)

    out_format = args.format
    error = args.error * u.microsecond

    if args.inputtim is None:
        log.info("Generating uniformly spaced TOAs")
        duration = args.duration * u.day
        # start = Time(args.startMJD,scale='utc',format='pulsar_mjd',precision=9)
        start = np.longdouble(args.startMJD) * u.day
        freq = np.atleast_1d(args.freq) * u.MHz
        site = get_observatory(args.obs)
        scale = site.timescale

        times = np.linspace(0, duration.to(u.day).value, args.ntoa) * u.day + start

        # 'Fuzz' out times
        if args.fuzzdays > 0.0:
            fuzz = np.random.normal(scale=args.fuzzdays, size=len(times)) * u.day
            times += fuzz

        # Assign a frequency to each TOA (supports multiple frequencies)
        freq_array = get_freq_array(freq, len(times))
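        # get_freq_array is a helper defined elsewhere in this script; it is
        # assumed here to cycle the requested frequencies across the TOAs so
        # multi-frequency data sets can be simulated.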
        tl = [
            toa.TOA(t.value, error=error, obs=args.obs, freq=f, scale=scale)
            for t, f in zip(times, freq_array)
        ]
        ts = toa.TOAs(toalist=tl)
    else:
        log.info("Reading initial TOAs from {0}".format(args.inputtim))
        ts = toa.TOAs(toafile=args.inputtim)
        ts.table["error"][:] = error

    # WARNING! I'm not sure how clock corrections should be handled here!
    # Do we apply them, or not?
    if not any(["clkcorr" in f for f in ts.table["flags"]]):
        log.info("Applying clock corrections.")
        ts.apply_clock_corrections()
    if "tdb" not in ts.table.colnames:
        log.info("Getting IERS params and computing TDBs.")
        ts.compute_TDBs(ephem=args.ephem)
    if "ssb_obs_pos" not in ts.table.colnames:
        log.info("Computing observatory positions and velocities.")
        ts.compute_posvels(args.ephem, args.planets)

    log.info("Creating TOAs")
    F_local = m.d_phase_d_toa(ts)
    rs = m.phase(ts).frac / F_local

    # Adjust the TOA times to put them where their residuals will be 0.0
    ts.adjust_TOAs(TimeDelta(-1.0 * rs))
    rspost = m.phase(ts).frac / F_local

    log.info("Second iteration")
    # Do a second iteration
    ts.adjust_TOAs(TimeDelta(-1.0 * rspost))

    err = np.random.randn(len(ts.table)) * error
    # Add the actual error fuzzing
    ts.adjust_TOAs(TimeDelta(err))

    # Write TOAs to a file
    ts.write_TOA_file(args.timfile, name="fake", format=out_format)

    if args.plot:
        # This should be a very boring plot with all residuals flat at 0.0!
        import matplotlib.pyplot as plt
        from astropy.visualization import quantity_support

        quantity_support()

        rspost2 = m.phase(ts).frac / F_local
        plt.errorbar(
            ts.get_mjds(), rspost2.to(u.us), yerr=ts.get_errors().to(u.us), fmt="."
        )
        newts = pint.toa.get_TOAs(args.timfile, ephem=args.ephem, planets=args.planets)
        rsnew = m.phase(newts).frac / F_local
        plt.errorbar(
            newts.get_mjds(), rsnew.to(u.us), yerr=newts.get_errors().to(u.us), fmt="."
        )
        # plt.plot(ts.get_mjds(),rspost.to(u.us),'x')
        plt.xlabel("MJD")
        plt.ylabel("Residual (us)")
        plt.grid(True)
        plt.show()
Code example #22
File: zima.py  Project: spacefan/PINT
def main(argv=None):
    import argparse
    parser = argparse.ArgumentParser(
        description="PINT tool for simulating TOAs")
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("timfile", help="Output TOA file name")
    parser.add_argument("--startMJD",
                        help="MJD of first fake TOA (default=56000.0)",
                        type=float,
                        default=56000.0)
    parser.add_argument("--ntoa",
                        help="Number of fake TOAs to generate",
                        type=int,
                        default=100)
    parser.add_argument("--duration",
                        help="Span of TOAs to generate (days)",
                        type=int,
                        default=400)
    parser.add_argument("--obs",
                        help="Observatory code (default: GBT)",
                        default="GBT")
    parser.add_argument("--freq",
                        help="Frequency for TOAs (MHz) (default: 1400)",
                        type=float,
                        default=1400.0)
    parser.add_argument(
        "--error",
        help="Random error to apply to each TOA (us, default=1.0)",
        type=float,
        default=1.0)
    parser.add_argument("--plot",
                        help="Plot residuals",
                        action="store_true",
                        default=False)
    parser.add_argument("--ephem", help="Ephemeris to use", default="DE421")
    parser.add_argument("--planets",
                        help="Use planetary Shapiro delay",
                        action="store_true",
                        default=False)
    args = parser.parse_args(argv)

    log.info("Reading model from {0}".format(args.parfile))
    m = pint.models.get_model(args.parfile)

    duration = args.duration * u.day
    start = Time(args.startMJD, scale='utc', format='mjd', precision=9)
    error = args.error * u.microsecond
    freq = args.freq * u.MHz
    scale = 'utc'

    times = np.linspace(0, duration.to(u.day).value, args.ntoa) * u.day + start

    tl = [
        toa.TOA(t, error=error, obs=args.obs, freq=freq, scale=scale)
        for t in times
    ]

    ts = toa.TOAs(toalist=tl)

    # WARNING! I'm not sure how clock corrections should be handled here!
    # Do we apply them, or not?
    if not any(['clkcorr' in f for f in ts.table['flags']]):
        log.info("Applying clock corrections.")
        ts.apply_clock_corrections()
    if 'tdb' not in ts.table.colnames:
        log.info("Getting IERS params and computing TDBs.")
        ts.compute_TDBs()
    if 'ssb_obs_pos' not in ts.table.colnames:
        log.info("Computing observatory positions and velocities.")
        ts.compute_posvels(args.ephem, args.planets)

    # F_local has units of Hz; discard cycles unit in phase to get a unit
    # that TimeDelta understands
    F_local = m.d_phase_d_toa(ts)
    rs = m.phase(ts.table).frac.value / F_local

    # Adjust the TOA times to put them where their residuals will be 0.0
    ts.adjust_TOAs(TimeDelta(-1.0 * rs))
    rspost = m.phase(ts.table).frac.value / F_local

    # Do a second iteration
    ts.adjust_TOAs(TimeDelta(-1.0 * rspost))

    # Write TOAs to a file
    ts.write_TOA_file(args.timfile, name='fake', format='Tempo2')

    if args.plot:
        # This should be a very boring plot with all residuals flat at 0.0!
        import matplotlib.pyplot as plt
        rspost2 = m.phase(ts.table).frac / F_local
        plt.errorbar(ts.get_mjds(),
                     rspost2.to(u.us).value,
                     yerr=ts.get_errors().to(u.us).value)
        newts = pint.toa.get_TOAs(args.timfile)
        rsnew = m.phase(newts.table).frac / F_local
        plt.errorbar(newts.get_mjds(),
                     rsnew.to(u.us).value,
                     yerr=newts.get_errors().to(u.us).value)
        #plt.plot(ts.get_mjds(),rspost.to(u.us),'x')
        plt.xlabel('MJD')
        plt.ylabel('Residual (us)')
        plt.show()
Code example #23
def test_sampler():
    r = []
    for i in range(2):
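        # Seed Python's and numpy's global RNGs and build a dedicated RandomState
        # for the sampler so that both passes through this loop produce identical
        # chains (verified by the assert at the end).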
        random.seed(0)
        numpy.random.seed(0)
        s = numpy.random.mtrand.RandomState(0)

        parfile = join(datadir, "PSRJ0030+0451_psrcat.par")
        eventfile = join(
            datadir,
            "J0030+0451_P8_15.0deg_239557517_458611204_ft1weights_GEO_wt.gt.0.4.fits",
        )
        gaussianfile = join(datadir, "templateJ0030.3gauss")
        weightcol = "PSRJ0030+0451"

        minWeight = 0.9
        nwalkers = 10
        nsteps = 1
        nbins = 256
        phs = 0.0

        model = pint.models.get_model(parfile)
        tl = fermi.load_Fermi_TOAs(eventfile,
                                   weightcolumn=weightcol,
                                   minweight=minWeight)
        ts = toa.TOAs(toalist=tl)
        # Introduce a small error so that residuals can be calculated
        ts.table["error"] = 1.0
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)

        weights, _ = ts.get_flag_value("weight", as_type=float)
        weights = np.array(weights)
        template = read_gaussfitfile(gaussianfile, nbins)
        template /= template.mean()

        sampler = EmceeSampler(nwalkers)
        fitter = MCMCFitterBinnedTemplate(ts,
                                          model,
                                          sampler,
                                          template=template,
                                          weights=weights,
                                          phs=phs)
        fitter.sampler.random_state = s

        # phases = fitter.get_event_phases()
        # maxbin, like_start = marginalize_over_phase(phases, template,
        #                                            weights=fitter.weights,
        #                                            minimize=True,
        #                                            showplot=True)
        # fitter.fitvals[-1] = 1.0 - maxbin[0] / float(len(template))

        # fitter.set_priors(fitter, 10)
        pos = fitter.sampler.get_initial_pos(
            fitter.fitkeys,
            fitter.fitvals,
            fitter.fiterrs,
            0.1,
            minMJD=fitter.minMJD,
            maxMJD=fitter.maxMJD,
        )
        # pos = fitter.clip_template_params(pos)
        fitter.sampler.initialize_sampler(fitter.lnposterior,
                                          fitter.n_fit_params)
        fitter.sampler.run_mcmc(pos, nsteps)
        # fitter.fit_toas(maxiter=nsteps, pos=None)
        # fitter.set_parameters(fitter.maxpost_fitvals)

        # fitter.phaseogram()
        # samples = sampler.sampler.chain[:, 10:, :].reshape((-1, fitter.n_fit_params))

        # r.append(np.random.randn())
        r.append(sampler.sampler.chain[0])
    assert_array_equal(r[0], r[1])
Code example #24
                                              frame='icrs'))

    # Discard events outside of MJD range
    if args.maxMJD is not None:
        tlnew = []
        print("pre len : ", len(tl))
        maxT = Time(float(args.maxMJD), format='mjd')
        print("maxT : ", maxT)
        for tt in tl:
            if tt.mjd < maxT:
                tlnew.append(tt)
        tl = tlnew
        print("post len : ", len(tlnew))

    # Now convert to TOAs object and compute TDBs and posvels
    ts = toa.TOAs(toalist=tl)
    ts.filename = args.eventfile
    ts.compute_TDBs()
    ts.compute_posvels(ephem=args.ephem, planets=args.planets)

    print(ts.get_summary())
    mjds = ts.get_mjds()
    print(mjds.min(), mjds.max())

    # Read in model
    modelin = pint.models.get_model(args.parfile)

    # Remove the dispersion delay as it is unnecessary
    modelin.delay_funcs['L1'].remove(modelin.dispersion_delay)

    # Compute model phase for each TOA
Code example #25
    # TODO: make this properly handle long double
    if not (os.path.isfile(eventfile + ".pickle")
            or os.path.isfile(eventfile + ".pickle.gz")):
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(eventfile,
                                   weightcolumn=weightcol,
                                   targetcoord=target,
                                   minweight=minWeight)
        # Limit the TOAs to ones where we have IERS corrections for
        tl = [
            tl[ii] for ii in range(len(tl)) if (tl[ii].mjd.value < maxMJD and (
                weightcol is None or tl[ii].flags['weight'] > minWeight))
        ]
        print "There are %d events we will use" % len(tl)
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    else:  # read the events in as a pickle file
        ts = toa.TOAs(eventfile, usepickle=True)

    if weightcol is not None:
        if weightcol == 'CALC':
            weights = np.asarray([x['weight'] for x in ts.table['flags']])
            print "Original weights have min / max weights %.3f / %.3f" % \
                (weights.min(), weights.max())
            weights **= wgtexp
            wmx, wmn = weights.max(), weights.min()
            # make the highest weight = 1, but keep min weight the same
Code example #26
File: htest_optimize.py  Project: hellohancheng/PINT
def main(argv=None):

    if argv is None:
        argv = sys.argv[1:]
    if len(argv) == 3:
        eventfile, parfile, weightcol = argv
    elif len(argv) == 2:
        eventfile, parfile = argv
        weightcol = None
    else:
        print("usage: htest_optimize eventfile parfile [weightcol]")
        sys.exit()

    # Read in initial model
    modelin = pint.models.get_model(parfile)
    # Remove the dispersion delay as it is unnecessary
    modelin.delay_funcs.remove(modelin.dispersion_delay)
    # Set the target coords for automatic weighting if necessary
    if "ELONG" in modelin.params:
        tc = SkyCoord(
            modelin.ELONG.quantity,
            modelin.ELAT.quantity,
            frame="barycentrictrueecliptic",
        )
    else:
        tc = SkyCoord(modelin.RAJ.quantity, modelin.DECJ.quantity, frame="icrs")

    target = tc if weightcol == "CALC" else None

    # TODO: make this properly handle long double
    if not (
        os.path.isfile(eventfile + ".pickle")
        or os.path.isfile(eventfile + ".pickle.gz")
    ):
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(
            eventfile, weightcolumn=weightcol, targetcoord=target, minweight=minWeight
        )
        # Limit the TOAs to ones where we have IERS corrections for
        tl = [
            tl[ii]
            for ii in range(len(tl))
            if (
                tl[ii].mjd.value < maxMJD
                and (weightcol is None or tl[ii].flags["weight"] > minWeight)
            )
        ]
        print("There are %d events we will use" % len(tl))
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    else:  # read the events in as a pickle file
        picklefile = toa._check_pickle(eventfile)
        if not picklefile:
            picklefile = eventfile
        ts = toa.TOAs(picklefile)

    if weightcol is not None:
        if weightcol == "CALC":
            weights = np.asarray([x["weight"] for x in ts.table["flags"]])
            print(
                "Original weights have min / max weights %.3f / %.3f"
                % (weights.min(), weights.max())
            )
            weights **= wgtexp
            wmx, wmn = weights.max(), weights.min()
            # make the highest weight = 1, but keep min weight the same
            weights = wmn + ((weights - wmn) * (1.0 - wmn) / (wmx - wmn))
            for ii, x in enumerate(ts.table["flags"]):
                x["weight"] = weights[ii]
        weights = np.asarray([x["weight"] for x in ts.table["flags"]])
        print(
            "There are %d events, with min / max weights %.3f / %.3f"
            % (len(weights), weights.min(), weights.max())
        )
    else:
        weights = None
        print("There are %d events, no weights are being used." % (len(weights)))

    # Now define the requirements for emcee
    ftr = emcee_fitter(ts, modelin, weights)

    # Use this if you want to see the effect of setting minWeight
    if minWeight == 0.0:
        print("Checking h-test vs weights")
        ftr.prof_vs_weights(use_weights=True)
        ftr.prof_vs_weights(use_weights=False)
        sys.exit()

    # Now compute the photon phases and see if we see a pulse
    phss = ftr.get_event_phases()
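    # hmw is the weighted H-test statistic and sf_hm(..., logprob=True) its log
    # false-alarm probability; the negated value serves as a pulse-detection
    # "likelihood" to compare against the optimizer result below.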
    like_start = -1.0 * sf_hm(hmw(phss, weights=ftr.weights), logprob=True)
    print("Starting pulse likelihood:", like_start)
    ftr.phaseogram(file=ftr.model.PSR.value + "_pre.png")
    plt.close()
    ftr.phaseogram()

    # Write out the starting pulse profile
    vs, xs = np.histogram(
        ftr.get_event_phases(), outprof_nbins, range=[0, 1], weights=ftr.weights
    )
    f = open(ftr.model.PSR.value + "_prof_pre.txt", "w")
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Try normal optimization first to see how it goes
    if do_opt_first:
        result = op.minimize(ftr.minimize_func, np.zeros_like(ftr.fitvals))
        newfitvals = np.asarray(result["x"]) * ftr.fiterrs + ftr.fitvals
        like_optmin = -result["fun"]
        print("Optimization likelihood:", like_optmin)
        ftr.set_params(dict(zip(ftr.fitkeys, newfitvals)))
        ftr.phaseogram()
    else:
        like_optmin = -np.inf

    # Set up the initial conditions for the emcee walkers.  Use the
    # scipy.optimize newfitvals instead if they are better
    ndim = ftr.n_fit_params
    if like_start > like_optmin:
        # Keep the starting deviations small...
        pos = [
            ftr.fitvals + ftr.fiterrs / errfact * np.random.randn(ndim)
            for ii in range(nwalkers)
        ]
        # Set starting params with uniform priors to uniform in the prior
        for param in ["GLPH_1", "GLEP_1", "SINI", "M2", "E", "ECC", "PX", "A1"]:
            if param in ftr.fitkeys:
                idx = ftr.fitkeys.index(param)
                if param == "GLPH_1":
                    svals = np.random.uniform(-0.5, 0.5, nwalkers)
                elif param == "GLEP_1":
                    svals = np.random.uniform(minMJD + 100, maxMJD - 100, nwalkers)
                    # svals = 55422.0 + np.random.randn(nwalkers)
                elif param == "SINI":
                    svals = np.random.uniform(0.0, 1.0, nwalkers)
                elif param == "M2":
                    svals = np.random.uniform(0.1, 0.6, nwalkers)
                elif param in ["E", "ECC", "PX", "A1"]:
                    # Ensure all positive
                    svals = np.fabs(
                        ftr.fitvals[idx] + ftr.fiterrs[idx] * np.random.randn(nwalkers)
                    )
                    if param in ["E", "ECC"]:
                        svals[svals > 1.0] = 1.0 - (svals[svals > 1.0] - 1.0)
                for ii in range(nwalkers):
                    pos[ii][idx] = svals[ii]
    else:
        pos = [
            newfitvals + ftr.fiterrs / errfact * np.random.randn(ndim)
            for i in range(nwalkers)
        ]
    # Set the 0th walker to have the initial pre-fit solution
    # This way, one walker should always be in a good position
    pos[0] = ftr.fitvals

    import emcee

    # sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior, threads=10)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior)
    # The number is the number of points in the chain
    sampler.run_mcmc(pos, nsteps)

    def chains_to_dict(names, sampler):
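        # sampler.chain has shape (nwalkers, nsteps, ndim); transposing each
        # parameter slice gives (nsteps, nwalkers), so plot() draws one trace per
        # walker against step number.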
        chains = [sampler.chain[:, :, ii].T for ii in range(len(names))]
        return dict(zip(names, chains))

    def plot_chains(chain_dict, file=False):
        npts = len(chain_dict)
        fig, axes = plt.subplots(npts, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
            axes[ii].set_ylabel(name)
        axes[npts - 1].set_xlabel("Step Number")
        fig.tight_layout()
        if file:
            fig.savefig(file)
            plt.close()
        else:
            plt.show()
            plt.close()

    chains = chains_to_dict(ftr.fitkeys, sampler)
    plot_chains(chains, file=ftr.model.PSR.value + "_chains.png")

    # Make the triangle plot.
    # samples is needed again below for the percentile summary, so compute it
    # whether or not corner is installed.
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    try:
        import corner

        fig = corner.corner(samples, labels=ftr.fitkeys, bins=50)
        fig.savefig(ftr.model.PSR.value + "_triangle.png")
        plt.close()
    except ImportError:
        pass

    # Make a phaseogram with the 50th percentile values
    # ftr.set_params(dict(zip(ftr.fitkeys, np.percentile(samples, 50, axis=0))))
    # Make a phaseogram with the best MCMC result
    ftr.set_params(dict(zip(ftr.fitkeys, ftr.maxpost_fitvals)))
    ftr.phaseogram(file=ftr.model.PSR.value + "_post.png")
    plt.close()

    # Write out the output pulse profile
    vs, xs = np.histogram(
        ftr.get_event_phases(), outprof_nbins, range=[0, 1], weights=ftr.weights
    )
    f = open(ftr.model.PSR.value + "_prof_post.txt", "w")
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Write out the par file for the best MCMC parameter est
    f = open(ftr.model.PSR.value + "_post.par", "w")
    f.write(ftr.model.as_parfile())
    f.close()

    # Print the best MCMC values and ranges
    ranges = list(map(
        lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
        zip(*np.percentile(samples, [16, 50, 84], axis=0)),
    ))
    print("Post-MCMC values (50th percentile +/- 16th/84th percentile):")
    for name, vals in zip(ftr.fitkeys, ranges):
        print("%8s:" % name, "%25.15g (+ %12.5g  / - %12.5g)" % vals)

    # Put the same stuff in a file
    f = open(ftr.model.PSR.value + "_results.txt", "w")

    f.write("Post-MCMC values (50th percentile +/- (16th/84th percentile):\n")
    for name, vals in zip(ftr.fitkeys, ranges):
        f.write("%8s:" % name + " %25.15g (+ %12.5g  / - %12.5g)\n" % vals)

    f.write("\nMaximum likelihood par file:\n")
    f.write(ftr.model.as_parfile())
    f.close()

    import pickle

    pickle.dump(samples, open(ftr.model.PSR.value + "_samples.pickle", "wb"))
Code example #27
File: polarphase.py  Project: produit/PINT
def main(argv=None):

    parser = argparse.ArgumentParser(
        description=
        "Use PINT to compute H-test and plot Phaseogram from a POLAR event file."
    )
    parser.add_argument("eventfile", help="Polar event root file name.")
    parser.add_argument("parfile", help="par file to construct model from")
    parser.add_argument("--wei",
                        help="Branch name for event weights",
                        default="")
    parser.add_argument("--eng",
                        help="Path to ENG file.",
                        default="$POLAR_AUX/Alleng.root")
    parser.add_argument("--addphase",
                        help="Add phase tree Friend",
                        default=False,
                        action='store_true')
    parser.add_argument("--plot",
                        help="Show phaseogram plot.",
                        action='store_true',
                        default=False)
    parser.add_argument("--plotfile",
                        help="Output figure file name (default=None)",
                        default=None)
    parser.add_argument("--maxMJD",
                        help="Maximum MJD to include in analysis",
                        default=None)
    parser.add_argument(
        "--planets",
        help="Use planetary Shapiro delay in calculations (default=False)",
        default=False,
        action="store_true")
    parser.add_argument("--ephem",
                        help="Planetary ephemeris to use (default=DE421)",
                        default="DE421")
    args = parser.parse_args(argv)

    # Read in model
    modelin = pint.models.get_model(args.parfile)
    if 'ELONG' in modelin.params:
        tc = SkyCoord(modelin.ELONG.quantity,
                      modelin.ELAT.quantity,
                      frame='barycentrictrueecliptic')
    else:
        tc = SkyCoord(modelin.RAJ.quantity,
                      modelin.DECJ.quantity,
                      frame='icrs')

    if args.eng is not None:
        # Instantiate PolarObs once so it gets added to the observatory registry
        PolarObs(name='Polar', engname=args.eng)

    # Read event file and return list of TOA objects
    tl = load_Polar_TOAs(args.eventfile, weightcolumn=args.wei)

    # Discard events outside of MJD range
    if args.maxMJD is not None:
        tlnew = []
        print("pre len : ", len(tl))
        maxT = Time(float(args.maxMJD), format='mjd')
        print("maxT : ", maxT)
        for tt in tl:
            if tt.mjd < maxT:
                tlnew.append(tt)
        tl = tlnew
        print("post len : ", len(tlnew))

    # Now convert to TOAs object and compute TDBs and posvels
    ts = toa.TOAs(toalist=tl)
    ts.filename = args.eventfile
    ts.compute_TDBs()
    ts.compute_posvels(ephem=args.ephem, planets=args.planets)

    print(ts.get_summary())
    mjds = ts.get_mjds()
    print(mjds.min(), mjds.max())

    # Compute model phase for each TOA
    phss = modelin.phase(ts.table)[1]
    # ensure all phases are positive
    phases = np.where(phss < 0.0 * u.cycle, phss + 1.0 * u.cycle, phss)
    mjds = ts.get_mjds()
    weights = np.array([1] * len(mjds))
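    # hmw computes the (weighted) H-test statistic and h2sig converts it to an
    # equivalent Gaussian significance; with no weight column every photon gets
    # weight 1.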
    h = float(hmw(phases, weights))
    print("Htest : {0:.2f} ({1:.2f} sigma)".format(h, h2sig(h)))
    if args.plot:
        log.info("Making phaseogram plot with {0} photons".format(len(mjds)))
        phaseogram(mjds, phases, weights, bins=100, plotfile=args.plotfile)

    if args.addphase:
        if len(event_dat) != len(phases):
            raise RuntimeError(
                'Mismatch between length of ROOT tree ({0}) and length of phase array ({1})!'
                .format(len(event_dat), len(phases)))

    return 0