def get_toas(evtfile, flags, tcoords=None, minweight=0, minMJD=0, maxMJD=100000):
    """Load TOAs from either a tempo2 .tim file or a Fermi FT1 event file.

    Parameters
    ----------
    evtfile : str
        Path to a .tim file (detected by its extension) or a Fermi FT1
        FITS event file.
    flags : dict
        Option flags. Recognized keys: 'usepickle' (bool) and
        'weightcol' (str column name, or 'CALC' to compute weights).
    tcoords : astropy.coordinates.SkyCoord, optional
        Target coordinates; used only when weightcol == 'CALC'.
    minweight : float, optional
        Minimum photon weight passed to load_Fermi_TOAs.
    minMJD, maxMJD : float, optional
        Only TOAs strictly inside this MJD range are kept.

    Returns
    -------
    pint.toa.TOAs
    """
    # BUG FIX: the original tested evtfile[:-3] (everything *except* the
    # last three characters) against 'tim'; the extension is evtfile[-3:].
    if evtfile[-3:] == 'tim':
        # BUG FIX: honor the 'usepickle' flag; the original computed it
        # and then hard-coded usepickle=False in the call below.
        usepickle = flags.get('usepickle', False)
        ts = toa.get_TOAs(evtfile, usepickle=usepickle)
        # Prune out-of-range MJDs
        mask = np.logical_or(ts.get_mjds() < minMJD * u.day,
                             ts.get_mjds() > maxMJD * u.day)
        ts.table.remove_rows(mask)
        ts.table = ts.table.group_by('obs')
    else:
        if flags.get('usepickle', False):
            try:
                picklefile = toa._check_pickle(evtfile)
                if not picklefile:
                    picklefile = evtfile
                ts = toa.TOAs(picklefile)
                return ts
            except Exception:
                # Best effort: fall through and (re)load the events from
                # the FITS file.  (Was a bare `except:`, which would also
                # swallow KeyboardInterrupt/SystemExit.)
                pass
        weightcol = flags.get('weightcol')
        target = tcoords if weightcol == 'CALC' else None
        tl = fermi.load_Fermi_TOAs(evtfile, weightcolumn=weightcol,
                                   targetcoord=target, minweight=minweight)
        # Materialize the filter so this also works on Python 3, where
        # filter() returns a lazy iterator.
        tl = list(filter(lambda t: (t.mjd.value > minMJD) and
                                   (t.mjd.value < maxMJD), tl))
        ts = toa.TOAs(toalist=tl)
        ts.filename = evtfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    log.info("There are %d events we will use" % len(ts.table))
    return ts
def test_process_and_accuracy(self):
    """Instantiate the Fermi observatory and phase topocentric times.

    Verifies sub-microsecond accuracy by comparison with stored
    Tempo2 "Fermi plugin" results carried in the raw event file.
    """
    model = pint.models.get_model(parfile)
    get_satellite_observatory("Fermi", ft2file)
    event_toas = load_Fermi_TOAs(eventfileraw, weightcolumn="PSRJ0030+0451")
    # ts = toa.TOAs(toalist=tl)
    # Fermi photons carry no GPS or TT(BIPM) clock corrections.
    toas = toa.get_TOAs_list(event_toas,
                             include_gps=False,
                             include_bipm=False,
                             planets=False,
                             ephem="DE405")
    pint_frac = model.phase(toas, abs_phase=True)[1] % 1
    with fits.open(eventfileraw) as hdus:
        tempo2_frac = hdus[1].data.field("pulse_phase")
    delta = pint_frac - tempo2_frac
    # Unwrap differences straddling the 0/1 phase boundary.
    delta[delta < -0.1] += 1
    delta[delta > 0.1] -= 1
    resids_us = delta / model.F0.value * 1e6
    # if the "2 mus" problem exists, the scatter in these values will
    # be 4-5 mus, whereas if everything is OK it should be few 100 ns
    # require range in TOAs to be less than 200ns
    self.assertTrue((resids_us.max() - resids_us.min()) < 0.2)
    # require absolute phase to be within 500 ns; NB this relies on
    # GBT clock corrections since the TZR is referenced there
    self.assertTrue(max(abs(resids_us)) < 0.5)
def test_process_raw_LAT_times(self):
    """Smoke test: reading the FT2 file and computing positions must not
    crash.  No results are validated; a comparison against GEO or BARY
    phases would be a stronger check.
    """
    model = pint.models.get_model(parfile)
    FermiObs(name="Fermi", ft2name=ft2file)
    event_toas = load_Fermi_TOAs(eventfileraw, weightcolumn="PSRJ0030+0451")
    toas = toa.TOAs(toalist=event_toas)
    toas.filename = eventfileraw
    toas.compute_TDBs(ephem="DE405")
    toas.compute_posvels(ephem="DE405", planets=False)
    # Evaluate the model phases; the value itself is discarded.
    model.phase(toas)[1]
def test_process_raw_LAT_times(self):
    """Smoke test: reading the FT2 file and computing positions must run
    to completion.  No results are validated; a comparison against GEO
    or BARY phases would be a stronger check.
    """
    model = pint.models.get_model(parfile)
    FermiObs(name='Fermi', ft2name=ft2file, tt2tdb_mode='none')
    event_toas = load_Fermi_TOAs(eventfileraw, weightcolumn='PSRJ0030+0451')
    toas = toa.TOAs(toalist=event_toas)
    toas.filename = eventfileraw
    toas.compute_TDBs()
    toas.compute_posvels(ephem='DE405', planets=False)
    frac = modelin.phase(toas.table)[1]
    # Shift negative phases into [0, 1); result is not inspected further.
    phases = np.where(frac < 0.0 * u.cycle, frac + 1.0 * u.cycle, frac)
def read_fermi(self, ft1_file, weight_name):
    """Load the timing model and the Fermi photon data.

    Side effects: sets self.modelin, self.t_coord, self.data_fermi and
    self.weights_fermi.
    """
    # Timing model from the par file named on the command line.
    self.modelin = pint.models.get_model(self.args.par)
    # Target coordinates taken from the model (ICRS frame).
    self.t_coord = SkyCoord(self.modelin.RAJ.quantity,
                            self.modelin.DECJ.quantity,
                            frame="icrs")
    # Photon TOAs, subject to the minimum-weight cut.
    self.data_fermi = load_Fermi_TOAs(ft1_file,
                                      weight_name,
                                      targetcoord=self.t_coord,
                                      minweight=self.args.minWeight)
    # Per-photon weights extracted from the TOA flags.
    self.weights_fermi = np.array(
        [photon.flags["weight"] for photon in self.data_fermi])
    print('\n')
    print('%d photons from Fermi' % (len(self.weights_fermi)))
    print('%f is the minimum weight' % (min(self.weights_fermi)))
    print('\n')
def main(argv=None):
    """Compute the weighted H-test for a Fermi FT1 event file with PINT,
    optionally plotting a phaseogram and/or writing a PULSE_PHASE column
    back into the FITS file."""
    parser = argparse.ArgumentParser(
        description=
        "Use PINT to compute H-test and plot Phaseogram from a Fermi FT1 event file."
    )
    parser.add_argument("eventfile", help="Fermi event FITS file name.")
    parser.add_argument("parfile", help="par file to construct model from")
    parser.add_argument(
        "weightcol",
        help="Column name for event weights (or 'CALC' to compute them)")
    parser.add_argument("--ft2", help="Path to FT2 file.", default=None)
    parser.add_argument(
        "--addphase",
        help="Write FT1 file with added phase column",
        default=False,
        action="store_true",
    )
    parser.add_argument("--plot",
                        help="Show phaseogram plot.",
                        action="store_true",
                        default=False)
    parser.add_argument("--plotfile",
                        help="Output figure file name (default=None)",
                        default=None)
    parser.add_argument("--maxMJD",
                        help="Maximum MJD to include in analysis",
                        default=None)
    parser.add_argument(
        "--outfile",
        help="Output figure file name (default is to overwrite input file)",
        default=None,
    )
    parser.add_argument(
        "--planets",
        help="Use planetary Shapiro delay in calculations (default=False)",
        default=False,
        action="store_true",
    )
    parser.add_argument("--ephem",
                        help="Planetary ephemeris to use (default=DE421)",
                        default="DE421")
    args = parser.parse_args(argv)

    # If outfile is specified, that implies addphase
    if args.outfile is not None:
        args.addphase = True

    # Read in model
    modelin = pint.models.get_model(args.parfile)
    # Target coordinates for CALC weighting: ecliptic if the model uses
    # ecliptic coordinates, otherwise equatorial (ICRS).
    if "ELONG" in modelin.params:
        tc = SkyCoord(
            modelin.ELONG.quantity,
            modelin.ELAT.quantity,
            frame="barycentrictrueecliptic",
        )
    else:
        tc = SkyCoord(modelin.RAJ.quantity, modelin.DECJ.quantity, frame="icrs")
    if args.ft2 is not None:
        # Instantiate FermiObs once so it gets added to the observatory registry
        FermiObs(name="Fermi", ft2name=args.ft2)

    # Read event file and return list of TOA objects
    tl = load_Fermi_TOAs(args.eventfile,
                         weightcolumn=args.weightcol,
                         targetcoord=tc)

    # Discard events outside of MJD range
    if args.maxMJD is not None:
        tlnew = []
        print("pre len : ", len(tl))
        maxT = Time(float(args.maxMJD), format="mjd")
        print("maxT : ", maxT)
        for tt in tl:
            if tt.mjd < maxT:
                tlnew.append(tt)
        tl = tlnew
        print("post len : ", len(tlnew))

    # Now convert to TOAs object and compute TDBs and posvels
    # For Fermi, we are not including GPS or TT(BIPM) corrections
    ts = toa.get_TOAs_list(
        tl,
        include_gps=False,
        include_bipm=False,
        planets=args.planets,
        ephem=args.ephem,
    )
    ts.filename = args.eventfile
    print(ts.get_summary())
    mjds = ts.get_mjds()
    print(mjds.min(), mjds.max())

    # Compute model phase for each TOA
    iphss, phss = modelin.phase(ts, abs_phase=True)
    # ensure all postive
    phases = np.where(phss < 0.0 * u.cycle, phss + 1.0 * u.cycle, phss)
    mjds = ts.get_mjds()
    weights = np.array([w["weight"] for w in ts.table["flags"]])
    # Weighted H-test statistic and its significance in sigma.
    h = float(hmw(phases, weights))
    print("Htest : {0:.2f} ({1:.2f} sigma)".format(h, h2sig(h)))
    if args.plot:
        log.info("Making phaseogram plot with {0} photons".format(len(mjds)))
        phaseogram(mjds, phases, weights, bins=100, plotfile=args.plotfile)

    if args.addphase:
        # Read input FITS file (again).
        # If overwriting, open in 'update' mode
        if args.outfile is None:
            hdulist = pyfits.open(args.eventfile, mode="update")
        else:
            hdulist = pyfits.open(args.eventfile)
        event_hdu = hdulist[1]
        event_hdr = event_hdu.header
        event_dat = event_hdu.data
        # Sanity check: the phase array must line up row-for-row with the
        # FITS event table.
        if len(event_dat) != len(phases):
            raise RuntimeError(
                "Mismatch between length of FITS table ({0}) and length of phase array ({1})!"
                .format(len(event_dat), len(phases)))
        if "PULSE_PHASE" in event_hdu.columns.names:
            log.info("Found existing PULSE_PHASE column, overwriting...")
            # Overwrite values in existing Column
            event_dat["PULSE_PHASE"] = phases
        else:
            # Construct and append new column, preserving HDU header and name
            log.info("Adding new PULSE_PHASE column.")
            phasecol = pyfits.ColDefs(
                [pyfits.Column(name="PULSE_PHASE", format="D", array=phases)])
            bt = pyfits.BinTableHDU.from_columns(event_hdu.columns + phasecol,
                                                 header=event_hdr,
                                                 name=event_hdu.name)
            hdulist[1] = bt
        if args.outfile is None:
            # Overwrite the existing file
            log.info("Overwriting existing FITS file " + args.eventfile)
            hdulist.flush(verbose=True, output_verify="warn")
        else:
            # Write to new output file
            log.info("Writing output FITS file " + args.outfile)
            hdulist.writeto(args.outfile,
                            overwrite=True,
                            checksum=True,
                            output_verify="warn")

    return 0
def main(argv=None):
    """MCMC (emcee) optimization of a timing model against Fermi photon
    event data, using a binned Gaussian template for the likelihood.

    Writes pre-/post-fit phaseograms, pulse profiles, chain and corner
    plots, a best-fit par file, a results summary, and a pickle of the
    MCMC samples, all named after the pulsar (model PSR value).
    """
    parser = argparse.ArgumentParser(description="PINT tool for MCMC optimization of timing models using event data.")
    parser.add_argument("eventfile",help="event file to use")
    parser.add_argument("parfile",help="par file to read model from")
    parser.add_argument("gaussianfile",help="gaussian file that defines template")
    parser.add_argument("--ft2",help="Path to FT2 file.",default=None)
    parser.add_argument("--weightcol",help="name of weight column (or 'CALC' to have them computed",default=None)
    parser.add_argument("--nwalkers",help="Number of MCMC walkers (def 200)",type=int,
        default=200)
    parser.add_argument("--burnin",help="Number of MCMC steps for burn in (def 100)",
        type=int, default=100)
    parser.add_argument("--nsteps",help="Number of MCMC steps to compute (def 1000)",
        type=int, default=1000)
    parser.add_argument("--minMJD",help="Earliest MJD to use (def 54680)",type=float,
        default=54680.0)
    parser.add_argument("--maxMJD",help="Latest MJD to use (def 57250)",type=float,
        default=57250.0)
    parser.add_argument("--phs",help="Starting phase offset [0-1] (def is to measure)",type=float)
    parser.add_argument("--phserr",help="Error on starting phase",type=float,
        default=0.03)
    parser.add_argument("--minWeight",help="Minimum weight to include (def 0.05)",
        type=float,default=0.05)
    parser.add_argument("--wgtexp",
        help="Raise computed weights to this power (or 0.0 to disable any rescaling of weights)",
        type=float, default=0.0)
    parser.add_argument("--testWeights",help="Make plots to evalute weight cuts?",
        default=False,action="store_true")
    parser.add_argument("--doOpt",help="Run initial scipy opt before MCMC?",
        default=False,action="store_true")
    parser.add_argument("--initerrfact",help="Multiply par file errors by this factor when initializing walker starting values",type=float,default=0.1)
    parser.add_argument("--priorerrfact",help="Multiple par file errors by this factor when setting gaussian prior widths",type=float,default=10.0)
    parser.add_argument("--usepickle",help="Read events from pickle file, if available?",
        default=False,action="store_true")

    # nwalkers/nsteps/ftr are globals so the emcee worker functions can
    # see them when the likelihood is evaluated in parallel.
    global nwalkers, nsteps, ftr

    args = parser.parse_args(argv)

    eventfile = args.eventfile
    parfile = args.parfile
    gaussianfile = args.gaussianfile
    weightcol = args.weightcol
    if args.ft2 is not None:
        # Instantiate FermiObs once so it gets added to the observatory registry
        FermiObs(name='Fermi',ft2name=args.ft2)

    nwalkers = args.nwalkers
    burnin = args.burnin
    nsteps = args.nsteps
    if burnin >= nsteps:
        log.error('burnin must be < nsteps')
        sys.exit(1)
    nbins = 256 # For likelihood calculation based on gaussians file
    outprof_nbins = 256 # in the text file, for pygaussfit.py, for instance
    minMJD = args.minMJD
    maxMJD = args.maxMJD # Usually set by coverage of IERS file
    minWeight = args.minWeight
    do_opt_first = args.doOpt
    wgtexp = args.wgtexp

    # Read in initial model
    modelin = pint.models.get_model(parfile)

    # The custom_timing version below is to manually construct the TimingModel
    # class, which allows it to be pickled. This is needed for parallelizing
    # the emcee call over a number of threads.  So far, it isn't quite working
    # so it is disabled.  The code above constructs the TimingModel class
    # dynamically, as usual.
    #modelin = custom_timing(parfile)

    # Remove the dispersion delay as it is unnecessary
    #modelin.delay_funcs['L1'].remove(modelin.dispersion_delay)

    # Set the target coords for automatic weighting if necessary
    if 'ELONG' in modelin.params:
        tc = SkyCoord(modelin.ELONG.quantity,modelin.ELAT.quantity,
            frame='barycentrictrueecliptic')
    else:
        tc = SkyCoord(modelin.RAJ.quantity,modelin.DECJ.quantity,frame='icrs')

    target = tc if weightcol=='CALC' else None

    # TODO: make this properly handle long double
    if not args.usepickle or (not (os.path.isfile(eventfile+".pickle") or
        os.path.isfile(eventfile+".pickle.gz"))):
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(eventfile, weightcolumn=weightcol,
                                   targetcoord=target, minweight=minWeight)
        # Limit the TOAs to ones in selected MJD range and above minWeight
        tl = [tl[ii] for ii in range(len(tl)) if (tl[ii].mjd.value > minMJD and tl[ii].mjd.value < maxMJD
            and (weightcol is None or tl[ii].flags['weight'] > minWeight))]
        log.info("There are %d events we will use" % len(tl))
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    else:  # read the events in as a pickle file
        picklefile = toa._check_pickle(eventfile)
        if not picklefile:
            picklefile = eventfile
        ts = toa.TOAs(picklefile)

    if weightcol is not None:
        if weightcol=='CALC':
            weights = np.asarray([x['weight'] for x in ts.table['flags']])
            log.info("Original weights have min / max weights %.3f / %.3f" % \
                (weights.min(), weights.max()))
            # Rescale the weights, if requested (by having wgtexp != 0.0)
            if wgtexp != 0.0:
                weights **= wgtexp
                wmx, wmn = weights.max(), weights.min()
                # make the highest weight = 1, but keep min weight the same
                weights = wmn + ((weights - wmn) * (1.0 - wmn) / (wmx - wmn))
            for ii, x in enumerate(ts.table['flags']):
                x['weight'] = weights[ii]
        weights = np.asarray([x['weight'] for x in ts.table['flags']])
        log.info("There are %d events, with min / max weights %.3f / %.3f" % \
            (len(weights), weights.min(), weights.max()))
    else:
        weights = None
        log.info("There are %d events, no weights are being used." % ts.ntoas)

    # Now load in the gaussian template and normalize it
    gtemplate = read_gaussfitfile(gaussianfile, nbins)
    gtemplate /= gtemplate.mean()

    # Set the priors on the parameters in the model, before
    # instantiating the emcee_fitter
    # Currently, this adds a gaussian prior on each parameter
    # with width equal to the par file uncertainty * priorerrfact,
    # and then puts in some special cases.
    # *** This should be replaced/supplemented with a way to specify
    # more general priors on parameters that need certain bounds
    phs = 0.0 if args.phs is None else args.phs

    sampler = EmceeSampler(nwalkers)
    ftr = MCMCFitterBinnedTemplate(ts, modelin, sampler, template=gtemplate, \
        weights=weights, phs=phs, phserr=args.phserr, minMJD=minMJD, maxMJD=maxMJD)
    fitkeys, fitvals, fiterrs = ftr.get_fit_keyvals()

    # Use this if you want to see the effect of setting minWeight
    if args.testWeights:
        log.info("Checking H-test vs weights")
        ftr.prof_vs_weights(use_weights=True)
        ftr.prof_vs_weights(use_weights=False)
        sys.exit()

    # Now compute the photon phases and see if we see a pulse
    phss = ftr.get_event_phases()
    maxbin, like_start = marginalize_over_phase(phss, gtemplate,
        weights=ftr.weights, minimize=True, showplot=False)
    log.info("Starting pulse likelihood: %f" % like_start)
    if args.phs is None:
        # Measured starting phase, wrapped into [0, 1].
        fitvals[-1] = 1.0 - maxbin[0] / float(len(gtemplate))
        if fitvals[-1] > 1.0: fitvals[-1] -= 1.0
        if fitvals[-1] < 0.0: fitvals[-1] += 1.0
        log.info("Starting pulse phase: %f" % fitvals[-1])
    else:
        log.info("Measured starting pulse phase is %f, but using %f" % \
            (1.0 - maxbin / float(len(gtemplate)), args.phs))
        fitvals[-1] = args.phs
        ftr.fitvals[-1] = fitvals[-1]
    ftr.phaseogram(plotfile=ftr.model.PSR.value+"_pre.png")
    plt.close()
    # Write out the starting pulse profile
    vs, xs = np.histogram(ftr.get_event_phases(), outprof_nbins, \
        range=[0,1], weights=ftr.weights)
    f = open(ftr.model.PSR.value+"_prof_pre.txt", 'w')
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Try normal optimization first to see how it goes
    if do_opt_first:
        result = op.minimize(ftr.minimize_func, np.zeros_like(ftr.fitvals))
        newfitvals = np.asarray(result['x']) * ftr.fiterrs + ftr.fitvals
        like_optmin = -result['fun']
        log.info("Optimization likelihood: %f" % like_optmin)
        ftr.set_params(dict(zip(ftr.fitkeys, newfitvals)))
        ftr.phaseogram()
    else:
        like_optmin = -np.inf

    # Set up the initial conditions for the emcee walkers.  Use the
    # scipy.optimize newfitvals instead if they are better
    ndim = ftr.n_fit_params
    if like_start > like_optmin:
        pos = None
    else:
        pos = [newfitvals + ftr.fiterrs*args.initerrfact*np.random.randn(ndim)
            for i in range(nwalkers)]
        pos[0] = ftr.fitvals

    ftr.fit_toas(maxiter=nsteps, pos=pos, priorerrfact=args.priorerrfact,
        errfact=args.initerrfact)

    def plot_chains(chain_dict, file=False):
        # One panel per fit parameter, stacked vertically, sharing the
        # step-number axis.
        npts = len(chain_dict)
        fig, axes = plt.subplots(npts, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
            axes[ii].set_ylabel(name)
        axes[npts-1].set_xlabel("Step Number")
        fig.tight_layout()
        if file:
            fig.savefig(file)
            plt.close()
        else:
            plt.show()
            plt.close()

    chains = sampler.chains_to_dict(ftr.fitkeys)
    plot_chains(chains, file=ftr.model.PSR.value+"_chains.png")

    # Make the triangle plot.
    samples = sampler.sampler.chain[:, burnin:, :].reshape((-1, ftr.n_fit_params))
    try:
        import corner
        fig = corner.corner(samples, labels=ftr.fitkeys, bins=50,
            truths=ftr.maxpost_fitvals, plot_contours=True)
        fig.savefig(ftr.model.PSR.value+"_triangle.png")
        plt.close()
    except ImportError:
        pass

    # Make a phaseogram with the 50th percentile values
    #ftr.set_params(dict(zip(ftr.fitkeys, np.percentile(samples, 50, axis=0))))
    # Make a phaseogram with the best MCMC result
    ftr.set_params(dict(zip(ftr.fitkeys[:-1], ftr.maxpost_fitvals[:-1])))
    ftr.phaseogram(plotfile=ftr.model.PSR.value+"_post.png")
    plt.close()

    # Write out the output pulse profile
    vs, xs = np.histogram(ftr.get_event_phases(), outprof_nbins, \
        range=[0,1], weights=ftr.weights)
    f = open(ftr.model.PSR.value+"_prof_post.txt", 'w')
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Write out the par file for the best MCMC parameter est
    f = open(ftr.model.PSR.value+"_post.par", 'w')
    f.write(ftr.model.as_parfile())
    f.close()

    # Print the best MCMC values and ranges
    ranges = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
        zip(*np.percentile(samples, [16, 50, 84], axis=0)))
    log.info("Post-MCMC values (50th percentile +/- (16th/84th percentile):")
    for name, vals in zip(ftr.fitkeys, ranges):
        log.info("%8s:"%name + "%25.15g (+ %12.5g / - %12.5g)"%vals)

    # Put the same stuff in a file
    f = open(ftr.model.PSR.value+"_results.txt", 'w')
    f.write("Post-MCMC values (50th percentile +/- (16th/84th percentile):\n")
    for name, vals in zip(ftr.fitkeys, ranges):
        f.write("%8s:"%name + " %25.15g (+ %12.5g / - %12.5g)\n"%vals)
    f.write("\nMaximum likelihood par file:\n")
    f.write(ftr.model.as_parfile())
    f.close()

    import cPickle
    cPickle.dump(samples, open(ftr.model.PSR.value+"_samples.pickle", "wb"))
if __name__ == '__main__':
    # Command-line driver: parse arguments, load the photon events, and
    # build the TOAs object.  (The event file is assumed to already be
    # geocentered, per the eventfile help text.)
    parser = argparse.ArgumentParser(description="Use PINT to compute H-test and plot Phaseogram from a Fermi FT1 event file.")
    parser.add_argument("eventfile",help="Fermi event FITS file name. Should be GEOCENTERED.")
    parser.add_argument("parfile",help="par file to construct model from")
    parser.add_argument("weightcol",help="Column name for event weights (or 'CALC' to compute them)")
    parser.add_argument("--maxMJD",help="Maximum MJD to include in analysis", default=None)
    parser.add_argument("--outfile",help="Output figure file name (default=None)", default=None)
    parser.add_argument("--planets",help="Use planetary Shapiro delay in calculations (default=False)", default=False, action="store_true")
    parser.add_argument("--ephem",help="Planetary ephemeris to use (default=DE421)", default="DE421")
    args = parser.parse_args()

    # Target position parsed from the par file (RAJ in hours, DECJ in deg).
    pf = psr_par(args.parfile)

    # Read event file and return list of TOA objects
    tl = load_Fermi_TOAs(args.eventfile, weightcolumn=args.weightcol,
        targetcoord=SkyCoord(pf.RAJ,pf.DECJ,unit=(u.hourangle,u.degree),frame='icrs'))

    # Discard events outside of MJD range
    if args.maxMJD is not None:
        tlnew = []
        print("pre len : ",len(tl))
        maxT = Time(float(args.maxMJD),format='mjd')
        print("maxT : ",maxT)
        for tt in tl:
            if tt.mjd < maxT:
                tlnew.append(tt)
        tl=tlnew
        print("post len : ",len(tlnew))

    # Now convert to TOAs object and compute TDBs and posvels
    ts = toa.TOAs(toalist=tl)
def main(argv=None):
    """Compute the weighted H-test for a Fermi FT1 event file with PINT,
    optionally plotting a phaseogram and/or writing a PULSE_PHASE column
    back into the FITS file (rows outside the MJD cut get phase -1)."""
    parser = argparse.ArgumentParser(
        description=
        "Use PINT to compute H-test and plot Phaseogram from a Fermi FT1 event file."
    )
    parser.add_argument("eventfile", help="Fermi event FITS file name.")
    parser.add_argument("parfile", help="par file to construct model from")
    parser.add_argument(
        "weightcol",
        help="Column name for event weights (or 'CALC' to compute them)")
    parser.add_argument("--ft2", help="Path to FT2 file.", default=None)
    parser.add_argument(
        "--addphase",
        help="Write FT1 file with added phase column",
        default=False,
        action="store_true",
    )
    parser.add_argument("--plot",
                        help="Show phaseogram plot.",
                        action="store_true",
                        default=False)
    parser.add_argument("--plotfile",
                        help="Output figure file name (default=None)",
                        default=None)
    parser.add_argument("--maxMJD",
                        help="Maximum MJD to include in analysis",
                        default=None)
    parser.add_argument("--minMJD",
                        help="Minimum MJD to include in analysis",
                        default=None)
    parser.add_argument(
        "--outfile",
        help="Output figure file name (default is to overwrite input file)",
        default=None,
    )
    parser.add_argument(
        "--planets",
        help="Use planetary Shapiro delay in calculations (default=False)",
        default=False,
        action="store_true",
    )
    parser.add_argument("--ephem",
                        help="Planetary ephemeris to use (default=DE421)",
                        default="DE421")
    args = parser.parse_args(argv)

    # If outfile is specified, that implies addphase
    if args.outfile is not None:
        args.addphase = True

    # Read in model
    modelin = pint.models.get_model(args.parfile)
    # Target coordinates for CALC weighting: ecliptic if the model uses
    # ecliptic coordinates, otherwise equatorial (ICRS).
    if "ELONG" in modelin.params:
        tc = SkyCoord(
            modelin.ELONG.quantity,
            modelin.ELAT.quantity,
            frame="barycentrictrueecliptic",
        )
    else:
        tc = SkyCoord(modelin.RAJ.quantity, modelin.DECJ.quantity, frame="icrs")
    if args.ft2 is not None:
        # Instantiate Fermi observatory once so it gets added to the observatory registry
        get_satellite_observatory("Fermi", args.ft2)

    # Read event file and return list of TOA objects; the MJD cut is
    # applied inside load_Fermi_TOAs.
    maxmjd = np.inf if (args.maxMJD is None) else float(args.maxMJD)
    minmjd = 0.0 if (args.minMJD is None) else float(args.minMJD)
    tl = load_Fermi_TOAs(
        args.eventfile,
        maxmjd=maxmjd,
        minmjd=minmjd,
        weightcolumn=args.weightcol,
        targetcoord=tc,
    )

    # Now convert to TOAs object and compute TDBs and posvels
    # For Fermi, we are not including GPS or TT(BIPM) corrections
    ts = toa.get_TOAs_list(
        tl,
        include_gps=False,
        include_bipm=False,
        planets=args.planets,
        ephem=args.ephem,
    )
    ts.filename = args.eventfile
    print(ts.get_summary())
    mjds = ts.get_mjds()
    print(mjds.min(), mjds.max())

    # Compute model phase for each TOA, folded into [0, 1).
    iphss, phss = modelin.phase(ts, abs_phase=True)
    phss %= 1
    phases = phss.value
    mjds = ts.get_mjds()
    weights = np.array([w["weight"] for w in ts.table["flags"]])
    # Weighted H-test statistic and its significance in sigma.
    h = float(hmw(phases, weights))
    print("Htest : {0:.2f} ({1:.2f} sigma)".format(h, h2sig(h)))
    if args.plot:
        log.info("Making phaseogram plot with {0} photons".format(len(mjds)))
        phaseogram(mjds, phases, weights, bins=100, plotfile=args.plotfile)

    if args.addphase:
        # Read input FITS file (again).
        # If overwriting, open in 'update' mode
        if args.outfile is None:
            hdulist = pyfits.open(args.eventfile, mode="update")
        else:
            hdulist = pyfits.open(args.eventfile)
        event_hdu = hdulist[1]
        event_hdr = event_hdu.header
        event_dat = event_hdu.data
        # Rebuild per-row MJDs so phases can be matched back onto the
        # full (uncut) event table; rows outside the cut get phase -1.
        event_mjds = read_fits_event_mjds_tuples(event_hdu)
        mjds_float = np.asarray([r[0] + r[1] for r in event_mjds])
        time_mask = np.logical_and((mjds_float > minmjd),
                                   (mjds_float < maxmjd))
        new_phases = np.full(len(event_dat), -1, dtype=float)
        new_phases[time_mask] = phases
        if "PULSE_PHASE" in event_hdu.columns.names:
            log.info("Found existing PULSE_PHASE column, overwriting...")
            # Overwrite values in existing Column
            event_dat["PULSE_PHASE"] = new_phases
        else:
            # Construct and append new column, preserving HDU header and name
            log.info("Adding new PULSE_PHASE column.")
            phasecol = pyfits.ColDefs([
                pyfits.Column(name="PULSE_PHASE", format="D", array=new_phases)
            ])
            bt = pyfits.BinTableHDU.from_columns(event_hdu.columns + phasecol,
                                                 header=event_hdr,
                                                 name=event_hdu.name)
            hdulist[1] = bt
        if args.outfile is None:
            # Overwrite the existing file
            log.info("Overwriting existing FITS file " + args.eventfile)
            hdulist.flush(verbose=True, output_verify="warn")
        else:
            # Write to new output file
            log.info("Writing output FITS file " + args.outfile)
            hdulist.writeto(args.outfile,
                            overwrite=True,
                            checksum=True,
                            output_verify="warn")

    return 0
def main(argv=None):
    """MCMC (emcee) optimization of a timing model against Fermi photon
    data, maximizing the weighted H-test likelihood.

    Writes pre-/post-fit phaseograms and pulse profiles, chain and corner
    plots, a best-fit par file, a results summary, and a pickle of the
    MCMC samples, all named after the pulsar (model PSR value).

    NOTE(review): relies on module-level settings defined elsewhere in
    this file (minWeight, minMJD, maxMJD, wgtexp, nwalkers, errfact,
    do_opt_first, burnin, nsteps, outprof_nbins) — confirm against the
    surrounding script.
    """
    # BUG FIX: the original did `len(argv)` with argv defaulting to None
    # (TypeError) and then unpacked sys.argv[1:] regardless of the argv
    # parameter.  Normalize once and use argv consistently.
    if argv is None:
        argv = sys.argv[1:]
    if len(argv) == 3:
        eventfile, parfile, weightcol = argv
    elif len(argv) == 2:
        eventfile, parfile = argv
        weightcol = None
    else:
        print("usage: htest_optimize eventfile parfile [weightcol]")
        sys.exit()

    # Read in initial model
    modelin = pint.models.get_model(parfile)
    # Remove the dispersion delay as it is unnecessary
    modelin.delay_funcs.remove(modelin.dispersion_delay)
    # Set the target coords for automatic weighting if necessary
    if "ELONG" in modelin.params:
        tc = SkyCoord(
            modelin.ELONG.quantity,
            modelin.ELAT.quantity,
            frame="barycentrictrueecliptic",
        )
    else:
        tc = SkyCoord(modelin.RAJ.quantity, modelin.DECJ.quantity, frame="icrs")
    target = tc if weightcol == "CALC" else None

    # TODO: make this properly handle long double
    if not (
        os.path.isfile(eventfile + ".pickle")
        or os.path.isfile(eventfile + ".pickle.gz")
    ):
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(
            eventfile, weightcolumn=weightcol, targetcoord=target, minweight=minWeight
        )
        # Limit the TOAs to ones where we have IERS corrections for
        tl = [
            tl[ii]
            for ii in range(len(tl))
            if (
                tl[ii].mjd.value < maxMJD
                and (weightcol is None or tl[ii].flags["weight"] > minWeight)
            )
        ]
        print("There are %d events we will use" % len(tl))
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    else:
        # read the events in as a pickle file
        picklefile = toa._check_pickle(eventfile)
        if not picklefile:
            picklefile = eventfile
        ts = toa.TOAs(picklefile)

    if weightcol is not None:
        if weightcol == "CALC":
            weights = np.asarray([x["weight"] for x in ts.table["flags"]])
            print(
                "Original weights have min / max weights %.3f / %.3f"
                % (weights.min(), weights.max())
            )
            weights **= wgtexp
            wmx, wmn = weights.max(), weights.min()
            # make the highest weight = 1, but keep min weight the same
            weights = wmn + ((weights - wmn) * (1.0 - wmn) / (wmx - wmn))
            for ii, x in enumerate(ts.table["flags"]):
                x["weight"] = weights[ii]
        weights = np.asarray([x["weight"] for x in ts.table["flags"]])
        print(
            "There are %d events, with min / max weights %.3f / %.3f"
            % (len(weights), weights.min(), weights.max())
        )
    else:
        weights = None
        # BUG FIX: the original formatted len(weights) here, but weights
        # is None on this branch (TypeError); report the TOA count.
        print("There are %d events, no weights are being used." % ts.ntoas)

    # Now define the requirements for emcee
    ftr = emcee_fitter(ts, modelin, weights)

    # Use this if you want to see the effect of setting minWeight
    if minWeight == 0.0:
        print("Checking h-test vs weights")
        ftr.prof_vs_weights(use_weights=True)
        ftr.prof_vs_weights(use_weights=False)
        sys.exit()

    # Now compute the photon phases and see if we see a pulse
    phss = ftr.get_event_phases()
    like_start = -1.0 * sf_hm(hmw(phss, weights=ftr.weights), logprob=True)
    print("Starting pulse likelihood:", like_start)
    ftr.phaseogram(file=ftr.model.PSR.value + "_pre.png")
    plt.close()
    ftr.phaseogram()
    # Write out the starting pulse profile
    vs, xs = np.histogram(
        ftr.get_event_phases(), outprof_nbins, range=[0, 1], weights=ftr.weights
    )
    f = open(ftr.model.PSR.value + "_prof_pre.txt", "w")
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Try normal optimization first to see how it goes
    if do_opt_first:
        result = op.minimize(ftr.minimize_func, np.zeros_like(ftr.fitvals))
        newfitvals = np.asarray(result["x"]) * ftr.fiterrs + ftr.fitvals
        like_optmin = -result["fun"]
        print("Optimization likelihood:", like_optmin)
        ftr.set_params(dict(zip(ftr.fitkeys, newfitvals)))
        ftr.phaseogram()
    else:
        like_optmin = -np.inf

    # Set up the initial conditions for the emcee walkers.  Use the
    # scipy.optimize newfitvals instead if they are better
    ndim = ftr.n_fit_params
    if like_start > like_optmin:
        # Keep the starting deviations small...
        pos = [
            ftr.fitvals + ftr.fiterrs / errfact * np.random.randn(ndim)
            for ii in range(nwalkers)
        ]
        # Set starting params with uniform priors to uniform in the prior
        for param in ["GLPH_1", "GLEP_1", "SINI", "M2", "E", "ECC", "PX", "A1"]:
            if param in ftr.fitkeys:
                idx = ftr.fitkeys.index(param)
                if param == "GLPH_1":
                    svals = np.random.uniform(-0.5, 0.5, nwalkers)
                elif param == "GLEP_1":
                    svals = np.random.uniform(minMJD + 100, maxMJD - 100, nwalkers)
                    # svals = 55422.0 + np.random.randn(nwalkers)
                elif param == "SINI":
                    svals = np.random.uniform(0.0, 1.0, nwalkers)
                elif param == "M2":
                    svals = np.random.uniform(0.1, 0.6, nwalkers)
                elif param in ["E", "ECC", "PX", "A1"]:
                    # Ensure all positive
                    svals = np.fabs(
                        ftr.fitvals[idx] + ftr.fiterrs[idx] * np.random.randn(nwalkers)
                    )
                    if param in ["E", "ECC"]:
                        svals[svals > 1.0] = 1.0 - (svals[svals > 1.0] - 1.0)
                for ii in range(nwalkers):
                    pos[ii][idx] = svals[ii]
    else:
        pos = [
            newfitvals + ftr.fiterrs / errfact * np.random.randn(ndim)
            for i in range(nwalkers)
        ]
    # Set the 0th walker to have the initial pre-fit solution
    # This way, one walker should always be in a good position
    pos[0] = ftr.fitvals

    import emcee

    # sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior, threads=10)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior)
    # The number is the number of points in the chain
    sampler.run_mcmc(pos, nsteps)

    def chains_to_dict(names, sampler):
        # One (nsteps, nwalkers) array per parameter name.
        chains = [sampler.chain[:, :, ii].T for ii in range(len(names))]
        return dict(zip(names, chains))

    def plot_chains(chain_dict, file=False):
        # One panel per fit parameter, stacked vertically.
        # BUG FIX: the original named this local `np`, shadowing numpy
        # inside the helper.
        npanels = len(chain_dict)
        fig, axes = plt.subplots(npanels, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
            axes[ii].set_ylabel(name)
        axes[npanels - 1].set_xlabel("Step Number")
        fig.tight_layout()
        if file:
            fig.savefig(file)
            plt.close()
        else:
            plt.show()
            plt.close()

    chains = chains_to_dict(ftr.fitkeys, sampler)
    plot_chains(chains, file=ftr.model.PSR.value + "_chains.png")

    # Make the triangle plot.
    # BUG FIX: `samples` was only assigned inside the try block below, so
    # an ImportError on corner left it undefined for the percentile
    # computation at the end.  Build it unconditionally.
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    try:
        import corner

        fig = corner.corner(samples, labels=ftr.fitkeys, bins=50)
        fig.savefig(ftr.model.PSR.value + "_triangle.png")
        plt.close()
    except ImportError:
        pass

    # Make a phaseogram with the 50th percentile values
    # ftr.set_params(dict(zip(ftr.fitkeys, np.percentile(samples, 50, axis=0))))
    # Make a phaseogram with the best MCMC result
    ftr.set_params(dict(zip(ftr.fitkeys, ftr.maxpost_fitvals)))
    ftr.phaseogram(file=ftr.model.PSR.value + "_post.png")
    plt.close()

    # Write out the output pulse profile
    vs, xs = np.histogram(
        ftr.get_event_phases(), outprof_nbins, range=[0, 1], weights=ftr.weights
    )
    f = open(ftr.model.PSR.value + "_prof_post.txt", "w")
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Write out the par file for the best MCMC parameter est
    f = open(ftr.model.PSR.value + "_post.par", "w")
    f.write(ftr.model.as_parfile())
    f.close()

    # Print the best MCMC values and ranges.
    # BUG FIX: materialize the map() — on Python 3 it is a one-shot
    # iterator and the second loop below would see nothing.
    ranges = list(
        map(
            lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
            zip(*np.percentile(samples, [16, 50, 84], axis=0)),
        )
    )
    print("Post-MCMC values (50th percentile +/- (16th/84th percentile):")
    for name, vals in zip(ftr.fitkeys, ranges):
        print("%8s:" % name, "%25.15g (+ %12.5g / - %12.5g)" % vals)

    # Put the same stuff in a file
    f = open(ftr.model.PSR.value + "_results.txt", "w")
    f.write("Post-MCMC values (50th percentile +/- (16th/84th percentile):\n")
    for name, vals in zip(ftr.fitkeys, ranges):
        f.write("%8s:" % name + " %25.15g (+ %12.5g / - %12.5g)\n" % vals)
    f.write("\nMaximum likelihood par file:\n")
    f.write(ftr.model.as_parfile())
    f.close()

    import cPickle

    cPickle.dump(samples, open(ftr.model.PSR.value + "_samples.pickle", "wb"))
sys.exit() # Read in initial model modelin = pint.models.get_model(parfile) # Remove the dispersion delay as it is unnecessary modelin.delay_funcs['L1'].remove(modelin.dispersion_delay) # Set the target coords for automatic weighting if necessary target = SkyCoord(modelin.RAJ.value, modelin.DECJ.value, \ frame='icrs') if weightcol=='CALC' else None # TODO: make this properly handle long double if not (os.path.isfile(eventfile + ".pickle") or os.path.isfile(eventfile + ".pickle.gz")): # Read event file and return list of TOA objects tl = fermi.load_Fermi_TOAs(eventfile, weightcolumn=weightcol, targetcoord=target, minweight=minWeight) # Limit the TOAs to ones where we have IERS corrections for tl = [ tl[ii] for ii in range(len(tl)) if (tl[ii].mjd.value < maxMJD and ( weightcol is None or tl[ii].flags['weight'] > minWeight)) ] print "There are %d events we will use" % len(tl) # Now convert to TOAs object and compute TDBs and posvels ts = toa.TOAs(toalist=tl) ts.filename = eventfile ts.compute_TDBs() ts.compute_posvels(ephem="DE421", planets=False) ts.pickle() else: # read the events in as a picke file ts = toa.TOAs(eventfile, usepickle=True)
def main(argv=None):
    """Compute photon pulse phases and the H-test for a Fermi FT1 event file.

    Reads a timing model from a par file, phases every event in the FT1
    file, prints the weighted H-test statistic, optionally shows/saves a
    phaseogram, and optionally writes a PULSE_PHASE column back into the
    FITS file (in place, or to --outfile).

    Returns 0 on success.
    """
    parser = argparse.ArgumentParser(description="Use PINT to compute H-test and plot Phaseogram from a Fermi FT1 event file.")
    parser.add_argument("eventfile",help="Fermi event FITS file name.")
    parser.add_argument("parfile",help="par file to construct model from")
    parser.add_argument("weightcol",help="Column name for event weights (or 'CALC' to compute them)")
    parser.add_argument("--ft2",help="Path to FT2 file.",default=None)
    parser.add_argument("--addphase",help="Write FT1 file with added phase column", default=False,action='store_true')
    parser.add_argument("--plot",help="Show phaseogram plot.", action='store_true', default=False)
    parser.add_argument("--plotfile",help="Output figure file name (default=None)", default=None)
    # No type=float here, so args.maxMJD arrives as a string; it is
    # converted with float() below.
    parser.add_argument("--maxMJD",help="Maximum MJD to include in analysis", default=None)
    # NOTE(review): help text says "figure file" but this is the output
    # FITS event file — wording looks copy-pasted from --plotfile.
    parser.add_argument("--outfile",help="Output figure file name (default is to overwrite input file)", default=None)
    parser.add_argument("--planets",help="Use planetary Shapiro delay in calculations (default=False)", default=False, action="store_true")
    parser.add_argument("--ephem",help="Planetary ephemeris to use (default=DE421)", default="DE421")
    args = parser.parse_args(argv)

    # If outfile is specified, that implies addphase
    if args.outfile is not None:
        args.addphase = True

    # Read in model
    modelin = pint.models.get_model(args.parfile)

    # Build the target coordinate; prefer ecliptic coordinates when the
    # par file provides them.
    if 'ELONG' in modelin.params:
        tc = SkyCoord(modelin.ELONG.quantity,modelin.ELAT.quantity,
                      frame='barycentrictrueecliptic')
    else:
        tc = SkyCoord(modelin.RAJ.quantity,modelin.DECJ.quantity,frame='icrs')

    if args.ft2 is not None:
        # Instantiate FermiObs once so it gets added to the observatory registry
        FermiObs(name='Fermi',ft2name=args.ft2)

    # Read event file and return list of TOA objects
    tl = load_Fermi_TOAs(args.eventfile, weightcolumn=args.weightcol,
                         targetcoord=tc)

    # Discard events outside of MJD range
    if args.maxMJD is not None:
        tlnew = []
        print("pre len : ",len(tl))
        maxT = Time(float(args.maxMJD),format='mjd')
        print("maxT : ",maxT)
        for tt in tl:
            if tt.mjd < maxT:
                tlnew.append(tt)
        tl=tlnew
        print("post len : ",len(tlnew))

    # Now convert to TOAs object and compute TDBs and posvels
    # For Fermi, we are not including GPS or TT(BIPM) corrections
    ts = toa.get_TOAs_list(tl,include_gps=False,include_bipm=False,planets=args.planets,ephem=args.ephem)
    ts.filename = args.eventfile
    print(ts.get_summary())
    mjds = ts.get_mjds()
    print(mjds.min(),mjds.max())

    # Compute model phase for each TOA
    iphss,phss = modelin.phase(ts,abs_phase=True)
    # ensure all postive
    phases = np.where(phss < 0.0 * u.cycle, phss + 1.0 * u.cycle, phss)
    mjds = ts.get_mjds()
    weights = np.array([w['weight'] for w in ts.table['flags']])
    # Weighted H-test statistic and its Gaussian-sigma equivalent.
    h = float(hmw(phases,weights))
    print("Htest : {0:.2f} ({1:.2f} sigma)".format(h,h2sig(h)))
    if args.plot:
        log.info("Making phaseogram plot with {0} photons".format(len(mjds)))
        phaseogram(mjds,phases,weights,bins=100,plotfile = args.plotfile)

    if args.addphase:
        # Read input FITS file (again).
        # If overwriting, open in 'update' mode
        if args.outfile is None:
            hdulist = pyfits.open(args.eventfile,mode='update')
        else:
            hdulist = pyfits.open(args.eventfile)
        event_hdu = hdulist[1]
        event_hdr=event_hdu.header
        event_dat=event_hdu.data
        # Refuse to write a phase column that does not line up row-for-row
        # with the event table.
        if len(event_dat) != len(phases):
            raise RuntimeError('Mismatch between length of FITS table ({0}) and length of phase array ({1})!'.format(len(event_dat),len(phases)))
        if 'PULSE_PHASE' in event_hdu.columns.names:
            log.info('Found existing PULSE_PHASE column, overwriting...')
            # Overwrite values in existing Column
            event_dat['PULSE_PHASE'] = phases
        else:
            # Construct and append new column, preserving HDU header and name
            log.info('Adding new PULSE_PHASE column.')
            phasecol = pyfits.ColDefs([pyfits.Column(name='PULSE_PHASE',
                format='D', array=phases)])
            bt = pyfits.BinTableHDU.from_columns(
                event_hdu.columns + phasecol, header=event_hdr,name=event_hdu.name)
            hdulist[1] = bt
        if args.outfile is None:
            # Overwrite the existing file
            log.info('Overwriting existing FITS file '+args.eventfile)
            hdulist.flush(verbose=True, output_verify='warn')
        else:
            # Write to new output file
            log.info('Writing output FITS file '+args.outfile)
            hdulist.writeto(args.outfile,overwrite=True, checksum=True,
                output_verify='warn')
        # NOTE(review): hdulist is never closed — consider a `with` block
        # or hdulist.close(); confirm whether the handle leak matters here.

    return 0
def test_sampler():
    """Regression test: with identically seeded RNGs, two independent
    EmceeSampler runs over the same Fermi event data must produce
    byte-identical chains (determinism / reproducibility check)."""
    r = []
    for i in range(2):
        # Re-seed every RNG source so both iterations start identically.
        random.seed(0)
        numpy.random.seed(0)
        s = numpy.random.mtrand.RandomState(0)
        parfile = join(datadir, "PSRJ0030+0451_psrcat.par")
        eventfile = join(
            datadir,
            "J0030+0451_P8_15.0deg_239557517_458611204_ft1weights_GEO_wt.gt.0.4.fits",
        )
        gaussianfile = join(datadir, "templateJ0030.3gauss")
        weightcol = "PSRJ0030+0451"

        # Tiny run: 10 walkers, a single MCMC step.
        minWeight = 0.9
        nwalkers = 10
        nsteps = 1
        nbins = 256
        phs = 0.0

        model = pint.models.get_model(parfile)
        tl = fermi.load_Fermi_TOAs(eventfile, weightcolumn=weightcol,
                                   minweight=minWeight)
        ts = toa.TOAs(toalist=tl)
        # Introduce a small error so that residuals can be calculated
        ts.table["error"] = 1.0
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)

        weights, _ = ts.get_flag_value("weight", as_type=float)
        weights = np.array(weights)

        # Load and normalize the gaussian pulse-profile template.
        template = read_gaussfitfile(gaussianfile, nbins)
        template /= template.mean()

        sampler = EmceeSampler(nwalkers)
        fitter = MCMCFitterBinnedTemplate(ts, model, sampler, template=template,
                                          weights=weights, phs=phs)
        # Pin the sampler's RandomState so emcee's draws are reproducible.
        fitter.sampler.random_state = s

        # (kept for reference) alternative phase-marginalized start:
        # phases = fitter.get_event_phases()
        # maxbin, like_start = marginalize_over_phase(phases, template,
        #                                             weights=fitter.weights,
        #                                             minimize=True,
        #                                             showplot=True)
        # fitter.fitvals[-1] = 1.0 - maxbin[0] / float(len(template))
        # fitter.set_priors(fitter, 10)
        pos = fitter.sampler.get_initial_pos(
            fitter.fitkeys,
            fitter.fitvals,
            fitter.fiterrs,
            0.1,
            minMJD=fitter.minMJD,
            maxMJD=fitter.maxMJD,
        )
        # pos = fitter.clip_template_params(pos)
        fitter.sampler.initialize_sampler(fitter.lnposterior, fitter.n_fit_params)
        fitter.sampler.run_mcmc(pos, nsteps)
        # fitter.fit_toas(maxiter=nsteps, pos=None)
        # fitter.set_parameters(fitter.maxpost_fitvals)
        # fitter.phaseogram()
        # samples = sampler.sampler.chain[:, 10:, :].reshape((-1, fitter.n_fit_params))
        # r.append(np.random.randn())
        # Record walker 0's chain from this run for comparison.
        r.append(sampler.sampler.chain[0])
    assert_array_equal(r[0], r[1])
def main(argv=None):
    """Run MCMC optimization of a PINT timing model against photon event data.

    Parses command-line arguments, loads photon TOAs from the event file
    (optionally from a pickle cache), optionally rescales event weights,
    sets priors on the fit parameters, runs an emcee sampler, and writes
    diagnostic plots, pulse profiles, a post-fit par file, a results text
    file, and the raw MCMC samples (pickle).

    Parameters
    ----------
    argv : list of str or None
        Command-line arguments; None means use sys.argv.
    """
    parser = argparse.ArgumentParser(
        description="PINT tool for MCMC optimization of timing models using event data.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("eventfile", help="event file to use")
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("gaussianfile", help="gaussian file that defines template")
    parser.add_argument("--ft2", help="Path to FT2 file.", default=None)
    parser.add_argument(
        "--weightcol",
        help="name of weight column (or 'CALC' to have them computed)",
        default=None,
    )
    parser.add_argument(
        "--nwalkers", help="Number of MCMC walkers", type=int, default=200
    )
    parser.add_argument(
        "--burnin",
        help="Number of MCMC steps for burn in",
        type=int,
        default=100,
    )
    parser.add_argument(
        "--nsteps",
        help="Number of MCMC steps to compute",
        type=int,
        default=1000,
    )
    parser.add_argument(
        "--minMJD", help="Earliest MJD to use", type=float, default=54680.0
    )
    parser.add_argument(
        "--maxMJD", help="Latest MJD to use", type=float, default=57250.0
    )
    parser.add_argument(
        "--phs", help="Starting phase offset [0-1] (def is to measure)", type=float
    )
    parser.add_argument(
        "--phserr", help="Error on starting phase", type=float, default=0.03
    )
    parser.add_argument(
        "--minWeight",
        help="Minimum weight to include",
        type=float,
        default=0.05,
    )
    parser.add_argument(
        "--wgtexp",
        help="Raise computed weights to this power (or 0.0 to disable any rescaling of weights)",
        type=float,
        default=0.0,
    )
    parser.add_argument(
        "--testWeights",
        # Fixed typo: "evalute" -> "evaluate".
        help="Make plots to evaluate weight cuts?",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--doOpt",
        help="Run initial scipy opt before MCMC?",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--initerrfact",
        help="Multiply par file errors by this factor when initializing walker starting values",
        type=float,
        default=0.1,
    )
    parser.add_argument(
        "--priorerrfact",
        # Fixed typo: "Multiple" -> "Multiply" (matches --initerrfact wording).
        help="Multiply par file errors by this factor when setting gaussian prior widths",
        type=float,
        default=10.0,
    )
    parser.add_argument(
        "--usepickle",
        help="Read events from pickle file, if available?",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--log-level",
        type=str,
        choices=("TRACE", "DEBUG", "INFO", "WARNING", "ERROR"),
        default=pint.logging.script_level,
        help="Logging level",
        dest="loglevel",
    )
    global nwalkers, nsteps, ftr
    args = parser.parse_args(argv)
    log.remove()
    log.add(
        sys.stderr,
        level=args.loglevel,
        colorize=True,
        format=pint.logging.format,
        filter=pint.logging.LogFilter(),
    )
    eventfile = args.eventfile
    parfile = args.parfile
    gaussianfile = args.gaussianfile
    weightcol = args.weightcol
    if args.ft2 is not None:
        # Instantiate Fermi observatory once so it gets added to the observatory registry
        get_satellite_observatory("Fermi", args.ft2)
    nwalkers = args.nwalkers
    burnin = args.burnin
    nsteps = args.nsteps
    if burnin >= nsteps:
        log.error("burnin must be < nsteps")
        sys.exit(1)
    nbins = 256  # For likelihood calculation based on gaussians file
    outprof_nbins = 256  # in the text file, for pygaussfit.py, for instance
    minMJD = args.minMJD
    maxMJD = args.maxMJD  # Usually set by coverage of IERS file
    minWeight = args.minWeight
    do_opt_first = args.doOpt
    wgtexp = args.wgtexp

    # Read in initial model
    modelin = pint.models.get_model(parfile)

    # The custom_timing version below is to manually construct the TimingModel
    # class, which allows it to be pickled. This is needed for parallelizing
    # the emcee call over a number of threads.  So far, it isn't quite working
    # so it is disabled.  The code above constructs the TimingModel class
    # dynamically, as usual.
    # modelin = custom_timing(parfile)

    # Remove the dispersion delay as it is unnecessary
    # modelin.delay_funcs['L1'].remove(modelin.dispersion_delay)

    # Set the target coords for automatic weighting if necessary
    if "ELONG" in modelin.params:
        tc = SkyCoord(
            modelin.ELONG.quantity,
            modelin.ELAT.quantity,
            frame="barycentrictrueecliptic",
        )
    else:
        tc = SkyCoord(modelin.RAJ.quantity, modelin.DECJ.quantity, frame="icrs")
    target = tc if weightcol == "CALC" else None

    # TODO: make this properly handle long double
    ts = None
    if args.usepickle:
        try:
            ts = toa.load_pickle(eventfile)
        except IOError:
            # No cache available; fall through to reading the event file.
            pass
    if ts is None:
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(
            eventfile, weightcolumn=weightcol, targetcoord=target, minweight=minWeight
        )
        # Limit the TOAs to ones in selected MJD range and above minWeight
        tl = [
            t
            for t in tl
            if (
                minMJD < t.mjd.value < maxMJD
                and (weightcol is None or float(t.flags["weight"]) > minWeight)
            )
        ]
        log.info("There are %d events we will use" % len(tl))
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        toa.save_pickle(ts)

    if weightcol is not None:
        if weightcol == "CALC":
            weights = np.asarray([float(x["weight"]) for x in ts.table["flags"]])
            log.info(
                "Original weights have min / max weights %.3f / %.3f"
                % (weights.min(), weights.max())
            )
            # Rescale the weights, if requested (by having wgtexp != 0.0)
            if wgtexp != 0.0:
                weights **= wgtexp
                wmx, wmn = weights.max(), weights.min()
                # make the highest weight = 1, but keep min weight the same
                weights = wmn + ((weights - wmn) * (1.0 - wmn) / (wmx - wmn))
            for ii, x in enumerate(ts.table["flags"]):
                x["weight"] = str(weights[ii])
        weights = np.asarray([float(x["weight"]) for x in ts.table["flags"]])
        log.info(
            "There are %d events, with min / max weights %.3f / %.3f"
            % (len(weights), weights.min(), weights.max())
        )
    else:
        weights = None
        log.info("There are %d events, no weights are being used." % ts.ntoas)

    # Now load in the gaussian template and normalize it
    gtemplate = read_gaussfitfile(gaussianfile, nbins)
    gtemplate /= gtemplate.mean()

    # Set the priors on the parameters in the model, before
    # instantiating the emcee_fitter
    # Currently, this adds a gaussian prior on each parameter
    # with width equal to the par file uncertainty * priorerrfact,
    # and then puts in some special cases.
    # *** This should be replaced/supplemented with a way to specify
    # more general priors on parameters that need certain bounds
    phs = 0.0 if args.phs is None else args.phs
    fitkeys, fitvals, fiterrs = get_fit_keyvals(modelin, phs=phs, phserr=args.phserr)

    for key, v, e in zip(fitkeys[:-1], fitvals[:-1], fiterrs[:-1]):
        if key == "SINI" or key == "E" or key == "ECC":
            getattr(modelin, key).prior = Prior(uniform(0.0, 1.0))
        elif key == "PX":
            getattr(modelin, key).prior = Prior(uniform(0.0, 10.0))
        elif key.startswith("GLPH"):
            getattr(modelin, key).prior = Prior(uniform(-0.5, 1.0))
        else:
            getattr(modelin, key).prior = Prior(
                norm(loc=float(v), scale=float(e * args.priorerrfact))
            )

    # Now define the requirements for emcee
    ftr = emcee_fitter(ts, modelin, gtemplate, weights, phs, args.phserr)

    # Use this if you want to see the effect of setting minWeight
    if args.testWeights:
        log.info("Checking H-test vs weights")
        ftr.prof_vs_weights(use_weights=True)
        ftr.prof_vs_weights(use_weights=False)
        sys.exit()

    # Now compute the photon phases and see if we see a pulse
    phss = ftr.get_event_phases()
    maxbin, like_start = marginalize_over_phase(
        phss, gtemplate, weights=ftr.weights, minimize=True, showplot=False
    )
    log.info("Starting pulse likelihood: %f" % like_start)
    if args.phs is None:
        fitvals[-1] = 1.0 - maxbin[0] / float(len(gtemplate))
        # Wrap the measured phase into [0, 1).
        if fitvals[-1] > 1.0:
            fitvals[-1] -= 1.0
        if fitvals[-1] < 0.0:
            fitvals[-1] += 1.0
        log.info("Starting pulse phase: %f" % fitvals[-1])
    else:
        # NOTE(review): bare `maxbin` here vs `maxbin[0]` above — confirm
        # the type returned by marginalize_over_phase.
        log.warning(
            "Measured starting pulse phase is %f, but using %f"
            % (1.0 - maxbin / float(len(gtemplate)), args.phs)
        )
        fitvals[-1] = args.phs
    ftr.fitvals[-1] = fitvals[-1]
    ftr.phaseogram(plotfile=ftr.model.PSR.value + "_pre.png")
    plt.close()
    # ftr.phaseogram()

    # Write out the starting pulse profile
    vs, xs = np.histogram(
        ftr.get_event_phases(), outprof_nbins, range=[0, 1], weights=ftr.weights
    )
    # Use a context manager so the file is closed even on error.
    with open(ftr.model.PSR.value + "_prof_pre.txt", "w") as f:
        for x, v in zip(xs, vs):
            f.write("%.5f %12.5f\n" % (x, v))

    # Try normal optimization first to see how it goes
    if do_opt_first:
        result = op.minimize(ftr.minimize_func, np.zeros_like(ftr.fitvals))
        newfitvals = np.asarray(result["x"]) * ftr.fiterrs + ftr.fitvals
        like_optmin = -result["fun"]
        log.info("Optimization likelihood: %f" % like_optmin)
        ftr.set_params(dict(zip(ftr.fitkeys, newfitvals)))
        ftr.phaseogram()
    else:
        like_optmin = -np.inf

    # Set up the initial conditions for the emcee walkers.  Use the
    # scipy.optimize newfitvals instead if they are better
    ndim = ftr.n_fit_params
    if like_start > like_optmin:
        # Keep the starting deviations small...
        pos = [
            ftr.fitvals + ftr.fiterrs * args.initerrfact * np.random.randn(ndim)
            for ii in range(nwalkers)
        ]
        # Set starting params
        for param in ["GLPH_1", "GLEP_1", "SINI", "M2", "E", "ECC", "PX", "A1"]:
            if param in ftr.fitkeys:
                idx = ftr.fitkeys.index(param)
                if param == "GLPH_1":
                    svals = np.random.uniform(-0.5, 0.5, nwalkers)
                elif param == "GLEP_1":
                    svals = np.random.uniform(minMJD + 100, maxMJD - 100, nwalkers)
                    # svals = 55422.0 + np.random.randn(nwalkers)
                elif param == "SINI":
                    svals = np.random.uniform(0.0, 1.0, nwalkers)
                elif param == "M2":
                    svals = np.random.uniform(0.1, 0.6, nwalkers)
                elif param in ["E", "ECC", "PX", "A1"]:
                    # Ensure all positive
                    svals = np.fabs(
                        ftr.fitvals[idx] + ftr.fiterrs[idx] * np.random.randn(nwalkers)
                    )
                    if param in ["E", "ECC"]:
                        # Reflect eccentricities > 1 back below 1.
                        svals[svals > 1.0] = 1.0 - (svals[svals > 1.0] - 1.0)
                for ii in range(nwalkers):
                    pos[ii][idx] = svals[ii]
    else:
        pos = [
            newfitvals + ftr.fiterrs * args.initerrfact * np.random.randn(ndim)
            for i in range(nwalkers)
        ]
    # Set the 0th walker to have the initial pre-fit solution
    # This way, one walker should always be in a good position
    pos[0] = ftr.fitvals

    import emcee

    # Following are for parallel processing tests...
    if 0:

        def unwrapped_lnpost(theta, ftr=ftr):
            return ftr.lnposterior(theta)

        import pathos.multiprocessing as mp

        pool = mp.ProcessPool(nodes=8)
        sampler = emcee.EnsembleSampler(
            nwalkers, ndim, unwrapped_lnpost, pool=pool, args=[ftr]
        )
    else:
        sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior)
    # The number is the number of points in the chain
    sampler.run_mcmc(pos, nsteps)

    def chains_to_dict(names, sampler):
        # One (nsteps, nwalkers) chain per fit parameter.
        chains = [sampler.chain[:, :, ii].T for ii in range(len(names))]
        return dict(zip(names, chains))

    def plot_chains(chain_dict, file=False):
        # One panel per parameter, all walkers overplotted.
        npts = len(chain_dict)
        fig, axes = plt.subplots(npts, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
            axes[ii].set_ylabel(name)
        axes[npts - 1].set_xlabel("Step Number")
        fig.tight_layout()
        if file:
            fig.savefig(file)
            plt.close()
        else:
            plt.show()
            plt.close()

    chains = chains_to_dict(ftr.fitkeys, sampler)
    plot_chains(chains, file=ftr.model.PSR.value + "_chains.png")

    # Make the triangle plot.
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    try:
        import corner

        fig = corner.corner(
            samples,
            labels=ftr.fitkeys,
            bins=50,
            truths=ftr.maxpost_fitvals,
            plot_contours=True,
        )
        fig.savefig(ftr.model.PSR.value + "_triangle.png")
        plt.close()
    except ImportError:
        pass

    # Plot the scaled prior probability alongside the initial gaussian probability distribution and the histogrammed samples
    ftr.plot_priors(chains, burnin, scale=True)
    plt.savefig(ftr.model.PSR.value + "_priors.png")
    plt.close()

    # Make a phaseogram with the 50th percentile values
    # ftr.set_params(dict(zip(ftr.fitkeys, np.percentile(samples, 50, axis=0))))
    # Make a phaseogram with the best MCMC result
    ftr.set_params(dict(zip(ftr.fitkeys[:-1], ftr.maxpost_fitvals[:-1])))
    ftr.phaseogram(plotfile=ftr.model.PSR.value + "_post.png")
    plt.close()

    # Write out the output pulse profile
    vs, xs = np.histogram(
        ftr.get_event_phases(), outprof_nbins, range=[0, 1], weights=ftr.weights
    )
    with open(ftr.model.PSR.value + "_prof_post.txt", "w") as f:
        for x, v in zip(xs, vs):
            f.write("%.5f %12.5f\n" % (x, v))

    # Write out the par file for the best MCMC parameter est
    with open(ftr.model.PSR.value + "_post.par", "w") as f:
        f.write(ftr.model.as_parfile())

    # Print the best MCMC values and ranges.
    # BUGFIX: map() returns a one-shot iterator in Python 3; without list()
    # the logging loop below exhausted it and the results file then got no
    # parameter lines at all.
    ranges = list(
        map(
            lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
            zip(*np.percentile(samples, [16, 50, 84], axis=0)),
        )
    )
    log.info("Post-MCMC values (50th percentile +/- (16th/84th percentile):")
    for name, vals in zip(ftr.fitkeys, ranges):
        log.info("%8s:" % name + "%25.15g (+ %12.5g / - %12.5g)" % vals)

    # Put the same stuff in a file
    with open(ftr.model.PSR.value + "_results.txt", "w") as f:
        f.write("Post-MCMC values (50th percentile +/- (16th/84th percentile):\n")
        for name, vals in zip(ftr.fitkeys, ranges):
            f.write("%8s:" % name + " %25.15g (+ %12.5g / - %12.5g)\n" % vals)
        f.write("\nMaximum likelihood par file:\n")
        f.write(ftr.model.as_parfile())

    import pickle

    # Close the pickle file handle deterministically (was left open before).
    with open(ftr.model.PSR.value + "_samples.pickle", "wb") as f:
        pickle.dump(samples, f)
# ---- Fragment of an argument-parsing / event-loading script: `parser`,
# ---- `psr_par`, and the imports are defined outside this visible chunk. ----
parser.add_argument(
    "--planets",
    help="Use planetary Shapiro delay in calculations (default=False)",
    default=False,
    action="store_true")
parser.add_argument("--ephem",
    help="Planetary ephemeris to use (default=DE421)",
    default="DE421")
args = parser.parse_args()

# Parse the par file (psr_par is defined outside this chunk — presumably a
# non-PINT par-file reader; confirm against the full file).
pf = psr_par(args.parfile)

# Read event file and return list of TOA objects
tl = load_Fermi_TOAs(args.eventfile, weightcolumn=args.weightcol,
                     targetcoord=SkyCoord(pf.RAJ, pf.DECJ,
                                          unit=(u.hourangle, u.degree),
                                          frame='icrs'))

# Discard events outside of MJD range
if args.maxMJD is not None:
    tlnew = []
    print("pre len : ", len(tl))
    maxT = Time(float(args.maxMJD), format='mjd')
    print("maxT : ", maxT)
    for tt in tl:
        if tt.mjd < maxT:
            tlnew.append(tt)
    tl = tlnew
    print("post len : ", len(tlnew))
# ---- Fragment of a legacy (Python 2) event_optimize-style script:
# ---- `args`, `parfile`, `eventfile`, `weightcol`, `minWeight`, `minMJD`
# ---- and `maxMJD` are defined outside this visible chunk. ----
# Sentinel values — presumably track the best log-posterior and the number
# of posterior evaluations; confirm against the full file.
maxpost = -9e99
numcalls = 0

# Read in initial model
modelin = pint.models.get_model(parfile)
# Remove the dispersion delay as it is unnecessary
modelin.delay_funcs['L1'].remove(modelin.dispersion_delay)
# Set the target coords for automatic weighting if necessary
target = SkyCoord(modelin.RAJ.value, modelin.DECJ.value, \
    frame='icrs') if weightcol=='CALC' else None

# TODO: make this properly handle long double
if not args.usepickle or (not (os.path.isfile(eventfile+".pickle") or
        os.path.isfile(eventfile+".pickle.gz"))):
    # Read event file and return list of TOA objects
    tl = fermi.load_Fermi_TOAs(eventfile, weightcolumn=weightcol,
                               targetcoord=target, minweight=minWeight)
    # Limit the TOAs to ones in selected MJD range and above minWeight
    tl = [tl[ii] for ii in range(len(tl)) if (tl[ii].mjd.value > minMJD and
        tl[ii].mjd.value < maxMJD and (weightcol is None or
        tl[ii].flags['weight'] > minWeight))]
    # NOTE(review): Python 2 print statement — this chunk predates Python 3.
    print "There are %d events we will use" % len(tl)
    # Now convert to TOAs object and compute TDBs and posvels
    ts = toa.TOAs(toalist=tl)
    ts.filename = eventfile
    ts.compute_TDBs()
    ts.compute_posvels(ephem="DE421", planets=False)
    ts.pickle()
else:  # read the events in as a picke file
    ts = toa.TOAs(eventfile, usepickle=True)

# NOTE(review): the body of the weightcol=='CALC' branch is truncated in
# this chunk and continues past the visible source.
if weightcol is not None:
    if weightcol=='CALC':
def main(argv=None):
    """Legacy emcee-based optimizer: load photon events, fit the timing model
    with MCMC, and write plots/profiles/results.

    NOTE(review): this is the pre-Python-3 generation of the script (cPickle,
    bare except, module-level globals); several latent bugs are flagged
    inline below.  The code is left byte-identical and only annotated.
    """
    # NOTE(review): the length is checked on `argv` but the values are
    # unpacked from `sys.argv`; the two disagree when main() is called with
    # an explicit argv list, and len(argv) raises TypeError when argv is
    # None (the default).
    if len(argv)==3:
        eventfile, parfile, weightcol = sys.argv[1:]
    elif len(argv)==2:
        eventfile, parfile = sys.argv[1:]
        weightcol=None
    else:
        print("usage: htest_optimize eventfile parfile [weightcol]")
        sys.exit()

    # Read in initial model
    modelin = pint.models.get_model(parfile)
    # Remove the dispersion delay as it is unnecessary
    modelin.delay_funcs['L1'].remove(modelin.dispersion_delay)
    # Set the target coords for automatic weighting if necessary
    if 'ELONG' in modelin.params:
        tc = SkyCoord(modelin.ELONG.quantity,modelin.ELAT.quantity,
                      frame='barycentrictrueecliptic')
    else:
        tc = SkyCoord(modelin.RAJ.quantity,modelin.DECJ.quantity,frame='icrs')
    target = tc if weightcol=='CALC' else None

    # TODO: make this properly handle long double
    # NOTE(review): minWeight, maxMJD, minMJD, wgtexp, nwalkers, nsteps,
    # burnin, errfact, outprof_nbins and do_opt_first are read from module
    # globals set elsewhere — confirm against the full file.
    if not (os.path.isfile(eventfile+".pickle") or
            os.path.isfile(eventfile+".pickle.gz")):
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(eventfile, weightcolumn=weightcol,
                                   targetcoord=target, minweight=minWeight)
        # Limit the TOAs to ones where we have IERS corrections for
        tl = [tl[ii] for ii in range(len(tl)) if (tl[ii].mjd.value < maxMJD
            and (weightcol is None or tl[ii].flags['weight'] > minWeight))]
        print("There are %d events we will use" % len(tl))
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    else:  # read the events in as a pickle file
        picklefile = toa._check_pickle(eventfile)
        if not picklefile:
            picklefile = eventfile
        ts = toa.TOAs(picklefile)

    if weightcol is not None:
        if weightcol=='CALC':
            weights = np.asarray([x['weight'] for x in ts.table['flags']])
            print("Original weights have min / max weights %.3f / %.3f" % \
                (weights.min(), weights.max()))
            weights **= wgtexp
            wmx, wmn = weights.max(), weights.min()
            # make the highest weight = 1, but keep min weight the same
            weights = wmn + ((weights - wmn) * (1.0 - wmn) / (wmx - wmn))
            for ii, x in enumerate(ts.table['flags']):
                x['weight'] = weights[ii]
        weights = np.asarray([x['weight'] for x in ts.table['flags']])
        print("There are %d events, with min / max weights %.3f / %.3f" % \
            (len(weights), weights.min(), weights.max()))
    else:
        weights = None
        # BUG(review): len(weights) is len(None) here and raises TypeError;
        # the newer version of this script prints ts.ntoas instead.
        print("There are %d events, no weights are being used." % (len(weights)))

    # Now define the requirements for emcee
    ftr = emcee_fitter(ts, modelin, weights)

    # Use this if you want to see the effect of setting minWeight
    if minWeight == 0.0:
        print("Checking h-test vs weights")
        ftr.prof_vs_weights(use_weights=True)
        ftr.prof_vs_weights(use_weights=False)
        sys.exit()

    # Now compute the photon phases and see if we see a pulse
    phss = ftr.get_event_phases()
    like_start = -1.0*sf_hm(hmw(phss,weights=ftr.weights),logprob=True)
    print("Starting pulse likelihood:", like_start)
    ftr.phaseogram(file=ftr.model.PSR.value+"_pre.png")
    plt.close()
    # Second call shows the phaseogram interactively (no file argument).
    ftr.phaseogram()

    # Write out the starting pulse profile
    vs, xs = np.histogram(ftr.get_event_phases(), outprof_nbins, \
        range=[0,1], weights=ftr.weights)
    f = open(ftr.model.PSR.value+"_prof_pre.txt", 'w')
    for x, v in zip(xs, vs):
        f.write("%.5f %12.5f\n" % (x, v))
    f.close()

    # Try normal optimization first to see how it goes
    if do_opt_first:
        result = op.minimize(ftr.minimize_func, np.zeros_like(ftr.fitvals))
        newfitvals = np.asarray(result['x']) * ftr.fiterrs + ftr.fitvals
        like_optmin = -result['fun']
        print("Optimization likelihood:", like_optmin)
        ftr.set_params(dict(zip(ftr.fitkeys, newfitvals)))
        ftr.phaseogram()
    else:
        like_optmin = -np.inf

    # Set up the initial conditions for the emcee walkers.  Use the
    # scipy.optimize newfitvals instead if they are better
    ndim = ftr.n_fit_params
    if like_start > like_optmin:
        # Keep the starting deviations small...
        pos = [ftr.fitvals + ftr.fiterrs/errfact * np.random.randn(ndim)
            for ii in range(nwalkers)]
        # Set starting params with uniform priors to uniform in the prior
        for param in ["GLPH_1", "GLEP_1", "SINI", "M2", "E", "ECC", "PX", "A1"]:
            if param in ftr.fitkeys:
                idx = ftr.fitkeys.index(param)
                if param=="GLPH_1":
                    svals = np.random.uniform(-0.5, 0.5, nwalkers)
                elif param=="GLEP_1":
                    svals = np.random.uniform(minMJD+100, maxMJD-100, nwalkers)
                    #svals = 55422.0 + np.random.randn(nwalkers)
                elif param=="SINI":
                    svals = np.random.uniform(0.0, 1.0, nwalkers)
                elif param=="M2":
                    svals = np.random.uniform(0.1, 0.6, nwalkers)
                elif param in ["E", "ECC", "PX", "A1"]:
                    # Ensure all positive
                    svals = np.fabs(ftr.fitvals[idx] + ftr.fiterrs[idx] *
                                    np.random.randn(nwalkers))
                    if param in ["E", "ECC"]:
                        # Reflect eccentricities > 1 back below 1.
                        svals[svals>1.0] = 1.0 - (svals[svals>1.0] - 1.0)
                for ii in range(nwalkers):
                    pos[ii][idx] = svals[ii]
    else:
        pos = [newfitvals + ftr.fiterrs/errfact*np.random.randn(ndim)
            for i in range(nwalkers)]
    # Set the 0th walker to have the initial pre-fit solution
    # This way, one walker should always be in a good position
    pos[0] = ftr.fitvals

    import emcee
    #sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior, threads=10)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior)
    # The number is the number of points in the chain
    sampler.run_mcmc(pos, nsteps)

    def chains_to_dict(names, sampler):
        # One (nsteps, nwalkers) chain per fit parameter.
        chains = [sampler.chain[:,:,ii].T for ii in range(len(names))]
        return dict(zip(names,chains))

    def plot_chains(chain_dict, file=False):
        # NOTE(review): the local name `np` shadows the numpy module inside
        # this nested function (harmless only because numpy is not used
        # here); the newer version of this script renames it to `npts`.
        np = len(chain_dict)
        fig, axes = plt.subplots(np, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
            axes[ii].set_ylabel(name)
        axes[np-1].set_xlabel("Step Number")
        fig.tight_layout()
        if file:
            fig.savefig(file)
            plt.close()
        else:
            plt.show()
            plt.close()

    chains = chains_to_dict(ftr.fitkeys, sampler)
    plot_chains(chains, file=ftr.model.PSR.value+"_chains.png")

    # Make the triangle plot.
    try:
        import corner
        samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
        fig = corner.corner(samples, labels=ftr.fitkeys, bins=50)
        fig.savefig(ftr.model.PSR.value+"_triangle.png")
        plt.close()
    # NOTE(review): bare except also hides real errors raised by corner;
    # the newer version of this script catches ImportError only.
    except:
        pass

    # Make a phaseogram with the 50th percentile values
    #ftr.set_params(dict(zip(ftr.fitkeys, np.percentile(samples, 50, axis=0))))
    # Make a phaseogram with the best MCMC result
    ftr.set_params(dict(zip(ftr.fitkeys, ftr.maxpost_fitvals)))
    ftr.phaseogram(file=ftr.model.PSR.value+"_post.png")
    plt.close()

    # Write out the output pulse profile
    vs, xs = np.histogram(ftr.get_event_phases(), outprof_nbins, \
        range=[0,1], weights=ftr.weights)
    f = open(ftr.model.PSR.value+"_prof_post.txt", 'w')
    for x, v in zip(xs, vs):
        f.write("%.5f %12.5f\n" % (x, v))
    f.close()

    # Write out the par file for the best MCMC parameter est
    f = open(ftr.model.PSR.value+"_post.par", 'w')
    f.write(ftr.model.as_parfile())
    f.close()

    # Print the best MCMC values and ranges
    # NOTE(review): under Python 3, map() yields a one-shot iterator; it is
    # exhausted by the print loop below, so the file-writing loop afterwards
    # emits no parameter lines.  Wrap in list() to fix.
    ranges = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
        zip(*np.percentile(samples, [16, 50, 84], axis=0)))
    print("Post-MCMC values (50th percentile +/- (16th/84th percentile):")
    for name, vals in zip(ftr.fitkeys, ranges):
        print("%8s:"%name, "%25.15g (+ %12.5g / - %12.5g)"%vals)

    # Put the same stuff in a file
    f = open(ftr.model.PSR.value+"_results.txt", 'w')
    f.write("Post-MCMC values (50th percentile +/- (16th/84th percentile):\n")
    for name, vals in zip(ftr.fitkeys, ranges):
        f.write("%8s:"%name + " %25.15g (+ %12.5g / - %12.5g)\n"%vals)
    f.write("\nMaximum likelihood par file:\n")
    f.write(ftr.model.as_parfile())
    f.close()

    # NOTE(review): cPickle is Python 2 only (use `pickle` on Python 3), and
    # the file handle passed to dump() is never closed.
    import cPickle
    cPickle.dump(samples, open(ftr.model.PSR.value+"_samples.pickle", "wb"))