def main():
    """Load the data file named on the command line and draw a waterfall
    plot of the requested time window.
    """
    fn = args[0]
    # Pick the reader from the file extension.
    if fn.endswith(".fil"):
        # SIGPROC filterbank input
        filetype = "filterbank"
        rawdatafile = filterbank.FilterbankFile(fn)
    elif fn.endswith(".fits"):
        # PSRFITS input
        filetype = "psrfits"
        rawdatafile = psrfits.PsrfitsFile(fn)
    else:
        raise ValueError("Cannot recognize data file type from "
                         "extension. (Only '.fits' and '.fil' "
                         "are supported.)")

    # Gather the extraction options once, then hand them to waterfall().
    extract_kwargs = dict(dm=options.dm,
                          nbins=options.nbins, nsub=options.nsub,
                          subdm=options.subdm, zerodm=options.zerodm,
                          downsamp=options.downsamp,
                          scaleindep=options.scaleindep,
                          width_bins=options.width_bins, mask=options.mask,
                          maskfn=options.maskfile,
                          bandpass_corr=options.bandpass_corr)
    data, bins, nbins, start, source_name = waterfall(rawdatafile,
                                                      options.start,
                                                      options.duration,
                                                      **extract_kwargs)

    # Render the dedispersed chunk (plot_waterfall handles display/output).
    plot_waterfall(data, start, source_name, options.duration,
                   dm=options.dm, ofile=options.ofile,
                   integrate_ts=options.integrate_ts,
                   integrate_spec=options.integrate_spec,
                   show_cb=options.show_cb, cmap_str=options.cmap,
                   sweep_dms=options.sweep_dms,
                   sweep_posns=options.sweep_posns,
                   downsamp=options.downsamp, width=options.width,
                   snr=options.snr)
def main(): parser = OptionParser(usage) parser.add_option("-x", "--xwin", action="store_true", dest="xwin", default=False, help="Show the bandpass in an x-window as well") parser.add_option("-p", "--plot", action="store_true", dest="plot", default=False, help="Show the bandpass in a .png plot as well") parser.add_option("-n", "--nomods", action="store_true", dest="nomods", default=False, help="Do not apply offsets/scales (default applies)") parser.add_option("-w", "--weights", action="store_true", dest="weights", default=False, help="Apply weights (default doesn't apply_") parser.add_option("-f", "--first", type="int", dest="subfirst", default=0, help="First subint to compute stats for") parser.add_option("-s", "--skip", type="int", dest="subskip", default=10, help="Number of subints to skip during stats calculations") parser.add_option("-o", "--outfile", type="string", dest="outfile", default=None, help="Output filename (default will be INFILE.bandpass") (opts, args) = parser.parse_args() if len(args)==0: print full_usage sys.exit(0) for infile in args: print "Processing '%s'" % (infile) pf = psrfits.PsrfitsFile(infile) if opts.nomods: # for a bandpass histogram of raw bits htot = np.zeros(1<<pf.nbits) subints = np.arange(opts.subfirst, pf.specinfo.num_subint, opts.subskip).astype(np.int) means = np.zeros((len(subints), pf.nchan)) stdevs = np.zeros((len(subints), pf.nchan)) for ii, subint in enumerate(subints): print "%.0f%%.." 
% (100.0 * float(subint) / pf.specinfo.num_subint), sys.stdout.flush() specs = pf.read_subint(subint, apply_weights=opts.weights, apply_scales=not opts.nomods, apply_offsets=not opts.nomods) if opts.nomods: h, b = np.histogram(specs.flatten(), bins=np.arange((1<<pf.nbits)+1)) htot += h means[ii] = specs.mean(axis=0) stdevs[ii] = specs.std(axis=0) print "%.0f%%" % (100.0) med_mean = np.median(means, axis=0) med_stdev = np.median(stdevs, axis=0) outfilenm = infile+".bandpass" if opts.outfile is None else opts.outfile plotfilenm = outfilenm+".png" if opts.plot else None if opts.xwin or opts.plot: plot_bandpass(pf.freqs, med_mean, med_stdev, outfile=plotfilenm) if opts.nomods: htot = htot / htot.sum() print "# Bits histogram" print "# val fract" print "#---------------" for b, h in zip(b, htot): print "%3d %6.4f" % (b, h) write_bandpass(outfilenm, pf.freqs, med_mean, med_stdev)
def main():
    """Waterfall-plot a chunk of data and, when a CSV file is given,
    append one row of candidate metadata (SNR, width, DM, source info)
    for the produced PNG.
    """
    fn = args[0]
    if fn.endswith(".fil"):
        # Filterbank file
        filetype = "filterbank"
        rawdatafile = filterbank.FilterbankFile(fn)
    elif fn.endswith(".fits"):
        # PSRFITS file
        filetype = "psrfits"
        rawdatafile = psrfits.PsrfitsFile(fn)
    else:
        raise ValueError("Cannot recognize data file type from "
                         "extension. (Only '.fits' and '.fil' "
                         "are supported.)")
    data, bins, nbins, start, source_name = waterfall(rawdatafile, options.start,
                                                      options.duration, dm=options.dm,
                                                      nbins=options.nbins, nsub=options.nsub,
                                                      subdm=options.subdm, zerodm=options.zerodm,
                                                      downsamp=options.downsamp,
                                                      scaleindep=options.scaleindep,
                                                      width_bins=options.width_bins, mask=options.mask,
                                                      maskfn=options.maskfile,
                                                      csv_file=options.csv_file,
                                                      bandpass_corr=options.bandpass_corr)
    ofile = plot_waterfall(data, start, source_name, options.duration,
                           dm=options.dm, ofile=options.ofile,
                           integrate_ts=options.integrate_ts,
                           integrate_spec=options.integrate_spec,
                           show_cb=options.show_cb, cmap_str=options.cmap,
                           sweep_dms=options.sweep_dms,
                           sweep_posns=options.sweep_posns,
                           downsamp=options.downsamp, width=options.width,
                           snr=options.snr, csv_file=options.csv_file)

    # Update CSV file if one was provided.
    # BUGFIX: 'csv_file' was referenced without ever being assigned
    # (NameError at runtime); read it from the parsed options.
    csv_file = options.csv_file
    if csv_file:
        sourcename = rawdatafile.header['source_name']
        src_ra = rawdatafile.header['src_raj']
        src_dec = rawdatafile.header['src_dej']
        tstart = rawdatafile.header['tstart']
        fch1 = rawdatafile.header['fch1']
        nchans = rawdatafile.header['nchans']
        bw = int(rawdatafile.header['nchans']) * rawdatafile.header['foff']
        # Category is encoded as the leading token of the output PNG name.
        cat = ofile.split("_")[0]
        snr = options.snr
        width = options.width
        dm = options.dm
        df = pd.DataFrame({'PNGFILE': [ofile], 'Category': [cat], 'SNR': [snr],
                           'WIDTH': [width], 'DM': [dm], 'SourceName': [sourcename],
                           'RA': [src_ra], 'DEC': [src_dec], 'MJD': [tstart],
                           'Hfreq': [fch1], 'NCHANS': [nchans], 'BANDWIDTH': [bw],
                           'filename': [fn]})
        # Column order coming out irregular, so fixing it here
        col = ['PNGFILE', 'Category', 'SNR', 'WIDTH', 'DM', 'SourceName', 'RA',
               'DEC', 'MJD', 'Hfreq', 'NCHANS', 'BANDWIDTH', 'filename']
        df = df.reindex(columns=col)
        # Write a header only when creating the file; append otherwise.
        if not os.path.exists(csv_file):
            with open(csv_file, 'w') as f:
                df.to_csv(f, header=True, index=False)
        else:
            with open(csv_file, 'a') as f:
                df.to_csv(f, header=False, index=False)
def bary_to_topo(infofilenm, rawdatafile=False, ephem="DE200"): """ bary_to_topo(infofilenm, ephem="DE200"): Returns the barycentric and topocentric times evert 10 seconds. The data for the observation must be found in the info file. """ if infofilenm[-4:]==".inf": infofilenm = infofilenm[:-4] filetype = 'inf' elif infofilenm[-5:]==".fits": infofilenm = infofilenm filetype = 'PSRFITS' else: raise ValueError("file type not recognized. Must be .inf, or .fits") if filetype=="inf": obs = read_inffile(infofilenm) T = obs.N * obs.dt dt = 10.0 tto = obs.mjd_i + obs.mjd_f tts = Num.arange(tto, tto + (T + dt) / psr_utils.SECPERDAY, dt / psr_utils.SECPERDAY) nn = len(tts) bts = Num.zeros(nn, 'd') vel = Num.zeros(nn, 'd') ra = psr_utils.coord_to_string(obs.ra_h, obs.ra_m, obs.ra_s) dec = psr_utils.coord_to_string(obs.dec_d, obs.dec_m, obs.dec_s) if (obs.telescope == 'Parkes'): tel = 'PK' elif (obs.telescope == 'Effelsberg'): tel = 'EB' elif (obs.telescope == 'Arecibo'): tel = 'AO' elif (obs.telescope == 'MMT'): tel = 'MT' elif (obs.telescope == 'GBT'): tel = 'GB' else: print "Telescope not recognized." return 0 elif filetype=="PSRFITS": if not rawdatafile: rawdatafile = psrfits.PsrfitsFile(infofilenm) T = rawdatafile.specinfo.T dt = 10.0 tto = rawdatafile.specinfo.start_MJD[0] tts = Num.arange(tto, tto + (T + dt) / psr_utils.SECPERDAY, dt / psr_utils.SECPERDAY) nn = len(tts) bts = Num.zeros(nn, 'd') vel = Num.zeros(nn, 'd') ra = rawdatafile.specinfo.ra_str dec = rawdatafile.specinfo.dec_str if (rawdatafile.specinfo.telescope == 'Parkes'): tel = 'PK' elif (rawdatafile.specinfo.telescope == 'Effelsberg'): tel = 'EB' elif (rawdatafile.specinfo.telescope == 'Arecibo'): tel = 'AO' elif (rawdatafile.specinfo.telescope == 'MMT'): tel = 'MT' elif (rawdatafile.specinfo.telescope == 'GBT'): tel = 'GB' else: print "Telescope not recognized." 
return 0 barycenter(tts, bts, vel, nn, ra, dec, tel, ephem) avgvel = Num.add.reduce(vel) / nn tts = Num.arange(nn, dtype='d') * dt bts = (bts - bts[0]) * psr_utils.SECPERDAY return tts, bts
def main():
    """Create .spd (single-pulse diagnostic) files either from a
    RRATtrap groups file or from manually supplied parameters.
    """
    fn = args[0]
    if fn.endswith(".fil"):
        # Filterbank file
        filetype = "filterbank"
        print_debug("Reading filterbank file..")
        rawdatafile = filterbank.filterbank(fn)
        basename = fn[:-4]
    # BUGFIX: this must be 'elif' — with a plain 'if', a '.fil' file fell
    # through to the 'else' below and raised ValueError after the
    # filterbank file had already been opened.
    elif fn.endswith(".fits"):
        # PSRFITS file
        filetype = "psrfits"
        print_debug("Reading PSRFITS file..")
        rawdatafile = psrfits.PsrfitsFile(fn)
        basename = fn[:-5]
    else:
        raise ValueError("Cannot recognize data file type from "
                         "extension. (Only '.fits' and '.fil' "
                         "are supported.)")
    if options.outbasenm:
        basename = options.outbasenm
    spdcand = spcand.params()
    if not options.man_params:
        # Normal mode: parameters come from the RRATtrap groups file.
        print_debug('Maximum number of candidates to plot: %i' % options.maxnumcands)
        make_spd_from_file(spdcand, rawdatafile,
                           options.txtfile, options.maskfile,
                           options.min_rank, options.group_rank,
                           options.plot, options.just_waterfall,
                           options.integrate_ts, options.integrate_spec, options.disp_pulse,
                           options.loc_pulse, options.nsub,
                           options.maxnumcands,
                           basename,
                           mask=options.mask, barytime=options.barytime,
                           bandpass_corr=options.bandpass_corr)
    else:
        # Manual mode: everything is specified on the command line.
        print_debug("Making spd files based on mannual parameters. I suggest"
                    "reading in parameters from the groups.txt file.")
        make_spd_from_man_params(spdcand, rawdatafile,
                                 options.txtfile, options.maskfile,
                                 options.plot, options.just_waterfall,
                                 options.subdm, options.dm, options.sweep_dms,
                                 options.sigma,
                                 options.start, options.duration,
                                 options.width_bins, options.nbins, options.downsamp,
                                 options.nsub,
                                 options.scaleindep,
                                 options.spec_width, options.loc_pulse,
                                 options.integrate_ts, options.integrate_spec, options.disp_pulse,
                                 basename,
                                 options.mask, options.bandpass_corr, options.barytime,
                                 options.man_params)
def main():
    """Dedisperse a chunk of data with waterfall() and dump the resulting
    time series (and matching time axis) to CSV files.
    """
    fn = args[0]
    if fn.endswith(".fil"):
        # Filterbank file
        filetype = "filterbank"
        rawdatafile = filterbank.FilterbankFile(fn)
    elif fn.endswith(".fits"):
        # PSRFITS file
        filetype = "psrfits"
        rawdatafile = psrfits.PsrfitsFile(fn)
    else:
        raise ValueError("Cannot recognize data file type from "
                         "extension. (Only '.fits' and '.fil' "
                         "are supported.)")
    data, bins, nbins, start = waterfall(rawdatafile, options.start,
                                         options.duration, dm=options.dm,
                                         nbins=options.nbins, nsub=options.nsub,
                                         subdm=options.subdm, zerodm=options.zerodm,
                                         downsamp=options.downsamp,
                                         scaleindep=options.scaleindep,
                                         width_bins=options.width_bins, mask=options.mask,
                                         maskfn=options.maskfile,
                                         bandpass_corr=options.bandpass_corr)
    #### PuMA edition starts:
    # Get the data:
    # BUGFIX: np.int was removed in NumPy >= 1.24; the builtin int is the
    # documented, behavior-identical replacement.
    nbinlim = int(options.duration / data.dt)
    data_out = np.array(data.data[..., :nbinlim])
    # Frequency-summed (dedispersed) time series.
    Dedisp_ts = data_out.sum(axis=0)
    times = (np.arange(data.numspectra) * data.dt + start)[..., :nbinlim]
    # we write the original time and intensity vectors into csv files
    np.savetxt('times.csv', times, delimiter=',')
    np.savetxt('original.csv', Dedisp_ts, delimiter=',')
def main(fits_fn, outfn, nbits, \
         apply_weights, apply_scales, apply_offsets):
    # Convert a PSRFITS file to a SIGPROC filterbank file, requantizing
    # to `nbits` bits per sample (32 means float output, no scaling).
    # Parameters:
    #   fits_fn -- input PSRFITS path
    #   outfn   -- output filterbank path
    #   nbits   -- output bits per sample
    #   apply_weights/apply_scales/apply_offsets -- passed to read_subint
    start = time.time()
    psrfits_file = psrfits.PsrfitsFile(fits_fn)
    fil_header = translate_header(psrfits_file)
    fil_header['nbits'] = nbits
    outfil = filterbank.create_filterbank_file(outfn, fil_header, \
                                               nbits=nbits)

    # if frequency channels are in ascending order
    # band will need to be flipped
    if psrfits_file.fits['SUBINT'].header['CHAN_BW'] > 0:
        flip_band = True
        print("\nFits file frequencies in ascending order.")
        print("\tFlipping frequency band.\n")
    else:
        flip_band = False

    # check nbits for input
    if psrfits_file.nbits < 4:
        raise ValueError('Does not support %d-bit data' % \
                         psrfits_file.nbits)

    if nbits != 32:
        # Estimate a scale factor from the first subintegration so the
        # bulk of the data fits the reduced output dynamic range.
        print("\nCalculating statistics on first subintegration...")
        subint0 = psrfits_file.read_subint(0, \
                        apply_weights, apply_scales, apply_offsets)
        #new_max = np.mean(subint0) + 3*np.std(subint0)
        new_max = 3 * np.median(subint0)
        print("\t3*median =", new_max)
        if new_max > 2.0**nbits:
            scale = True
            scale_fac = new_max / (2.0**nbits)
            print("\tScaling data by", 1 / scale_fac)
            print("\tValues larger than", new_max, "(pre-scaling) "\
                  "will be set to", 2.0**nbits - 1, "\n")
        else:
            scale = False
            scale_fac = 1
            print("\tNo scaling necessary")
            print("\tValues larger than", 2.0**nbits-1, "(2^nbits) will "\
                  "be set to ", 2.0**nbits-1, "\n")
    else:
        scale_fac = 1
        print("\nNo scaling necessary for 32-bit float output file.")

    print("Writing data...")
    sys.stdout.flush()
    oldpcnt = ""
    for isub in range(int(psrfits_file.nsubints)):
        subint = psrfits_file.read_subint(isub, \
                        apply_weights, apply_scales, apply_offsets)
        if flip_band:
            # Filterbank convention wants descending frequencies.
            subint = np.fliplr(subint)
        subint /= scale_fac
        outfil.append_spectra(subint)
        # Only rewrite the progress line when the percentage changes.
        pcnt = "%d" % (isub * 100.0 / psrfits_file.nsubints)
        if pcnt != oldpcnt:
            sys.stdout.write("% 4s%% complete\r" % pcnt)
            sys.stdout.flush()
    print("Done ")
    outfil.close()
    print("Runtime:", time.time() - start)
def main(fits, database, time, DM, IMJD, SMJD, sigma, duration=0.01, pulse_id=4279, top_freq=0., directory='.',\
         FRB_name='FRB121102', downsamp=1., beam=0, group=0, plot_standard=True, plot_zoom=True, plot_wide=False):
    # Produce diagnostic waterfall plots for each candidate time in
    # `time`, in up to three window sizes (standard 0.1 s, zoom 0.03 s,
    # wide 1 s), each comparing the zero-DM-filtered and unfiltered data.
    num_elements = time.size
    # Broadcast scalar per-candidate parameters to length-num_elements
    # arrays so they can be indexed per candidate below.
    if isinstance(DM, float) or isinstance(DM, int):
        DM = np.zeros(num_elements) + DM
    if isinstance(sigma, float) or isinstance(sigma, int):
        sigma = np.zeros(num_elements) + sigma
    if isinstance(duration, float) or isinstance(duration, int):
        duration = np.zeros(num_elements) + duration
    if isinstance(pulse_id, float) or isinstance(pulse_id, int):
        pulse_id = np.zeros(num_elements) + pulse_id
    if isinstance(downsamp, float) or isinstance(downsamp, int):
        downsamp = np.zeros(num_elements) + downsamp
    # Per-source file handling: the observation label is derived from the
    # data file name with a source-specific pattern.
    if FRB_name == 'FRB121102':
        rawdata = psrfits.PsrfitsFile(fits)
        observation = os.path.basename(fits)
        observation = observation[:observation.find('_subs_')]
    if FRB_name == 'FRB180814':
        rawdata = filterbank.FilterbankFile(fits)
        observation = os.path.basename(fits)
        observation = observation[:observation.find('.0001.')]
    events = pd.read_hdf(database, 'events')
    #Fractional day
    SMJD = SMJD / 86400.
    for i, t in enumerate(time):
        if FRB_name == 'FRB130628':
            # For this source `fits` is a directory; pick the file for
            # this candidate's beam/group.
            fits = glob("%s/*b%ds%d*.fits" % (fits, beam[i], group[i]))[0]
            rawdata = psrfits.PsrfitsFile(fits)
            observation = os.path.basename(fits)
            observation = os.path.splitext(observation)[0]
        pulse_events = events[events.Pulse == pulse_id[i]]
        #zero-DM filering version
        start_time = t - 0.05
        plot_duration = 0.1
        if plot_standard:
            # NOTE(review): subdm receives the full DM array while the
            # dedispersion DM is DM[i] — confirm this is intended.
            zero_dm_data, nbinsextra, nbins, zero_dm_start = waterfall(rawdata, start_time, plot_duration, DM[i],\
                nbins=None, nsub=None, subdm = DM, zerodm=True, downsamp=1,\
                scaleindep=False, width_bins=1, mask=False, maskfn=None,\
                bandpass_corr=False, ref_freq=None)
            #non-zero-DM filtering version
            data, nbinsextra, nbins, start = waterfall(rawdata, start_time, plot_duration, DM[i],\
                nbins=None, nsub=None, subdm = DM, zerodm=False, downsamp=1,\
                scaleindep=False, width_bins=1, mask=False, maskfn=None,\
                bandpass_corr=False, ref_freq=None)
            plotter(data, start, plot_duration, t, DM[i], IMJD[i], SMJD[i], duration[i], top_freq,\
                sigma[i], directory, FRB_name, observation, zero_dm_data, zero_dm_start,
                pulse_events=pulse_events, zoom=False, idx=i, pulse_id=pulse_id[i], downsamp=False)
        #Zoomed version
        start_time = t - 0.01
        plot_duration = 0.03
        if plot_zoom:
            zero_dm_data, nbinsextra, nbins, zero_dm_start = waterfall(rawdata, start_time, plot_duration, DM[i],\
                nbins=None, nsub=None, subdm = DM, zerodm=True, downsamp=1,\
                scaleindep=False, width_bins=1, mask=False, maskfn=None,\
                bandpass_corr=False, ref_freq=None)
            data, nbinsextra, nbins, start = waterfall(rawdata, start_time, plot_duration, DM[i],\
                nbins=None, nsub=None, subdm = DM, zerodm=False, downsamp=1,\
                scaleindep=False, width_bins=1, mask=False, maskfn=None,\
                bandpass_corr=False, ref_freq=None)
            plotter(data, start, plot_duration, t, DM[i], IMJD[i], SMJD[i], duration[i], top_freq,\
                sigma[i], directory, FRB_name, observation, zero_dm_data, zero_dm_start,
                pulse_events=pulse_events, zoom=True, idx=i, pulse_id=pulse_id[i], downsamp=False)
        #Wide version
        start_time = t - 0.5
        plot_duration = 1.
        if plot_wide:
            zero_dm_data, nbinsextra, nbins, zero_dm_start = waterfall(rawdata, start_time, plot_duration, DM[i],\
                nbins=None, nsub=None, subdm = DM, zerodm=True, downsamp=1,\
                scaleindep=False, width_bins=1, mask=False, maskfn=None,\
                bandpass_corr=False, ref_freq=None)
            #non-zero-DM filtering version
            data, nbinsextra, nbins, start = waterfall(rawdata, start_time, plot_duration, DM[i],\
                nbins=None, nsub=None, subdm = DM, zerodm=False, downsamp=1,\
                scaleindep=False, width_bins=1, mask=False, maskfn=None,\
                bandpass_corr=False, ref_freq=None)
            plotter(data, start, plot_duration, t, DM[i], IMJD[i], SMJD[i], duration[i], top_freq,\
                sigma[i], directory, FRB_name, observation, zero_dm_data, zero_dm_start,
                pulse_events=pulse_events, zoom=False, idx=i, pulse_id=pulse_id[i], downsamp=False)
def __init__(self, filename, gs, dm, AO):
    # Interactive profile panel: lays out the dynamic spectrum, the
    # band-averaged pulse profile and the spectrum, and connects mouse
    # and keyboard callbacks (onpress/onrel/onKeyPress/onKeyRelease).
    # Parameters:
    #   filename -- .fil or .fits data file
    #   gs       -- matplotlib GridSpec used to lay out the subplots
    #   dm       -- dispersion measure used when building the data array
    #   AO       -- flag forwarded to fits_to_np (Arecibo-specific path)
    self.begin_times = []
    self.end_times = []
    self.lines = {}
    ax1 = plt.subplot(gs[2]) #dynamic spectrum
    ax2 = plt.subplot(gs[0], sharex=ax1) #profile
    ax3 = plt.subplot(gs[-1], sharey=ax1) #spectrum
    self.ds = ax1
    self.spec = ax3
    self.axes = ax2 # off pulse only necessary for the profile which is in subplot ax2
    self.canvas = ax2.figure.canvas
    # Load the 2-D (freq x time) array from whichever format was given.
    if filename.endswith(".fil"):
        fil = filterbank.filterbank(filename)
        arr = filterbank_to_arr.filterbank_to_np(filename, dm=dm, maskfile=None, bandpass=False)
    if filename.endswith(".fits"):
        fits = psrfits.PsrfitsFile(filename)
        arr = filterbank_to_arr.fits_to_np(filename, dm=dm, maskfile=None, bandpass=False, offpulse=None, nbins=6, AO=AO)
    # Band-averaged time series drawn in the top panel.
    profile = np.mean(arr, axis=0)
    self.ax2plot, = ax2.plot(profile, 'k-', alpha=1.0, zorder=1)
    ax2.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
    ax2.tick_params(axis='x', labelbottom='off', top='off')
    # Leave 15% headroom below and 10% above the profile extremes.
    y_range = profile.max() - profile.min()
    ax2.set_ylim(profile.min() - y_range * 0.15, profile.max() * 1.1)
    ax2.tick_params(labelbottom=False, labeltop=False, labelleft=False, labelright=False, bottom=True, top=True, left=True, right=True)
    # NOTE(review): relies on a module-level `fig` figure object.
    fig.add_subplot(ax2)
    # Event hooks are handled by methods defined on this class.
    self.cid = self.canvas.mpl_connect('button_press_event', self.onpress)
    self.crel = self.canvas.mpl_connect('button_release_event', self.onrel)
    self.keyPress = self.canvas.mpl_connect('key_press_event', self.onKeyPress)
    self.keyRelease = self.canvas.mpl_connect('key_release_event', self.onKeyRelease)
    # Cache the plotted y-data as the working profile.
    self.data = self.ax2plot.get_data()
    self.profile = self.data[1]
    self.x = False
def __init__(self, filename, gs, prof, ds, spec, ithres, ax2, dm, AO):
    # Interactive dynamic-spectrum / spectrum panel: draws the 2-D data
    # array and the time-averaged spectrum, and connects mouse/keyboard
    # callbacks used for channel masking.
    # Parameters:
    #   filename -- .fil or .fits data file
    #   gs       -- GridSpec (axes themselves are passed in below)
    #   prof     -- profile Line2D from the companion profile panel
    #   ds, spec -- axes for the dynamic spectrum and the spectrum
    #   ithres   -- initial threshold value stored on the instance
    #   ax2      -- the profile axes
    #   dm, AO   -- dispersion measure and flag used when loading data
    self.begin_chan = []
    self.mask_chan = []
    self.axes = ds # off pulse only necessary for the profile which is in subplot ax2
    self.canvas = ds.figure.canvas
    self.ithres = ithres
    # Load the 2-D (freq x time) array from whichever format was given.
    if filename.endswith(".fil"):
        fil = filterbank.filterbank(filename)
        arr = filterbank_to_arr.filterbank_to_np(filename, dm=dm, maskfile=None, bandpass=False)
        self.total_N = fil.number_of_samples
        self.freqs = fil.frequencies
    if filename.endswith(".fits"):
        fits = psrfits.PsrfitsFile(filename)
        arr = filterbank_to_arr.fits_to_np(filename, dm=dm, maskfile=None, bandpass=False, offpulse=None, nbins=6, AO=AO)
        self.total_N = arr.shape[1]
        self.freqs = fits.frequencies
    # Time-averaged spectrum (one value per channel).
    spectrum = np.mean(arr, axis=1)
    self.nchans = len(spectrum)
    self.freqbins = np.arange(0, arr.shape[0], 1)
    # Colour scale tops out 1% below the array maximum.
    threshold = np.amax(arr) - (np.abs(np.amax(arr) - np.amin(arr)) * 0.99)
    self.cmap = mpl.cm.binary
    self.ax1 = ds
    self.ax3 = spec
    self.ax2 = ax2
    self.ax2plot = prof
    self.ax1plot = self.ax1.imshow(arr, aspect='auto', vmin=np.amin(arr), vmax=threshold, cmap=self.cmap, origin='lower', interpolation='nearest', picker=True)
    # Over-threshold values render pink; bad (masked) values render red.
    self.cmap.set_over(color='pink')
    self.cmap.set_bad(color='red')
    self.ax1.set_xlim(0, self.total_N)
    self.ax3plot, = self.ax3.plot(spectrum, self.freqbins, 'k-', zorder=2)
    self.ax3.tick_params(axis='x', which='both', top='off', bottom='off', labelbottom='off')
    self.ax3.tick_params(axis='y', labelleft='off')
    self.ax3.set_ylim(self.freqbins[0], self.freqbins[-1])
    x_range = spectrum.max() - spectrum.min()
    self.ax3.set_xlim(-x_range / 4., x_range * 6. / 5.)
    # NOTE(review): relies on a module-level `fig` figure object.
    fig.add_subplot(self.ax1)
    fig.add_subplot(self.ax3)
    # Event hooks are handled by methods defined on this class.
    self.cid = self.canvas.mpl_connect('button_press_event', self.onpress)
    self.crel = self.canvas.mpl_connect('button_release_event', self.onrel)
    self.keyPress = self.canvas.mpl_connect('key_press_event', self.onKeyPress)
    self.keyRelease = self.canvas.mpl_connect('key_release_event', self.onKeyRelease)
    self.x = False
    self.r = False
# --- Script setup: read metadata from the input data file -------------
# `options` / `args` come from an option parser defined elsewhere in
# this file; the last positional argument is the data file to process.
options.infile = args[-1]
dm = options.dm
filename = options.infile
if filename.endswith(".fil"):
    fil = filterbank.filterbank(filename)
    total_N = fil.number_of_samples
    tot_freq = fil.header['nchans']
    picklename = re.search('(.*).fil', filename).group(1)
    t_samp = fil.header['tsamp']
    tstart = fil.header['tstart']
    freqs = np.flip(fil.frequencies)
if filename.endswith(".fits"):
    fits = psrfits.PsrfitsFile(filename)
    total_N = fits.specinfo.N
    t_samp = fits.specinfo.dt
    freqs = np.flip(fits.frequencies)
    if options.AO == True:
        # AO mode: only analyse a fixed 0.2 s block of samples.
        total_N = int(0.2 / t_samp)
    picklename = re.search('(.*).fits', filename).group(1)
    tot_freq = fits.specinfo.num_channels
    #start time of data block
    imjd, fmjd = psrfits.DATEOBS_to_MJD(fits.specinfo.date_obs)
    tstart = imjd + fmjd
if options.AO == True:
    # peak_bin sits 20% of the way into the block (presumably the
    # expected pulse peak — confirm); the plotted block starts 0.1 s
    # earlier, converted here to seconds and fractional days (MJD).
    peak_bin = (total_N / 10.) * 2
    begin_bin = int(peak_bin - (0.1 / t_samp))
    begin_sec = begin_bin * t_samp
    begin_MJD = begin_sec / (24. * 3600.)
def GBNCC_wrapper(txtfile, maskfile, fitsfilenm, path_sp_files):
    """ The pipeline should pass job.fits_filenm as argument.

    Read RRATtrap candidate groups (rank >= 3) from `txtfile`, run
    make_spd.py on each candidate (at most 100), and write a .spd file
    plus a diagnostic plot per candidate.
    """
    files = spio.get_textfile(txtfile)
    min_rank = 3
    # Ranks 3..6, processed highest-rank first.
    groups = [i for i in range(7) if (i >= min_rank)][::-1]
    numcands = 0 # counter for max number of candidates
    loop_must_break = False # dont break the loop unless num of cands >100.
    values = []
    lis = []
    ranks = []
    for group in groups:
        rank = group + 1
        # Skip ranks with no groups (sentinel line in the text file).
        if files[group] != "Number of rank %i groups: 0 " % rank:
            add_values = spio.split_parameters(rank, txtfile)
            values.extend(add_values)
            lis = np.append(lis, np.where(files == '\tRank: %i.000000' % rank)[0])
            add_ranks = np.ones(len(add_values)) * rank
            ranks = np.append(ranks, add_ranks)
        # Cap the accumulated candidate list at 100 entries.
        if len(values) > 100:
            values = values[0:100]
            lis = lis[0:100]
    #Sort candidates based on DM
    # NOTE(review): itemgetter(0, 0) keys on the first element twice —
    # presumably itemgetter(0) was intended; behavior is the same.
    zip_list = zip(values, lis, ranks)
    zip_list = sorted(zip_list, key=itemgetter(0, 0))
    values = [x[0] for x in zip_list]
    lis = [x[1] for x in zip_list]
    ranks = [x[2] for x in zip_list]
    basename = fitsfilenm[:-5]
    #generate subbanded file at a DM of 0 to extract observation parameters
    cmd = "psrfits_subband -dm 0.0 -nsub 128 -o %s_subband_0.0 %s" % (basename, fitsfilenm)
    print "executing %s" % cmd
    subprocess.call(cmd, shell=True)
    subfilenm = basename + "_subband_0.0_0001.fits"
    subfile = psrfits.PsrfitsFile(subfilenm)
    for ii in range(len(values)):
        dm_list, time_list, dm_arr, sigma_arr, width_arr = spio.read_RRATrap_info(txtfile, lis[ii], int(ranks[ii]))
        wrapper_cand = spcand.params()
        wrapper_cand.read_from_file(values[ii], subfile.tsamp, subfile.specinfo.N, \
                                    get_obs_info(fitsfilenm)['hifreq'], get_obs_info(fitsfilenm)['lofreq'], \
                                    subfile, loc_pulse=0.5, dedisp = True,\
                                    scaleindep = None, zerodm = None, mask = None,\
                                    barytime=True, nsub = None, bandpass_corr = False)
        temp_filename = basename + "_rank_%i" % int(ranks[ii])
        # Reuse the previously subbanded file when possible; init_flag=1
        # forces a fresh one on the first candidate.
        if ii == 0: #check if index is 0
            correct_rawdatafile, prevsubfile, prevDM = pick_rawdatafile(wrapper_cand.subdm, fitsfilenm, \
                get_obs_info(fitsfilenm)['hifreq'], get_obs_info(fitsfilenm)['lofreq'], \
                wrapper_cand.pulse_width, prevDM = 0, prevsubfile = '', \
                init_flag = 1)
        else:
            correct_rawdatafile, prevsubfile, prevDM = pick_rawdatafile(wrapper_cand.subdm, fitsfilenm, \
                get_obs_info(fitsfilenm)['hifreq'], get_obs_info(fitsfilenm)['lofreq'], \
                wrapper_cand.pulse_width, prevDM = prevDM, \
                prevsubfile=prevsubfile, init_flag = 0)
        # Run make_spd.py (located next to this script) on the candidate.
        path = os.path.dirname(os.path.abspath(__file__))
        cmd = "python " + path + "/make_spd.py --use_manual_params --subdm %f --nsub %d" \
              " --dm %f -T %f -t %f --width-bins %d --downsamp %d --show-spec"\
              " --noplot --notopo -o %s %s " %(wrapper_cand.subdm, \
              wrapper_cand.nsub, wrapper_cand.dm, \
              wrapper_cand.topo_start_time, wrapper_cand.duration, \
              wrapper_cand.width_bins, wrapper_cand.downsamp,\
              temp_filename, correct_rawdatafile)
        print "executing %s" % cmd
        subprocess.call(cmd, shell=True)
        # Add additional information to the header information array
        text_array = np.array([correct_rawdatafile, subfile.specinfo.telescope, \
                               subfile.specinfo.ra_str, subfile.specinfo.dec_str, \
                               subfile.specinfo.start_MJD[0], int(ranks[ii]), \
                               wrapper_cand.nsub, wrapper_cand.nbins, \
                               wrapper_cand.subdm, wrapper_cand.sigma, wrapper_cand.sample_number, \
                               wrapper_cand.duration, wrapper_cand.width_bins, wrapper_cand.pulse_width, \
                               subfile.tsamp, subfile.specinfo.T, wrapper_cand.topo_start_time])
        temp_filename += "_DM%.1f_%.1fs" % (wrapper_cand.subdm, wrapper_cand.topo_start_time)
        # Reload the .spd make_spd.py just wrote, append derived fields,
        # and rewrite it with the extended header array.
        spd = read_spd.spd(temp_filename + '.spd')
        spd.man_params = None
        text_array = np.append(text_array, spd.waterfall_start_time)
        text_array = np.append(text_array, spd.waterfall_tsamp)
        text_array = np.append(text_array, spd.waterfall_prededisp_nbins)
        text_array = np.append(text_array, spd.min_freq)
        text_array = np.append(text_array, spd.max_freq)
        text_array = np.append(text_array, spd.sweep_duration)
        text_array = np.append(text_array, spd.sweep_start_time)
        text_array = np.append(text_array, spd.bary_pulse_peak_time)
        text_array = np.append(text_array, spd.man_params)
        # NOTE(review): map() returns an iterator on Python 3; these
        # calls rely on Python 2 list semantics — confirm before porting.
        with open(temp_filename + ".spd", 'wb') as f:
            np.savez_compressed(f, \
                Data_dedisp_nozerodm = spd.data_nozerodm_dedisp,\
                Data_dedisp_zerodm = spd.data_zerodm_dedisp,\
                Data_nozerodm = spd.data_nozerodm,\
                delays_nozerodm = spd.dmsweep_delays, \
                freqs_nozerodm = spd.dmsweep_freqs,\
                Data_zerodm = spd.data_zerodm, \
                dm_arr= map(np.float16, dm_arr),\
                sigma_arr = map(np.float16, sigma_arr), \
                width_arr =map(np.uint8, width_arr),\
                dm_list= map(np.float16, dm_list), \
                time_list = map(np.float16, time_list), \
                text_array = text_array)
        plot_spd.plot(temp_filename+".spd", glob.glob(path_sp_files+'/*.singlepulse'), maskfile, \
                      outfile=basename, just_waterfall=False, \
                      integrate_spec=True, integrate_ts=True, \
                      disp_pulse=False, bandpass_corr = True, tar = None)
        numcands += 1
        print 'Finished sp_candidate : %i' % numcands
        if numcands >= 100: # Max number of candidates to plot 100.
            loop_must_break = True
            break
def waterfall(filename, start, duration, dm=0, mask=False, maskfn=None,
              favg=1, tavg=1, scaleindep=False, extra_begin_chan=None,
              extra_end_chan=None):
    """
    Extract, optionally mask, dedisperse, downsample and scale a chunk of
    data from a filterbank (.fil) or PSRFITS (.fits) file.

    Parameters:
        filename -- .fil or .fits data file
        start    -- chunk start, seconds from the beginning of the file
        duration -- chunk length in seconds
        dm       -- dispersion measure to dedisperse at (0 = none)
        mask, maskfn -- whether to apply a mask, and the mask file path
        favg, tavg   -- averaging factors
                        NOTE(review): favg is currently unused here.
        scaleindep   -- passed to data.scaled()
        extra_begin_chan, extra_end_chan -- extra channel ranges for the
                        masking helper

    Returns (data, data_noscale, nbinsextra, nbins, start, tsamp, fres,
    scan_start).
    """
    if filename.endswith('.fil'):
        rawdatafile = filterbank.filterbank(filename)
        tsamp = rawdatafile.header['tsamp']
        nchans = rawdatafile.header['nchans']
        freqs = rawdatafile.frequencies
        total_N = rawdatafile.number_of_samples
        df = np.abs(freqs[-1] - freqs[0])
        fres = df / int(nchans + 1)
        scan_start = rawdatafile.tstart
    if filename.endswith('.fits'):
        rawdatafile = psrfits.PsrfitsFile(filename)
        tsamp = rawdatafile.tsamp
        nchans = rawdatafile.nchan
        freqs = rawdatafile.frequencies
        total_N = rawdatafile.specinfo.N
        df = np.abs(freqs[-1] - freqs[0])
        fres = df / int(nchans + 1)
        # Scan start MJD assembled from integer-day, second and
        # sub-second offset header fields.
        scan_start = rawdatafile.header['STT_IMJD'] + (
            rawdatafile.header['STT_SMJD'] +
            rawdatafile.header['STT_OFFS']) / (24. * 3600.)
    start_bin = np.round(start / tsamp).astype('int') #convert begin time to bins
    # Dispersion delay across the band per unit DM (seconds).
    dmdelay_coeff = 4.15e3 * np.abs(1. / freqs[0]**2 - 1. / freqs[-1]**2)
    nbins = np.round(duration / tsamp).astype('int') #convert duration to bins
    if dm != 0:
        # Read extra bins so the dedispersed chunk still covers the
        # requested duration after channel delays are removed.
        nbinsextra = np.round((duration + dmdelay_coeff * dm) / tsamp).astype('int')
    else:
        nbinsextra = nbins
    # If at end of observation
    if (start_bin + nbinsextra) > total_N - 1:
        nbinsextra = total_N - 1 - start_bin
    data = rawdatafile.get_spectra(start_bin, nbinsextra)
    #masking
    # NOTE(review): `maskfile` must be a module-level helper defined
    # elsewhere in this file (it is not a parameter of this function).
    if mask and maskfn:
        data, masked_chans = maskfile(maskfn, data, start_bin, nbinsextra,
                                      extra_begin_chan=extra_begin_chan,
                                      extra_end_chan=extra_end_chan)
    else:
        masked_chans = np.zeros(nchans, dtype=bool)
    # Wrap in a masked array so masked channels are ignored downstream.
    data_masked = np.ma.masked_array(data.data)
    data_masked[masked_chans] = np.ma.masked
    data.data = data_masked
    if dm != 0:
        data.dedisperse(dm, padval='mean')
    data.downsample(tavg)
    # scale data
    data_noscale = data
    data = data.scaled(scaleindep)
    return data, data_noscale, nbinsextra, nbins, start, tsamp, fres, scan_start