Example #1
def nextpage(vertical=False):
    """
    Start a new page in the currently opened device.
    Default is to orient paper horizontally (landscape). If vertical
    is True then the paper will be oriented vertically.
    """
    # New page
    if ppgplot.pgqid() != 0:
        ppgplot.pgpage()
        ppgplot.pgswin(0, 1, 0, 1)
        if vertical:
            ppgplot.pgpap(7.9, 11.0/8.5)
        else:
            ppgplot.pgpap(10.25, 8.5/11.0)
        ppgplot.pgiden()
    else:
        sys.stderr.write("Cannot start new page. No pgplot device open.\n")
        raise RuntimeError("No open pgplot device")
Example #2
def beginplot(fn, vertical=False):
    """
    Set up a colour ps plot with filename fn for letter-size paper.

    Default is to orient paper horizontally (landscape). If vertical
    is True then the paper will be oriented vertically.
    """
    if ppgplot.pgqid() != 0:
        # ppgplot already has a device open
        #
        # QUESTION: Should we raise an error here?
        #
        pass
    else:
        # Setup
        ppgplot.pgbeg("%s/CPS" % fn, 1, 1)
        if vertical:
            ppgplot.pgpap(7.9, 11.0/8.5)
        else:
            ppgplot.pgpap(10.25, 8.5/11.0)
        ppgplot.pgiden()
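
Purely as a hedged usage sketch (assuming ppgplot is importable, that the two helpers above are defined in the same module, and that the device is closed with ppgplot.pgend() as in the later examples; the output filename is illustrative), beginplot and nextpage might be combined to produce a two-page landscape PostScript file:

import ppgplot

beginplot("demo_plot.ps")       # opens demo_plot.ps/CPS in landscape orientation
ppgplot.pgsvp(0.1, 0.9, 0.1, 0.9)
ppgplot.pgswin(0, 1, 0, 1)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)   # first page: just draw a frame
nextpage()                      # second landscape page (nextpage resets the window)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
ppgplot.pgend()                 # close the PostScript device
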
def main():
    parser = OptionParser(usage)
    parser.add_option("-x", "--xwin", action="store_true", dest="xwin",
                      default=False, help="Don't make a postscript plot, just use an X-window")
    parser.add_option("-p", "--noplot", action="store_false", dest="makeplot",
                      default=True, help="Look for pulses but do not generate a plot")
    parser.add_option("-m", "--maxwidth", type="float", dest="maxwidth", default=0.0,
                      help="Set the max downsampling in sec (see below for default)")
    parser.add_option("-t", "--threshold", type="float", dest="threshold", default=5.0,
                      help="Set a different threshold SNR (default=5.0)")
    parser.add_option("-s", "--start", type="float", dest="T_start", default=0.0,
                      help="Only plot events occuring after this time (s)")
    parser.add_option("-e", "--end", type="float", dest="T_end", default=1e9,
                      help="Only plot events occuring before this time (s)")
    parser.add_option("-g", "--glob", type="string", dest="globexp", default=None,
                      help="Process the files from this glob expression")
    parser.add_option("-f", "--fast", action="store_true", dest="fast",
                      default=False, help="Use a faster method of de-trending (2x speedup)")
    parser.add_option("-b", "--nobadblocks", action="store_false", dest="badblocks",
                      default=True, help="Don't check for bad-blocks (may save strong pulses)")
    parser.add_option("-d", "--detrendlen", type="int", dest="detrendfact", default=1,
                      help="Chunksize for detrending (pow-of-2 in 1000s)")
    (opts, args) = parser.parse_args()
    if len(args)==0:
        if opts.globexp==None:
            print full_usage
            sys.exit(0)
        else:
            args = []
            for globexp in opts.globexp.split():
                args += glob.glob(globexp)
    useffts = True
    dosearch = True
    if opts.xwin:
        pgplot_device = "/XWIN"
    else:
        pgplot_device = ""

    fftlen = 8192     # Should be a power-of-two for best speed
    chunklen = 8000   # Must be at least max_downfact less than fftlen
    assert(opts.detrendfact in [1,2,4,8,16,32])
    detrendlen = opts.detrendfact*1000
    if (detrendlen > chunklen):
        chunklen = detrendlen
        fftlen = int(next2_to_n(chunklen))
    blocks_per_chunk = chunklen / detrendlen
    overlap = (fftlen - chunklen)/2
    worklen = chunklen + 2*overlap  # currently it is fftlen...

    max_downfact = 30
    default_downfacts = [2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150, 220, 300]

    if args[0].endswith(".singlepulse"):
        filenmbase = args[0][:args[0].rfind(".singlepulse")]
        dosearch = False
    elif args[0].endswith(".dat"):
        filenmbase = args[0][:args[0].rfind(".dat")]
    else:
        filenmbase = args[0]

    # Don't do a search, just read results and plot
    if not dosearch:
        info, DMs, candlist, num_v_DMstr = \
              read_singlepulse_files(args, opts.threshold, opts.T_start, opts.T_end)
        orig_N, orig_dt = int(info.N), info.dt
        obstime = orig_N * orig_dt
    else:
        DMs = []
        candlist = []
        num_v_DMstr = {}

        # Loop over the input files
        for filenm in args:
            if filenm.endswith(".dat"):
                filenmbase = filenm[:filenm.rfind(".dat")]
            else:
                filenmbase = filenm
            info = infodata.infodata(filenmbase+".inf")
            DMstr = "%.2f"%info.DM
            DMs.append(info.DM)
            N, dt = int(info.N), info.dt
            obstime = N * dt
            # Choose the maximum width to search based on time instead
            # of bins.  This helps prevent increased S/N when the downsampling
            # changes as the DM gets larger.
            if opts.maxwidth > 0.0:
                downfacts = [x for x in default_downfacts if x*dt <= opts.maxwidth]
            else:
                downfacts = [x for x in default_downfacts if x <= max_downfact]
            if len(downfacts) == 0:
                downfacts = [default_downfacts[0]]
            if (filenm == args[0]):
                orig_N = N
                orig_dt = dt

            if info.breaks:
                offregions = zip([x[1] for x in info.onoff[:-1]],
                                 [x[0] for x in info.onoff[1:]])

                # If last break spans to end of file, don't read it in (it's just padding)
                if offregions[-1][1] == N - 1:
                    N = offregions[-1][0] + 1

            outfile = open(filenmbase+'.singlepulse', mode='w')

            # Compute the file length in detrendlens
            roundN = N/detrendlen * detrendlen
            numchunks = roundN / chunklen
            # Read in the file
            print 'Reading "%s"...'%filenm
            timeseries = Num.fromfile(filenm, dtype=Num.float32, count=roundN)
            # Split the timeseries into chunks for detrending
            numblocks = roundN/detrendlen
            timeseries.shape = (numblocks, detrendlen)
            stds = Num.zeros(numblocks, dtype=Num.float64)
            # de-trend the data one chunk at a time
            print '  De-trending the data and computing statistics...'
            for ii, chunk in enumerate(timeseries):
                if opts.fast:  # use median removal instead of detrending (2x speedup)
                    tmpchunk = chunk.copy()
                    tmpchunk.sort()
                    med = tmpchunk[detrendlen/2]
                    chunk -= med
                    tmpchunk -= med
                else:
                    # The detrend calls are the most expensive in the program
                    timeseries[ii] = scipy.signal.detrend(chunk, type='linear')
                    tmpchunk = timeseries[ii].copy()
                    tmpchunk.sort()
                # The following gets rid of (hopefully) most of the 
                # outlying values (i.e. power dropouts and single pulses)
                # If you throw out 5% (2.5% at bottom and 2.5% at top)
                # of random gaussian deviates, the measured stdev is ~0.871
                # of the true stdev.  Thus the 1.0/0.871=1.148 correction below.
                # The following is roughly .std() since we already removed the median
                stds[ii] = Num.sqrt((tmpchunk[detrendlen/40:-detrendlen/40]**2.0).sum() /
                                    (0.95*detrendlen))
            stds *= 1.148
            # sort the standard deviations and separate those with
            # very low or very high values
            sort_stds = stds.copy()
            sort_stds.sort()
            # identify the differences with the largest values (this
            # will split off the chunks with very low and very high stds)
            locut = (sort_stds[1:numblocks/2+1] -
                     sort_stds[:numblocks/2]).argmax() + 1
            hicut = (sort_stds[numblocks/2+1:] -
                     sort_stds[numblocks/2:-1]).argmax() + numblocks/2 - 2
            std_stds = scipy.std(sort_stds[locut:hicut])
            median_stds = sort_stds[(locut+hicut)/2]
            print "    pseudo-median block standard deviation = %.2f" % (median_stds)
            if (opts.badblocks):
                lo_std = median_stds - 4.0 * std_stds
                hi_std = median_stds + 4.0 * std_stds
                # Determine a list of "bad" chunks.  We will not search these.
                bad_blocks = Num.nonzero((stds < lo_std) | (stds > hi_std))[0]
                print "    identified %d bad blocks out of %d (i.e. %.2f%%)" % \
                      (len(bad_blocks), len(stds),
                       100.0*float(len(bad_blocks))/float(len(stds)))
                stds[bad_blocks] = median_stds
            else:
                bad_blocks = []
            print "  Now searching..."

            # Now normalize all of the data and reshape it to 1-D
            timeseries /= stds[:,Num.newaxis]
            timeseries.shape = (roundN,)
            # And set the data in the bad blocks to zeros
            # Even though we don't search these parts, it is important
            # because of the overlaps for the convolutions
            for bad_block in bad_blocks:
                loind, hiind = bad_block*detrendlen, (bad_block+1)*detrendlen
                timeseries[loind:hiind] = 0.0
            # Convert to a set for faster lookups below
            bad_blocks = set(bad_blocks)

            # Step through the data
            dm_candlist = []
            for chunknum in xrange(numchunks):
                loind = chunknum*chunklen-overlap
                hiind = (chunknum+1)*chunklen+overlap
                # Take care of beginning and end of file overlap issues
                if (chunknum==0): # Beginning of file
                    chunk = Num.zeros(worklen, dtype=Num.float32)
                    chunk[overlap:] = timeseries[loind+overlap:hiind]
                elif (chunknum==numchunks-1): # end of the timeseries
                    chunk = Num.zeros(worklen, dtype=Num.float32)
                    chunk[:-overlap] = timeseries[loind:hiind-overlap]
                else:
                    chunk = timeseries[loind:hiind]

                # Make a set with the current block numbers
                lowblock = blocks_per_chunk * chunknum
                currentblocks = set(Num.arange(blocks_per_chunk) + lowblock)
                localgoodblocks = Num.asarray(list(currentblocks -
                                                   bad_blocks)) - lowblock
                # Search this chunk if it is not all bad
                if len(localgoodblocks):
                    # This is the good part of the data (end effects removed)
                    goodchunk = chunk[overlap:-overlap]

                    # need to pass blocks/chunklen, localgoodblocks
                    # dm_candlist, dt, opts.threshold to cython routine

                    # Search non-downsampled data first
                    # NOTE:  these nonzero() calls are some of the most
                    #        expensive calls in the program.  Best bet would 
                    #        probably be to simply iterate over the goodchunk
                    #        in C and append to the candlist there.
                    hibins = Num.flatnonzero(goodchunk>opts.threshold)
                    hivals = goodchunk[hibins]
                    hibins += chunknum * chunklen
                    hiblocks = hibins/detrendlen
                    # Add the candidates (which are sorted by bin)
                    for bin, val, block in zip(hibins, hivals, hiblocks):
                        if block not in bad_blocks:
                            time = bin * dt
                            dm_candlist.append(candidate(info.DM, val, time, bin, 1))

                    # Now do the downsampling...
                    for downfact in downfacts:
                        if useffts: 
                            # Note:  FFT convolution is faster for _all_ downfacts, even 2
                            chunk2 = Num.concatenate((Num.zeros(1000), chunk, Num.zeros(1000)))
                            goodchunk = Num.convolve(chunk2, Num.ones(downfact), mode='same') / Num.sqrt(downfact)
                            goodchunk = goodchunk[overlap:-overlap]
                            # Or something similar -- otherwise I don't know why it finds more candidates! Check this!
                        else:
                            # The normalization of this kernel keeps the post-smoothing RMS = 1
                            kernel = Num.ones(downfact, dtype=Num.float32) / \
                                     Num.sqrt(downfact)
                            smoothed_chunk = scipy.signal.convolve(chunk, kernel, 1)
                            goodchunk = smoothed_chunk[overlap:-overlap]
                        #hibins = Num.nonzero(goodchunk>opts.threshold)[0]
                        hibins = Num.flatnonzero(goodchunk>opts.threshold)
                        hivals = goodchunk[hibins]
                        hibins += chunknum * chunklen
                        hiblocks = hibins/detrendlen
                        hibins = hibins.tolist()
                        hivals = hivals.tolist()
                        # Now walk through the new candidates and remove those
                        # that are not the highest but are within downfact/2
                        # bins of a higher signal pulse
                        hibins, hivals = prune_related1(hibins, hivals, downfact)
                        # Insert the new candidates into the candlist, but
                        # keep it sorted...
                        for bin, val, block in zip(hibins, hivals, hiblocks):
                            if block not in bad_blocks:
                                time = bin * dt
                                bisect.insort(dm_candlist,
                                              candidate(info.DM, val, time, bin, downfact))

            # Now walk through the dm_candlist and remove the ones that
            # are within the downsample proximity of a higher
            # signal-to-noise pulse
            dm_candlist = prune_related2(dm_candlist, downfacts)
            print "  Found %d pulse candidates"%len(dm_candlist)
            
            # Get rid of those near padding regions
            if info.breaks: prune_border_cases(dm_candlist, offregions)

            # Write the pulses to an ASCII output file
            if len(dm_candlist):
                #dm_candlist.sort(cmp_sigma)
                outfile.write("# DM      Sigma      Time (s)     Sample    Downfact\n")
                for cand in dm_candlist:
                    outfile.write(str(cand))
            outfile.close()

            # Add these candidates to the overall candidate list
            for cand in dm_candlist:
                candlist.append(cand)
            num_v_DMstr[DMstr] = len(dm_candlist)

    if (opts.makeplot):

        # Step through the candidates to make a SNR list
        DMs.sort()
        snrs = []
        for cand in candlist:
            if not Num.isinf(cand.sigma):
                snrs.append(cand.sigma)
        if snrs:
            maxsnr = max(int(max(snrs)), int(opts.threshold)) + 3
        else:
            maxsnr = int(opts.threshold) + 3

        # Generate the SNR histogram
        snrs = Num.asarray(snrs)
        (num_v_snr, lo_snr, d_snr, num_out_of_range) = \
                    scipy.stats.histogram(snrs,
                                          int(maxsnr-opts.threshold+1),
                                          [opts.threshold, maxsnr])
        snrs = Num.arange(maxsnr-opts.threshold+1, dtype=Num.float64) * d_snr \
               + lo_snr + 0.5*d_snr
        num_v_snr = num_v_snr.astype(Num.float32)
        num_v_snr[num_v_snr==0.0] = 0.001

        # Generate the DM histogram
        num_v_DM = Num.zeros(len(DMs))
        for ii, DM in enumerate(DMs):
            num_v_DM[ii] = num_v_DMstr["%.2f"%DM]
        DMs = Num.asarray(DMs)

        # open the plot device
        short_filenmbase = filenmbase[:filenmbase.find("_DM")]
        if opts.T_end > obstime:
            opts.T_end = obstime
        if pgplot_device:
            ppgplot.pgopen(pgplot_device)
        else:
            if (opts.T_start > 0.0 or opts.T_end < obstime):
                ppgplot.pgopen(short_filenmbase+'_%.0f-%.0fs_singlepulse.ps/VPS'%
                               (opts.T_start, opts.T_end))
            else:
                ppgplot.pgopen(short_filenmbase+'_singlepulse.ps/VPS')
        ppgplot.pgpap(7.5, 1.0)  # Width in inches, aspect

        # plot the SNR histogram
        ppgplot.pgsvp(0.06, 0.31, 0.6, 0.87)
        ppgplot.pgswin(opts.threshold, maxsnr,
                       Num.log10(0.5), Num.log10(2*max(num_v_snr)))
        ppgplot.pgsch(0.8)
        ppgplot.pgbox("BCNST", 0, 0, "BCLNST", 0, 0)
        ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Signal-to-Noise")
        ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Number of Pulses")
        ppgplot.pgsch(1.0)
        ppgplot.pgbin(snrs, Num.log10(num_v_snr), 1)

        # plot the DM histogram
        ppgplot.pgsvp(0.39, 0.64, 0.6, 0.87)
        # Add [1] to num_v_DM in YMAX below so that YMIN != YMAX when max(num_v_DM)==0
        ppgplot.pgswin(min(DMs)-0.5, max(DMs)+0.5, 0.0, 1.1*max(num_v_DM+[1]))
        ppgplot.pgsch(0.8)
        ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
        ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\u-3\d)")
        ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Number of Pulses")
        ppgplot.pgsch(1.0)
        ppgplot.pgbin(DMs, num_v_DM, 1)

        # plot the SNR vs DM plot 
        ppgplot.pgsvp(0.72, 0.97, 0.6, 0.87)
        ppgplot.pgswin(min(DMs)-0.5, max(DMs)+0.5, opts.threshold, maxsnr)
        ppgplot.pgsch(0.8)
        ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
        ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\u-3\d)")
        ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-Noise")
        ppgplot.pgsch(1.0)
        cand_ts = Num.zeros(len(candlist), dtype=Num.float32)
        cand_SNRs = Num.zeros(len(candlist), dtype=Num.float32)
        cand_DMs = Num.zeros(len(candlist), dtype=Num.float32)
        for ii, cand in enumerate(candlist):
            cand_ts[ii], cand_SNRs[ii], cand_DMs[ii] = \
                         cand.time, cand.sigma, cand.DM
        ppgplot.pgpt(cand_DMs, cand_SNRs, 20)

        # plot the DM vs Time plot
        ppgplot.pgsvp(0.06, 0.97, 0.08, 0.52)
        ppgplot.pgswin(opts.T_start, opts.T_end, min(DMs)-0.5, max(DMs)+0.5)
        ppgplot.pgsch(0.8)
        ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
        ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)")
        ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\u-3\d)")
        # Circles are symbols 20-26 in increasing order
        snr_range = 12.0
        cand_symbols = (cand_SNRs-opts.threshold)/snr_range * 6.0 + 20.5
        cand_symbols = cand_symbols.astype(Num.int32)
        cand_symbols[cand_symbols>26] = 26
        for ii in [26, 25, 24, 23, 22, 21, 20]:
            inds = Num.nonzero(cand_symbols==ii)[0]
            ppgplot.pgpt(cand_ts[inds], cand_DMs[inds], ii)

        # Now fill the information area
        ppgplot.pgsvp(0.05, 0.95, 0.87, 0.97)
        ppgplot.pgsch(1.0)
        ppgplot.pgmtxt('T', 0.5, 0.0, 0.0,
                       "Single pulse results for '%s'"%short_filenmbase)
        ppgplot.pgsch(0.8)
        # first row
        ppgplot.pgmtxt('T', -1.1, 0.02, 0.0, 'Source: %s'%\
                       info.object)
        ppgplot.pgmtxt('T', -1.1, 0.33, 0.0, 'RA (J2000):')
        ppgplot.pgmtxt('T', -1.1, 0.5, 0.0, info.RA)
        ppgplot.pgmtxt('T', -1.1, 0.73, 0.0, 'N samples: %.0f'%orig_N)
        # second row
        ppgplot.pgmtxt('T', -2.4, 0.02, 0.0, 'Telescope: %s'%\
                       info.telescope)
        ppgplot.pgmtxt('T', -2.4, 0.33, 0.0, 'DEC (J2000):')
        ppgplot.pgmtxt('T', -2.4, 0.5, 0.0, info.DEC)
        ppgplot.pgmtxt('T', -2.4, 0.73, 0.0, 'Sampling time: %.2f \gms'%\
                       (orig_dt*1e6))
        # third row
        if info.instrument.find("pigot") >= 0:
            instrument = "Spigot"
        else:
            instrument = info.instrument
        ppgplot.pgmtxt('T', -3.7, 0.02, 0.0, 'Instrument: %s'%instrument)
        if (info.bary):
            ppgplot.pgmtxt('T', -3.7, 0.33, 0.0, 'MJD\dbary\u: %.12f'%info.epoch)
        else:
            ppgplot.pgmtxt('T', -3.7, 0.33, 0.0, 'MJD\dtopo\u: %.12f'%info.epoch)
        ppgplot.pgmtxt('T', -3.7, 0.73, 0.0, 'Freq\dctr\u: %.1f MHz'%\
                       ((info.numchan/2-0.5)*info.chan_width+info.lofreq))
        ppgplot.pgiden()
        ppgplot.pgend()
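
Two normalization constants in the search above are quoted in comments rather than derived: the 1.148 correction applied to the trimmed block standard deviations, and the 1/sqrt(downfact) scaling of the boxcar kernel that keeps the post-smoothing RMS near 1 for unit-variance noise. The following standalone numpy check (separate from the pipeline above, with stochastic output that will only be close to the quoted numbers) verifies both claims:

import numpy as np

rng = np.random.RandomState(1)
x = rng.standard_normal(1000000)

# Claim 1: the RMS of the central 95% of Gaussian deviates is ~0.871 of the
# true standard deviation, hence the 1/0.871 ~= 1.148 correction to `stds`.
xs = np.sort(x)
n = len(xs)
trimmed = xs[n // 40: -n // 40]            # drop 2.5% in each tail
print(trimmed.std())                       # ~0.871

# Claim 2: smoothing unit-variance white noise with a boxcar of length
# downfact, normalized by sqrt(downfact), keeps the post-smoothing RMS ~= 1.
downfact = 30
kernel = np.ones(downfact) / np.sqrt(downfact)
smoothed = np.convolve(x, kernel, mode='same')
print(smoothed.std())                      # ~1.0
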
Example #4
File: Pgplot.py  Project: bretonr/presto
def prepplot(rangex, rangey, title=None, labx=None, laby=None, \
             rangex2=None, rangey2=None, labx2=None, laby2=None, \
             logx=0, logy=0, logx2=0, logy2=0, font=ppgplot_font_, \
             fontsize=ppgplot_font_size_, id=0, aspect=1, ticks='in', \
             panels=[1,1], device=ppgplot_device_):
    """
    prepplot(rangex, rangey, ...)
        Open a PGPLOT device for plotting.
            'rangex' and 'rangey' are sequence objects giving min and
                max values for each axis.
        The optional entries are:
            title:    graph title                 (default = None)   
            labx:     label for the x-axis        (default = None)   
            laby:     label for the y-axis        (default = None)   
            rangex2:  ranges for 2nd x-axis       (default = None)   
            rangey2:  ranges for 2nd y-axis       (default = None)   
            labx2:    label for the 2nd x-axis    (default = None)   
            laby2:    label for the 2nd y-axis    (default = None)   
            logx:     make the 1st x-axis log     (default = 0 (no))
            logy:     make the 1st y-axis log     (default = 0 (no))
            logx2:    make the 2nd x-axis log     (default = 0 (no))
            logy2:    make the 2nd y-axis log     (default = 0 (no))
            font:     PGPLOT font to use          (default = 1 (normal))
            fontsize: PGPLOT font size to use     (default = 1.0 (normal))
            id:       Show ID line on plot        (default = 0 (no)) 
            aspect:   Aspect ratio                (default = 1 (square))
            ticks:    Ticks point in or out       (default = 'in')   
            panels:   Number of subpanels [r,c]   (default = [1,1])
            device:   PGPLOT device to use        (default = '/XWIN')
        Note:  Many default values are defined in global variables
            with names like ppgplot_font_ or ppgplot_device_.
    """
    global ppgplot_dev_open_, ppgplot_dev_prep_
    # Check if we will use second X or Y axes
    # Note:  if using a 2nd X axis, the range should correspond
    #   to the minimum and maximum values of the 1st X axis.  If
    #   using a 2nd Y axis, the range should correspond to the
    #   scalerange() values of the 1st Y axis.
    if rangex2 is None:
        rangex2=rangex
        otherxaxis=0
    else: otherxaxis=1
    if rangey2 is None:
        rangey2=rangey
        otheryaxis=0
    else: otheryaxis=1
    # Open the plot device
    if (not ppgplot_dev_open_):
        ppgplot.pgopen(device)
        # My little add-on to switch the background to white
        if device == '/XWIN':
            reset_colors()
        if device == '/AQT':
            ppgplot.pgsci(0)
        # Let the routines know that we already have a device open
        ppgplot_dev_open_ = 1
        # Set the aspect ratio
        ppgplot.pgpap(0.0, aspect)
        if (panels != [1,1]):
            # Set the number of panels
            ppgplot.pgsubp(panels[0], panels[1])
            ppgplot.pgpage()
    # Choose the font  
    ppgplot.pgscf(font)
    # Choose the font size
    ppgplot.pgsch(fontsize)
    # Choose the line width
    ppgplot.pgslw(ppgplot_linewidth_)
    # Plot the 2nd axis if needed first
    if otherxaxis or otheryaxis:
        ppgplot.pgvstd()
        ppgplot.pgswin(rangex2[0], rangex2[1], rangey2[0], rangey2[1])
        # Decide how the axes will be drawn
        if ticks=='in': env = "CMST"
        else: env = "CMSTI"
        if logx2: lxenv='L'
        else: lxenv=''
        if logy2: lyenv='L'
        else: lyenv=''
        if otherxaxis and otheryaxis:
            ppgplot.pgbox(env+lxenv, 0.0, 0, env+lyenv, 0.0, 0)
        elif otheryaxis:
            ppgplot.pgbox("", 0.0, 0, env+lyenv, 0.0, 0)
        else:
            ppgplot.pgbox(env+lxenv, 0.0, 0, "", 0.0, 0)
    # Now setup the primary axis
    ppgplot.pgvstd()
    ppgplot.pgswin(rangex[0], rangex[1], rangey[0], rangey[1])
    # Decide how the axes will be drawn
    if ticks=='in': env = "ST"
    else: env = "STI"
    if logx: lxenv='L'
    else: lxenv=''
    if logy: lyenv='L'
    else: lyenv=''
    if otherxaxis and otheryaxis:
        ppgplot.pgbox("BN"+env+lxenv, 0.0, 0, "BN"+env+lyenv, 0.0, 0)
    elif otheryaxis:
        ppgplot.pgbox("BCN"+env+lxenv, 0.0, 0, "BN"+env+lyenv, 0.0, 0)
    elif otherxaxis:
        ppgplot.pgbox("BN"+env+lxenv, 0.0, 0, "BCN"+env+lyenv, 0.0, 0)
    else:
        ppgplot.pgbox("BCN"+env+lxenv, 0.0, 0, "BCN"+env+lyenv, 0.0, 0)
    # My little add-on to switch the background to white
    if device == '/AQT' or device == '/XWIN':
        reset_colors()
    # Add labels
    if title is not None: ppgplot.pgmtxt("T", 3.2, 0.5, 0.5, title)
    ppgplot.pgmtxt("B", 3.0, 0.5, 0.5, labx)
    ppgplot.pgmtxt("L", 2.6, 0.5, 0.5, laby)
    if otherxaxis: ppgplot.pgmtxt("T", 2.0, 0.5, 0.5, labx2)
    if otheryaxis: ppgplot.pgmtxt("R", 3.0, 0.5, 0.5, laby2)
    # Add ID line if required
    if (id==1): ppgplot.pgiden()
    # Let the routines know that we have already prepped the device
    ppgplot_dev_prep_ = 1
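
A hypothetical usage sketch for prepplot (the import path and output filename below are illustrative; it assumes PRESTO's Pgplot module with its ppgplot_* globals is importable, and the device is closed with ppgplot.pgend() as in the examples above):

import numpy as Num
import ppgplot
from Pgplot import prepplot    # illustrative import path

x = Num.linspace(0.0, 10.0, 200)
y = Num.sin(x)
prepplot(rangex=(0.0, 10.0), rangey=(-1.2, 1.2),
         title="Demo", labx="x", laby="sin(x)",
         device="prepplot_demo.ps/CPS")
ppgplot.pgline(x, y)
ppgplot.pgend()
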
Example #5
def prepplot(rangex, rangey, title=None, labx=None, laby=None, \
             rangex2=None, rangey2=None, labx2=None, laby2=None, \
             logx=0, logy=0, logx2=0, logy2=0, font=ppgplot_font_, \
             fontsize=ppgplot_font_size_, id=0, aspect=1, ticks='in', \
             panels=[1,1], device=ppgplot_device_):
    """
    prepplot(rangex, rangey, ...)
        Open a PGPLOT device for plotting.
            'rangex' and 'rangey' are sequence objects giving min and
                max values for each axis.
        The optional entries are:
            title:    graph title                 (default = None)   
            labx:     label for the x-axis        (default = None)   
            laby:     label for the y-axis        (default = None)   
            rangex2:  ranges for 2nd x-axis       (default = None)   
            rangey2:  ranges for 2nd y-axis       (default = None)   
            labx2:    label for the 2nd x-axis    (default = None)   
            laby2:    label for the 2nd y-axis    (default = None)   
            logx:     make the 1st x-axis log     (default = 0 (no))
            logy:     make the 1st y-axis log     (default = 0 (no))
            logx2:    make the 2nd x-axis log     (default = 0 (no))
            logy2:    make the 2nd y-axis log     (default = 0 (no))
            font:     PGPLOT font to use          (default = 1 (normal))
            fontsize: PGPLOT font size to use     (default = 1.0 (normal))
            id:       Show ID line on plot        (default = 0 (no)) 
            aspect:   Aspect ratio                (default = 1 (square))
            ticks:    Ticks point in or out       (default = 'in')   
            panels:   Number of subpanels [r,c]   (default = [1,1])
            device:   PGPLOT device to use        (default = '/XWIN')
        Note:  Many default values are defined in global variables
            with names like ppgplot_font_ or ppgplot_device_.
    """
    global ppgplot_dev_open_, ppgplot_dev_prep_
    # Check if we will use second X or Y axes
    # Note:  if using a 2nd X axis, the range should correspond
    #   to the minimum and maximum values of the 1st X axis.  If
    #   using a 2nd Y axis, the range should correspond to the
    #   scalerange() values of the 1st Y axis.
    if rangex2 is None:
        rangex2 = rangex
        otherxaxis = 0
    else:
        otherxaxis = 1
    if rangey2 is None:
        rangey2 = rangey
        otheryaxis = 0
    else:
        otheryaxis = 1
    # Open the plot device
    if (not ppgplot_dev_open_):
        ppgplot.pgopen(device)
        # Let the routines know that we already have a device open
        ppgplot_dev_open_ = 1
        # Set the aspect ratio
        ppgplot.pgpap(0.0, aspect)
        if (panels != [1, 1]):
            # Set the number of panels
            ppgplot.pgsubp(panels[0], panels[1])
            ppgplot.pgpage()
    # Choose the font
    ppgplot.pgscf(font)
    # Choose the font size
    ppgplot.pgsch(fontsize)
    # Choose the line width
    ppgplot.pgslw(ppgplot_linewidth_)
    # Plot the 2nd axis if needed first
    if otherxaxis or otheryaxis:
        ppgplot.pgvstd()
        ppgplot.pgswin(rangex2[0], rangex2[1], rangey2[0], rangey2[1])
        # Decide how the axes will be drawn
        if ticks == 'in': env = "CMST"
        else: env = "CMSTI"
        if logx2: lxenv = 'L'
        else: lxenv = ''
        if logy2: lyenv = 'L'
        else: lyenv = ''
        if otherxaxis and otheryaxis:
            ppgplot.pgbox(env + lxenv, 0.0, 0, env + lyenv, 0.0, 0)
        elif otheryaxis:
            ppgplot.pgbox("", 0.0, 0, env + lyenv, 0.0, 0)
        else:
            ppgplot.pgbox(env + lxenv, 0.0, 0, "", 0.0, 0)
    # Now setup the primary axis
    ppgplot.pgvstd()
    ppgplot.pgswin(rangex[0], rangex[1], rangey[0], rangey[1])
    # Decide how the axes will be drawn
    if ticks == 'in': env = "ST"
    else: env = "STI"
    if logx: lxenv = 'L'
    else: lxenv = ''
    if logy: lyenv = 'L'
    else: lyenv = ''
    if otherxaxis and otheryaxis:
        ppgplot.pgbox("BN" + env + lxenv, 0.0, 0, "BN" + env + lyenv, 0.0, 0)
    elif otheryaxis:
        ppgplot.pgbox("BCN" + env + lxenv, 0.0, 0, "BN" + env + lyenv, 0.0, 0)
    elif otherxaxis:
        ppgplot.pgbox("BN" + env + lxenv, 0.0, 0, "BCN" + env + lyenv, 0.0, 0)
    else:
        ppgplot.pgbox("BCN" + env + lxenv, 0.0, 0, "BCN" + env + lyenv, 0.0, 0)
    # Add labels
    if title is not None: ppgplot.pgmtxt("T", 3.2, 0.5, 0.5, title)
    ppgplot.pgmtxt("B", 3.0, 0.5, 0.5, labx)
    ppgplot.pgmtxt("L", 2.6, 0.5, 0.5, laby)
    if otherxaxis: ppgplot.pgmtxt("T", 2.0, 0.5, 0.5, labx2)
    if otheryaxis: ppgplot.pgmtxt("R", 3.0, 0.5, 0.5, laby2)
    # Add ID line if required
    if (id == 1): ppgplot.pgiden()
    # Let the routines know that we have already prepped the device
    ppgplot_dev_prep_ = 1
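
This version of prepplot is functionally the same as Example #4 minus the background-color tweaks. As a hedged sketch of its secondary-axis feature (the import path and filename below are illustrative): the bottom axis is labelled in seconds and the top axis, via rangex2/labx2, in hours. Because hours are a linear rescaling of seconds, rangex2 spans the same data range as rangex, as the docstring requires.

import numpy as Num
import ppgplot
from Pgplot import prepplot    # illustrative import path

t = Num.linspace(0.0, 3600.0, 500)             # seconds
y = Num.cos(2.0 * Num.pi * t / 600.0)
prepplot(rangex=(0.0, 3600.0), rangey=(-1.2, 1.2),
         labx="Time (s)", laby="Amplitude",
         rangex2=(0.0, 1.0), labx2="Time (hr)",
         device="twoaxis_demo.ps/CPS")
ppgplot.pgline(t, y)
ppgplot.pgend()
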
Example #6
def main():
    parser = OptionParser(usage)
    parser.add_option(
        "-x",
        "--xwin",
        action="store_true",
        dest="xwin",
        default=False,
        help="Don't make a postscript plot, just use an X-window")
    parser.add_option("-p",
                      "--noplot",
                      action="store_false",
                      dest="makeplot",
                      default=True,
                      help="Look for pulses but do not generate a plot")
    parser.add_option(
        "-m",
        "--maxwidth",
        type="float",
        dest="maxwidth",
        default=0.0,
        help="Set the max downsampling in sec (see below for default)")
    parser.add_option("-t",
                      "--threshold",
                      type="float",
                      dest="threshold",
                      default=5.0,
                      help="Set a different threshold SNR (default=5.0)")
    parser.add_option("-s",
                      "--start",
                      type="float",
                      dest="T_start",
                      default=0.0,
                      help="Only plot events occuring after this time (s)")
    parser.add_option("-e",
                      "--end",
                      type="float",
                      dest="T_end",
                      default=1e9,
                      help="Only plot events occuring before this time (s)")
    parser.add_option("-g",
                      "--glob",
                      type="string",
                      dest="globexp",
                      default=None,
                      help="Process the files from this glob expression")
    parser.add_option("-f",
                      "--fast",
                      action="store_true",
                      dest="fast",
                      default=False,
                      help="Use a faster method of de-trending (2x speedup)")
    (opts, args) = parser.parse_args()
    if len(args) == 0:
        if opts.globexp == None:
            print full_usage
            sys.exit(0)
        else:
            args = []
            for globexp in opts.globexp.split():
                args += glob.glob(globexp)
    useffts = True
    dosearch = True
    if opts.xwin:
        pgplot_device = "/XWIN"
    else:
        pgplot_device = ""

    fftlen = 8192  # Should be a power-of-two for best speed
    chunklen = 8000  # Must be at least max_downfact less than fftlen
    detrendlen = 1000  # length of a linear piecewise chunk of data for detrending
    blocks_per_chunk = chunklen / detrendlen
    overlap = (fftlen - chunklen) / 2
    worklen = chunklen + 2 * overlap  # currently it is fftlen...

    max_downfact = 30
    default_downfacts = [2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150]

    if args[0].endswith(".singlepulse"):
        filenmbase = args[0][:args[0].rfind(".singlepulse")]
        dosearch = False
    elif args[0].endswith(".dat"):
        filenmbase = args[0][:args[0].rfind(".dat")]
    else:
        filenmbase = args[0]

    # Don't do a search, just read results and plot
    if not dosearch:
        info, DMs, candlist, num_v_DMstr = \
              read_singlepulse_files(args, opts.threshold, opts.T_start, opts.T_end)
        orig_N, orig_dt = int(info.N), info.dt
        obstime = orig_N * orig_dt
    else:
        DMs = []
        candlist = []
        num_v_DMstr = {}

        # Loop over the input files
        for filenm in args:
            if filenm.endswith(".dat"):
                filenmbase = filenm[:filenm.rfind(".dat")]
            else:
                filenmbase = filenm
            info = infodata.infodata(filenmbase + ".inf")
            DMstr = "%.2f" % info.DM
            DMs.append(info.DM)
            N, dt = int(info.N), info.dt
            obstime = N * dt
            # Choose the maximum width to search based on time instead
            # of bins.  This helps prevent increased S/N when the downsampling
            # changes as the DM gets larger.
            if opts.maxwidth > 0.0:
                downfacts = [
                    x for x in default_downfacts if x * dt <= opts.maxwidth
                ]
            else:
                downfacts = [x for x in default_downfacts if x <= max_downfact]
            if len(downfacts) == 0:
                downfacts = [default_downfacts[0]]
            if (filenm == args[0]):
                orig_N = N
                orig_dt = dt
                if useffts:
                    fftd_kerns = make_fftd_kerns(downfacts, fftlen)
            if info.breaks:
                offregions = zip([x[1] for x in info.onoff[:-1]],
                                 [x[0] for x in info.onoff[1:]])
            outfile = open(filenmbase + '.singlepulse', mode='w')

            # Compute the file length in detrendlens
            roundN = N / detrendlen * detrendlen
            numchunks = roundN / chunklen
            # Read in the file
            print 'Reading "%s"...' % filenm
            timeseries = Num.fromfile(filenm, dtype=Num.float32, count=roundN)
            # Split the timeseries into chunks for detrending
            numblocks = roundN / detrendlen
            timeseries.shape = (numblocks, detrendlen)
            stds = Num.zeros(numblocks, dtype=Num.float64)
            # de-trend the data one chunk at a time
            print '  De-trending the data and computing statistics...'
            for ii, chunk in enumerate(timeseries):
                if opts.fast:  # use median removal instead of detrending (2x speedup)
                    tmpchunk = chunk.copy()
                    tmpchunk.sort()
                    med = tmpchunk[detrendlen / 2]
                    chunk -= med
                    tmpchunk -= med
                else:
                    # The detrend calls are the most expensive in the program
                    timeseries[ii] = scipy.signal.detrend(chunk, type='linear')
                    tmpchunk = timeseries[ii].copy()
                    tmpchunk.sort()
                # The following gets rid of (hopefully) most of the
                # outlying values (i.e. power dropouts and single pulses)
                # If you throw out 5% (2.5% at bottom and 2.5% at top)
                # of random gaussian deviates, the measured stdev is ~0.871
                # of the true stdev.  Thus the 1.0/0.871=1.148 correction below.
                # The following is roughly .std() since we already removed the median
                stds[ii] = Num.sqrt(
                    (tmpchunk[detrendlen / 40:-detrendlen / 40]**2.0).sum() /
                    (0.95 * detrendlen))
            stds *= 1.148
            # sort the standard deviations and separate those with
            # very low or very high values
            sort_stds = stds.copy()
            sort_stds.sort()
            # identify the differences with the largest values (this
            # will split off the chunks with very low and very high stds)
            locut = (sort_stds[1:numblocks / 2 + 1] -
                     sort_stds[:numblocks / 2]).argmax() + 1
            hicut = (sort_stds[numblocks / 2 + 1:] -
                     sort_stds[numblocks / 2:-1]).argmax() + numblocks / 2 - 2
            std_stds = scipy.std(sort_stds[locut:hicut])
            median_stds = sort_stds[(locut + hicut) / 2]
            lo_std = median_stds - 4.0 * std_stds
            hi_std = median_stds + 4.0 * std_stds
            # Determine a list of "bad" chunks.  We will not search these.
            bad_blocks = Num.nonzero((stds < lo_std) | (stds > hi_std))[0]
            print "    pseudo-median block standard deviation = %.2f" % (
                median_stds)
            print "    identified %d bad blocks out of %d (i.e. %.2f%%)" % \
                  (len(bad_blocks), len(stds),
                   100.0*float(len(bad_blocks))/float(len(stds)))
            stds[bad_blocks] = median_stds
            print "  Now searching..."

            # Now normalize all of the data and reshape it to 1-D
            timeseries /= stds[:, Num.newaxis]
            timeseries.shape = (roundN, )
            # And set the data in the bad blocks to zeros
            # Even though we don't search these parts, it is important
            # because of the overlaps for the convolutions
            for bad_block in bad_blocks:
                loind, hiind = bad_block * detrendlen, (bad_block +
                                                        1) * detrendlen
                timeseries[loind:hiind] = 0.0
            # Convert to a set for faster lookups below
            bad_blocks = set(bad_blocks)

            # Step through the data
            dm_candlist = []
            for chunknum in range(numchunks):
                loind = chunknum * chunklen - overlap
                hiind = (chunknum + 1) * chunklen + overlap
                # Take care of beginning and end of file overlap issues
                if (chunknum == 0):  # Beginning of file
                    chunk = Num.zeros(worklen, dtype=Num.float32)
                    chunk[overlap:] = timeseries[loind + overlap:hiind]
                elif (chunknum == numchunks - 1):  # end of the timeseries
                    chunk = Num.zeros(worklen, dtype=Num.float32)
                    chunk[:-overlap] = timeseries[loind:hiind - overlap]
                else:
                    chunk = timeseries[loind:hiind]

                # Make a set with the current block numbers
                lowblock = blocks_per_chunk * chunknum
                currentblocks = set(Num.arange(blocks_per_chunk) + lowblock)
                localgoodblocks = Num.asarray(
                    list(currentblocks - bad_blocks)) - lowblock
                # Search this chunk if it is not all bad
                if len(localgoodblocks):
                    # This is the good part of the data (end effects removed)
                    goodchunk = chunk[overlap:-overlap]

                    # need to pass blocks/chunklen, localgoodblocks
                    # dm_candlist, dt, opts.threshold to cython routine

                    # Search non-downsampled data first
                    # NOTE:  these nonzero() calls are some of the most
                    #        expensive calls in the program.  Best bet would
                    #        probably be to simply iterate over the goodchunk
                    #        in C and append to the candlist there.
                    hibins = Num.flatnonzero(goodchunk > opts.threshold)
                    hivals = goodchunk[hibins]
                    hibins += chunknum * chunklen
                    hiblocks = hibins / detrendlen
                    # Add the candidates (which are sorted by bin)
                    for bin, val, block in zip(hibins, hivals, hiblocks):
                        if block not in bad_blocks:
                            time = bin * dt
                            dm_candlist.append(
                                candidate(info.DM, val, time, bin, 1))

                    # Prepare our data for the convolution
                    if useffts: fftd_chunk = rfft(chunk, -1)

                    # Now do the downsampling...
                    for ii, downfact in enumerate(downfacts):
                        if useffts:
                            # Note:  FFT convolution is faster for _all_ downfacts, even 2
                            goodchunk = fft_convolve(fftd_chunk,
                                                     fftd_kerns[ii], overlap,
                                                     -overlap)
                        else:
                            # The normalization of this kernel keeps the post-smoothing RMS = 1
                            kernel = Num.ones(downfact, dtype=Num.float32) / \
                                     Num.sqrt(downfact)
                            smoothed_chunk = scipy.signal.convolve(
                                chunk, kernel, 1)
                            goodchunk = smoothed_chunk[overlap:-overlap]
                        #hibins = Num.nonzero(goodchunk>opts.threshold)[0]
                        hibins = Num.flatnonzero(goodchunk > opts.threshold)
                        hivals = goodchunk[hibins]
                        hibins += chunknum * chunklen
                        hiblocks = hibins / detrendlen
                        hibins = hibins.tolist()
                        hivals = hivals.tolist()
                        # Now walk through the new candidates and remove those
                        # that are not the highest but are within downfact/2
                        # bins of a higher signal pulse
                        hibins, hivals = prune_related1(
                            hibins, hivals, downfact)
                        # Insert the new candidates into the candlist, but
                        # keep it sorted...
                        for bin, val, block in zip(hibins, hivals, hiblocks):
                            if block not in bad_blocks:
                                time = bin * dt
                                bisect.insort(
                                    dm_candlist,
                                    candidate(info.DM, val, time, bin,
                                              downfact))

            # Now walk through the dm_candlist and remove the ones that
            # are within the downsample proximity of a higher
            # signal-to-noise pulse
            dm_candlist = prune_related2(dm_candlist, downfacts)
            print "  Found %d pulse candidates" % len(dm_candlist)

            # Get rid of those near padding regions
            if info.breaks: prune_border_cases(dm_candlist, offregions)

            # Write the pulses to an ASCII output file
            if len(dm_candlist):
                #dm_candlist.sort(cmp_sigma)
                outfile.write(
                    "# DM      Sigma      Time (s)     Sample    Downfact\n")
                for cand in dm_candlist:
                    outfile.write(str(cand))
            outfile.close()

            # Add these candidates to the overall candidate list
            for cand in dm_candlist:
                candlist.append(cand)
            num_v_DMstr[DMstr] = len(dm_candlist)

    if (opts.makeplot):

        # Step through the candidates to make a SNR list
        DMs.sort()
        snrs = []
        for cand in candlist:
            snrs.append(cand.sigma)
        if snrs:
            maxsnr = max(int(max(snrs)), int(opts.threshold)) + 3
        else:
            maxsnr = int(opts.threshold) + 3

        # Generate the SNR histogram
        snrs = Num.asarray(snrs)
        (num_v_snr, lo_snr, d_snr, num_out_of_range) = \
                    scipy.stats.histogram(snrs,
                                          int(maxsnr-opts.threshold+1),
                                          [opts.threshold, maxsnr])
        snrs = Num.arange(maxsnr-opts.threshold+1, dtype=Num.float64) * d_snr \
               + lo_snr + 0.5*d_snr
        num_v_snr = num_v_snr.astype(Num.float32)
        num_v_snr[num_v_snr == 0.0] = 0.001

        # Generate the DM histogram
        num_v_DM = Num.zeros(len(DMs))
        for ii, DM in enumerate(DMs):
            num_v_DM[ii] = num_v_DMstr["%.2f" % DM]
        DMs = Num.asarray(DMs)

        # open the plot device
        short_filenmbase = filenmbase[:filenmbase.find("_DM")]
        if opts.T_end > obstime:
            opts.T_end = obstime
        if pgplot_device:
            ppgplot.pgopen(pgplot_device)
        else:
            if (opts.T_start > 0.0 or opts.T_end < obstime):
                ppgplot.pgopen(short_filenmbase +
                               '_%.0f-%.0fs_singlepulse.ps/VPS' %
                               (opts.T_start, opts.T_end))
            else:
                ppgplot.pgopen(short_filenmbase + '_singlepulse.ps/VPS')
        ppgplot.pgpap(7.5, 1.0)  # Width in inches, aspect

        # plot the SNR histogram
        ppgplot.pgsvp(0.06, 0.31, 0.6, 0.87)
        ppgplot.pgswin(opts.threshold, maxsnr, Num.log10(0.5),
                       Num.log10(2 * max(num_v_snr)))
        ppgplot.pgsch(0.8)
        ppgplot.pgbox("BCNST", 0, 0, "BCLNST", 0, 0)
        ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Signal-to-Noise")
        ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Number of Pulses")
        ppgplot.pgsch(1.0)
        ppgplot.pgbin(snrs, Num.log10(num_v_snr), 1)

        # plot the DM histogram
        ppgplot.pgsvp(0.39, 0.64, 0.6, 0.87)
        # Add [1] to num_v_DM in YMAX below so that YMIN != YMAX when max(num_v_DM)==0
        ppgplot.pgswin(
            min(DMs) - 0.5,
            max(DMs) + 0.5, 0.0, 1.1 * max(num_v_DM + [1]))
        ppgplot.pgsch(0.8)
        ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
        ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\u-3\d)")
        ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Number of Pulses")
        ppgplot.pgsch(1.0)
        ppgplot.pgbin(DMs, num_v_DM, 1)

        # plot the SNR vs DM plot
        ppgplot.pgsvp(0.72, 0.97, 0.6, 0.87)
        ppgplot.pgswin(min(DMs) - 0.5, max(DMs) + 0.5, opts.threshold, maxsnr)
        ppgplot.pgsch(0.8)
        ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
        ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\u-3\d)")
        ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-Noise")
        ppgplot.pgsch(1.0)
        cand_ts = Num.zeros(len(candlist), dtype=Num.float32)
        cand_SNRs = Num.zeros(len(candlist), dtype=Num.float32)
        cand_DMs = Num.zeros(len(candlist), dtype=Num.float32)
        for ii, cand in enumerate(candlist):
            cand_ts[ii], cand_SNRs[ii], cand_DMs[ii] = \
                         cand.time, cand.sigma, cand.DM
        ppgplot.pgpt(cand_DMs, cand_SNRs, 20)

        # plot the DM vs Time plot
        ppgplot.pgsvp(0.06, 0.97, 0.08, 0.52)
        ppgplot.pgswin(opts.T_start, opts.T_end,
                       min(DMs) - 0.5,
                       max(DMs) + 0.5)
        ppgplot.pgsch(0.8)
        ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
        ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)")
        ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\u-3\d)")
        # Circles are symbols 20-26 in increasing order
        snr_range = 12.0
        cand_symbols = (cand_SNRs - opts.threshold) / snr_range * 6.0 + 20.5
        cand_symbols = cand_symbols.astype(Num.int32)
        cand_symbols[cand_symbols > 26] = 26
        for ii in [26, 25, 24, 23, 22, 21, 20]:
            inds = Num.nonzero(cand_symbols == ii)[0]
            ppgplot.pgpt(cand_ts[inds], cand_DMs[inds], ii)

        # Now fill the information area
        ppgplot.pgsvp(0.05, 0.95, 0.87, 0.97)
        ppgplot.pgsch(1.0)
        ppgplot.pgmtxt('T', 0.5, 0.0, 0.0,
                       "Single pulse results for '%s'" % short_filenmbase)
        ppgplot.pgsch(0.8)
        # first row
        ppgplot.pgmtxt('T', -1.1, 0.02, 0.0, 'Source: %s'%\
                       info.object)
        ppgplot.pgmtxt('T', -1.1, 0.33, 0.0, 'RA (J2000):')
        ppgplot.pgmtxt('T', -1.1, 0.5, 0.0, info.RA)
        ppgplot.pgmtxt('T', -1.1, 0.73, 0.0, 'N samples: %.0f' % orig_N)
        # second row
        ppgplot.pgmtxt('T', -2.4, 0.02, 0.0, 'Telescope: %s'%\
                       info.telescope)
        ppgplot.pgmtxt('T', -2.4, 0.33, 0.0, 'DEC (J2000):')
        ppgplot.pgmtxt('T', -2.4, 0.5, 0.0, info.DEC)
        ppgplot.pgmtxt('T', -2.4, 0.73, 0.0, 'Sampling time: %.2f \gms'%\
                       (orig_dt*1e6))
        # third row
        if info.instrument.find("pigot") >= 0:
            instrument = "Spigot"
        else:
            instrument = info.instrument
        ppgplot.pgmtxt('T', -3.7, 0.02, 0.0, 'Instrument: %s' % instrument)
        if (info.bary):
            ppgplot.pgmtxt('T', -3.7, 0.33, 0.0,
                           'MJD\dbary\u: %.12f' % info.epoch)
        else:
            ppgplot.pgmtxt('T', -3.7, 0.33, 0.0,
                           'MJD\dtopo\u: %.12f' % info.epoch)
        ppgplot.pgmtxt('T', -3.7, 0.73, 0.0, 'Freq\dctr\u: %.1f MHz'%\
                       ((info.numchan/2-0.5)*info.chan_width+info.lofreq))
        ppgplot.pgiden()
        ppgplot.pgend()
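
The helpers make_fftd_kerns(), rfft() and fft_convolve() used above are PRESTO routines that are not shown in this listing. Purely as an illustration of the idea they implement -- smoothing each chunk with a boxcar kernel via the convolution theorem -- here is a small numpy.fft sketch (the real implementation and its alignment conventions may differ):

import numpy as np

chunklen, downfact = 8000, 30
chunk = np.random.randn(chunklen).astype(np.float32)

# Direct smoothing; the 1/sqrt(downfact) scaling keeps unit-variance noise at RMS ~ 1
kernel = np.ones(downfact, dtype=np.float32) / np.sqrt(downfact)
direct = np.convolve(chunk, kernel, mode='same')

# The same smoothing via FFTs (circular convolution).  In the real code the
# chunk overlaps absorb the wrap-around at the edges.
padded_kernel = np.zeros(chunklen, dtype=np.float32)
padded_kernel[:downfact] = kernel
fft_smooth = np.fft.irfft(np.fft.rfft(chunk) * np.fft.rfft(padded_kernel), n=chunklen)

# Away from the edges the two methods agree, up to the index shift introduced by
# placing the kernel at the start of the zero-padded array.
shift = (downfact - 1) // 2
print(np.abs(direct[downfact:-downfact] -
             fft_smooth[downfact + shift:-downfact + shift]).max())   # ~1e-6
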
Example #7
def joy_division_plot(pulses, timeseries, downfactor=1, hgt_mult=1):
    """Plot each pulse profile on the same plot separated
        slightly on the vertical axis.
        'timeseries' is the Datfile object dissected.
        Downsample profiles by factor 'downfactor' before plotting.
        hgt_mult is a factor to stretch the height of the paper.
    """
    first = True
    ppgplot.pgbeg("%s.joydiv.ps/CPS" % \
                    os.path.split(timeseries.basefn)[1], 1, 1)
    ppgplot.pgpap(10.25, hgt_mult*8.5/11.0) # Letter landscape
    # ppgplot.pgpap(7.5, 11.7/8.3) # A4 portrait, doesn't print properly
    ppgplot.pgiden()
    ppgplot.pgsci(1)
    
    # Set up main plot
    ppgplot.pgsvp(0.1, 0.9, 0.1, 0.8)
    ppgplot.pglab("Profile bin", "Single pulse profiles", "")
    to_plot = []
    xmin = 0
    xmax = None
    ymin = None
    ymax = None
    for pulse in pulses:
        vertical_offset = (pulse.number-1)*JOYDIV_SEP
        copy_of_pulse = pulse.make_copy()
        if downfactor > 1:
            # Interpolate before downsampling
            interp = ((copy_of_pulse.N/downfactor)+1)*downfactor
            copy_of_pulse.interpolate(interp)
            copy_of_pulse.downsample(downfactor)
            # copy_of_pulse.scale()
        if first:
            summed_prof = copy_of_pulse.profile.copy()
            first = False
        else:
            summed_prof += copy_of_pulse.profile
        prof = copy_of_pulse.profile + vertical_offset
        min = prof.min()
        if ymin is None or min < ymin:
            ymin = min
        max = prof.max()
        if ymax is None or max > ymax:
            ymax = max
        max = prof.size-1
        if xmax is None or max > xmax:
            xmax = max
        to_plot.append(prof)
    yspace = 0.1*ymax
    ppgplot.pgswin(0, xmax, ymin-yspace, ymax+yspace)
    for prof in to_plot:
        ppgplot.pgline(np.arange(0,prof.size), prof)
    ppgplot.pgbox("BNTS", 0, 0, "BC", 0, 0)

    # Set up summed profile plot
    ppgplot.pgsvp(0.1, 0.9, 0.8, 0.9)
    ppgplot.pglab("", "Summed profile", "Pulses from %s" % timeseries.datfn)
    summed_prof = summed_prof - summed_prof.mean()
    ppgplot.pgswin(0, xmax, summed_prof.min(), summed_prof.max())
    ppgplot.pgline(np.arange(0, summed_prof.size), summed_prof)
    ppgplot.pgbox("C", 0, 0, "BC", 0, 0)
    ppgplot.pgclos()
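
joy_division_plot takes Datfile and pulse objects from the surrounding module. As a rough, self-contained illustration of just the stacking idea (synthetic Gaussian profiles stand in for real pulses, a local spacing value stands in for JOYDIV_SEP, and the output filename is illustrative):

import numpy as np
import ppgplot

nprof, nbin, sep = 10, 128, 5.0
x = np.arange(nbin, dtype=np.float64)
profs = [np.exp(-0.5 * ((x - nbin / 2.0) / 4.0)**2) * (ii + 1) + ii * sep
         for ii in range(nprof)]
ymin = min(p.min() for p in profs)
ymax = max(p.max() for p in profs)

ppgplot.pgbeg("joydiv_demo.ps/CPS", 1, 1)
ppgplot.pgpap(10.25, 8.5 / 11.0)            # letter landscape, as above
ppgplot.pgsvp(0.1, 0.9, 0.1, 0.9)
ppgplot.pgswin(0.0, nbin - 1.0, ymin - 1.0, ymax + 1.0)
ppgplot.pgbox("BNTS", 0, 0, "BC", 0, 0)
ppgplot.pglab("Profile bin", "Offset profiles", "")
for prof in profs:
    ppgplot.pgline(x, prof)
ppgplot.pgclos()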