Example #1
    def test_bar_plus_show(self):
        """Test the progress bar plus's rendering."""

        # With percentage
        pbar = progress.ProgressBarPlus()
        for i in range(101):
            pbar.inc(1)
            pbar.show()

        # Without percentage
        pbar = progress.ProgressBarPlus(print_percent=False)
        for i in range(101):
            pbar.inc(1)
            pbar.show()
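
        # A minimal usage sketch (hypothetical values), using only the
        # inc()/show() calls exercised here plus the max/span keywords seen
        # in later examples:
        #
        #     pbar = progress.ProgressBarPlus(max=100, span=52)
        #     for i in range(100):
        #         pbar.inc(1)
        #         sys.stdout.write(pbar.show() + '\r')
        #         sys.stdout.flush()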
Example #2
    def test_download_default(self):
        """Test the download bar, default options."""

        pbar = progress.DownloadBar()
        for i in range(101):
            pbar.inc(2)
            pbar.dec(1)
Example #3
    def test_bar_plus_default(self):
        """Test the progress bar plus, default options."""

        pbar = progress.ProgressBarPlus()
        for i in range(101):
            pbar.inc(2)
            pbar.dec(1)
Example #4
    def test_bar_plus_attributes(self):
        """Test the progress bar plus's attributes."""

        pbar2 = progress.ProgressBarPlus()
        for i in range(101):
            pbar2 += 2
            pbar2 -= 1

            pbar2 = pbar2 + 1
            pbar2 = pbar2 - 1

            # Net change per pass: +2 - 1 + 1 - 1 = +1, so the running total
            # after pass i is i + 1.
            self.assertEqual(pbar2.amount, i + 1)
Example #5
def main(args):
    # Parse command line options
    global MAX_QUEUE_DEPTH
    MAX_QUEUE_DEPTH = min([args.queue_depth, 10])

    # Find out where the source is if needed
    if args.source is not None:
        if args.ra is None or args.dec is None:
            tempRA, tempDec, tempService = resolveTarget('PSR ' + args.source)
            print("%s resolved to %s, %s using '%s'" %
                  (args.source, tempRA, tempDec, tempService))
            out = input('=> Accept? [Y/n] ')
            if out == 'n' or out == 'N':
                sys.exit()
            else:
                args.ra = tempRA
                args.dec = tempDec

    else:
        args.source = "None"

    if args.ra is None:
        args.ra = "00:00:00.00"
    if args.dec is None:
        args.dec = "+00:00:00.0"
    args.ra = str(args.ra)
    args.dec = str(args.dec)

    # FFT length
    LFFT = args.nchan

    # Sub-integration block size
    nsblk = args.nsblk

    DM = float(args.DM)

    # Open
    idf = DRXFile(args.filename)

    # Load in basic information about the data
    nFramesFile = idf.get_info('nframe')
    srate = idf.get_info('sample_rate')
    beampols = idf.get_info('nbeampol')
    tunepol = beampols

    # Offset, if needed
    o = 0
    if args.skip != 0.0:
        o = idf.offset(args.skip)
    nFramesFile -= int(o * srate / 4096) * tunepol

    ## Date
    beginDate = idf.get_info('start_time')
    beginTime = beginDate.datetime
    mjd = beginDate.mjd
    mjd_day = int(mjd)
    mjd_sec = (mjd - mjd_day) * 86400
    if args.output is None:
        args.output = "drx_%05d_%s" % (mjd_day, args.source.replace(' ', ''))

    ## Tuning frequencies
    central_freq1 = idf.get_info('freq1')
    central_freq2 = idf.get_info('freq2')
    beam = idf.get_info('beam')

    ## Coherent Dedispersion Setup
    timesPerFrame = numpy.arange(4096, dtype=numpy.float64) / srate
    spectraFreq1 = numpy.fft.fftshift(numpy.fft.fftfreq(
        LFFT, d=1.0 / srate)) + central_freq1
    spectraFreq2 = numpy.fft.fftshift(numpy.fft.fftfreq(
        LFFT, d=1.0 / srate)) + central_freq2
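
    ## Note: numpy.fft.fftfreq(LFFT, d=1.0/srate) spans -srate/2..+srate/2
    ## after fftshift(), so adding each tuning's center frequency gives the
    ## absolute sky frequency of every channel, which is what the coherent
    ## dedispersion step needs.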

    # File summary
    print("Input Filename: %s" % args.filename)
    print("Date of First Frame: %s (MJD=%f)" % (str(beginDate), mjd))
    print("Tune/Pols: %i" % tunepol)
    print("Tunings: %.1f Hz, %.1f Hz" % (central_freq1, central_freq2))
    print("Sample Rate: %i Hz" % srate)
    print("Sample Time: %f s" % (LFFT / srate, ))
    print("Sub-block Time: %f s" % (LFFT / srate * nsblk, ))
    print("Frames: %i (%.3f s)" %
          (nFramesFile, 4096.0 * nFramesFile / srate / tunepol))
    print("---")
    print("Using FFTW Wisdom? %s" % useWisdom)
    print("DM: %.4f pc / cm^3" % DM)
    print("Samples Needed: %i, %i to %i, %i" %
          (get_coherent_sample_size(central_freq1 - srate / 2,
                                    1.0 * srate / LFFT, DM),
           get_coherent_sample_size(central_freq2 - srate / 2,
                                    1.0 * srate / LFFT, DM),
           get_coherent_sample_size(central_freq1 + srate / 2,
                                    1.0 * srate / LFFT, DM),
           get_coherent_sample_size(central_freq2 + srate / 2,
                                    1.0 * srate / LFFT, DM)))

    # Create the output PSRFITS file(s)
    pfu_out = []
    if (not args.no_summing):
        polNames = 'I'
        nPols = 1
        reduceEngine = CombineToIntensity
    elif args.stokes:
        polNames = 'IQUV'
        nPols = 4
        reduceEngine = CombineToStokes
    elif args.circular:
        polNames = 'LLRR'
        nPols = 2
        reduceEngine = CombineToCircular
    else:
        polNames = 'XXYY'
        nPols = 2
        reduceEngine = CombineToLinear

    if args.four_bit_data:
        OptimizeDataLevels = OptimizeDataLevels4Bit
    else:
        OptimizeDataLevels = OptimizeDataLevels8Bit

    # Parameter validation
    if get_coherent_sample_size(central_freq1 - srate / 2, 1.0 * srate / LFFT,
                                DM) > nsblk:
        raise RuntimeError(
            "Too few samples for coherent dedispersion.  Considering increasing the number of channels."
        )
    elif get_coherent_sample_size(central_freq2 - srate / 2,
                                  1.0 * srate / LFFT, DM) > nsblk:
        raise RuntimeError(
            "Too few samples for coherent dedispersion.  Considering increasing the number of channels."
        )

    # Adjust the time for the padding used for coherent dedispersion
    print("MJD shifted by %.3f ms to account for padding" %
          (nsblk * LFFT / srate * 1000.0, ))
    beginDate = idf.get_info('start_time') + nsblk * LFFT / srate
    beginTime = beginDate.datetime
    mjd = beginDate.mjd

    for t in range(1, 2 + 1):
        ## Basic structure and bounds
        pfo = pfu.psrfits()
        pfo.basefilename = "%s_b%it%i" % (args.output, beam, t)
        pfo.filenum = 0
        pfo.tot_rows = pfo.N = pfo.T = pfo.status = pfo.multifile = 0
        pfo.rows_per_file = 32768

        ## Frequency, bandwidth, and channels
        if t == 1:
            pfo.hdr.fctr = central_freq1 / 1e6
        else:
            pfo.hdr.fctr = central_freq2 / 1e6
        pfo.hdr.BW = srate / 1e6
        pfo.hdr.nchan = LFFT
        pfo.hdr.df = srate / 1e6 / LFFT
        pfo.hdr.dt = LFFT / srate

        ## Metadata about the observation/observatory/pulsar
        pfo.hdr.observer = "writePsrfits2D.py"
        pfo.hdr.source = args.source
        pfo.hdr.fd_hand = 1
        pfo.hdr.nbits = 4 if args.four_bit_data else 8
        pfo.hdr.nsblk = nsblk
        pfo.hdr.ds_freq_fact = 1
        pfo.hdr.ds_time_fact = 1
        pfo.hdr.npol = nPols
        pfo.hdr.summed_polns = 1 if (not args.no_summing) else 0
        pfo.hdr.obs_mode = "SEARCH"
        pfo.hdr.telescope = "LWA"
        pfo.hdr.frontend = "LWA"
        pfo.hdr.backend = "DRX"
        pfo.hdr.project_id = "Pulsar"
        pfo.hdr.ra_str = args.ra
        pfo.hdr.dec_str = args.dec
        pfo.hdr.poln_type = "LIN" if not args.circular else "CIRC"
        pfo.hdr.poln_order = polNames
        pfo.hdr.date_obs = str(beginTime.strftime("%Y-%m-%dT%H:%M:%S"))
        pfo.hdr.MJD_epoch = pfu.get_ld(mjd)

        ## Coherent dedispersion information
        pfo.hdr.chan_dm = DM

        ## Setup the subintegration structure
        pfo.sub.tsubint = pfo.hdr.dt * pfo.hdr.nsblk
        pfo.sub.bytes_per_subint = pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk * pfo.hdr.nbits // 8
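        ## (Hypothetical sizing example: nchan=4096, npol=1, nsblk=4096 at
        ## 8 bits -> 4096*1*4096*8//8 = 16,777,216 bytes per sub-integration.)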
        pfo.sub.dat_freqs = pfu.malloc_doublep(
            pfo.hdr.nchan * 8)  # 8-bytes per double @ LFFT channels
        pfo.sub.dat_weights = pfu.malloc_floatp(
            pfo.hdr.nchan * 4)  # 4-bytes per float @ LFFT channels
        pfo.sub.dat_offsets = pfu.malloc_floatp(
            pfo.hdr.nchan * pfo.hdr.npol *
            4)  # 4-bytes per float @ LFFT channels per pol.
        pfo.sub.dat_scales = pfu.malloc_floatp(
            pfo.hdr.nchan * pfo.hdr.npol *
            4)  # 4-bytes per float @ LFFT channels per pol.
        if args.four_bit_data:
            pfo.sub.data = pfu.malloc_ucharp(
                pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk
            )  # 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples
            pfo.sub.rawdata = pfu.malloc_ucharp(
                pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk // 2
            )  # 4-bits per nibble @ (LFFT channels x pols. x nsblk sub-integrations) samples
        else:
            pfo.sub.rawdata = pfu.malloc_ucharp(
                pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk
            )  # 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples

        ## Create and save it for later use
        pfu.psrfits_create(pfo)
        pfu_out.append(pfo)

    freqBaseMHz = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT,
                                                       d=1.0 / srate)) / 1e6
    for i in range(len(pfu_out)):
        # Define the frequencies available in the file (in MHz)
        pfu.convert2_double_array(pfu_out[i].sub.dat_freqs,
                                  freqBaseMHz + pfu_out[i].hdr.fctr, LFFT)

        # Define which part of the spectra are good (1) or bad (0).  All channels
        # are good except for the two outermost.
        pfu.convert2_float_array(pfu_out[i].sub.dat_weights, numpy.ones(LFFT),
                                 LFFT)
        pfu.set_float_value(pfu_out[i].sub.dat_weights, 0, 0)
        pfu.set_float_value(pfu_out[i].sub.dat_weights, LFFT - 1, 0)

        # Define the data scaling (default is a scale of one and an offset of zero)
        pfu.convert2_float_array(pfu_out[i].sub.dat_offsets,
                                 numpy.zeros(LFFT * nPols), LFFT * nPols)
        pfu.convert2_float_array(pfu_out[i].sub.dat_scales,
                                 numpy.ones(LFFT * nPols), LFFT * nPols)

    # To speed things along, the data need to be processed in units of 'nsblk'.
    # Find out how many frames per tuning/polarization that corresponds to.
    chunkSize = nsblk * LFFT // 4096
    chunkTime = LFFT / srate * nsblk
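
    # Hypothetical example: with nsblk = 4096 and LFFT = 64, each chunk is
    # 4096*64//4096 = 64 frames per tuning/polarization (a DRX frame holds
    # 4096 samples) and spans 64/srate * 4096 seconds.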

    # Calculate the SK limits for weighting
    if (not args.no_sk_flagging):
        skLimits = kurtosis.get_limits(4.0, 1.0 * nsblk)

        GenerateMask = lambda x: ComputeSKMask(x, skLimits[0], skLimits[1])
    else:

        def GenerateMask(x):
            flag = numpy.ones((4, LFFT), dtype=numpy.float32)
            flag[:, 0] = 0.0
            flag[:, -1] = 0.0
            return flag
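
    # Either way the mask has one row per tuning/polarization stream (four
    # for DRX); rows 0-1 set weight1 for tuning 1 and rows 2-3 set weight2
    # for tuning 2 in the main loop below.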

    # Create the progress bar so that we can keep up with the conversion.
    pbar = progress.ProgressBarPlus(max=nFramesFile // (4 * chunkSize) - 2,
                                    span=52)

    # Go!
    rdr = threading.Thread(target=reader,
                           args=(idf, chunkTime, readerQ),
                           kwargs={'core': 0})
    rdr.daemon = True
    rdr.start()

    # Unpack - Previous data
    incoming = getFromQueue(readerQ)
    siCount, t, rawdata = incoming
    rawSpectraPrev = PulsarEngineRaw(rawdata, LFFT)

    # Unpack - Current data
    incoming = getFromQueue(readerQ)
    siCount, t, rawdata = incoming
    rawSpectra = PulsarEngineRaw(rawdata, LFFT)

    # Main loop
    incoming = getFromQueue(readerQ)
    while incoming[0] is not None:
        ## Unpack
        siCount, t, rawdata = incoming

        ## FFT
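        ## (The try/except NameError idiom reuses the output array once it
        ## exists and allocates it only on the first pass; the dedispersion,
        ## detection, and scaling buffers below follow the same pattern.)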
        try:
            rawSpectraNext = PulsarEngineRaw(rawdata, LFFT, rawSpectraNext)
        except NameError:
            rawSpectraNext = PulsarEngineRaw(rawdata, LFFT)

        ## S-K flagging
        flag = GenerateMask(rawSpectra)
        weight1 = numpy.where(flag[:2, :].sum(axis=0) == 0, 0,
                              1).astype(numpy.float32)
        weight2 = numpy.where(flag[2:, :].sum(axis=0) == 0, 0,
                              1).astype(numpy.float32)
        ff1 = 1.0 * (LFFT - weight1.sum()) / LFFT
        ff2 = 1.0 * (LFFT - weight2.sum()) / LFFT

        ## Dedisperse
        try:
            rawSpectraDedispersed = MultiChannelCD(
                rawSpectra, spectraFreq1, spectraFreq2, 1.0 * srate / LFFT, DM,
                rawSpectraPrev, rawSpectraNext, rawSpectraDedispersed)
        except NameError:
            rawSpectraDedispersed = MultiChannelCD(rawSpectra, spectraFreq1,
                                                   spectraFreq2,
                                                   1.0 * srate / LFFT, DM,
                                                   rawSpectraPrev,
                                                   rawSpectraNext)

        ## Update the state variables used to get the CD process continuous
        rawSpectraPrev[...] = rawSpectra
        rawSpectra[...] = rawSpectraNext

        ## Detect power
        try:
            redData = reduceEngine(rawSpectraDedispersed, redData)
        except NameError:
            redData = reduceEngine(rawSpectraDedispersed)

        ## Optimal data scaling
        try:
            bzero, bscale, bdata = OptimizeDataLevels(redData, LFFT, bzero,
                                                      bscale, bdata)
        except NameError:
            bzero, bscale, bdata = OptimizeDataLevels(redData, LFFT)

        ## Polarization mangling
        bzero1 = bzero[:nPols, :].T.ravel()
        bzero2 = bzero[nPols:, :].T.ravel()
        bscale1 = bscale[:nPols, :].T.ravel()
        bscale2 = bscale[nPols:, :].T.ravel()
        bdata1 = bdata[:nPols, :].T.ravel()
        bdata2 = bdata[nPols:, :].T.ravel()

        ## Write the spectra to the PSRFITS files
        for j, sp, bz, bs, wt in zip(range(2), (bdata1, bdata2),
                                     (bzero1, bzero2), (bscale1, bscale2),
                                     (weight1, weight2)):
            ## Time
            pfu_out[j].sub.offs = (
                pfu_out[j].tot_rows) * pfu_out[j].hdr.nsblk * pfu_out[
                    j].hdr.dt + pfu_out[j].hdr.nsblk * pfu_out[j].hdr.dt / 2.0

            ## Data
            ptr, junk = sp.__array_interface__['data']
            if args.four_bit_data:
                ctypes.memmove(
                    int(pfu_out[j].sub.data), ptr,
                    pfu_out[j].hdr.nchan * nPols * pfu_out[j].hdr.nsblk)
            else:
                ctypes.memmove(
                    int(pfu_out[j].sub.rawdata), ptr,
                    pfu_out[j].hdr.nchan * nPols * pfu_out[j].hdr.nsblk)

            ## Zero point
            ptr, junk = bz.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_offsets), ptr,
                           pfu_out[j].hdr.nchan * nPols * 4)

            ## Scale factor
            ptr, junk = bs.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_scales), ptr,
                           pfu_out[j].hdr.nchan * nPols * 4)

            ## SK
            ptr, junk = wt.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_weights), ptr,
                           pfu_out[j].hdr.nchan * 4)

            ## Save
            pfu.psrfits_write_subint(pfu_out[j])

        ## Update the progress bar and remaining time estimate
        pbar.inc()
        sys.stdout.write('%5.1f%% %5.1f%% %s %2i\r' %
                         (ff1 * 100, ff2 * 100, pbar.show(), len(readerQ)))
        sys.stdout.flush()

        ## Fetch another one
        incoming = getFromQueue(readerQ)

    rdr.join()

    # Update the progress bar with the total time used but only if we have
    # reached the end of the file
    if incoming[1]:
        pbar.amount = pbar.max
    sys.stdout.write('              %s %2i\n' % (pbar.show(), len(readerQ)))
    sys.stdout.flush()

    # And close out the files
    for pfo in pfu_out:
        pfu.psrfits_close(pfo)
Example #6
def main(args):
    # Open the file
    idf = DRXFile(args.filename)

    # Load in basic information about the data
    nFramesFile = idf.get_info('nframe')
    srate = idf.get_info('sample_rate')
    ttSkip = int(round(196e6 / srate)) * 4096
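    # Time tags count ticks of the 196 MHz sample clock, so consecutive
    # 4096-sample frames at 'srate' are round(196e6/srate)*4096 ticks apart.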
    beam = idf.get_info('beam')
    beampols = idf.get_info('nbeampol')
    tunepol = beampols

    # Offset, if needed
    args.offset = idf.offset(args.offset)
    nFramesFile -= int(args.offset * srate / 4096) * tunepol

    ## Date
    beginDate = idf.get_info('start_time')
    beginTime = beginDate.datetime
    mjd = beginDate.mjd
    mjd_day = int(mjd)
    mjd_sec = (mjd - mjd_day) * 86400

    ## Tuning frequencies
    central_freq1 = idf.get_info('freq1')
    central_freq2 = idf.get_info('freq2')
    beam = idf.get_info('beam')

    # File summary
    print("Input Filename: %s" % args.filename)
    print("Date of First Frame: %s (MJD=%f)" % (str(beginDate), mjd))
    print("Tune/Pols: %i" % tunepol)
    print("Tunings: %.1f Hz, %.1f Hz" % (central_freq1, central_freq2))
    print("Sample Rate: %i Hz" % srate)
    print("Frames: %i (%.3f s)" %
          (nFramesFile, 4096.0 * nFramesFile / srate / tunepol))

    if args.count > 0:
        nCaptures = int(args.count * srate / 4096)
    else:
        nCaptures = nFramesFile // beampols
        args.count = nCaptures * 4096 / srate
    nSkip = int(args.offset * srate / 4096)

    print("Seconds to Skip:  %.2f (%i captures)" % (args.offset, nSkip))
    print("Seconds to Split: %.2f (%i captures)" % (args.count, nCaptures))

    outname = os.path.basename(args.filename)
    outname = os.path.splitext(outname)[0]
    print("Writing %.2f s to file '%s_b%it[12].dat'" %
          (nCaptures * 4096 / srate, outname, beam))

    # Ready the internal interface for file access
    fh = idf.fh

    # Ready the output files - one for each tune/pol
    fhOut = []
    fhOut.append(open("%s_b%it1.dat" % (outname, beam), 'wb'))
    fhOut.append(open("%s_b%it2.dat" % (outname, beam), 'wb'))

    pb = progress.ProgressBarPlus(max=nCaptures)

    newFrame = bytearray(32 + 4096 * 2)

    # Setup the buffer
    buffer = RawDRXFrameBuffer(beams=[
        beam,
    ], reorder=True)

    # Go!
    c = 0
    eofFound = False
    while c < int(nCaptures):
        if eofFound:
            break

        ## Load in some frames
        if not buffer.overfilled:
            rFrames = deque()
            for i in range(tunepol):
                try:
                    rFrames.append(RawDRXFrame(fh.read(drx.FRAME_SIZE)))
                    #print(rFrames[-1].id, rFrames[-1].timetag, c, i)
                except errors.EOFError:
                    eofFound = True
                    buffer.append(rFrames)
                    break
                except errors.SyncError:
                    continue

            buffer.append(rFrames)

        timetag = buffer.peek()
        if timetag is None:
            # Continue adding frames if nothing comes out.
            continue
        else:
            # Otherwise, make sure we are on track
            try:
                timetag = timetag - tNomX  # T_NOM has been subtracted from ttLast
                if timetag != ttLast + ttSkip:
                    missing = (timetag - ttLast - ttSkip) / float(ttSkip)
                    if int(missing) == missing and missing < 50:
                        ## This is kind of black magic down here
                        for m in range(int(missing)):
                            m = ttLast + ttSkip * (
                                m + 1
                            ) + tNomX  # T_NOM has been subtracted from ttLast
                            baseframe = copy.deepcopy(rFrames[0])
                            baseframe[14:24] = struct.pack(
                                '>HQ',
                                struct.unpack('>HQ', baseframe[14:24])[0], m)
                            baseframe[32:] = b'\x00' * 4096
                            buffer.append(baseframe)
            except NameError:
                pass
        rFrames = buffer.get()

        ## Continue adding frames if nothing comes out.
        if rFrames is None:
            continue

        ## If something comes out, process it
        for tuning in (1, 2):
            ### Load
            pair0 = rFrames[2 * (tuning - 1) + 0]
            pair1 = rFrames[2 * (tuning - 1) + 1]

            ### ID manipulation
            idX = pair0[4]
            #idY = pair1[4]
            id = (0 << 7) | (1 << 6) | (idX & (7 << 3)) | (idX & 7)

            ### Time tag manipulation to remove the T_NOM offset
            tNomX, timetagX = pair0.tNom, pair0.timetag
            #tNomY, timetagX = pair1.tNom, pair1.timetag
            tNom = tNomX - tNomX
            timetag = timetagX - tNomX

            ## Check for timetag problems
            if tuning == 1:
                try:
                    ttDiff = timetag - ttLast
                    if ttDiff != ttSkip:
                        raise RuntimeError(
                            "timetag skip at %i, %i != %i (%.1f frames)" %
                            (c, ttDiff, ttSkip, 1.0 * ttDiff / ttSkip))
                except NameError:
                    pass
                ttLast = timetag

            ### Build the new frame
            newFrame[0:32] = pair0[0:32]
            newFrame[32:8224:2] = pair0[32:]
            newFrame[33:8224:2] = pair1[32:]

            ### Update the quantities that have changed
            try:
                newFrame[4] = struct.pack('>B', id)
            except TypeError:
                newFrame[4] = int.from_bytes(struct.pack('>B', id),
                                             byteorder='little')
            newFrame[14:24] = struct.pack('>HQ', tNom, timetag)

            ### Save
            fhOut[tuning - 1].write(newFrame)

        c += 1
        pb.inc(amount=1)
        if c != 0 and c % 5000 == 0:
            sys.stdout.write(pb.show() + '\r')
            sys.stdout.flush()

    # If we've hit the end of the file and haven't read in enough frames,
    # flush the buffer
    if eofFound or c < int(nCaptures):
        for rFrames in buffer.flush():
            if c == int(nCaptures):
                break

            for tuning in (1, 2):
                ### Load
                pair0 = rFrames[2 * (tuning - 1) + 0]
                pair1 = rFrames[2 * (tuning - 1) + 1]

                ### ID manipulation
                idX = pair0[4]
                #idY = pair1[4]
                id = (0 << 7) | (1 << 6) | (idX & (7 << 3)) | (idX & 7)

                ### Time tag manipulation to remove the T_NOM offset
                tNomX, timetagX = pair0.tNom, pair0.timetag
                #tNomY, timetagX = pair1.tNom, pair1.timetag
                tNom = tNomX - tNomX
                timetag = timetagX - tNomX

                ## Check for timetag problems
                if tuning == 1:
                    try:
                        ttDiff = timetag - ttLast
                        if ttDiff != ttSkip:
                            raise RuntimeError(
                                "timetag skip at %i, %i != %i (%.1f frames)" %
                                (c, ttDiff, ttSkip, 1.0 * ttDiff / ttSkip))
                    except NameError:
                        pass
                    ttLast = timetag

                ### Build the new frame
                newFrame[0:32] = pair0[0:32]
                newFrame[32:8224:2] = pair0[32:]
                newFrame[33:8224:2] = pair1[32:]

                ### Update the quantities that have changed
                try:
                    newFrame[4] = struct.pack('>B', id)
                except TypeError:
                    newFrame[4] = int.from_bytes(struct.pack('>B', id),
                                                 byteorder='little')
                newFrame[14:24] = struct.pack('>HQ', tNom, timetag)

                ### Save
                fhOut[tuning - 1].write(newFrame)

            c += 1
            pb.inc(amount=1)
            if c != 0 and c % 5000 == 0:
                sys.stdout.write(pb.show() + '\r')
                sys.stdout.flush()

    # Update the progress bar with the total time used
    pb.amount = pb.max
    sys.stdout.write(pb.show() + '\n')
    sys.stdout.flush()
    for f in fhOut:
        f.close()

    fh.close()
Example #7
def main(args):
    # Parse the command line
    if args.frequencies is not None:
        values = args.frequencies.split(',')

        args.frequencies = []
        for v in values:
            if v.find('-') == -1:
                args.frequencies.append(float(v))
            else:
                v1, v2 = [float(vs) for vs in v.split('-', 1)]
                v = v1
                while v <= v2:
                    args.frequencies.append(v)
                    v += 0.1
                args.frequencies.append(v2)
    else:
        args.frequencies = []
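
    # Hypothetical example: "12.5,30-30.3" expands to roughly
    # [12.5, 30.0, 30.1, 30.2, 30.3] MHz: ranges step by 0.1 MHz and the
    # endpoint is appended explicitly so it is always included.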

    for filename in args.filename:
        print("Working on '%s'..." % os.path.basename(filename))

        # Open the PSRFITS file
        hdulist = astrofits.open(filename, mode='update', memmap=True)

        # Figure out the integration time per sub-integration so we know how
        # many sections to work with at a time
        nPol = hdulist[1].header['NPOL']
        nSubs = hdulist[1].header['NSBLK']
        tInt = hdulist[1].data[0][0]
        nSubsChunk = int(numpy.ceil(args.duration / tInt))
        print("  Polarizations: %i" % nPol)
        print("  Sub-integration time: %.3f ms" % (tInt / nSubs * 1000.0, ))
        print("  Sub-integrations per block: %i" % nSubs)
        print("  Block integration time: %.3f ms" % (tInt * 1000.0, ))
        print("  Working in chunks of %i blocks (%.3f s)" %
              (nSubsChunk, nSubsChunk * tInt))

        # Figure out the SK parameters to use
        srate = hdulist[0].header['OBSBW'] * 1e6
        LFFT = hdulist[1].data[0][12].size
        skM = nSubsChunk * nSubs
        skN = srate // LFFT * (tInt / nSubs)
        if nPol == 1:
            skN *= 2
        skLimits = kurtosis.get_limits(args.sk_sigma, skM, N=1.0 * skN)
        print("  (p)SK M: %i" % (nSubsChunk * nSubs, ))
        print("  (p)SK N: %i" % skN)
        print("  (p)SK Limits: %.4f <= valid <= %.4f" % skLimits)

        # Figure out what to mask for the specified frequencies and report
        toMask = []
        freq = hdulist[1].data[0][12]
        for f in args.frequencies:
            metric = numpy.abs(freq - f)
            toMaskCurrent = numpy.where(metric <= 0.05)[0]
            toMask.extend(list(toMaskCurrent))
        if len(toMask) > 0:
            toMask = list(set(toMask))
            toMask.sort()
            print("  Masking Channels:")
            for c in toMask:
                print("    %i -> %.3f MHz" % (c, freq[c]))

        # Setup the progress bar
        try:
            pbar = progress.ProgressBarPlus(max=len(hdulist[1].data) //
                                            nSubsChunk,
                                            span=58)
        except AttributeError:
            pbar = progress.ProgressBar(max=len(hdulist[1].data) // nSubsChunk,
                                        span=58)

        # Go!
        flagged = 0
        processed = 0
        sk = numpy.zeros((nPol, LFFT)) - 99.99
        for i in range(0, (len(hdulist[1].data) // nSubsChunk) * nSubsChunk,
                       nSubsChunk):
            ## Load in the current block of data
            blockData = []
            blockMask = None
            for j in range(i, i + nSubsChunk):
                ### Access the correct subintegration
                subint = hdulist[1].data[j]

                ### Pull out various bits that we need, including:
                ###  * the weight mask
                ###  * the scale and offset values - bscl and bzero
                ###  * the actual data - data
                msk = subint[13]
                bzero = subint[14]
                bscl = subint[15]
                bzero.shape = (LFFT, nPol)
                bscl.shape = (LFFT, nPol)
                bzero = bzero.T
                bscl = bscl.T
                data = subint[16]
                data.shape = (nSubs, LFFT, nPol)
                data = data.T

                ### Apply the scaling/offset to the data and save the results
                ### to blockData
                for k in range(nSubs):
                    d = data[:, :, k] * bscl + bzero
                    d.shape += (1, )
                    blockData.append(d)

                ### Save a master mask
                try:
                    blockMask *= msk
                except TypeError:
                    blockMask = msk

            blockData = numpy.concatenate(blockData, axis=2)

            ## Compute the S-K statistics
            for p in range(nPol):
                for l in range(LFFT):
                    sk[p, l] = kurtosis.spectral_power(blockData[p, l, :],
                                                       N=1.0 * skN)

            ## Compute the new mask - both SK and the frequency flagging
            newMask = numpy.where((sk < skLimits[0]) | (sk > skLimits[1]), 0.0,
                                  1.0)
            newMask = numpy.where(newMask.mean(axis=0) <= 0.5, 0.0, 1.0)
            for c in toMask:
                newMask[c] *= 0.0

            if args.replace:
                ## Replace the existing mask
                blockMask = newMask
            else:
                ## Update the existing mask
                blockMask *= newMask

            ## Update file
            for j in range(i, i + nSubsChunk):
                hdulist[1].data[j][13] = blockMask

            ## Update the counters
            processed += LFFT
            flagged += (1.0 - blockMask).sum()

            ## Update the progress bar and remaining time estimate
            pbar.inc()
            sys.stdout.write('  %5.1f%% %s\r' %
                             (100.0 *
                              (1.0 - blockMask).sum() / LFFT, pbar.show()))
            sys.stdout.flush()

        # Update the progress bar with the total time used
        sys.stdout.write('  %5.1f%% %s\n' %
                         (100.0 * flagged / processed, pbar.show()))
        sys.stdout.flush()

        # Done
        hdulist.close()
Example #8
def process_data_to_linear(idf,
                           antennas,
                           tStart,
                           duration,
                           sample_rate,
                           args,
                           dataSets,
                           obsID=1,
                           clip1=0):
    """
    Process a chunk of data in a raw DRX file into linear polarization 
    products and add the contents to an HDF5 file.
    """

    # Length of the FFT
    LFFT = args.fft_length

    # Find the start of the observation
    print('Looking for #%i at %s with sample rate %.1f Hz...' %
          (obsID, tStart, sample_rate))
    idf.reset()

    t0 = idf.get_info('start_time')
    tDiff = tStart - datetime.utcfromtimestamp(t0)
    offset = idf.offset(tDiff.total_seconds())
    t0 = idf.get_info('start_time')
    srate = idf.get_info('sample_rate')
    while datetime.utcfromtimestamp(t0) < tStart or srate != sample_rate:
        offset = idf.offset(512. / sample_rate)
        t0 = idf.get_info('start_time')
        srate = idf.get_info('sample_rate')

    print('... Found #%i at %s with sample rate %.1f Hz' %
          (obsID, datetime.utcfromtimestamp(t0), srate))
    tDiff = datetime.utcfromtimestamp(t0) - tStart
    duration = duration - max([0, tDiff.total_seconds()])

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    nChunks = int(round(duration / args.average))
    if nChunks == 0:
        nChunks = 1

    # Date & Central Frequency
    beginDate = t0.datetime
    central_freq1 = idf.get_info('freq1')
    freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1 / srate))

    for t in range(len(antennas) // 2):
        dataSets['obs%i-freq%i' % (obsID, t + 1)][:] = freq + central_freq1

    obs = dataSets['obs%i' % obsID]
    obs.attrs['tInt'] = args.average
    obs.attrs['tInt_Unit'] = 's'
    obs.attrs['LFFT'] = LFFT
    obs.attrs['nChan'] = LFFT
    obs.attrs['RBW'] = freq[1] - freq[0]
    obs.attrs['RBW_Units'] = 'Hz'

    # Create the progress bar so that we can keep up with the conversion.
    pbar = progress.ProgressBarPlus(max=nChunks)

    data_products = ['XX', 'YY']
    done = False
    for i in range(nChunks):
        # Inner loop that actually reads the frames into the data array
        tInt, cTime, data = idf.read(args.average)
        if i == 0:
            print("Actual integration time is %.1f ms" % (tInt * 1000.0, ))

        # Save out some easy stuff
        dataSets['obs%i-time' % obsID][i] = (cTime[0], cTime[1])

        if (not args.without_sats):
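            # Count, per input stream, how many samples sit at the 8-bit
            # full-scale limit (re^2 + im^2 >= 127^2).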
            sats = ((data.real**2 + data.imag**2) >= 127**2).sum(axis=1)
            for t in range(len(antennas) // 2):
                dataSets['obs%i-Saturation%i' %
                         (obsID, t + 1)][i, :] = sats[2 * t + 0:2 * t + 2]
        else:
            for t in range(len(antennas) // 2):
                dataSets['obs%i-Saturation%i' % (obsID, t + 1)][i, :] = -1

        # Calculate the spectra for this block of data and then weight the results by
        # the total number of frames read.  This is needed to keep the averages correct.
        freq, tempSpec1 = fxc.SpecMaster(data,
                                         LFFT=LFFT,
                                         window=args.window,
                                         verbose=args.verbose,
                                         sample_rate=srate,
                                         clip_level=clip1)

        l = 0
        for t in range(len(antennas) // 2):
            for p in data_products:
                dataSets['obs%i-%s%i' %
                         (obsID, p, t + 1)][i, :] = tempSpec1[l, :]
                l += 1

        # We don't really need the data array anymore, so delete it
        del data

        # Are we done yet?
        if done:
            break

        ## Update the progress bar and remaining time estimate
        pbar.inc()
        sys.stdout.write('%s\r' % pbar.show())
        sys.stdout.flush()

    pbar.amount = pbar.max
    sys.stdout.write('%s\n' % pbar.show())
    sys.stdout.flush()

    return True
Example #9
def main(args):
    # Parse command line options
    args.filename.sort()
    global MAX_QUEUE_DEPTH
    MAX_QUEUE_DEPTH = min([args.queue_depth, 10])

    # Find out where the source is if needed
    if args.source is not None:
        if args.ra is None or args.dec is None:
            tempRA, tempDec, tempService = resolveTarget('PSR ' + args.source)
            print("%s resolved to %s, %s using '%s'" %
                  (args.source, tempRA, tempDec, tempService))
            out = input('=> Accept? [Y/n] ')
            if out == 'n' or out == 'N':
                sys.exit()
            else:
                args.ra = tempRA
                args.dec = tempDec

    else:
        args.source = "None"

    if args.ra is None:
        args.ra = "00:00:00.00"
    if args.dec is None:
        args.dec = "+00:00:00.0"
    args.ra = str(args.ra)
    args.dec = str(args.dec)

    # FFT length
    LFFT = args.nchan

    # Sub-integration block size
    nsblk = args.nsblk

    startTimes = []
    nFrames = []
    for filename in args.filename:
        idf = DRXFile(filename)

        # Find out how many frame sets are in each file
        srate = idf.get_info('sample_rate')
        beampols = idf.get_info('nbeampol')
        tunepol = beampols
        nFramesFile = idf.get_info('nframe')

        # Offset, if needed
        o = 0
        if args.skip != 0.0:
            o = idf.offset(args.skip)
        nFramesFile -= int(o * srate / 4096) * tunepol
        nFrames.append(nFramesFile // tunepol)

        # Get the start time of the file
        startTimes.append(idf.get_info('start_time_samples'))

        # Validate
        try:
            if srate != srateOld:
                raise RuntimeError(
                    "Sample rate change detected in this set of files")
        except NameError:
            srateOld = srate

        # Done
        idf.close()

    ttSkip = int(fS / srate * 4096)
    spSkip = int(fS / srate)
    frameOffsets = []
    sampleOffsets = []
    tickOffsets = []
    siCountMax = []
    for filename, startTime, nFrame in zip(args.filename, startTimes, nFrames):
        diff = max(startTimes) - startTime
        frameOffsets.append(diff // ttSkip)
        diff = diff - frameOffsets[-1] * ttSkip
        sampleOffset = diff // spSkip
        sampleOffsets.append(sampleOffset)
        if sampleOffsets[-1] == 4096:
            frameOffsets[-1] += 1
            sampleOffsets[-1] %= 4096
        if args.subsample_correction:
            tickOffsets.append(
                max(startTimes) - (startTime + frameOffsets[-1] * ttSkip +
                                   sampleOffsets[-1] * spSkip))
        else:
            tickOffsets.append(0)

        nFrame = nFrame - frameOffsets[-1] - 1
        nSubints = nFrame // (nsblk * LFFT // 4096)
        siCountMax.append(nSubints)
    siCountMax = min(siCountMax)
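
    # Hypothetical worked example: at srate = 1e6 Hz, ttSkip = 196*4096 =
    # 802,816 ticks/frame and spSkip = 196 ticks/sample; a start-time gap of
    # 10,000,000 ticks splits into 12 whole frames (9,633,792 ticks), 1,868
    # whole samples (366,128 ticks), and an 80-tick residual that the
    # sub-sample correction later removes as a phase rotation.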

    print("Proposed File Time Alignment:")
    residualOffsets = []
    for filename, startTime, frameOffset, sampleOffset, tickOffset in zip(
            args.filename, startTimes, frameOffsets, sampleOffsets,
            tickOffsets):
        tStartNow = startTime
        tStartAfter = startTime + frameOffset * ttSkip + int(
            sampleOffset * fS / srate) + tickOffset
        residualOffset = max(startTimes) - tStartAfter
        print("  %s with %i frames, %i samples, %i ticks" %
              (os.path.basename(filename), frameOffset, sampleOffset,
               tickOffset))
        print("    before: %i" % tStartNow)
        print("    after:  %i" % tStartAfter)
        print("      residual: %i" % residualOffset)

        residualOffsets.append(residualOffset)
    print("Minimum Residual: %i ticks (%.1f ns)" %
          (min(residualOffsets), min(residualOffsets) * (1e9 / fS)))
    print("Maximum Residual: %i ticks (%.1f ns)" %
          (max(residualOffsets), max(residualOffsets) * (1e9 / fS)))
    if not args.yes:
        out = input('=> Accept? [Y/n] ')
        if out == 'n' or out == 'N':
            sys.exit()
    else:
        print("=> Accepted via the command line")
    print(" ")

    # Setup the processing constraints
    if (not args.no_summing):
        polNames = 'I'
        nPols = 1
        reduceEngine = CombineToIntensity
    elif args.stokes:
        polNames = 'IQUV'
        nPols = 4
        reduceEngine = CombineToStokes
    elif args.circular:
        polNames = 'LLRR'
        nPols = 2
        reduceEngine = CombineToCircular
    else:
        polNames = 'XXYY'
        nPols = 2
        reduceEngine = CombineToLinear

    if args.four_bit_data:
        OptimizeDataLevels = OptimizeDataLevels4Bit
    else:
        OptimizeDataLevels = OptimizeDataLevels8Bit

    for c, filename, frameOffset, sampleOffset, tickOffset in zip(
            range(len(args.filename)), args.filename, frameOffsets,
            sampleOffsets, tickOffsets):
        idf = DRXFile(filename)

        # Find out how many frame sets are in each file
        srate = idf.get_info('sample_rate')
        beampols = idf.get_info('nbeampol')
        tunepol = beampols
        nFramesFile = idf.get_info('nframe')

        # Offset, if needed
        o = 0
        if args.skip != 0.0:
            o = idf.offset(args.skip)
        nFramesFile -= int(o * srate / 4096) * tunepol

        # Additional seek for timetag alignment across the files
        o += idf.offset(frameOffset * 4096 / srate)

        ## Date
        tStart = idf.get_info(
            'start_time') + sampleOffset * spSkip / fS + tickOffset / fS
        beginDate = tStart.datetime
        beginTime = beginDate
        mjd = tStart.mjd
        mjd_day = int(mjd)
        mjd_sec = (mjd - mjd_day) * 86400
        if args.output is None:
            args.output = "drx_%05d_%s" % (mjd_day, args.source.replace(
                ' ', ''))

        ## Tuning frequencies
        central_freq1 = idf.get_info('freq1')
        central_freq2 = idf.get_info('freq2')
        beam = idf.get_info('beam')

        # File summary
        print("Input Filename: %s (%i of %i)" %
              (filename, c + 1, len(args.filename)))
        print("Date of First Frame: %s (MJD=%f)" % (str(beginDate), mjd))
        print("Tune/Pols: %i" % tunepol)
        print("Tunings: %.1f Hz, %.1f Hz" % (central_freq1, central_freq2))
        print("Sample Rate: %i Hz" % srate)
        print("Sample Time: %f s" % (LFFT / srate, ))
        print("Sub-block Time: %f s" % (LFFT / srate * nsblk, ))
        print("Frames: %i (%.3f s)" %
              (nFramesFile, 4096.0 * nFramesFile / srate / tunepol))
        print("---")
        print("Using FFTW Wisdom? %s" % useWisdom)

        # Create the output PSRFITS file(s)
        pfu_out = []
        for t in range(1, 2 + 1):
            ## Basic structure and bounds
            pfo = pfu.psrfits()
            pfo.basefilename = "%s_b%it%i" % (args.output, beam, t)
            pfo.filenum = 0
            pfo.tot_rows = pfo.N = pfo.T = pfo.status = pfo.multifile = 0
            pfo.rows_per_file = 32768

            ## Frequency, bandwidth, and channels
            if t == 1:
                pfo.hdr.fctr = central_freq1 / 1e6
            else:
                pfo.hdr.fctr = central_freq2 / 1e6
            pfo.hdr.BW = srate / 1e6
            pfo.hdr.nchan = LFFT
            pfo.hdr.df = srate / 1e6 / LFFT
            pfo.hdr.dt = LFFT / srate

            ## Metadata about the observation/observatory/pulsar
            pfo.hdr.observer = "writePsrfits2Multi.py"
            pfo.hdr.source = args.source
            pfo.hdr.fd_hand = 1
            pfo.hdr.nbits = 4 if args.four_bit_data else 8
            pfo.hdr.nsblk = nsblk
            pfo.hdr.ds_freq_fact = 1
            pfo.hdr.ds_time_fact = 1
            pfo.hdr.npol = nPols
            pfo.hdr.summed_polns = 1 if (not args.no_summing) else 0
            pfo.hdr.obs_mode = "SEARCH"
            pfo.hdr.telescope = "LWA"
            pfo.hdr.frontend = "LWA"
            pfo.hdr.backend = "DRX"
            pfo.hdr.project_id = "Pulsar"
            pfo.hdr.ra_str = args.ra
            pfo.hdr.dec_str = args.dec
            pfo.hdr.poln_type = "LIN" if not args.circular else "CIRC"
            pfo.hdr.poln_order = polNames
            pfo.hdr.date_obs = str(beginTime.strftime("%Y-%m-%dT%H:%M:%S"))
            pfo.hdr.MJD_epoch = pfu.get_ld(mjd)

            ## Setup the subintegration structure
            pfo.sub.tsubint = pfo.hdr.dt * pfo.hdr.nsblk
            pfo.sub.bytes_per_subint = pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk * pfo.hdr.nbits // 8
            pfo.sub.dat_freqs = pfu.malloc_doublep(
                pfo.hdr.nchan * 8)  # 8-bytes per double @ LFFT channels
            pfo.sub.dat_weights = pfu.malloc_floatp(
                pfo.hdr.nchan * 4)  # 4-bytes per float @ LFFT channels
            pfo.sub.dat_offsets = pfu.malloc_floatp(
                pfo.hdr.nchan * pfo.hdr.npol *
                4)  # 4-bytes per float @ LFFT channels per pol.
            pfo.sub.dat_scales = pfu.malloc_floatp(
                pfo.hdr.nchan * pfo.hdr.npol *
                4)  # 4-bytes per float @ LFFT channels per pol.
            if args.four_bit_data:
                pfo.sub.data = pfu.malloc_ucharp(
                    pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk
                )  # 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples
                pfo.sub.rawdata = pfu.malloc_ucharp(
                    pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk // 2
                )  # 4-bits per nibble @ (LFFT channels x pols. x nsblk sub-integrations) samples
            else:
                pfo.sub.rawdata = pfu.malloc_ucharp(
                    pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk
                )  # 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples

            ## Create and save it for later use
            pfu.psrfits_create(pfo)
            pfu_out.append(pfo)

        freqBaseMHz = numpy.fft.fftshift(numpy.fft.fftfreq(
            LFFT, d=1.0 / srate)) / 1e6
        for i in range(len(pfu_out)):
            # Define the frequencies available in the file (in MHz)
            pfu.convert2_double_array(pfu_out[i].sub.dat_freqs,
                                      freqBaseMHz + pfu_out[i].hdr.fctr, LFFT)

            # Define which part of the spectra are good (1) or bad (0).  All channels
            # are good except for the two outermost.
            pfu.convert2_float_array(pfu_out[i].sub.dat_weights,
                                     numpy.ones(LFFT), LFFT)
            pfu.set_float_value(pfu_out[i].sub.dat_weights, 0, 0)
            pfu.set_float_value(pfu_out[i].sub.dat_weights, LFFT - 1, 0)

            # Define the data scaling (default is a scale of one and an offset of zero)
            pfu.convert2_float_array(pfu_out[i].sub.dat_offsets,
                                     numpy.zeros(LFFT * nPols), LFFT * nPols)
            pfu.convert2_float_array(pfu_out[i].sub.dat_scales,
                                     numpy.ones(LFFT * nPols), LFFT * nPols)

        # To speed things along, the data need to be processed in units of 'nsblk'.
        # Find out how many frames per tuning/polarization that corresponds to.
        chunkSize = nsblk * LFFT // 4096
        chunkTime = LFFT / srate * nsblk

        # Frequency arrays for use with the phase rotator
        freq1 = central_freq1 + numpy.fft.fftshift(
            numpy.fft.fftfreq(LFFT, d=1.0 / srate))
        freq2 = central_freq2 + numpy.fft.fftshift(
            numpy.fft.fftfreq(LFFT, d=1.0 / srate))

        # Calculate the SK limits for weighting
        if (not args.no_sk_flagging):
            skLimits = kurtosis.get_limits(4.0, 1.0 * nsblk)

            GenerateMask = lambda x: ComputeSKMask(x, skLimits[0], skLimits[1])
        else:

            def GenerateMask(x):
                flag = numpy.ones((4, LFFT), dtype=numpy.float32)
                flag[:, 0] = 0.0
                flag[:, -1] = 0.0
                return flag

        # Create the progress bar so that we can keep up with the conversion.
        pbar = progress.ProgressBarPlus(max=siCountMax, span=52)

        # Pre-read the first frame so that we have something to pad with, if needed
        if sampleOffset != 0:
            # Pre-read the first frame
            readT, t, dataPrev = idf.read(4096 / srate)

        # Go!
        rdr = threading.Thread(target=reader,
                               args=(idf, chunkTime, readerQ),
                               kwargs={'core': 0})
        rdr.daemon = True
        rdr.start()

        # Main Loop
        incoming = getFromQueue(readerQ)
        while incoming[0] is not None:
            ## Unpack
            siCount, t, rawdata = incoming

            ## Check to see where we are
            if siCount > siCountMax:
                ### Looks like we are done, allow the reader to finish
                incoming = getFromQueue(readerQ)
                continue

            ## Apply the sample offset
            if sampleOffset != 0:
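                ## dataComb below is [previous 4096 samples | current chunk];
                ## slicing it at sampleOffset shifts the stream by a whole
                ## number of samples so every file starts on the same sample.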
                try:
                    dataComb[:, :4096] = dataPrev
                except NameError:
                    dataComb = numpy.zeros(
                        (rawdata.shape[0], rawdata.shape[1] + 4096),
                        dtype=rawdata.dtype)
                    dataComb[:, :4096] = dataPrev
                dataComb[:, 4096:] = rawdata
                dataPrev = dataComb[:, -4096:]
                rawdata[...] = dataComb[:, sampleOffset:sampleOffset +
                                        4096 * chunkSize]

            ## FFT
            try:
                rawSpectra = PulsarEngineRaw(rawdata, LFFT, rawSpectra)
            except NameError:
                rawSpectra = PulsarEngineRaw(rawdata, LFFT)

            ## Apply the sub-sample offset as a phase rotation
            if tickOffset != 0:
                PhaseRotator(rawSpectra, freq1, freq2, tickOffset / fS,
                             rawSpectra)

            ## S-K flagging
            flag = GenerateMask(rawSpectra)
            weight1 = numpy.where(flag[:2, :].sum(axis=0) == 0, 0,
                                  1).astype(numpy.float32)
            weight2 = numpy.where(flag[2:, :].sum(axis=0) == 0, 0,
                                  1).astype(numpy.float32)
            ff1 = 1.0 * (LFFT - weight1.sum()) / LFFT
            ff2 = 1.0 * (LFFT - weight2.sum()) / LFFT

            ## Detect power
            try:
                redData = reduceEngine(rawSpectra, redData)
            except NameError:
                redData = reduceEngine(rawSpectra)

            ## Optimal data scaling
            try:
                bzero, bscale, bdata = OptimizeDataLevels(
                    redData, LFFT, bzero, bscale, bdata)
            except NameError:
                bzero, bscale, bdata = OptimizeDataLevels(redData, LFFT)

            ## Polarization mangling
            bzero1 = bzero[:nPols, :].T.ravel()
            bzero2 = bzero[nPols:, :].T.ravel()
            bscale1 = bscale[:nPols, :].T.ravel()
            bscale2 = bscale[nPols:, :].T.ravel()
            bdata1 = bdata[:nPols, :].T.ravel()
            bdata2 = bdata[nPols:, :].T.ravel()

            ## Write the spectra to the PSRFITS files
            for j, sp, bz, bs, wt in zip(range(2), (bdata1, bdata2),
                                         (bzero1, bzero2), (bscale1, bscale2),
                                         (weight1, weight2)):
                ## Time
                pfu_out[j].sub.offs = (pfu_out[j].tot_rows) * pfu_out[
                    j].hdr.nsblk * pfu_out[j].hdr.dt + pfu_out[
                        j].hdr.nsblk * pfu_out[j].hdr.dt / 2.0

                ## Data
                ptr, junk = sp.__array_interface__['data']
                if args.four_bit_data:
                    ctypes.memmove(
                        int(pfu_out[j].sub.data), ptr,
                        pfu_out[j].hdr.nchan * nPols * pfu_out[j].hdr.nsblk)
                else:
                    ctypes.memmove(
                        int(pfu_out[j].sub.rawdata), ptr,
                        pfu_out[j].hdr.nchan * nPols * pfu_out[j].hdr.nsblk)

                ## Zero point
                ptr, junk = bz.__array_interface__['data']
                ctypes.memmove(int(pfu_out[j].sub.dat_offsets), ptr,
                               pfu_out[j].hdr.nchan * nPols * 4)

                ## Scale factor
                ptr, junk = bs.__array_interface__['data']
                ctypes.memmove(int(pfu_out[j].sub.dat_scales), ptr,
                               pfu_out[j].hdr.nchan * nPols * 4)

                ## SK
                ptr, junk = wt.__array_interface__['data']
                ctypes.memmove(int(pfu_out[j].sub.dat_weights), ptr,
                               pfu_out[j].hdr.nchan * 4)

                ## Save
                pfu.psrfits_write_subint(pfu_out[j])

            ## Update the progress bar and remaining time estimate
            pbar.inc()
            sys.stdout.write('%5.1f%% %5.1f%% %s %2i\r' %
                             (ff1 * 100, ff2 * 100, pbar.show(), len(readerQ)))
            sys.stdout.flush()

            ## Fetch another one
            incoming = getFromQueue(readerQ)

        rdr.join()
        if sampleOffset != 0:
            del dataComb
        del rawSpectra
        del redData
        del bzero
        del bscale
        del bdata

        # Update the progress bar with the total time used but only if we have
        # reached the end of the file
        if incoming[1]:
            pbar.amount = pbar.max
        sys.stdout.write('              %s %2i\n' %
                         (pbar.show(), len(readerQ)))
        sys.stdout.flush()

        # And close out the files
        for pfo in pfu_out:
            pfu.psrfits_close(pfo)
Example #10
def main(args):
    # Open the file and load in basic information about the observation's goal
    fh = h5py.File(args.filename, 'r')
    if len(fh.keys()) != 1 or 'Observation1' not in fh:
        raise RuntimeError('Only HDF5 waterfall files with a single observation, labeled "Observation1", are supported')
        
    try:
        station = fh.attrs['StationName']
    except KeyError:
        station = 'lwa1'
        
    obs1 = fh['Observation1']
    if args.source is None:
        try:
            ## Load from the observation
            sourceName = obs1.attrs['TargetName']
            
            ## Validate
            assert(sourceName != '')
            
            ## Save
            args.source = sourceName
            
        except Exception as e:
            print("WARNING: Could not load source name from file")
            
    if args.ra is None or args.dec is None:
        try:
            ## Load from the observation
            ra = obs1.attrs['RA']
            if obs1.attrs['RA_Units'] == 'degrees':
                ra /= 15.0
            dec = obs1.attrs['Dec']
            decSign = '-' if dec < 0 else '+'
            dec = abs(dec)
            
            ## Validate
            assert(ra >= 0)
            assert(ra < 24)
            assert(dec <= 90)
            
            ## Save
            args.ra = '%02d:%02d:%04.1f' % (int(ra), int(ra * 60) % 60, ra * 3600 % 60)
            args.dec = '%s%02d:%02d:%04.1f' % (decSign, int(dec), int(dec * 60) % 60, dec * 3600 % 60)
            
        except Exception as e:
            print("WARNING: Could not load source RA/dec. from file")
            
    # Find out where the source is if needed
    if args.source is not None:
        if args.ra is None or args.dec is None:
            tempRA, tempDec, tempService = resolveTarget('PSR '+args.source)
            print("%s resolved to %s, %s using '%s'" % (args.source, tempRA, tempDec, tempService))
            out = input('=> Accept? [Y/n] ')
            if out == 'n' or out == 'N':
                sys.exit()
            else:
                args.ra = tempRA
                args.dec = tempDec
                
    else:
        args.source = "None"
        
    if args.ra is None:
        args.ra = "00:00:00.00"
    if args.dec is None:
        args.dec = "+00:00:00.0"
    args.ra = str(args.ra)
    args.dec = str(args.dec)
    
    ## What's in the data?
    obs1tuning1 = obs1['Tuning1']
    try:
        obs1tuning2 = obs1['Tuning2']
    except KeyError:
        obs1tuning2 = None
    
    nFramesFile = obs1['time'].shape[0]
    srate = float(obs1.attrs['sampleRate'])
    beam = int(obs1.attrs['Beam'])
    LFFT = int(obs1.attrs['LFFT'])
    nchan = int(obs1.attrs['nChan'])
    chanOffset = LFFT - nchan		# Packing offset to deal with old HDF5 files that contain only LFFT-1 channels
    central_freq1 = obs1tuning1['freq'][LFFT//2-chanOffset]
    try:
        central_freq2 = obs1tuning2['freq'][LFFT//2-chanOffset]
    except TypeError:
        central_freq2 = 0.0
    data_products = list(obs1tuning1)
    data_products.sort()
    try:
        del data_products[ data_products.index('Saturation') ]
    except ValueError:
        pass
    try:
        del data_products[ data_products.index('freq') ]
    except ValueError:
        pass
    tInt = obs1.attrs['tInt']
    
    # Sub-integration block size
    nsblk = args.nsblk
    
    ## Date
    try:
        beginATime = AstroTime(obs1['time'][0]['int'], obs1['time'][0]['frac'],
                               format=obs1['time'].attrs['format'],
                               scale=obs1['time'].attrs['scale'])
    except (KeyError, ValueError):
        beginATime = AstroTime(obs1['time'][0], format='unix', scale='utc')
    beginDate = beginATime.utc.datetime
    beginTime = beginDate
    mjd = beginATime.utc.mjd
    mjd_day = int(mjd)
    mjd_sec = (mjd-mjd_day)*86400
    if args.output is None:
        args.output = "drx_%05d_%s" % (mjd_day, args.source.replace(' ', ''))
        
    # File summary
    print("Input Filename: %s" % args.filename)
    print("Date of First Frame: %s (MJD=%f)" % (str(beginDate),mjd))
    print("Beam: %i" % beam)
    print("Tunings: %.1f Hz, %.1f Hz" % (central_freq1, central_freq2))
    print("Sample Rate: %i Hz" % srate)
    print("Sample Time: %f s" % tInt)
    print("Sub-block Time: %f s" % (tInt*nsblk,))
    print("Data Products: %s" % ','.join(data_products))
    print("Frames: %i (%.3f s)" % (nFramesFile, tInt*nFramesFile))
    print("---")
    
    # Create the output PSRFITS file(s)
    pfu_out = []
    if 'XX' in data_products and 'YY' in data_products and (not args.no_summing):
        polNames = 'I'
        nPols = 1
        def reduceEngine(x):
            y = numpy.zeros((2,x.shape[1]), dtype=numpy.float64)
            y[0,:] += x[0,:]
            y[0,:] += x[1,:]
            y[1,:] += x[2,:]
            y[1,:] += x[3,:]
            return y.astype(numpy.float32)
    elif 'I' in data_products:
        args.no_summing = False
        polNames = 'I'
        nPols = 1
        def reduceEngine(x):
            y = numpy.zeros((2,x.shape[1]), dtype=numpy.float32)
            y[0,:] = x[0,:]
            y[1,:] = x[x.shape[0]//2,:]
            return y
    else:
        args.no_summing = True
        allowed_indices = []
        allowed_products = []
        for p,pol in enumerate(data_products):
            if pol in ('XX', 'YY'):
                allowed_indices.append(p)
                allowed_products.append(pol)
        polNames = ''.join(allowed_products)
        iPols = len(data_products)
        nPols = len(allowed_products)
        if nPols == 0:
            raise RuntimeError('No valid polarization products found: %s' % (','.join(data_products),))
        def reduceEngine(x, iPols=iPols, nPols=nPols, indices=allowed_indices):
            # Pull out the allowed products for each tuning; rows for tuning 2 start at iPols
            y = numpy.zeros((2*nPols, x.shape[1]), dtype=numpy.float32)
            for i,j in enumerate(indices):
                y[i,:] = x[j,:]
                y[nPols+i,:] = x[iPols+j,:]
            return y
            
    if args.four_bit_data:
        OptimizeDataLevels = OptimizeDataLevels4Bit
    else:
        OptimizeDataLevels = OptimizeDataLevels8Bit
        
    for t in range(1, 2+1):
        if t == 2 and obs1tuning2 is None:
            continue
            
        ## Basic structure and bounds
        pfo = pfu.psrfits()
        pfo.basefilename = "%s_b%it%i" % (args.output, beam, t)
        pfo.filenum = 0
        pfo.tot_rows = pfo.N = pfo.T = pfo.status = pfo.multifile = 0
        pfo.rows_per_file = 32768
        
        ## Frequency, bandwidth, and channels
        if t == 1:
            pfo.hdr.fctr = central_freq1/1e6
        else:
            pfo.hdr.fctr = central_freq2/1e6
        pfo.hdr.BW = srate/1e6
        pfo.hdr.nchan = LFFT
        pfo.hdr.df = srate/1e6/LFFT
        pfo.hdr.dt = tInt
        
        ## Metadata about the observation/observatory/pulsar
        pfo.hdr.observer = "wP2FromHDF5.py"
        pfo.hdr.source = args.source
        pfo.hdr.fd_hand = 1
        pfo.hdr.nbits = 4 if args.four_bit_data else 8
        pfo.hdr.nsblk = nsblk
        pfo.hdr.ds_freq_fact = 1
        pfo.hdr.ds_time_fact = 1
        pfo.hdr.npol = nPols
        pfo.hdr.summed_polns = 1 if (not args.no_summing) else 0
        pfo.hdr.obs_mode = "SEARCH"
        if station in ('ovro-lwa', 'ovrolwa'):
            pfo.hdr.telescope = "OVRO-LWA"
            pfo.hdr.frontend = "OVRO-LWA"
            pfo.hdr.backend = "Beamformer"
        else:
            pfo.hdr.telescope = "LWA"
            pfo.hdr.frontend = "LWA"
            pfo.hdr.backend = "DRSpectrometer"
        pfo.hdr.project_id = "Pulsar"
        pfo.hdr.ra_str = args.ra
        pfo.hdr.dec_str = args.dec
        pfo.hdr.poln_type = "LIN"
        pfo.hdr.poln_order = polNames
        pfo.hdr.date_obs = str(beginTime.strftime("%Y-%m-%dT%H:%M:%S"))     
        pfo.hdr.MJD_epoch = pfu.get_ld(mjd)
        
        ## Setup the subintegration structure
        pfo.sub.tsubint = pfo.hdr.dt*pfo.hdr.nsblk
        pfo.sub.bytes_per_subint = pfo.hdr.nchan*pfo.hdr.npol*pfo.hdr.nsblk*pfo.hdr.nbits//8
        pfo.sub.dat_freqs   = pfu.malloc_doublep(pfo.hdr.nchan*8)				# 8-bytes per double @ LFFT channels
        pfo.sub.dat_weights = pfu.malloc_floatp(pfo.hdr.nchan*4)				# 4-bytes per float @ LFFT channels
        pfo.sub.dat_offsets = pfu.malloc_floatp(pfo.hdr.nchan*pfo.hdr.npol*4)		# 4-bytes per float @ LFFT channels per pol.
        pfo.sub.dat_scales  = pfu.malloc_floatp(pfo.hdr.nchan*pfo.hdr.npol*4)		# 4-bytes per float @ LFFT channels per pol.
        if args.four_bit_data:
            pfo.sub.data = pfu.malloc_ucharp(pfo.hdr.nchan*pfo.hdr.npol*pfo.hdr.nsblk)	# 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples
            pfo.sub.rawdata = pfu.malloc_ucharp(pfo.hdr.nchan*pfo.hdr.npol*pfo.hdr.nsblk//2)	# two 4-bit samples per byte @ (LFFT channels x pols. x nsblk sub-integrations) samples
        else:
            pfo.sub.rawdata = pfu.malloc_ucharp(pfo.hdr.nchan*pfo.hdr.npol*pfo.hdr.nsblk)	# 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples
        ## Create and save it for later use
        pfu.psrfits_create(pfo)
        pfu_out.append(pfo)
        
    for i,t in enumerate((obs1tuning1, obs1tuning2)):
        if i == 1 and t is None:
            continue
            
        # Define the frequencies available in the file (in MHz) making sure to correct the array
        # if chanOffset is not zero
        tfreqs = numpy.zeros(LFFT, dtype=t['freq'].dtype)
        tfreqs[chanOffset:] = t['freq'][:]/1e6
        if chanOffset != 0:
            tfreqs[:chanOffset] = (t['freq'][0] - numpy.arange(1, chanOffset+1)[::-1]*(t['freq'][1] - t['freq'][0])) / 1e6
        pfu.convert2_double_array(pfu_out[i].sub.dat_freqs, tfreqs, LFFT)
        
        # Define which part of the spectra are good (1) or bad (0).  All channels
        # are good except for the two outermost or those that are not contained in
        # the input HDF5 file.
        pfu.convert2_float_array(pfu_out[i].sub.dat_weights, numpy.ones(LFFT),  LFFT)
        pfu.set_float_value(pfu_out[i].sub.dat_weights, 0,      0)
        pfu.set_float_value(pfu_out[i].sub.dat_weights, LFFT-1, 0)
        for j in range(chanOffset):
            pfu.set_float_value(pfu_out[i].sub.dat_weights, j, 0)
            
        # Define the data scaling (default is a scale of one and an offset of zero)
        pfu.convert2_float_array(pfu_out[i].sub.dat_offsets, numpy.zeros(LFFT*nPols), LFFT*nPols)
        pfu.convert2_float_array(pfu_out[i].sub.dat_scales,  numpy.ones(LFFT*nPols),  LFFT*nPols)
        
    # To speed things along, the data need to be processed in units of 'nsblk'.
    # Each HDF5 row is already one spectrum, so a chunk is simply 'nsblk' rows.
    chunkSize = nsblk
    
    # Calculate the SK limits for weighting
    if (not args.no_sk_flagging) and 'XX' in data_products and 'YY' in data_products:
        skN = int(tInt*srate / LFFT)
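        ## (get_limits presumably returns the (lower, upper) spectral-kurtosis acceptance
        ## bounds for a 4-sigma cut with M spectra of N averages each)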
        skLimits = kurtosis.get_limits(4.0, M=1.0*nsblk, N=1.0*skN)
        
        GenerateMask = lambda x: ComputePseudoSKMask(x, LFFT, skN, skLimits[0], skLimits[1])
    else:
        def GenerateMask(x):
            flag = numpy.ones((4, LFFT), dtype=numpy.float32)
            flag[:,0] = 0.0
            flag[:,-1] = 0.0
            return flag
            
    # Create the progress bar so that we can keep up with the conversion.
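    # ('max' is the expected number of pbar.inc() calls; 'span' presumably sets the rendered bar width in characters)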
    pbar = progress.ProgressBarPlus(max=nFramesFile//chunkSize, span=55)
    
    # Go!
    done = False
    
    siCount = 0
    nSubInts = nFramesFile // chunkSize
    for i in range(nSubInts):
        ## Read in the data
        data = numpy.zeros((2*len(data_products), LFFT*chunkSize), dtype=numpy.float64)
        
        for j in range(chunkSize):
            jP = j + i*chunkSize
            nTime = obs1['time'][jP]
            try:
                nTime = nTime['int'] + nTime['frac']
            except (KeyError, ValueError):
                pass
                
            try:
                if nTime > oTime + 1.001*tInt:
                    # pylint: disable-next=bad-string-format-type
                    print('Warning: Time tag error in subint. %i; %.3f > %.3f + %.3f' % (siCount, nTime, oTime, tInt))
            except NameError:
                pass
            oTime = nTime
            
            k = 0
            for t in (obs1tuning1, obs1tuning2):
                if t is None:
                    continue
                    
                for p in data_products:
                    data[k, j*LFFT+chanOffset:(j+1)*LFFT] = t[p][jP,:]
                    k += 1
        siCount += 1
        
        ## Are we done yet?
        if done:
            break
            
        ## FFT (nothing to do here since the data are already spectra)
        spectra = data
        
        ## S-K flagging
        flag = GenerateMask(spectra)
        weight1 = numpy.where( flag[:2,:].sum(axis=0) == 0, 0, 1 ).astype(numpy.float32)
        weight2 = numpy.where( flag[2:,:].sum(axis=0) == 0, 0, 1 ).astype(numpy.float32)
        ff1 = 1.0*(LFFT - weight1.sum()) / LFFT
        ff2 = 1.0*(LFFT - weight2.sum()) / LFFT
        
        ## Detect power
        data = reduceEngine(spectra)
        
        ## Optimal data scaling
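        ## (bzero/bscale hold, presumably, the per-channel offsets and scales needed to undo
        ## the 4/8-bit quantization of bdata; they are written out via dat_offsets/dat_scales below)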
        bzero, bscale, bdata = OptimizeDataLevels(data, LFFT)
        
        ## Polarization mangling
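        ## (the first nPols rows are tuning 1 and the rest tuning 2; each block is flattened
        ## into the 1-D ordering the psrfits writer expects)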
        bzero1 = bzero[:nPols,:].T.ravel()
        bzero2 = bzero[nPols:,:].T.ravel()
        bscale1 = bscale[:nPols,:].T.ravel()
        bscale2 = bscale[nPols:,:].T.ravel()
        bdata1 = bdata[:nPols,:].T.ravel()
        bdata2 = bdata[nPols:,:].T.ravel()
        
        ## Write the spectra to the PSRFITS files
        for j,sp,bz,bs,wt in zip(range(2), (bdata1, bdata2), (bzero1, bzero2), (bscale1, bscale2), (weight1, weight2)):
            if j == 1 and obs1tuning2 is None:
                continue
                
            ## Time
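            ## (offs is the offset of the midpoint of this row from the start of the file, in seconds)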
            pfu_out[j].sub.offs = (pfu_out[j].tot_rows)*pfu_out[j].hdr.nsblk*pfu_out[j].hdr.dt+pfu_out[j].hdr.nsblk*pfu_out[j].hdr.dt/2.0
            
            ## Data
            ptr, junk = sp.__array_interface__['data']
            if args.four_bit_data:
                ctypes.memmove(int(pfu_out[j].sub.data), ptr, pfu_out[j].hdr.nchan*nPols*pfu_out[j].hdr.nsblk)
            else:
                ctypes.memmove(int(pfu_out[j].sub.rawdata), ptr, pfu_out[j].hdr.nchan*nPols*pfu_out[j].hdr.nsblk)
                
            ## Zero point
            ptr, junk = bz.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_offsets), ptr, pfu_out[j].hdr.nchan*nPols*4)
            
            ## Scale factor
            ptr, junk = bs.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_scales), ptr, pfu_out[j].hdr.nchan*nPols*4)
            
            ## SK
            ptr, junk = wt.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_weights), ptr, pfu_out[j].hdr.nchan*4)
            
            ## Save
            pfu.psrfits_write_subint(pfu_out[j])
            
        ## Update the progress bar and remaining time estimate
        pbar.inc()
        sys.stdout.write('%5.1f%% %5.1f%% %s\r' % (ff1*100, ff2*100, pbar.show()))
        sys.stdout.flush()
        
    # Update the progress bar with the total time used
    sys.stdout.write('              %s\n' % pbar.show())
    sys.stdout.flush()
    
    # And close out the files
    for pfo in pfu_out:
        pfu.psrfits_close(pfo)
Example #11
def main(args):
    # Find out where the source is if needed
    if args.source is not None:
        if args.ra is None or args.dec is None:
            tempRA, tempDec, tempService = resolveTarget('PSR ' + args.source)
            print("%s resolved to %s, %s using '%s'" %
                  (args.source, tempRA, tempDec, tempService))
            out = input('=> Accept? [Y/n] ')
            if out == 'n' or out == 'N':
                sys.exit()
            else:
                args.ra = tempRA
                args.dec = tempDec

    else:
        args.source = "None"

    if args.ra is None:
        args.ra = "00:00:00.00"
    if args.dec is None:
        args.dec = "+00:00:00.0"
    args.ra = str(args.ra)
    args.dec = str(args.dec)

    # Open
    idf = DRSpecFile(args.filename)
    nFramesFile = idf.get_info('nframe')
    LFFT = idf.get_info('LFFT')

    # Load in basic information about the data
    srate = idf.get_info('sample_rate')
    beam = idf.get_info('beam')
    central_freq1 = idf.get_info('freq1')
    central_freq2 = idf.get_info('freq2')
    data_products = idf.get_info('data_products')
    isLinear = ('XX' in data_products) or ('YY' in data_products)
    tInt = idf.get_info('tint')

    # Offset, if needed
    o = 0
    if args.skip != 0.0:
        o = idf.offset(args.skip)
    nFramesFile -= int(round(o / tInt))

    # Sub-integration block size
    nsblk = args.nsblk

    ## Date
    beginDate = idf.get_info('start_time')
    beginTime = beginDate.datetime
    mjd = beginDate.mjd
    mjd_day = int(mjd)
    mjd_sec = (mjd - mjd_day) * 86400
    if args.output is None:
        args.output = "drx_%05d_%s" % (mjd_day, args.source.replace(' ', ''))

    # File summary
    print("Input Filename: %s" % args.filename)
    print("Date of First Frame: %s (MJD=%f)" % (str(beginDate), mjd))
    print("Beam: %i" % beam)
    print("Tunings: %.1f Hz, %.1f Hz" % (central_freq1, central_freq2))
    print("Sample Rate: %i Hz" % srate)
    print("Sample Time: %f s" % tInt)
    print("Sub-block Time: %f s" % (tInt * nsblk, ))
    print("Data Products: %s" % ','.join(data_products))
    print("Frames: %i (%.3f s)" % (nFramesFile, tInt * nFramesFile))
    print("---")
    print("Offset: %.3f s (%.0f frames)" % (o, o / tInt))
    print("---")

    # Create the output PSRFITS file(s)
    pfu_out = []
    if isLinear and (not args.no_summing):
        polNames = 'I'
        nPols = 1

        def reduceEngine(x):
            y = numpy.zeros((2, x.shape[1]), dtype=numpy.float32)
            y[0, :] += x[0, :]
            y[0, :] += x[1, :]
            y[1, :] += x[2, :]
            y[1, :] += x[3, :]
            return y
    else:
        args.no_summing = True
        polNames = ''.join(data_products)
        nPols = len(data_products)
        reduceEngine = lambda x: x.astype(numpy.float32)

    if args.four_bit_data:
        OptimizeDataLevels = OptimizeDataLevels4Bit
    else:
        OptimizeDataLevels = OptimizeDataLevels8Bit

    for t in range(1, 2 + 1):
        ## Basic structure and bounds
        pfo = pfu.psrfits()
        pfo.basefilename = "%s_b%it%i" % (args.output, beam, t)
        pfo.filenum = 0
        pfo.tot_rows = pfo.N = pfo.T = pfo.status = pfo.multifile = 0
        pfo.rows_per_file = 32768

        ## Frequency, bandwidth, and channels
        if t == 1:
            pfo.hdr.fctr = central_freq1 / 1e6
        else:
            pfo.hdr.fctr = central_freq2 / 1e6
        pfo.hdr.BW = srate / 1e6
        pfo.hdr.nchan = LFFT
        pfo.hdr.df = srate / 1e6 / LFFT
        pfo.hdr.dt = tInt

        ## Metadata about the observation/observatory/pulsar
        pfo.hdr.observer = "wP2FromDRSpec.py"
        pfo.hdr.source = args.source
        pfo.hdr.fd_hand = 1
        pfo.hdr.nbits = 4 if args.four_bit_data else 8
        pfo.hdr.nsblk = nsblk
        pfo.hdr.ds_freq_fact = 1
        pfo.hdr.ds_time_fact = 1
        pfo.hdr.npol = nPols
        pfo.hdr.summed_polns = 1 if (not args.no_summing) else 0
        pfo.hdr.obs_mode = "SEARCH"
        pfo.hdr.telescope = "LWA"
        pfo.hdr.frontend = "LWA"
        pfo.hdr.backend = "DRSpectrometer"
        pfo.hdr.project_id = "Pulsar"
        pfo.hdr.ra_str = args.ra
        pfo.hdr.dec_str = args.dec
        pfo.hdr.poln_type = "LIN"
        pfo.hdr.poln_order = polNames
        pfo.hdr.date_obs = str(beginTime.strftime("%Y-%m-%dT%H:%M:%S"))
        pfo.hdr.MJD_epoch = pfu.get_ld(mjd)

        ## Setup the subintegration structure
        pfo.sub.tsubint = pfo.hdr.dt * pfo.hdr.nsblk
        pfo.sub.bytes_per_subint = pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk * pfo.hdr.nbits // 8
        pfo.sub.dat_freqs = pfu.malloc_doublep(
            pfo.hdr.nchan * 8)  # 8-bytes per double @ LFFT channels
        pfo.sub.dat_weights = pfu.malloc_floatp(
            pfo.hdr.nchan * 4)  # 4-bytes per float @ LFFT channels
        pfo.sub.dat_offsets = pfu.malloc_floatp(
            pfo.hdr.nchan * pfo.hdr.npol *
            4)  # 4-bytes per float @ LFFT channels per pol.
        pfo.sub.dat_scales = pfu.malloc_floatp(
            pfo.hdr.nchan * pfo.hdr.npol *
            4)  # 4-bytes per float @ LFFT channels per pol.
        if args.four_bit_data:
            pfo.sub.data = pfu.malloc_ucharp(
                pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk
            )  # 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples
            pfo.sub.rawdata = pfu.malloc_ucharp(
                pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk // 2
            )  # two 4-bit samples per byte @ (LFFT channels x pols. x nsblk sub-integrations) samples
        else:
            pfo.sub.rawdata = pfu.malloc_ucharp(
                pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk
            )  # 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples

        ## Create and save it for later use
        pfu.psrfits_create(pfo)
        pfu_out.append(pfo)

    freqBaseMHz = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT,
                                                       d=1.0 / srate)) / 1e6
    for i in range(len(pfu_out)):
        # Define the frequencies available in the file (in MHz)
        pfu.convert2_double_array(pfu_out[i].sub.dat_freqs,
                                  freqBaseMHz + pfu_out[i].hdr.fctr, LFFT)

        # Define which part of the spectra are good (1) or bad (0).  All channels
        # are good except for the two outermost.
        pfu.convert2_float_array(pfu_out[i].sub.dat_weights, numpy.ones(LFFT),
                                 LFFT)
        pfu.set_float_value(pfu_out[i].sub.dat_weights, 0, 0)
        pfu.set_float_value(pfu_out[i].sub.dat_weights, LFFT - 1, 0)

        # Define the data scaling (default is a scale of one and an offset of zero)
        pfu.convert2_float_array(pfu_out[i].sub.dat_offsets,
                                 numpy.zeros(LFFT * nPols), LFFT * nPols)
        pfu.convert2_float_array(pfu_out[i].sub.dat_scales,
                                 numpy.ones(LFFT * nPols), LFFT * nPols)

    # To speed things along, the data need to be processed in units of 'nsblk'.
    # Find out how many frames, and how much time, that corresponds to.
    chunkSize = nsblk
    chunkTime = tInt * nsblk

    # Calculate the SK limits for weighting
    if (not args.no_sk_flagging) and isLinear:
        skN = int(tInt * srate / LFFT)
        skLimits = kurtosis.get_limits(4.0, M=1.0 * nsblk, N=1.0 * skN)

        GenerateMask = lambda x: ComputePseudoSKMask(x, LFFT, skN, skLimits[0],
                                                     skLimits[1])
    else:

        def GenerateMask(x):
            flag = numpy.ones((4, LFFT), dtype=numpy.float32)
            flag[:, 0] = 0.0
            flag[:, -1] = 0.0
            return flag

    # Create the progress bar so that we can keep up with the conversion.
    pbar = progress.ProgressBarPlus(max=nFramesFile // chunkSize, span=55)

    # Go!
    done = False

    siCount = 0
    while True:
        ## Read in the data
        try:
            readT, t, data = idf.read(chunkTime)
            siCount += 1
        except errors.EOFError:
            break

        ## FFT (really promote and reshape since the data are already spectra)
        spectra = data.astype(numpy.float64)
        spectra = spectra.reshape(spectra.shape[0], -1)

        ## S-K flagging
        flag = GenerateMask(spectra)
        weight1 = numpy.where(flag[:2, :].sum(axis=0) == 0, 0,
                              1).astype(numpy.float32)
        weight2 = numpy.where(flag[2:, :].sum(axis=0) == 0, 0,
                              1).astype(numpy.float32)
        ff1 = 1.0 * (LFFT - weight1.sum()) / LFFT
        ff2 = 1.0 * (LFFT - weight2.sum()) / LFFT

        ## Detect power
        data = reduceEngine(spectra)

        ## Optimal data scaling
        bzero, bscale, bdata = OptimizeDataLevels(data, LFFT)

        ## Polarization mangling
        bzero1 = bzero[:nPols, :].T.ravel()
        bzero2 = bzero[nPols:, :].T.ravel()
        bscale1 = bscale[:nPols, :].T.ravel()
        bscale2 = bscale[nPols:, :].T.ravel()
        bdata1 = bdata[:nPols, :].T.ravel()
        bdata2 = bdata[nPols:, :].T.ravel()

        ## Write the spectra to the PSRFITS files
        for j, sp, bz, bs, wt in zip(range(2), (bdata1, bdata2),
                                     (bzero1, bzero2), (bscale1, bscale2),
                                     (weight1, weight2)):
            ## Time
            pfu_out[j].sub.offs = pfu_out[j].tot_rows*pfu_out[j].hdr.nsblk*pfu_out[j].hdr.dt + pfu_out[j].hdr.nsblk*pfu_out[j].hdr.dt/2.0

            ## Data
            ptr, junk = sp.__array_interface__['data']
            if args.four_bit_data:
                ctypes.memmove(
                    int(pfu_out[j].sub.data), ptr,
                    pfu_out[j].hdr.nchan * nPols * pfu_out[j].hdr.nsblk)
            else:
                ctypes.memmove(
                    int(pfu_out[j].sub.rawdata), ptr,
                    pfu_out[j].hdr.nchan * nPols * pfu_out[j].hdr.nsblk)

            ## Zero point
            ptr, junk = bz.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_offsets), ptr,
                           pfu_out[j].hdr.nchan * nPols * 4)

            ## Scale factor
            ptr, junk = bs.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_scales), ptr,
                           pfu_out[j].hdr.nchan * nPols * 4)

            ## SK
            ptr, junk = wt.__array_interface__['data']
            ctypes.memmove(int(pfu_out[j].sub.dat_weights), ptr,
                           pfu_out[j].hdr.nchan * 4)

            ## Save
            pfu.psrfits_write_subint(pfu_out[j])

        ## Update the progress bar and remaining time estimate
        pbar.inc()
        sys.stdout.write('%5.1f%% %5.1f%% %s\r' %
                         (ff1 * 100, ff2 * 100, pbar.show()))
        sys.stdout.flush()

    # Update the progress bar with the total time used
    sys.stdout.write('              %s\n' % pbar.show())
    sys.stdout.flush()

    # And close out the files
    for pfo in pfu_out:
        pfu.psrfits_close(pfo)
Example #12
def processDataBatchLinear(fh,
                           header,
                           antennas,
                           tStart,
                           duration,
                           sample_rate,
                           args,
                           dataSets,
                           obsID=1,
                           clip1=0,
                           clip2=0):
    """
    Process a chunk of data in a raw vdif file into linear polarization 
    products and add the contents to an HDF5 file.
    """

    # Length of the FFT
    LFFT = args.fft_length

    # Find the start of the observation
    junkFrame = vdif.read_frame(fh,
                                central_freq=header['OBSFREQ'],
                                sample_rate=header['OBSBW'] * 2.0)
    srate = junkFrame.sample_rate
    t0 = junkFrame.time
    fh.seek(-vdif.FRAME_SIZE, 1)

    print('Looking for #%i at %s with sample rate %.1f Hz...' %
          (obsID, tStart, sample_rate))
    while t0.datetime < tStart or srate != sample_rate:
        junkFrame = vdif.read_frame(fh,
                                    central_freq=header['OBSFREQ'],
                                    sample_rate=header['OBSBW'] * 2.0)
        srate = junkFrame.sample_rate
        t0 = junkFrame.time
    print('... Found #%i at %s with sample rate %.1f Hz' %
          (obsID, junkFrame.time.datetime, srate))
    tDiff = t0.datetime - tStart
    try:
        duration = duration - tDiff.total_seconds()
    except AttributeError:
        # total_seconds() is not available in very old Python versions
        duration = duration - (tDiff.seconds + tDiff.microseconds / 1e6)

    beam, pol = junkFrame.id
    beams = vdif.get_thread_count(fh)
    tunepols = vdif.get_thread_count(fh)
    tunepol = tunepols
    beampols = tunepol

    # Make sure that the file chunk size is an integer multiple of the FFT
    # length so that no data gets dropped.  This needs to take into account
    # the number of beampols in the data, the FFT length, and the number of
    # samples per frame.
    maxFrames = int(1.0 * 28000 / beampols * vdif.DATA_LENGTH /
                    float(2 * LFFT)) * 2 * LFFT // vdif.DATA_LENGTH * beampols

    # Number of frames per second
    nFramesSecond = int(srate) // vdif.DATA_LENGTH

    # Number of frames to integrate over
    nFramesAvg = int(round(args.average * srate / vdif.DATA_LENGTH * beampols))
    nFramesAvg = int(1.0 * nFramesAvg / beampols * vdif.DATA_LENGTH /
                     float(2 * LFFT)) * 2 * LFFT // vdif.DATA_LENGTH * beampols
    args.average = 1.0 * nFramesAvg / beampols * vdif.DATA_LENGTH / srate
    maxFrames = nFramesAvg

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    nChunks = int(round(duration / args.average))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks

    # Line up the time tags for the various tunings/polarizations
    timetags = []
    for i in range(16):
        junkFrame = vdif.read_frame(fh,
                                    central_freq=header['OBSFREQ'],
                                    sample_rate=header['OBSBW'] * 2.0)
        timetags.append(junkFrame.header.seconds_from_epoch * nFramesSecond +
                        junkFrame.header.frame_in_second)
    fh.seek(-16 * vdif.FRAME_SIZE, 1)

    i = 0
    if beampols == 4:
        while (timetags[i + 0] != timetags[i + 1]) or (
                timetags[i + 0] != timetags[i + 2]) or (timetags[i + 0] !=
                                                        timetags[i + 3]):
            i += 1
            fh.seek(vdif.FRAME_SIZE, 1)

    elif beampols == 2:
        while timetags[i + 0] != timetags[i + 1]:
            i += 1
            fh.seek(vdif.FRAME_SIZE, 1)

    # Date & Central Frequency
    beginDate = junkFrame.time.datetime
    central_freq1 = 0.0
    central_freq2 = 0.0
    for i in range(4):
        junkFrame = vdif.read_frame(fh,
                                    central_freq=header['OBSFREQ'],
                                    sample_rate=header['OBSBW'] * 2.0)
        b, p = junkFrame.id
        if p == 0:
            central_freq1 = junkFrame.central_freq
        elif p == 1:
            central_freq2 = junkFrame.central_freq
        else:
            pass
    fh.seek(-4 * vdif.FRAME_SIZE, 1)
    freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=2 / srate))
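    # (fx correlator versions before 0.8 returned LFFT-1 channels, so drop the
    # first frequency bin to match; see also the 'nchan' attribute below)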
    if float(fxc.__version__) < 0.8:
        freq = freq[1:]

    dataSets['obs%i-freq1' % obsID][:] = freq + central_freq1
    dataSets['obs%i-freq2' % obsID][:] = freq + central_freq2

    obs = dataSets['obs%i' % obsID]
    obs.attrs['tInt'] = args.average
    obs.attrs['tInt_Unit'] = 's'
    obs.attrs['LFFT'] = LFFT
    obs.attrs['nchan'] = LFFT - 1 if float(fxc.__version__) < 0.8 else LFFT
    obs.attrs['RBW'] = freq[1] - freq[0]
    obs.attrs['RBW_Units'] = 'Hz'

    # Create the progress bar so that we can keep up with the conversion.
    pbar = progress.ProgressBarPlus(max=nChunks)

    data_products = ['XX', 'YY']
    done = False
    for i in range(nChunks):
        # Find out how many frames remain in the file.  If this number is larger
        # than the maximum of frames we can work with at a time (maxFrames),
        # only deal with that chunk
        framesRemaining = nFrames - i * maxFrames
        if framesRemaining > maxFrames:
            framesWork = maxFrames
        else:
            framesWork = framesRemaining

        count = {0: 0, 1: 0, 2: 0, 3: 0}
        data = numpy.zeros((4, framesWork * vdif.DATA_LENGTH // beampols),
                           dtype=numpy.csingle)
        # If there are fewer frames than we need to fill an FFT, skip this chunk
        if data.shape[1] < LFFT:
            break

        # Inner loop that actually reads the frames into the data array
        for j in range(framesWork):
            # Read in the next frame and anticipate any problems that could occur
            try:
                cFrame = vdif.read_frame(fh,
                                         central_freq=header['OBSFREQ'],
                                         sample_rate=header['OBSBW'] * 2.0,
                                         verbose=False)
            except errors.EOFError:
                done = True
                break
            except errors.SyncError:
                continue

            beam, pol = cFrame.id
            aStand = pol
            if j == 0:
                cTime = cFrame.time

            try:
                data[aStand,
                     count[aStand] * vdif.DATA_LENGTH:(count[aStand] + 1) *
                     vdif.DATA_LENGTH] = cFrame.payload.data
                count[aStand] += 1
            except ValueError:
                raise RuntimeError("Invalid Shape")

        # Save out some easy stuff
        dataSets['obs%i-time' % obsID][i] = float(cTime)

        if not args.without_sats:
            sats = ((data.real**2 + data.imag**2) >= 49).sum(axis=1)
            dataSets['obs%i-Saturation1' % obsID][i, :] = sats[0:2]
            dataSets['obs%i-Saturation2' % obsID][i, :] = sats[2:4]
        else:
            dataSets['obs%i-Saturation1' % obsID][i, :] = -1
            dataSets['obs%i-Saturation2' % obsID][i, :] = -1

        # Calculate the spectra for this block of data and then weight the results by
        # the total number of frames read.  This is needed to keep the averages correct.
        if clip1 == clip2:
            freq, tempSpec1 = fxc.SpecMaster(data,
                                             LFFT=2 * LFFT,
                                             window=args.window,
                                             verbose=args.verbose,
                                             sample_rate=srate,
                                             clip_level=clip1)
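            # The input data are complex, so the 2*LFFT-point transform spans the full
            # bandwidth; keep the upper half so LFFT channels remain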
            freq, tempSpec1 = freq[LFFT:], tempSpec1[:, LFFT:]

            l = 0
            for t in (1, 2):
                for p in data_products:
                    dataSets['obs%i-%s%i' %
                             (obsID, p, t)][i, :] = tempSpec1[l, :]
                    l += 1

        else:
            freq, tempSpec1 = fxc.SpecMaster(data[:2, :],
                                             LFFT=2 * LFFT,
                                             window=args.window,
                                             verbose=args.verbose,
                                             sample_rate=srate,
                                             clip_level=clip1)
            freq, tempSpec2 = fxc.SpecMaster(data[2:, :],
                                             LFFT=2 * LFFT,
                                             window=args.window,
                                             verbose=args.verbose,
                                             sample_rate=srate,
                                             clip_level=clip2)
            freq, tempSpec1, tempSpec2 = freq[
                LFFT:], tempSpec1[:, LFFT:], tempSpec2[:, LFFT:]

            for l, p in enumerate(data_products):
                dataSets['obs%i-%s%i' % (obsID, p, 1)][i, :] = tempSpec1[l, :]
                dataSets['obs%i-%s%i' % (obsID, p, 2)][i, :] = tempSpec2[l, :]

        # We don't really need the data array anymore, so delete it
        del data

        # Are we done yet?
        if done:
            break

        ## Update the progress bar and remaining time estimate
        pbar.inc()
        sys.stdout.write('%s\r' % pbar.show())
        sys.stdout.flush()

    pbar.amount = pbar.max
    sys.stdout.write('%s\n' % pbar.show())
    sys.stdout.flush()

    return True