Code Example #1
File: test_imaging.py Project: lwa-project/lsl
    def test_CorrelatedDataMS_MultiIF(self):
        """Test the utils.CorrelatedDataMS class on a file with multiple IFs."""

        # Get some data
        data = self._init_data()

        # Filename and time
        testTime, testFile = time.time(), os.path.join(self.testPath,
                                                       'ms-test-MultiIF.ms')

        # Start the file
        fits = Ms(testFile, ref_time=testTime, overwrite=True)
        fits.set_stokes(['xx'])
        fits.set_frequency(data['freq'])
        fits.set_frequency(data['freq'] + 30e6)
        fits.set_geometry(data['site'], data['antennas'])
        fits.add_data_set(
            astro.utcjd_to_taimjd(astro.unix_to_utcjd(testTime)), 6.0,
            data['bl'],
            numpy.concatenate([data['vis'], 10 * data['vis']], axis=1))
        fits.write()
        fits.close()

        # Open the measurement set
        ms = utils.CorrelatedDataMS(testFile)
        self.assertEqual(ms.freq.size, 2 * data['freq'].size)
        ds = ms.get_data_set(1, include_auto=True)

        numpy.testing.assert_allclose(ds.XX.data[:, :data['freq'].size],
                                      data['vis'])
        numpy.testing.assert_allclose(ds.XX.data[:, data['freq'].size:],
                                      10 * data['vis'])

        ms.close()
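A note on the multi-IF check: the second call to set_frequency adds a second IF, and the concatenated visibilities land after the first IF along the frequency axis. A tiny self-contained sketch of the slicing used in the assertions (the shapes here are illustrative, not part of the original test):

import numpy

nBL, nchan = 6, 4                      # illustrative sizes
vis = numpy.ones((nBL, nchan), dtype=numpy.complex64)
both = numpy.concatenate([vis, 10 * vis], axis=1)

# IF 1 occupies the first nchan channels, IF 2 the rest
assert numpy.allclose(both[:, :nchan], vis)
assert numpy.allclose(both[:, nchan:], 10 * vis)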
Code Example #2
File: test_imaging.py Project: lwa-project/lsl
    def test_CorrelatedDataMS(self):
        """Test the utils.CorrelatedDataMS class."""

        testTime, testFile = time.time(), os.path.join(self.testPath,
                                                       'ms-test-W.ms')

        # Get some data
        data = self._init_data()

        # Start the table
        tbl = Ms(testFile, ref_time=testTime)
        tbl.set_stokes(['xx'])
        tbl.set_frequency(data['freq'])
        tbl.set_geometry(data['site'], data['antennas'])
        tbl.add_data_set(astro.utcjd_to_taimjd(astro.unix_to_utcjd(testTime)),
                         6.0, data['bl'], data['vis'])
        tbl.write()

        # Open the measurement set
        ms = utils.CorrelatedDataMS(testFile)

        # Basic functions (just to see that they run)
        junk = ms.get_antennaarray()
        junk = ms.get_observer()
        junk = ms.get_data_set(1)

        # Error checking
        self.assertRaises(IndexError, ms.get_data_set, 2)

        tbl.close()
Code Example #3
def _get_antennaarray(station, stands, utime, freqs):
    """
    Given a LWA station object, a list of stands, an observation time, and
    a list of frequencies in Hz, build an aipy AntennaArray object.
    """

    return vis.build_sim_array(station,
                               stands,
                               freqs / 1e9,
                               jd=astro.unix_to_utcjd(utime))
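A minimal usage sketch for the helper above, assuming the lsl stations module is available; the stand selection and the 74 MHz frequency are illustrative and not part of the original:

import time
import numpy
from lsl.common import stations

# Build a simulation array for LWA1 at 74 MHz, evaluated at the current time
station = stations.lwa1
stands = [ant.stand for ant in station.antennas[0::2]]  # roughly one entry per stand
aa = _get_antennaarray(station, stands, time.time(), numpy.array([74e6]))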
Code Example #4
def main(args):
    idf = LWA1DataFile(args[0])
    nFramesFile = idf.getInfo('nFrames')

    srate = idf.getInfo('sampleRate')
    beam = idf.getInfo('beam')
    beampols = idf.getInfo('beampols')

    # Date
    beginDate = ephem.Date(unix_to_utcjd(idf.getInfo('tStart')) - DJD_OFFSET)

    # File summary
    print "Filename: %s" % args[0]
    print "Date of First Frame: %s" % str(beginDate)
    print "Beam: %i" % beam
    print "Tune/Pols: %ii" % beampols
    print "Sample Rate: %i Hz" % srate
    print "Frames: %i (%.3f s)" % (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate)
    print "---"
Code Example #5
File: test_imaging.py Project: lwa-project/lsl
    def test_CorrelatedDataMS_compressed(self):
        """Test the utils.CorrelatedDataMS class on a compressed file."""

        # Get some data
        data = self._init_data()

        # Filename and time
        testTime, testFile = time.time(), os.path.join(self.testPath,
                                                       'ms-test-MultiIF.ms')

        # Start the file
        fits = Ms(testFile, ref_time=testTime, overwrite=True)
        fits.set_stokes(['xx'])
        fits.set_frequency(data['freq'])
        fits.set_frequency(data['freq'] + 30e6)
        fits.set_geometry(data['site'], data['antennas'])
        fits.add_data_set(
            astro.utcjd_to_taimjd(astro.unix_to_utcjd(testTime)), 6.0,
            data['bl'],
            numpy.concatenate([data['vis'], 10 * data['vis']], axis=1))
        fits.write()
        fits.close()

        # Compress
        compressedFile = os.path.splitext(testFile)[0] + '.tar.gz'
        cmd = [
            'tar', 'czf', compressedFile, '-C', self.testPath,
            os.path.basename(testFile)
        ]
        subprocess.check_call(cmd)

        # Open the measurement set
        ms = utils.CorrelatedDataMS(compressedFile)
        self.assertEqual(ms.freq.size, 2 * data['freq'].size)
        ds = ms.get_data_set(1, include_auto=True)

        numpy.testing.assert_allclose(ds.XX.data[:, :data['freq'].size],
                                      data['vis'])
        numpy.testing.assert_allclose(ds.XX.data[:, data['freq'].size:],
                                      10 * data['vis'])

        ms.close()
Code Example #6
def main(args):
    reference = args.ref_source
    filenames = args.filename
    
    #
    # Gather the station meta-data from its various sources
    #
    dataDict = numpy.load(filenames[0])
    ssmifContents = dataDict['ssmifContents']
    if ssmifContents.shape == ():
        site = lwa1
    else:
        fd, tempSSMIF = tempfile.mkstemp(suffix='.txt', prefix='ssmif-')
        fh = os.fdopen(fd, 'w')
        for line in ssmifContents:
            fh.write('%s\n' % line)
        fh.close()
        
        site = parse_ssmif(tempSSMIF)
        os.unlink(tempSSMIF)
    print(site.name)
    observer = site.get_observer()
    antennas = site.antennas
    nAnts = len(antennas)
    
    #
    # Find the reference source
    #
    srcs = [ephem.Sun(),]
    for line in _srcs:
        srcs.append( ephem.readdb(line) )
        
    refSrc = None
    for i in xrange(len(srcs)):
        if srcs[i].name == reference:
            refSrc = srcs[i]
            
    if refSrc is None:
        print("Cannot find reference source '%s' in source list, aborting." % reference)
        sys.exit(1)
        
    #
    # Parse the input files
    #
    data = []
    time = []
    freq = []
    oldRef = None
    oldMD5 = None
    maxTime = -1
    for filename in filenames:
        dataDict = numpy.load(filename)
        
        ref_ant = dataDict['ref'].item()
        refX   = dataDict['refX'].item()
        refY   = dataDict['refY'].item()
        tInt = dataDict['tInt'].item()
        
        times = dataDict['times']
        phase = dataDict['simpleVis']
        
        central_freq = dataDict['central_freq'].item()
        
        ssmifContents = dataDict['ssmifContents']
        
        beginDate = datetime.utcfromtimestamp(times[0])
        observer.date = beginDate.strftime("%Y/%m/%d %H:%M:%S")
        
        # Make sure we aren't mixing reference antennas
        if oldRef is None:
            oldRef = ref_ant
        if ref_ant != oldRef:
            raise RuntimeError("Dataset has different reference antennas than previous (%i != %i)" % (ref_ant, oldRef))
            
        # Make sure we aren't mixing SSMIFs
        ssmifMD5 = md5sum(ssmifContents)
        if oldMD5 is None:
            oldMD5 = ssmifMD5
        if ssmifMD5 != oldMD5:
            raise RuntimeError("Dataset has different SSMIF than previous (%s != %s)" % (ssmifMD5, oldMD5))
            
        print("Central Frequency: %.3f Hz" % central_freq)
        print("Start date/time: %s" % beginDate.strftime("%Y/%m/%d %H:%M:%S"))
        print("Integration Time: %.3f s" % tInt)
        print("Number of time samples: %i (%.3f s)" % (phase.shape[0], phase.shape[0]*tInt))
        
        allRates = {}
        for src in srcs:
            src.compute(observer)
            if src.alt > 0:
                fRate = getFringeRate(antennas[0], antennas[refX], observer, src, central_freq)
                allRates[src.name] = fRate
        # Calculate the fringe rates of all sources - for display purposes only
        print("Starting Fringe Rates:")
        for name in allRates.keys():
            fRate = allRates[name]
            print(" %-4s: %+6.3f mHz" % (name, fRate*1e3))
            
        freq.append( central_freq )
        time.append( numpy.array([unix_to_utcjd(t) for t in times]) )
        data.append( phase )
        
        ## Track the maximum length of the `time` entries so that we can
        ## pad them all up to the same size later
        if time[-1].size > maxTime:
            maxTime = time[-1].size
            
    # Pad with NaNs to the same length
    for i in xrange(len(filenames)):
        nTimes = time[i].size
        
        if nTimes < maxTime:
            ## Pad 'time'
            newTime = numpy.zeros(maxTime, dtype=time[i].dtype)
            newTime += numpy.nan
            newTime[0:nTimes] = time[i][:]
            time[i] = newTime
            
            ## Pad 'data'
            newData = numpy.zeros((maxTime, data[i].shape[1]), dtype=data[i].dtype)
            newData += numpy.nan
            newData[0:nTimes,:] = data[i][:,:]
            data[i] = newData
            
    # Convert to 2-D and 3-D numpy arrays
    freq = numpy.array(freq)
    time = numpy.array(time)
    data = numpy.array(data)
    
    #
    # Sort the data by frequency
    #
    order = numpy.argsort(freq)
    freq = numpy.take(freq, order)
    time = numpy.take(time, order, axis=0)
    data = numpy.take(data, order, axis=0)
    
    # 
    # Find the fringe stopping averaging times
    #
    ls = {}
    for fStart in xrange(20, 90, 5):
        fStop = fStart + 5
        l = numpy.where( (freq >= fStart*1e6) & (freq < fStop*1e6) )[0]
        if len(l) > 0:
            ls[fStart] = l
            
    ms = {}
    for fStart in ls.keys():
        m = 1e6
        for l in ls[fStart]:
            good = numpy.where( numpy.isfinite(time[l,:]) == 1 )[0]
            if len(good) < m:
                m = len(good)
        ms[fStart] = m
        
    print("Minimum fringe stopping times:")
    for fStart in sorted(ls.keys()):
        fStop = fStart + 5
        m = ms[fStart]
        print("  >=%i Mhz and <%i MHz: %.3f s" % (fStart, fStop, m*tInt,))
        
    #
    # Report on progress and data coverage
    #
    nFreq = len(freq)
    
    print("Reference stand #%i (X: %i, Y: %i)" % (ref_ant, refX, refY))
    print("-> X: %s" % str(antennas[refX]))
    print("-> Y: %s" % str(antennas[refY]))
    
    print("Using a set of %i frequencies" % nFreq)
    print("->", freq/1e6)
    
    #
    # Compute source positions/fringe stop and remove the source
    #
    print("Fringe stopping on '%s':" % refSrc.name)
    pbar = ProgressBar(max=freq.size*520)
    
    for i in xrange(freq.size):
        fq = freq[i]
        
        for j in xrange(data.shape[2]):
            # Compute the times in seconds relative to the beginning
            times  = time[i,:] - time[i,0]
            times *= 24.0
            times *= 3600.0
            
            # Compute the fringe rates across all time
            fRate = [None,]*data.shape[1]
            for k in xrange(data.shape[1]):
                jd = time[i,k]
                
                try:
                    currDate = datetime.utcfromtimestamp(utcjd_to_unix(jd))
                except ValueError:
                    pass
                observer.date = currDate.strftime("%Y/%m/%d %H:%M:%S")
                refSrc.compute(observer)
        
                if j % 2 == 0:
                    fRate[k] = getFringeRate(antennas[j], antennas[refX], observer, refSrc, fq)
                else:
                    fRate[k] = getFringeRate(antennas[j], antennas[refY], observer, refSrc, fq)
                    
            # Create the basis rate and the residual rates
            baseRate = fRate[0]
            residRate = numpy.array(fRate) - baseRate
        
            # Fringe stop to move the source of interest to the DC component
            data[i,:,j] *= numpy.exp(-2j*numpy.pi* baseRate*(times - times[0]))
            data[i,:,j] *= numpy.exp(-2j*numpy.pi*residRate*(times - times[0]))
            
            # Calculate the geometric delay term across all time
            gDelay = [None,]*data.shape[1]
            for k in xrange(data.shape[1]):
                jd = time[i,k]
                
                try:
                    currDate = datetime.utcfromtimestamp(utcjd_to_unix(jd))
                except ValueError:
                    pass
                observer.date = currDate.strftime("%Y/%m/%d %H:%M:%S")
                refSrc.compute(observer)
                
                az = refSrc.az
                el = refSrc.alt
                if j % 2 == 0:
                    gDelay[k] = getGeoDelay(antennas[j], antennas[refX], az, el, Degrees=False)
                else:
                    gDelay[k] = getGeoDelay(antennas[j], antennas[refY], az, el, Degrees=False)
                    
            # Create the basis delay and the residual delays
            baseDelay = gDelay[0]
            residDelay = numpy.array(gDelay) - baseDelay
            
            # Remove the array geometry
            data[i,:,j] *= numpy.exp(-2j*numpy.pi*fq* baseDelay)
            data[i,:,j] *= numpy.exp(-2j*numpy.pi*fq*residDelay)
            
            pbar.inc()
            sys.stdout.write("%s\r" % pbar.show())
            sys.stdout.flush()
    sys.stdout.write('\n')
    
    # Average down to remove other sources/the correlated sky
    print("Input (pre-averaging) data shapes:")
    print("  time:", time.shape)
    print("  data:", data.shape)
    time = time[:,0]
    
    data2 = numpy.zeros((data.shape[0], data.shape[2]), dtype=data.dtype)
    for j in xrange(data2.shape[1]):
        for fStart in ls.keys():
            l = ls[fStart]
            m = ms[fStart]
            data2[l,j] = data[l,:m,j].mean(axis=1)
    data = data2
    print("Output (post-averaging) data shapes:")
    print("  time:", time.shape)
    print("  data:", data.shape)

    #
    # Save
    #
    outname = args.output
    outname, ext = os.path.splitext(outname)
    outname = "%s-ref%03i%s" % (outname, ref_ant, ext)
    numpy.savez(outname, ref_ant=ref_ant, refX=refX, refY=refY, freq=freq, time=time, data=data, ssmifContents=ssmifContents)
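The heart of the fringe-stopping loop above is a complex counter-rotation at the computed fringe rate, which parks the reference source at the DC component. A self-contained sketch on synthetic data (the rate and integration time are illustrative):

import numpy

# A synthetic single-baseline visibility fringing at 5 mHz
tInt = 1.0                                    # s, illustrative
times = numpy.arange(1000) * tInt             # s
fRate = 5e-3                                  # Hz, illustrative
vis = numpy.exp(2j * numpy.pi * fRate * times)

# Fringe stop: multiply by the conjugate rotation so the source sits at DC
stopped = vis * numpy.exp(-2j * numpy.pi * fRate * times)
print(numpy.allclose(stopped, 1.0))           # -> True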
Code Example #7
def main(args):
    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            station = metabundle.get_station(args.metadata, apply_sdm=True)
    else:
        station = stations.lwana
    antennas = station.antennas

    # Length of the FFT
    LFFT = args.fft_length

    # Make sure that the file chunk size is an integer multiple of the
    # FFT length so that no data gets dropped
    maxFrames = int((30000 * 20) / float(LFFT)) * LFFT
    # It seems like that would be a good idea, however...  TBW data comes one
    # capture at a time so doing something like this actually truncates data
    # from the last set of stands for the first integration.  So, we really
    # should stick with
    maxFrames = (30000 * 20)

    idf = LWA1DataFile(args.filename)

    nFrames = idf.get_info('nframe')
    srate = idf.get_info('sample_rate')
    dataBits = idf.get_info('data_bits')
    # The number of ant/pols in the file is hard coded because I cannot figure out
    # a way to get this number in a systematic fashion
    antpols = len(antennas)
    nChunks = int(math.ceil(1.0 * nFrames / maxFrames))
    if dataBits == 12:
        nSamples = 400
    else:
        nSamples = 1200

    # Read in the first frame and get the date/time of the first sample
    # of the frame.  This is needed to get the list of stands.
    beginDate = ephem.Date(
        unix_to_utcjd(idf.get_info('start_time')) - DJD_OFFSET)

    # File summary
    print("Filename: %s" % args.filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Ant/Pols: %i" % antpols)
    print("Sample Length: %i-bit" % dataBits)
    print("Frames: %i" % nFrames)
    print("Chunks: %i" % nChunks)
    print("===")

    # Setup the window function to use
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window

    # Master loop over all of the file chunks
    nChunks = 1
    masterSpectra = numpy.zeros((nChunks, antpols, LFFT))
    masterWeight = numpy.zeros((nChunks, antpols, LFFT))

    readT, t, data = idf.read(0.061)

    # Calculate the spectra for this block of data and then weight the results by
    # the total number of frames read.  This is needed to keep the averages correct.
    # NB:  The weighting is the same for the x and y polarizations because of how
    # the data are packed in TBW
    freq, tempSpec = fxc.SpecMaster(data,
                                    LFFT=LFFT,
                                    window=window,
                                    pfb=args.pfb,
                                    verbose=args.verbose)
    for stand in range(masterSpectra.shape[1]):
        masterSpectra[0, stand, :] = tempSpec[stand, :]
        masterWeight[0, stand, :] = int(readT * srate / LFFT)

    # We don't really need the data array anymore, so delete it
    del (data)

    # Apply the cable loss corrections, if requested
    if args.gain_correct:
        for s in range(masterSpectra.shape[1]):
            currGain = antennas[s].cable.gain(freq)
            for c in range(masterSpectra.shape[0]):
                masterSpectra[c, s, :] /= currGain

    # Now that we have read through all of the chunks, perform the final averaging by
    # dividing by all of the chunks
    spec = masterSpectra.mean(axis=0)

    # The plots:  This is setup for the current configuration of 20 antpols
    if args.gain_correct & args.stack:
        # Stacked spectra - only if cable loss corrections are to be applied
        colors = [
            'blue', 'green', 'red', 'cyan', 'magenta', 'black', 'purple',
            'salmon', 'olive', 'maroon', 'saddlebrown', 'yellowgreen', 'teal',
            'steelblue', 'seagreen', 'slategray', 'mediumorchid', 'lime',
            'dodgerblue', 'darkorange'
        ]

        for f in range(int(numpy.ceil(antpols / 20.))):
            fig = plt.figure()
            ax1 = fig.add_subplot(1, 1, 1)
            for i in range(f * 20, f * 20 + 20):
                currSpectra = numpy.squeeze(numpy.log10(spec[i, :]) * 10.0)
                ax1.plot(freq / 1e6,
                         currSpectra,
                         label='%i,%i' %
                         (antennas[i].stand.id, antennas[i].pol),
                         color=colors[i % 20])

            ax1.set_xlabel('Frequency [MHz]')
            ax1.set_ylabel('P.S.D. [dB/RBW]')
            ax1.set_xlim([20, 88])
            #ax1.set_ylim([10,90])
            leg = ax1.legend(loc=0, ncol=3)
            for l in leg.get_lines():
                l.set_linewidth(1.7)  # the legend line width
    else:
        for f in range(int(numpy.ceil(antpols / 20))):
            # Normal plotting
            fig = plt.figure()
            figsY = 4
            figsX = 5
            fig.subplots_adjust(left=0.06,
                                bottom=0.06,
                                right=0.94,
                                top=0.94,
                                wspace=0.20,
                                hspace=0.50)
            for i in range(f * 20, f * 20 + 20):
                ax = fig.add_subplot(figsX, figsY, (i % 20) + 1)
                try:
                    currSpectra = numpy.squeeze(numpy.log10(spec[i, :]) * 10.0)
                except IndexError:
                    break
                ax.plot(freq / 1e6,
                        currSpectra,
                        label='Stand: %i, Pol: %i (Dig: %i)' %
                        (antennas[i].stand.id, antennas[i].pol,
                         antennas[i].digitizer))

                # If there is more than one chunk, plot the difference between the global
                # average and each chunk
                if nChunks > 1:
                    for j in range(nChunks):
                        # Some files are padded by zeros at the end and, thus, carry no
                        # weight in the average spectra.  Skip over those.
                        if masterWeight[j, i, :].sum() == 0:
                            continue

                        # Calculate the difference between the spectra and plot
                        subspectra = numpy.squeeze(
                            numpy.log10(masterSpectra[j, i, :]) * 10.0)
                        diff = subspectra - currSpectra
                        ax.plot(freq / 1e6, diff)

                ax.set_title(
                    'Stand: %i (%i); Dig: %i [%i]' %
                    (antennas[i].stand.id, antennas[i].pol,
                     antennas[i].digitizer, antennas[i].combined_status))
                ax.set_xlabel('Frequency [MHz]')
                ax.set_ylabel('P.S.D. [dB/RBW]')
                ax.set_xlim([10, 90])
                ax.set_ylim([10, 80])

            # Save spectra image if requested
            if args.output is not None:
                base, ext = os.path.splitext(args.output)
                outFigure = "%s-%02i%s" % (base, f + 1, ext)
                fig.savefig(outFigure)

        plt.draw()

    print("RBW: %.1f Hz" % (freq[1] - freq[0]))
    plt.show()
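The masterWeight bookkeeping above records how many FFT windows contribute to each chunk; when chunks are unequal, a weighted mean (rather than a plain mean of chunk means) keeps the final average honest. A toy sketch of that idea with made-up numbers:

import numpy

# Per-chunk mean power for one channel, and FFT windows per chunk
spec = numpy.array([[10.0], [20.0]])     # two chunks, one channel
weight = numpy.array([[300.0], [100.0]])

plain = spec.mean(axis=0)                                    # -> [15.0]
weighted = (spec * weight).sum(axis=0) / weight.sum(axis=0)  # -> [12.5]
print(plain, weighted)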
Code Example #8
File: superCorrelator.py Project: lwa-project/eLWA
def main(args):
    # Select the multirate module to use
    if args.jit:
        from jit import multirate
    else:
        import multirate

    # Build up the station
    site = stations.lwa1
    ## Updated 2018/3/8 with solutions from the 2018 Feb 28 eLWA
    ## run.  See createConfigFile.py for details.
    site.lat = 34.068956328 * numpy.pi / 180
    site.long = -107.628103026 * numpy.pi / 180
    site.elev = 2132.96837346
    observer = site.get_observer()

    # Parse the correlator configuration
    config, refSrc, filenames, metanames, foffsets, readers, antennas = read_correlator_configuration(
        args.filename)
    try:
        args.fft_length = config['channels']
        args.dump_time = config['inttime']
        print(
            "NOTE: Set FFT length to %i and dump time to %.3f s per user defined configuration"
            % (args.fft_length, args.dump_time))
    except (TypeError, KeyError):
        pass
    if args.duration == 0.0:
        args.duration = refSrc.duration
    args.duration = min([args.duration, refSrc.duration])

    # Length of the FFT
    LFFT = args.fft_length

    # Get the raw configuration
    fh = open(args.filename, 'r')
    rawConfig = fh.readlines()
    fh.close()

    # Antenna report
    print("Antennas:")
    for ant in antennas:
        print("  Antenna %i: Stand %i, Pol. %i (%.3f us offset)" %
              (ant.id, ant.stand.id, ant.pol, ant.cable.clock_offset * 1e6))

    # Open and align the files
    fh = []
    nFramesFile = []
    srate = []
    beams = []
    tunepols = []
    beampols = []
    tStart = []
    cFreqs = []
    bitDepths = []
    buffers = []
    grossOffsets = []
    for i, (filename, metaname,
            foffset) in enumerate(zip(filenames, metanames, foffsets)):
        fh.append(open(filename, "rb"))

        go = numpy.int32(antennas[2 * i].cable.clock_offset)
        antennas[2 * i + 0].cable.clock_offset -= go
        antennas[2 * i + 1].cable.clock_offset -= go
        grossOffsets.append(go)
        if go != 0:
            print("Correcting time tags for gross offset of %i s" %
                  grossOffsets[i])
            print("  Antenna clock offsets are now at %.3f us, %.3f us" %
                  (antennas[2 * i + 0].cable.clock_offset * 1e6,
                   antennas[2 * i + 1].cable.clock_offset * 1e6))

        if readers[i] is vdif:
            header = vdif.read_guppi_header(fh[i])
            readers[i].FRAME_SIZE = readers[i].get_frame_size(fh[i])

        nFramesFile.append(os.path.getsize(filename) // readers[i].FRAME_SIZE)
        if readers[i] is vdif:
            junkFrame = readers[i].read_frame(fh[i],
                                              central_freq=header['OBSFREQ'],
                                              sample_rate=header['OBSBW'] *
                                              2.0)
            readers[i].DATA_LENGTH = junkFrame.payload.data.size
            beam, pol = junkFrame.id
        elif readers[i] is drx:
            junkFrame = readers[i].read_frame(fh[i])
            while junkFrame.header.decimation == 0:
                junkFrame = readers[i].read_frame(fh[i])
            readers[i].DATA_LENGTH = junkFrame.payload.data.size
            beam, tune, pol = junkFrame.id
        fh[i].seek(-readers[i].FRAME_SIZE, 1)

        beams.append(beam)
        srate.append(junkFrame.sample_rate)

        if readers[i] is vdif:
            tunepols.append(readers[i].get_thread_count(fh[i]))
            beampols.append(tunepols[i])
        elif readers[i] is drx:
            beampols.append(max(readers[i].get_frames_per_obs(fh[i])))

        skip = args.skip + foffset
        if skip != 0:
            print("Skipping forward %.3f s" % skip)
            print("-> %.6f (%s)" % (junkFrame.time, junkFrame.time.datetime))

            offset = int(skip * srate[i] / readers[i].DATA_LENGTH)
            fh[i].seek(beampols[i] * readers[i].FRAME_SIZE * offset, 1)
            if readers[i] is vdif:
                junkFrame = readers[i].read_frame(
                    fh[i],
                    central_freq=header['OBSFREQ'],
                    sample_rate=header['OBSBW'] * 2.0)
            else:
                junkFrame = readers[i].read_frame(fh[i])
            fh[i].seek(-readers[i].FRAME_SIZE, 1)

            print("-> %.6f (%s)" % (junkFrame.time, junkFrame.time.datetime))

        tStart.append(junkFrame.time + grossOffsets[i])

        # Get the frequencies
        cFreq1 = 0.0
        cFreq2 = 0.0
        for j in xrange(64):
            if readers[i] is vdif:
                junkFrame = readers[i].read_frame(
                    fh[i],
                    central_freq=header['OBSFREQ'],
                    sample_rate=header['OBSBW'] * 2.0)
                s, p = junkFrame.id
                if p == 0:
                    cFreq1 = junkFrame.central_freq
                else:
                    pass
            elif readers[i] is drx:
                junkFrame = readers[i].read_frame(fh[i])
                b, t, p = junkFrame.id
                if p == 0:
                    if t == 1:
                        cFreq1 = junkFrame.central_freq
                    else:
                        cFreq2 = junkFrame.central_freq
                else:
                    pass
        fh[i].seek(-64 * readers[i].FRAME_SIZE, 1)
        cFreqs.append([cFreq1, cFreq2])
        try:
            bitDepths.append(junkFrame.header.bits_per_sample)
        except AttributeError:
            bitDepths.append(8)

        # Setup the frame buffers
        if readers[i] is vdif:
            buffers.append(VDIFFrameBuffer(threads=[0, 1]))
        elif readers[i] is drx:
            buffers.append(
                DRXFrameBuffer(beams=[
                    beam,
                ],
                               tunes=[1, 2],
                               pols=[0, 1],
                               nsegments=16))
    for i in xrange(len(filenames)):
        # Align the files as close as possible by the time tags
        if readers[i] is vdif:
            timetags = []
            for k in xrange(16):
                junkFrame = readers[i].read_frame(fh[i])
                timetags.append(junkFrame.header.frame_in_second)
            fh[i].seek(-16 * readers[i].FRAME_SIZE, 1)

            j = 0
            while (timetags[j + 0] != timetags[j + 1]):
                j += 1
                fh[i].seek(readers[i].FRAME_SIZE, 1)

            nFramesFile[i] -= j

        elif readers[i] is drx:
            pass

        # Align the files as close as possible by the time tags
        if readers[i] is vdif:
            junkFrame = readers[i].read_frame(fh[i],
                                              central_freq=header['OBSFREQ'],
                                              sample_rate=header['OBSBW'] *
                                              2.0)
        else:
            junkFrame = readers[i].read_frame(fh[i])
        fh[i].seek(-readers[i].FRAME_SIZE, 1)

        j = 0
        while junkFrame.time + grossOffsets[i] < max(tStart):
            if readers[i] is vdif:
                for k in xrange(beampols[i]):
                    try:
                        junkFrame = readers[i].read_frame(
                            fh[i],
                            central_freq=header['OBSFREQ'],
                            sample_rate=header['OBSBW'] * 2.0)
                    except errors.SyncError:
                        print("Error - VDIF @ %i" % (i, ))
                        fh[i].seek(readers[i].FRAME_SIZE, 1)
                        continue
            else:
                for k in xrange(beampols[i]):
                    junkFrame = readers[i].read_frame(fh[i])
            j += beampols[i]

        jTime = j * readers[i].DATA_LENGTH / srate[i] / beampols[i]
        print("Shifted beam %i data by %i frames (%.4f s)" %
              (beams[i], j, jTime))

    # Set integration time
    tRead = 1.0
    nFrames = int(round(tRead * srate[-1] / readers[-1].DATA_LENGTH))
    tRead = nFrames * readers[-1].DATA_LENGTH / srate[-1]

    nFramesV = tRead * srate[0] / readers[0].DATA_LENGTH
    nFramesD = nFrames
    while nFramesV != int(nFramesV):
        nFrames += 1
        tRead = nFrames * readers[-1].DATA_LENGTH / srate[-1]

        nFramesV = tRead * srate[0] / readers[0].DATA_LENGTH
        nFramesD = nFrames
    nFramesV = int(nFramesV)

    # Read in some data
    tFileV = nFramesFile[0] / beampols[0] * readers[0].DATA_LENGTH / srate[0]
    tFileD = nFramesFile[-1] / beampols[-1] * readers[-1].DATA_LENGTH / srate[
        -1]
    tFile = min([tFileV, tFileD])
    if args.duration > 0.0:
        duration = args.duration
        duration = tRead * int(round(duration / tRead))
        tFile = duration

    # Date
    beginMJDs = []
    beginDates = []
    for i in xrange(len(filenames)):
        if readers[i] is vdif:
            junkFrame = readers[i].read_frame(fh[i],
                                              central_freq=header['OBSFREQ'],
                                              sample_rate=header['OBSBW'] *
                                              2.0)
        else:
            junkFrame = readers[i].read_frame(fh[i])
        fh[i].seek(-readers[i].FRAME_SIZE, 1)

        beginMJDs.append(junkFrame.time.mjd)
        beginDates.append(junkFrame.time.datetime)

    # Set the output base filename
    if args.tag is None:
        outbase = os.path.basename(filenames[0])
        outbase = os.path.splitext(outbase)[0][:8]
    else:
        outbase = args.tag

    # Report
    for i in xrange(len(filenames)):
        print("Filename: %s" % os.path.basename(filenames[i]))
        print("  Type/Reader: %s" % readers[i].__name__)
        print("  Date of First Frame: %s" % beginDates[i])
        print("  Sample Rate: %i Hz" % srate[i])
        print("  Tuning 1: %.3f Hz" % cFreqs[i][0])
        print("  Tuning 2: %.3f Hz" % cFreqs[i][1])
        print("  Bit Depth: %i" % bitDepths[i])
    print("  ===")
    print("  Phase Center:")
    print("    Name: %s" % refSrc.name)
    print("    RA: %s" % refSrc._ra)
    print("    Dec: %s" % refSrc._dec)
    print("  ===")
    print("  Data Read Time: %.3f s" % tRead)
    print("  Data Reads in File: %i" % int(tFile / tRead))
    print(" ")

    nVDIFInputs = sum([1 for reader in readers if reader is vdif])
    nDRXInputs = sum([1 for reader in readers if reader is drx])
    print("Processing %i VDIF and %i DRX input streams" %
          (nVDIFInputs, nDRXInputs))
    print(" ")

    nFramesV = int(round(tRead * srate[0] / readers[0].DATA_LENGTH))
    framesPerSecondV = int(srate[0] / readers[0].DATA_LENGTH)
    nFramesB = nFrames
    framesPerSecondB = srate[-1] / readers[-1].DATA_LENGTH
    if nVDIFInputs:
        print("VDIF Frames/s: %.6f" % framesPerSecondV)
        print("VDIF Frames/Integration: %i" % nFramesV)
    if nDRXInputs:
        print("DRX Frames/s: %.6f" % framesPerSecondB)
        print("DRX Frames/Integration: %i" % nFramesB)
    if nVDIFInputs * nDRXInputs:
        print("Sample Count Ratio: %.6f" %
              (1.0 * (nFramesV * readers[0].DATA_LENGTH) /
               (nFramesB * 4096), ))
        print("Sample Rate Ratio: %.6f" % (srate[0] / srate[-1], ))
    print(" ")

    vdifLFFT = LFFT * (2 if nVDIFInputs else 1
                       )  # Fix to deal with LWA-only correlations
    drxLFFT = vdifLFFT * srate[-1] / srate[0]
    while drxLFFT != int(drxLFFT):
        vdifLFFT += 1
        drxLFFT = vdifLFFT * srate[-1] / srate[0]
    vdifLFFT = vdifLFFT // (2 if nVDIFInputs else 1
                            )  # Fix to deal with LWA-only correlations
    drxLFFT = int(drxLFFT)
    if nVDIFInputs:
        print("VDIF Transform Size: %i" % vdifLFFT)
    if nDRXInputs:
        print("DRX Transform Size: %i" % drxLFFT)
    print(" ")

    vdifPivot = 1
    if abs(cFreqs[0][0] - cFreqs[-1][1]) < abs(cFreqs[0][0] - cFreqs[-1][0]):
        vdifPivot = 2
    if nVDIFInputs == 0 and args.which != 0:
        vdifPivot = args.which
    if nVDIFInputs * nDRXInputs:
        print("VDIF appears to correspond to tuning #%i in DRX" % vdifPivot)
    elif nDRXInputs:
        print("Correlating DRX tuning #%i" % vdifPivot)
    print(" ")

    nChunks = int(tFile / tRead)
    tSub = args.subint_time
    tSub = tRead / int(round(tRead / tSub))
    tDump = args.dump_time
    tDump = tSub * int(round(tDump / tSub))
    nDump = int(tDump / tSub)
    tDump = nDump * tSub
    nInt = int((nChunks * tRead) / tDump)
    print("Sub-integration time is: %.3f s" % tSub)
    print("Integration (dump) time is: %.3f s" % tDump)
    print(" ")

    if args.gpu is not None:
        try:
            from jit import xcupy
            xcupy.select_gpu(args.gpu)
            xcupy.set_memory_usage_limit(1.5 * 1024**3)
            multirate.xengine = xcupy.xengine
            multirate.xengine_full = xcupy.xengine_full
            print(
                "Loaded GPU X-engine support on GPU #%i with %.2f GB of device memory"
                % (args.gpu, xcupy.get_memory_usage_limit() / 1024.0**3))
        except ImportError as e:
            pass

    subIntTimes = []
    subIntCount = 0
    fileCount = 0
    wallStart = time.time()
    done = False
    oldStartRel = [0 for i in xrange(nVDIFInputs + nDRXInputs)]
    username = getpass.getuser()
    for i in xrange(nChunks):
        wallTime = time.time()

        tStart = []
        tStartB = []

        vdifRef = [0 for j in xrange(nVDIFInputs * 2)]
        drxRef = [0 for j in xrange(nDRXInputs * 2)]

        # Read in the data
        with InterProcessLock('/dev/shm/sc-reader-%s' % username) as lock:
            try:
                dataV *= 0.0
                dataD *= 0.0
            except NameError:
                dataV = numpy.zeros(
                    (len(vdifRef), readers[0].DATA_LENGTH * nFramesV),
                    dtype=numpy.float32)
                dataD = numpy.zeros(
                    (len(drxRef), readers[-1].DATA_LENGTH * nFramesD),
                    dtype=numpy.complex64)
            for j, f in enumerate(fh):
                if readers[j] is vdif:
                    ## VDIF
                    k = 0
                    while k < beampols[j] * nFramesV:
                        try:
                            cFrame = readers[j].read_frame(
                                f,
                                central_freq=header['OBSFREQ'],
                                sample_rate=header['OBSBW'] * 2.0)
                            buffers[j].append(cFrame)
                        except errors.SyncError:
                            print("Error - VDIF @ %i, %i" % (i, j))
                            f.seek(readers[j].FRAME_SIZE, 1)
                            continue
                        except errors.EOFError:
                            done = True
                            break

                        frames = buffers[j].get()
                        if frames is None:
                            continue

                        for cFrame in frames:
                            std, pol = cFrame.id
                            sid = 2 * j + pol

                            if k == 0:
                                tStart.append(cFrame.time)
                                tStart[-1] = tStart[-1] + grossOffsets[j]
                                tStartB.append(get_better_time(cFrame))
                                tStartB[-1][
                                    0] = tStart[-1][0] + grossOffsets[j]

                                for p in (0, 1):
                                    psid = 2 * j + p
                                    vdifRef[
                                        psid] = cFrame.header.seconds_from_epoch * framesPerSecondV + cFrame.header.frame_in_second

                            count = cFrame.header.seconds_from_epoch * framesPerSecondV + cFrame.header.frame_in_second
                            count -= vdifRef[sid]
                            dataV[sid,
                                  count * readers[j].DATA_LENGTH:(count + 1) *
                                  readers[j].DATA_LENGTH] = cFrame.payload.data
                            k += 1

                elif readers[j] is drx:
                    ## DRX
                    k = 0
                    while k < beampols[j] * nFramesD:
                        try:
                            cFrame = readers[j].read_frame(f)
                            buffers[j].append(cFrame)
                        except errors.SyncError:
                            print("Error - DRX @ %i, %i" % (i, j))
                            continue
                        except errors.EOFError:
                            done = True
                            break

                        frames = buffers[j].get()
                        if frames is None:
                            continue

                        for cFrame in frames:
                            beam, tune, pol = cFrame.id
                            if tune != vdifPivot:
                                continue
                            bid = 2 * (j - nVDIFInputs) + pol

                            if k == 0:
                                tStart.append(cFrame.time)
                                tStart[-1] = tStart[-1] + grossOffsets[j]
                                tStartB.append(get_better_time(cFrame))
                                tStartB[-1][
                                    0] = tStart[-1][0] + grossOffsets[j]

                                for p in (0, 1):
                                    pbid = 2 * (j - nVDIFInputs) + p
                                    drxRef[pbid] = cFrame.payload.timetag

                            count = cFrame.payload.timetag
                            count -= drxRef[bid]
                            count //= (4096 * int(196e6 / srate[-1]))
                            ### Fix for some LWA-SV files that seem to cause the current LSL
                            ### ring buffer problems
                            if count < 0:
                                continue
                            try:
                                dataD[bid, count *
                                      readers[j].DATA_LENGTH:(count + 1) *
                                      readers[j].
                                      DATA_LENGTH] = cFrame.payload.data
                                k += beampols[j] // 2
                            except ValueError:
                                k = beampols[j] * nFramesD
                                break

        print('RR - Read finished in %.3f s for %.3fs of data' %
              (time.time() - wallTime, tRead))

        # Scale the DRX data to normalize the 4-bit samples
        if nDRXInputs > 0:
            dataD /= 7.0

        # Time tag alignment (sample based)
        ## Initial time tags for each stream and the relative start time for each stream
        if args.verbose:
            ### TT = time tag
            print('TT - Start', tStartB)
        tStartMin = min([sec for sec, frac in tStartB])
        tStartRel = [(sec - tStartMin) + frac for sec, frac in tStartB]

        ## Sample offsets between the streams
        offsets = []
        for j in xrange(nVDIFInputs + nDRXInputs):
            offsets.append(
                int(round(nsround(max(tStartRel) - tStartRel[j]) * srate[j])))
        if args.verbose:
            print('TT - Offsets', offsets)

        ## Roll the data to apply the sample offsets and then trim the ends to get rid
        ## of the rolled part
        for j, offset in enumerate(offsets):
            if j < nVDIFInputs:
                if offset != 0:
                    idx0 = 2 * j + 0
                    idx1 = 2 * j + 1
                    tStart[j] += offset / (srate[j])
                    tStartB[j][1] += offset / (srate[j])
                    dataV[idx0, :] = numpy.roll(dataV[idx0, :], -offset)
                    dataV[idx1, :] = numpy.roll(dataV[idx1, :], -offset)

            else:
                if offset != 0:
                    idx0 = 2 * (j - nVDIFInputs) + 0
                    idx1 = 2 * (j - nVDIFInputs) + 1
                    tStart[j] += offset / (srate[j])
                    tStartB[j][1] += offset / (srate[j])
                    dataD[idx0, :] = numpy.roll(dataD[idx0, :], -offset)
                    dataD[idx1, :] = numpy.roll(dataD[idx1, :], -offset)

        vdifOffsets = offsets[:nVDIFInputs]
        drxOffsets = offsets[nVDIFInputs:]

        ## Apply the corrections to the original time tags and report on the sub-sample
        ## residuals
        if args.verbose:
            print('TT - Adjusted', tStartB)
        tStartMinSec = min([sec for sec, frac in tStartB])
        tStartMinFrac = min([frac for sec, frac in tStartB])
        tStartRel = [(sec - tStartMinSec) + (frac - tStartMinFrac)
                     for sec, frac in tStartB]
        if args.verbose:
            print('TT - Residual',
                  ["%.1f ns" % (r * 1e9, ) for r in tStartRel])
        for k in xrange(len(tStartRel)):
            antennas[2 * k +
                     0].cable.clock_offset -= tStartRel[k] - oldStartRel[k]
            antennas[2 * k +
                     1].cable.clock_offset -= tStartRel[k] - oldStartRel[k]
        oldStartRel = tStartRel

        # Setup everything we need to loop through the sub-integrations
        nSub = int(tRead / tSub)
        nSampV = int(srate[0] * tSub)
        nSampD = int(srate[-1] * tSub)

        #tV = i*tRead + numpy.arange(dataV.shape[1]-max(vdifOffsets), dtype=numpy.float64)/srate[ 0]
        if nDRXInputs > 0:
            tD = i * tRead + numpy.arange(dataD.shape[1] - max(drxOffsets),
                                          dtype=numpy.float64) / srate[-1]

        # Loop over sub-integrations
        for j in xrange(nSub):
            ## Select the data to work with
            tSubInt = tStart[0] + (
                j + 1) * nSampV / srate[0] - nSampV // 2 / srate[0]
            #tVSub    = tV[j*nSampV:(j+1)*nSampV]
            if nDRXInputs > 0:
                tDSub = tD[j * nSampD:(j + 1) * nSampD]
            dataVSub = dataV[:, j * nSampV:(j + 1) * nSampV]
            #if dataVSub.shape[1] != tVSub.size:
            #	dataVSub = dataVSub[:,:tVSub.size]
            #if tVSub.size == 0:
            #	continue
            dataDSub = dataD[:, j * nSampD:(j + 1) * nSampD]
            if nDRXInputs > 0:
                if dataDSub.shape[1] != tDSub.size:
                    dataDSub = dataDSub[:, :tDSub.size]
                if tDSub.size == 0:
                    continue

            ## Update the observation
            observer.date = astro.unix_to_utcjd(tSubInt) - astro.DJD_OFFSET
            refSrc.compute(observer)

            ## Correct for the LWA dipole power pattern
            if nDRXInputs > 0:
                dipoleX, dipoleY = jones.get_lwa_antenna_gain(
                    observer, refSrc, freq=cFreqs[-1][vdifPivot - 1])
                dataDSub[0::2, :] /= numpy.sqrt(dipoleX)
                dataDSub[1::2, :] /= numpy.sqrt(dipoleY)

            ## Get the Jones matrices and apply
            ## NOTE: This moves the LWA into the frame of the VLA
            if nVDIFInputs * nDRXInputs > 0:
                lwaToSky = jones.get_matrix_lwa(observer, refSrc)
                skyToVLA = jones.get_matrix_vla(observer, refSrc, inverse=True)
                dataDSub = jones.apply_matrix(
                    dataDSub,
                    numpy.matrix(skyToVLA) * numpy.matrix(lwaToSky))

            ## Correlate
            delayPadding = multirate.get_optimal_delay_padding(
                antennas[:2 * nVDIFInputs],
                antennas[2 * nVDIFInputs:],
                LFFT=drxLFFT,
                sample_rate=srate[-1],
                central_freq=cFreqs[-1][vdifPivot - 1],
                pol='*',
                phase_center=refSrc)
            if nVDIFInputs > 0:
                freqV, feoV, veoV, deoV = multirate.fengine(
                    dataVSub,
                    antennas[:2 * nVDIFInputs],
                    LFFT=vdifLFFT,
                    sample_rate=srate[0],
                    central_freq=cFreqs[0][0] - srate[0] / 4,
                    pol='*',
                    phase_center=refSrc,
                    delayPadding=delayPadding)

            if nDRXInputs > 0:
                freqD, feoD, veoD, deoD = multirate.fengine(
                    dataDSub,
                    antennas[2 * nVDIFInputs:],
                    LFFT=drxLFFT,
                    sample_rate=srate[-1],
                    central_freq=cFreqs[-1][vdifPivot - 1],
                    pol='*',
                    phase_center=refSrc,
                    delayPadding=delayPadding)

            ## Rotate the phase in time to deal with frequency offset between the VLA and LWA
            if nDRXInputs * nVDIFInputs > 0:
                subChanFreqOffset = (cFreqs[0][0] - cFreqs[-1][vdifPivot - 1]
                                     ) % (freqD[1] - freqD[0])

                if i == 0 and j == 0:
                    ## FC = frequency correction
                    tv, tu = bestFreqUnits(subChanFreqOffset)
                    print(
                        "FC - Applying fringe rotation rate of %.3f %s to the DRX data"
                        % (tv, tu))

                freqD += subChanFreqOffset
                for w in xrange(feoD.shape[2]):
                    feoD[:, :,
                         w] *= numpy.exp(-2j * numpy.pi * subChanFreqOffset *
                                         tDSub[w * drxLFFT])

            ## Sort out what goes where (channels and antennas) if we don't already know
            try:
                if nVDIFInputs > 0:
                    freqV = freqV[goodV]
                    feoV = numpy.roll(feoV, -goodV[0],
                                      axis=1)[:, :len(goodV), :]
                if nDRXInputs > 0:
                    freqD = freqD[goodD]
                    feoD = numpy.roll(feoD, -goodD[0],
                                      axis=1)[:, :len(goodD), :]

            except NameError:
                ### Frequency overlap
                fMin, fMax = -1e12, 1e12
                if nVDIFInputs > 0:
                    fMin, fMax = max([fMin,
                                      freqV.min()]), min([fMax,
                                                          freqV.max()])
                if nDRXInputs > 0:
                    fMin, fMax = max([fMin,
                                      freqD.min()]), min([fMax,
                                                          freqD.max()])

                ### Channels and antennas (X vs. Y)
                if nVDIFInputs > 0:
                    goodV = numpy.where((freqV >= fMin) & (freqV <= fMax))[0]
                    aXV = [
                        k for (k, a) in enumerate(antennas[:2 * nVDIFInputs])
                        if a.pol == 0
                    ]
                    aYV = [
                        k for (k, a) in enumerate(antennas[:2 * nVDIFInputs])
                        if a.pol == 1
                    ]
                if nDRXInputs > 0:
                    goodD = numpy.where((freqD >= fMin) & (freqD <= fMax))[0]
                    aXD = [
                        k for (k, a) in enumerate(antennas[2 * nVDIFInputs:])
                        if a.pol == 0
                    ]
                    aYD = [
                        k for (k, a) in enumerate(antennas[2 * nVDIFInputs:])
                        if a.pol == 1
                    ]

                ### Validate the channel alignment and fix it if needed
                if nVDIFInputs * nDRXInputs != 0:
                    pd = freqV[goodV[0]] - freqD[goodD[0]]
                    # Need to shift?
                    if abs(pd) >= 1.01 * abs(subChanFreqOffset):
                        ## Need to shift
                        if pd < 0.0:
                            goodV = goodV[1:]
                        else:
                            goodD = goodD[1:]

                    # Need to trim?
                    if len(goodV) > len(goodD):
                        ## Yes, goodV is too long
                        goodV = goodV[:len(goodD)]
                    elif len(goodD) > len(goodV):
                        ## Yes, goodD is too long
                        goodD = goodD[:len(goodV)]
                    else:
                        ## No, nothing needs to be done
                        pass

                    # Validate
                    fd = freqV[goodV] - freqD[goodD]
                    try:
                        assert (fd.min() >= -1.01 * subChanFreqOffset)
                        assert (fd.max() <= 1.01 * subChanFreqOffset)

                        ## FS = frequency selection
                        tv, tu = bestFreqUnits(freqV[1] - freqV[0])
                        print("FS - Found %i, %.3f %s overalapping channels" %
                              (len(goodV), tv, tu))
                        tv, tu = bestFreqUnits(freqV[goodV[-1]] -
                                               freqV[goodV[0]])
                        print("FS - Bandwidth is %.3f %s" % (tv, tu))
                        print("FS - Channels span %.3f MHz to %.3f MHz" %
                              (freqV[goodV[0]] / 1e6, freqV[goodV[-1]] / 1e6))

                    except AssertionError:
                        raise RuntimeError(
                            "Cannot find a common frequency set between the input data: offsets range between %.3f Hz and %.3f Hz, expected %.3f Hz"
                            % (fd.min(), fd.max(), subChanFreqOffset))

                ### Apply
                if nVDIFInputs > 0:
                    freqV = freqV[goodV]
                    feoV = numpy.roll(feoV, -goodV[0],
                                      axis=1)[:, :len(goodV), :]
                if nDRXInputs > 0:
                    freqD = freqD[goodD]
                    feoD = numpy.roll(feoD, -goodD[0],
                                      axis=1)[:, :len(goodD), :]
            try:
                nchan = freqV.size
                fdt = feoV.dtype
                vdt = veoV.dtype
            except NameError:
                nchan = freqD.size
                fdt = feoD.dtype
                vdt = veoD.dtype
            ## Setup the intermediate F-engine products and trim the data
            ### Figure out the minimum number of windows
            nWin = 1e12
            if nVDIFInputs > 0:
                nWin = min([nWin, feoV.shape[2]])
                nWin = min(
                    [nWin,
                     numpy.argmax(numpy.cumsum(veoV.sum(axis=0))) + 1])
            if nDRXInputs > 0:
                nWin = min([nWin, feoD.shape[2]])
                nWin = min(
                    [nWin,
                     numpy.argmax(numpy.cumsum(veoD.sum(axis=0))) + 1])

            ### Initialize the intermediate arrays
            try:
                assert (feoX.shape[2] == nWin)
            except (NameError, AssertionError):
                feoX = numpy.zeros((nVDIFInputs + nDRXInputs, nchan, nWin),
                                   dtype=fdt)
                feoY = numpy.zeros((nVDIFInputs + nDRXInputs, nchan, nWin),
                                   dtype=fdt)
                veoX = numpy.zeros((nVDIFInputs + nDRXInputs, nWin), dtype=vdt)
                veoY = numpy.zeros((nVDIFInputs + nDRXInputs, nWin), dtype=vdt)

            ### Trim
            if nVDIFInputs > 0:
                feoV = feoV[:, :, :nWin]
                veoV = veoV[:, :nWin]
            if nDRXInputs > 0:
                feoD = feoD[:, :, :nWin]
                veoD = veoD[:, :nWin]

            ## Sort it all out by polarization
            for k in xrange(nVDIFInputs):
                feoX[k, :, :] = feoV[aXV[k], :, :]
                feoY[k, :, :] = feoV[aYV[k], :, :]
                veoX[k, :] = veoV[aXV[k], :]
                veoY[k, :] = veoV[aYV[k], :]
            for k in xrange(nDRXInputs):
                feoX[k + nVDIFInputs, :, :] = feoD[aXD[k], :, :]
                feoY[k + nVDIFInputs, :, :] = feoD[aYD[k], :, :]
                veoX[k + nVDIFInputs, :] = veoD[aXD[k], :]
                veoY[k + nVDIFInputs, :] = veoD[aYD[k], :]

            ## Cross multiply
            try:
                sfreqXX = freqV
                sfreqYY = freqV
            except NameError:
                sfreqXX = freqD
                sfreqYY = freqD
            svisXX, svisXY, svisYX, svisYY = multirate.xengine_full(
                feoX, veoX, feoY, veoY)

            ## Accumulate
            if subIntCount == 0:
                subIntTimes = [
                    tSubInt,
                ]
                freqXX = sfreqXX
                freqYY = sfreqYY
                visXX = svisXX / nDump
                visXY = svisXY / nDump
                visYX = svisYX / nDump
                visYY = svisYY / nDump
            else:
                subIntTimes.append(tSubInt)
                visXX += svisXX / nDump
                visXY += svisXY / nDump
                visYX += svisYX / nDump
                visYY += svisYY / nDump
            subIntCount += 1

            ## Save
            if subIntCount == nDump:
                subIntCount = 0
                fileCount += 1

                ### CD = correlator dump
                outfile = "%s-vis2-%05i.npz" % (outbase, fileCount)
                numpy.savez(outfile,
                            config=rawConfig,
                            srate=srate[0] / 2.0,
                            freq1=freqXX,
                            vis1XX=visXX,
                            vis1XY=visXY,
                            vis1YX=visYX,
                            vis1YY=visYY,
                            tStart=numpy.mean(
                                numpy.array(subIntTimes, dtype=numpy.float64)),
                            tInt=tDump)
                print(
                    "CD - writing integration %i to disk, timestamp is %.3f s"
                    % (fileCount,
                       numpy.mean(numpy.array(subIntTimes,
                                              dtype=numpy.float64))))
                if fileCount == 1:
                    print("CD - each integration is %.1f MB on disk" %
                          (os.path.getsize(outfile) / 1024.0**2, ))
                if (fileCount - 1) % 25 == 0:
                    print(
                        "CD - average processing time per integration is %.3f s"
                        % ((time.time() - wallStart) / fileCount, ))
                    etc = (nInt - fileCount) * (time.time() -
                                                wallStart) / fileCount
                    eth = int(etc / 60.0) // 60
                    etm = int(etc / 60.0) % 60
                    ets = etc % 60
                    print(
                        "CD - estimated time to completion is %i:%02i:%04.1f" %
                        (eth, etm, ets))

        if done:
            break

    # Cleanup
    etc = time.time() - wallStart
    eth = int(etc / 60.0) // 60
    etm = int(etc / 60.0) % 60
    ets = etc % 60
    print("Processing finished after %i:%02i:%04.1f" % (eth, etm, ets))
    print("Average time per integration was %.3f s" % (etc / fileCount, ))
    for f in fh:
        f.close()
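
A dump written by the save block above can be read straight back with numpy.load; the key names match the numpy.savez call. A minimal sketch (the file name is hypothetical, and the (baseline, channel) shape of the visibilities is assumed from how they are accumulated):

import numpy

# Load one correlator dump produced by the loop above and summarize it
dump = numpy.load("example-vis2-00001.npz")   # hypothetical file name
freq = dump["freq1"]                          # channel frequencies, Hz
visXX = dump["vis1XX"]                        # assumed (nBL, nchan) complex array
print("Channels: %i spanning %.3f to %.3f MHz" % (freq.size, freq[0] / 1e6, freq[-1] / 1e6))
print("Baselines: %i, tInt = %.3f s" % (visXX.shape[0], float(dump["tInt"])))
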
コード例 #9
0
def main(args):
    # Setup the site information
    station = stations.lwasv
    ants = station.antennas
    nAnt = len([a for a in ants if a.pol == 0])
    
    phase_freq_range = [0, 0]
    for filename in args.filename:
        # Open the file and get ready to go
        idf = CORFile(filename)
        nBL = idf.get_info('nbaseline')
        nchan = idf.get_info('nchan')
        tInt = idf.get_info('tint')
        nFpO = nBL * nchan // 72
        nInts = idf.get_info('nframe') // nFpO
        
        jd = astro.unix_to_utcjd(idf.get_info('start_time'))
        date = idf.get_info('start_time').datetime
        central_freq = idf.get_info('freq1')
        central_freq = central_freq[len(central_freq)//2]
        
        print("Data type:  %s" % type(idf))
        print("Samples per observations: %i" % (nFpO,))
        print("Integration Time: %.3f s" % tInt)
        print("Tuning frequency: %.3f Hz" % central_freq)
        print("Captures in file: %i (%.1f s)" % (nInts, nInts*tInt))
        print("==")
        print("Station: %s" % station.name)
        print("Date observed: %s" % date)
        print("Julian day: %.5f" % jd)
        print(" ")
        
        # Offset into the file
        offset = idf.offset(args.skip)
        if offset != 0.0:
            print("Skipped %.3f s into the file" % offset)
            
        # Open the file and go
        nFiles = int(args.duration /  tInt)
        if nFiles == 0:
            nFiles = numpy.inf
            
        fileCount = 0
        while fileCount < nFiles:
            try:
                tInt, tStart, data = idf.read(tInt)
            except Exception as e:
                print("ERROR: %s" % str(e))
                break
                
            freqs = idf.get_info('freq1')
            beginJD = astro.unix_to_utcjd( tStart )
            beginTime = datetime.utcfromtimestamp( tStart )
            
            if freqs[0] != phase_freq_range[0] or freqs[-1] != phase_freq_range[1]:
                print("Updating phasing for %.3f to %.3f MHz" % (freqs[0]/1e6, freqs[-1]/1e6))
                phase_freq_range[0] = freqs[ 0]
                phase_freq_range[1] = freqs[-1]
                
                k = 0
                phase = numpy.zeros((nBL, nchan, 2, 2), dtype=numpy.complex64)
                gaix = [a.cable.gain(freqs) for a in ants if a.pol == 0]
                gaiy = [a.cable.gain(freqs) for a in ants if a.pol == 1]
                dlyx = [a.cable.delay(freqs) - a.stand.z / speedOfLight for a in ants if a.pol == 0]
                dlyy = [a.cable.delay(freqs) - a.stand.z / speedOfLight for a in ants if a.pol == 1]
                for i in range(nAnt):
                    for j in range(i, nAnt):
                        phase[k,:,0,0] = numpy.exp(2j*numpy.pi*freqs*(dlyx[i] - dlyx[j])) \
                                            / numpy.sqrt(gaix[i]*gaix[j])
                        phase[k,:,0,1] = numpy.exp(2j*numpy.pi*freqs*(dlyx[i] - dlyy[j])) \
                                            / numpy.sqrt(gaix[i]*gaiy[j])
                        phase[k,:,1,0] = numpy.exp(2j*numpy.pi*freqs*(dlyy[i] - dlyx[j])) \
                                            / numpy.sqrt(gaiy[i]*gaix[j])
                        phase[k,:,1,1] = numpy.exp(2j*numpy.pi*freqs*(dlyy[i] - dlyy[j])) \
                                            / numpy.sqrt(gaiy[i]*gaiy[j])
                        
                        k += 1
                        
            for i in range(data.shape[-1]):
                data[...,i] *= phase
                
            # Convert to a dataDict
            try:
                blList  # pylint: disable=used-before-assignment
            except NameError:
                blList = uvutils.get_baselines(ants[0::2], include_auto=True)
                
            if args.output is None:
                outname = os.path.basename(filename)
                outname = os.path.splitext(outname)[0]
                outname = "%s_%i_%s.ms" % (outname, int(beginJD-astro.MJD_OFFSET), beginTime.strftime("%H_%M_%S"))
            else:
                base, ext = os.path.splitext(args.output)
                if ext == '':
                    ext = '.ms'
                outname = "%s_%i_%s%s" % (base, int(beginJD-astro.MJD_OFFSET), beginTime.strftime("%H_%M_%S"), ext)
                
            fits = measurementset.Ms(outname, ref_time=tStart)
            fits.set_stokes(['xx', 'xy', 'yx', 'yy'])
            fits.set_frequency(freqs)
            fits.set_geometry(station, ants[0::2])
            
            obsTime = astro.unix_to_taimjd(tStart)
            fits.add_data_set(obsTime, tInt, blList, data[:,:,0,0,0], pol='xx')
            fits.add_data_set(obsTime, tInt, blList, data[:,:,0,1,0], pol='xy')
            fits.add_data_set(obsTime, tInt, blList, data[:,:,1,0,0], pol='yx')
            fits.add_data_set(obsTime, tInt, blList, data[:,:,1,1,0], pol='yy')
            fits.write()
            fits.close()
            fileCount += 1
            
        idf.close()
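
The phasing block above removes each baseline's cable delay and gain signature before the data are written out. A toy check of that identity for a single X-X baseline (a sketch only; the delay and gain values stand in for the cable.delay and cable.gain results used above):

import numpy

freqs = numpy.linspace(40e6, 60e6, 512)    # Hz
dly = numpy.array([410e-9, 395e-9])        # assumed cable delays for two stands, s
gai = numpy.array([0.9, 1.1])              # assumed cable gains, linear

# A visibility corrupted by the cables: V' = sqrt(g0*g1) * exp(-2j pi f (d0-d1)) * V
vis_true = numpy.ones(freqs.size, dtype=numpy.complex64)
vis_meas = numpy.sqrt(gai[0]*gai[1]) * numpy.exp(-2j*numpy.pi*freqs*(dly[0]-dly[1])) * vis_true

# The correction applied in the loop above undoes both terms
phase = numpy.exp(2j*numpy.pi*freqs*(dly[0]-dly[1])) / numpy.sqrt(gai[0]*gai[1])
numpy.testing.assert_allclose(vis_meas * phase, vis_true, rtol=1e-5)
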
コード例 #10
0
def main(args):
    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            station = metabundle.get_station(args.metadata, apply_sdm=True)
    else:
        station = stations.lwana
    antennas = []
    for a in station.antennas:
        if a.digitizer != 0:
            antennas.append(a)

    # Length of the FFT
    LFFT = args.fft_length

    idf = LWA1DataFile(args.filename)

    nFramesFile = idf.get_info('nframe')
    srate = idf.get_info('sample_rate')
    antpols = len(antennas)

    # Offset in frames for beampols beam/tuning/pol. sets
    args.skip = idf.offset(args.skip)

    # Make sure that the file chunk size contains is an integer multiple
    # of the FFT length so that no data gets dropped.  This needs to
    # take into account the number of antpols in the data, the FFT length,
    # and the number of samples per frame.
    maxFrames = int(
        (2 * 10 * 750) / antpols * 512 / float(LFFT)) * LFFT // 512 * antpols

    # Number of frames to integrate over
    nFrames = int(args.average * srate / 512 * antpols)
    nFrames = int(
        1.0 * nFrames / antpols * 512 / float(LFFT)) * LFFT // 512 * antpols
    args.average = 1.0 * nFrames / antpols * 512 / srate

    # Number of remaining chunks
    nChunks = int(math.ceil(1.0 * (nFrames) / maxFrames))

    # Read in the first frame and get the date/time of the first sample
    # of the frame.  This is needed to get the list of stands.
    beginDate = ephem.Date(
        unix_to_utcjd(idf.get_info('start_time')) - DJD_OFFSET)
    central_freq = idf.get_info('freq1')

    # File summary
    print("Filename: %s" % args.filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Ant/Pols: %i" % antpols)
    print("Sample Rate: %i Hz" % srate)
    print("Tuning Frequency: %.3f Hz" % central_freq)
    print("Frames: %i (%.3f s)" %
          (nFramesFile, 1.0 * nFramesFile / antpols * 512 / srate))
    print("---")
    print("Offset: %.3f s (%i frames)" %
          (args.skip, args.skip * srate * antpols / 512))
    print("Integration: %.3f s (%i frames; %i frames per stand/pol)" %
          (args.average, nFrames, nFrames / antpols))
    print("Chunks: %i" % nChunks)

    # Sanity check
    if args.skip * srate * antpols / 512 > nFramesFile:
        raise RuntimeError("Requested offset is greater than file length")
    if nFrames > (nFramesFile - args.skip * srate * antpols / 512):
        raise RuntimeError(
            "Requested integration time+offset is greater than file length")

    # Setup the window function to use
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window

    # Master loop over all of the file chunks
    masterWeight = numpy.zeros((nChunks, antpols, LFFT))
    masterSpectra = numpy.zeros((nChunks, antpols, LFFT))

    for i in range(nChunks):
        print("Working on chunk #%i of %i" % (i + 1, nChunks))

        try:
            readT, t, data = idf.read(args.average / nChunks)
        except Exception as e:
            print("Error: %s" % str(e))
            continue

        # Calculate the spectra for this block of data and then weight the results by
        # the total number of frames read.  This is needed to keep the averages correct.

        freq, tempSpec = fxc.SpecMaster(data,
                                        LFFT=LFFT,
                                        window=window,
                                        pfb=args.pfb,
                                        verbose=args.verbose,
                                        sample_rate=srate)
        for stand in range(tempSpec.shape[0]):
            masterSpectra[i, stand, :] = tempSpec[stand, :]
            masterWeight[i, stand, :] = int(readT * srate / LFFT)

    # Apply the cable loss corrections, if requested (currently hard-wired off)
    if False:
        for s in range(masterSpectra.shape[1]):
            currGain = antennas[s].cable.gain(freq)
            for c in range(masterSpectra.shape[0]):
                masterSpectra[c, s, :] /= currGain

    # Now that we have read through all of the chunks, perform the final averaging by
    # dividing by all of the chunks
    spec = numpy.squeeze(
        (masterWeight * masterSpectra).sum(axis=0) / masterWeight.sum(axis=0))

    # Put the frequencies in the best units possible
    freq += central_freq
    freq, units = _best_freq_units(freq)

    # Deal with the `keep` options
    if args.keep == 'all':
        antpolsDisp = int(numpy.ceil(antpols / 20))
        js = [i for i in range(antpols)]
    else:
        antpolsDisp = int(numpy.ceil(len(args.keep) * 2 / 20))
        if antpolsDisp < 1:
            antpolsDisp = 1

        js = []
        for k in args.keep:
            for i, ant in enumerate(antennas):
                if ant.stand.id == k:
                    js.append(i)

    nPlot = len(js)
    if nPlot < 20:
        if nPlot % 4 == 0 and nPlot != 4:
            figsY = 4
        else:
            figsY = 2
        figsX = int(numpy.ceil(1.0 * nPlot / figsY))
    else:
        figsY = 4
        figsX = 5
    figsN = figsX * figsY
    for i in range(antpolsDisp):
        # Normal plotting
        fig = plt.figure()
        for k in range(i * figsN, i * figsN + figsN):
            try:
                j = js[k]
                currSpectra = numpy.squeeze(numpy.log10(spec[j, :]) * 10.0)
            except IndexError:
                break
            ax = fig.add_subplot(figsX, figsY, (k % figsN) + 1)
            ax.plot(
                freq,
                currSpectra,
                label='Stand: %i, Pol: %i (Dig: %i)' %
                (antennas[j].stand.id, antennas[j].pol, antennas[j].digitizer))

            # If there is more than one chunk, plot the difference between the global
            # average and each chunk
            if nChunks > 1 and not args.disable_chunks:
                for l in range(nChunks):
                    # Some files are padded by zeros at the end and, thus, carry no
                    # weight in the average spectra.  Skip over those.
                    if masterWeight[l, j, :].sum() == 0:
                        continue

                    # Calculate the difference between the spectra and plot
                    subspectra = numpy.squeeze(
                        numpy.log10(masterSpectra[l, j, :]) * 10.0)
                    diff = subspectra - currSpectra
                    ax.plot(freq, diff)

            ax.set_title('Stand: %i (%i); Dig: %i [%i]' %
                         (antennas[j].stand.id, antennas[j].pol,
                          antennas[j].digitizer, antennas[j].combined_status))
            ax.set_xlabel('Frequency [%s]' % units)
            ax.set_ylabel('P.S.D. [dB/RBW]')
            ax.set_ylim([-10, 30])

        # Save spectra image if requested
        if args.output is not None:
            base, ext = os.path.splitext(args.output)
            outFigure = "%s-%02i%s" % (base, i + 1, ext)
            fig.savefig(outFigure)

        plt.draw()

    print("RBW: %.4f %s" % ((freq[1] - freq[0]), units))
    plt.show()
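
The final average above weights each chunk's spectrum by the number of FFTs that went into it, so short or zero-padded chunks do not dilute the result. The same arithmetic on toy numbers, following the (nChunks, antpols, LFFT) layout used above:

import numpy

# Two chunks of a 4-channel spectrum; the second chunk carries twice the weight
masterSpectra = numpy.array([[[1.0, 1.0, 1.0, 1.0]],
                             [[4.0, 4.0, 4.0, 4.0]]])
masterWeight = numpy.array([[[100, 100, 100, 100]],
                            [[200, 200, 200, 200]]])

spec = numpy.squeeze(
    (masterWeight * masterSpectra).sum(axis=0) / masterWeight.sum(axis=0))
print(spec)   # -> [3. 3. 3. 3.], i.e. (1*100 + 4*200) / 300
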
コード例 #11
0
def main(args):
    # Parse command line options
    filename = args.filename

    # Length of the FFT
    LFFT = args.fft_length

    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            station = metabundle.get_station(args.metadata, apply_sdm=True)
    else:
        station = stations.lwana
    antennas = station.antennas

    idf = LWA1DataFile(filename)

    jd = astro.unix_to_utcjd(idf.get_info('start_time'))
    date = str(ephem.Date(jd - astro.DJD_OFFSET))
    nFpO = len(antennas)
    sample_rate = idf.get_info('sample_rate')
    nInts = idf.get_info('nframe') // nFpO

    # Get valid stands for both polarizations
    goodX = []
    goodY = []
    for i in range(len(antennas)):
        ant = antennas[i]
        if ant.combined_status == 33 or args.all:
            if ant.pol == 0:
                goodX.append(ant)
            else:
                goodY.append(ant)

    # Now combine both lists to come up with stands that
    # are in both so we can form the cross-polarization
    # products if we need to
    good = []
    for antX in goodX:
        for antY in goodY:
            if antX.stand.id == antY.stand.id:
                good.append(antX.digitizer - 1)
                good.append(antY.digitizer - 1)

    # Report on the valid stands found.  This is a little verbose,
    # but nice to see.
    print("Found %i good stands to use" % (len(good) // 2, ))
    for i in good:
        print("%3i, %i" % (antennas[i].stand.id, antennas[i].pol))

    # Number of frames to read in at once and average
    nFrames = int(args.avg_time * sample_rate / 512)
    args.offset = idf.offset(args.offset)
    nSets = idf.get_info('nframe') // nFpO // nFrames
    nSets = nSets - int(args.offset * sample_rate / 512) // nFrames

    central_freq = idf.get_info('freq1')

    print("Data type:  %s" % type(idf))
    print("Samples per observations: %i per pol." % (nFpO / 2))
    print("Sampling rate: %i Hz" % sample_rate)
    print("Tuning frequency: %.3f Hz" % central_freq)
    print("Captures in file: %i (%.1f s)" % (nInts, nInts * 512 / sample_rate))
    print("==")
    print("Station: %s" % station.name)
    print("Date observed: %s" % date)
    print("Julian day: %.5f" % jd)
    print("Offset: %.3f s (%i frames)" %
          (args.offset, args.offset * sample_rate / 512))
    print("Integration Time: %.3f s" % (512 * nFrames / sample_rate))
    print("Number of integrations in file: %i" % nSets)

    # Make sure we don't try to do too many sets
    if args.samples > nSets:
        args.samples = nSets

    # Loop over chunks of 100 integrations to make sure that we don't overflow
    # the FITS IDI memory buffer
    s = 0
    leftToDo = args.samples
    basename = os.path.split(filename)[1]
    basename, ext = os.path.splitext(basename)
    while leftToDo > 0:
        fitsFilename = "%s.FITS_%i" % (
            basename,
            (s + 1),
        )

        if leftToDo > 100:
            chunk = 100
        else:
            chunk = leftToDo

        process_chunk(idf,
                      station,
                      good,
                      fitsFilename,
                      int_time=args.avg_time,
                      LFFT=args.fft_length,
                      overlap=1,
                      pfb=args.pfb,
                      pols=args.products,
                      chunk_size=chunk)

        s += 1
        leftToDo = leftToDo - chunk

    idf.close()
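
The while loop above caps each FITS IDI file at 100 integrations and lets the last file absorb the remainder. The splitting arithmetic in isolation (a sketch of the same logic):

def chunk_sizes(total, cap=100):
    """Split `total` integrations into per-file chunks of at most `cap`."""
    sizes = []
    left = total
    while left > 0:
        sizes.append(min(cap, left))
        left -= sizes[-1]
    return sizes

print(chunk_sizes(250))   # -> [100, 100, 50]
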
コード例 #12
0
def main(args):
    # Parse command line options
    config = parseOptions(args)

    # Length of the FFT
    LFFT = config['LFFT']

    # Open the file and find good data (not spectrometer data)
    filename = config['args'][0]
    fh = open(filename, "rb")
    nFramesFile = os.path.getsize(filename) / drx.FrameSize

    try:
        for i in xrange(5):
            junkFrame = drspec.readFrame(fh)
        raise RuntimeError(
            "ERROR: '%s' appears to be a DR spectrometer file, not a raw DRX file"
            % filename)
    except errors.syncError:
        fh.seek(0)

    while True:
        try:
            junkFrame = drx.readFrame(fh)
            try:
                srate = junkFrame.getSampleRate()
                t0 = junkFrame.getTime()
                break
            except ZeroDivisionError:
                pass
        except errors.syncError:
            fh.seek(-drx.FrameSize + 1, 1)

    fh.seek(-drx.FrameSize, 1)

    beam, tune, pol = junkFrame.parseID()
    beams = drx.getBeamCount(fh)
    tunepols = drx.getFramesPerObs(fh)
    tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
    beampols = tunepol

    # Offset in frames for beampols beam/tuning/pol. sets
    inoffset = config['offset']
    offset = int(config['offset'] * srate / 4096 * beampols)
    offset = int(1.0 * offset / beampols) * beampols
    fh.seek(offset * drx.FrameSize, 1)

    # Iterate on the offsets until we reach the right point in the file.  This
    # is needed to deal with files that start with only one tuning and/or a
    # different sample rate.
    while True:
        ## Figure out where in the file we are and what the current tuning/sample
        ## rate is
        junkFrame = drx.readFrame(fh)
        srate = junkFrame.getSampleRate()
        t1 = junkFrame.getTime()
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        fh.seek(-drx.FrameSize, 1)

        ## See how far off the current frame is from the target
        tDiff = t1 - (t0 + config['offset'])

        ## Half that to come up with a new seek parameter
        tCorr = -tDiff / 2.0
        cOffset = int(tCorr * srate / 4096 * beampols)
        cOffset = int(1.0 * cOffset / beampols) * beampols
        offset += cOffset

        ## If the offset is zero, we are done.  Otherwise, apply the offset
        ## and check the location in the file again.
        if cOffset == 0:
            break
        fh.seek(cOffset * drx.FrameSize, 1)

    # Update the offset actually used
    config['offset'] = t1 - t0
    offset = int(round(config['offset'] * srate / 4096 * beampols))
    offset = int(1.0 * offset / beampols) * beampols

    # Make sure that the file chunk size contains is an integer multiple
    # of the FFT length so that no data gets dropped.  This needs to
    # take into account the number of beampols in the data, the FFT length,
    # and the number of samples per frame.
    maxFrames = int(1.0 * config['maxFrames'] / beampols * 4096 /
                    float(LFFT)) * LFFT / 4096 * beampols

    # Number of frames to integrate over
    print "Line 673: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols
    nFramesAvg = int(config['average'] * srate / 4096 * beampols)
    if (nFramesAvg == 0):
        nFramesAvg = 1 * beampols
    else:
        nFramesAvg = int(1.0 * nFramesAvg / beampols * 4096 /
                         float(LFFT)) * LFFT / 4096 * beampols
    config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
    maxFrames = nFramesAvg
    print "Line 678: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    if config['metadata'] is not None:
        config['duration'] = 0
    if config['duration'] == 0:
        config['duration'] = 1.0 * nFramesFile / beampols * 4096 / srate
    else:
        config['duration'] = int(
            round(config['duration'] * srate * beampols / 4096) / beampols *
            4096 / srate)

    nChunks = int(round(config['duration'] / config['average']))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks
    print "Line 693: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg, " nChunks ", nChunks

    # Date & Central Frequency
    t1 = junkFrame.getTime()
    beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
    centralFreq1 = 0.0
    centralFreq2 = 0.0
    for i in xrange(4):
        junkFrame = drx.readFrame(fh)
        b, t, p = junkFrame.parseID()
        if p == 0 and t == 1:
            try:
                centralFreq1 = junkFrame.getCentralFreq()
            except AttributeError:
                from lsl.common.dp import fS
                centralFreq1 = fS * ((junkFrame.data.flags >> 32) &
                                     (2**32 - 1)) / 2**32
        elif p == 0 and t == 2:
            try:
                centralFreq2 = junkFrame.getCentralFreq()
            except AttributeError:
                from lsl.common.dp import fS
                centralFreq2 = fS * ((junkFrame.data.flags >> 32) &
                                     (2**32 - 1)) / 2**32
        else:
            pass
    fh.seek(-4 * drx.FrameSize, 1)

    config['freq1'] = centralFreq1
    config['freq2'] = centralFreq2

    # File summary
    print "Filename: %s" % filename
    print "Date of First Frame: %s" % str(beginDate)
    print "Beams: %i" % beams
    print "Tune/Pols: %i %i %i %i" % tunepols
    print "Sample Rate: %i Hz" % srate
    print "Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (centralFreq1,
                                                          centralFreq2)
    print "Frames: %i (%.3f s)" % (nFramesFile,
                                   1.0 * nFramesFile / beampols * 4096 / srate)
    print "---"
    print "Offset: %.3f s (%i frames)" % (config['offset'], offset)
    print "Integration: %.6f s (%i frames; %i frames per beam/tune/pol)" % (
        config['average'], nFramesAvg, nFramesAvg / beampols)
    print "Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (
        config['average'] * nChunks, nFrames, nFrames / beampols)
    print "Chunks: %i" % nChunks
    print " "

    # Estimate clip level (if needed)
    if config['estimate']:
        clip1, clip2 = estimateClipLevel(fh, beampols)
    else:
        clip1 = config['clip']
        clip2 = config['clip']

    # Make the pseudo-antennas for Stokes calculation
    antennas = []
    for i in xrange(4):
        if i / 2 == 0:
            newAnt = stations.Antenna(1)
        else:
            newAnt = stations.Antenna(2)

        if i % 2 == 0:
            newAnt.pol = 0
        else:
            newAnt.pol = 1

        antennas.append(newAnt)

    # Setup the output file
    outname = os.path.split(filename)[1]
    outname = os.path.splitext(outname)[0]
    if (config['return'] == 'FFT'):
        outname = '%s-%d-waterfall-complex.hdf5' % (outname, inoffset)
    else:
        outname = '%s-waterfall.hdf5' % outname

    if os.path.exists(outname):
        #yn = raw_input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
        #if yn not in ('n', 'N'):
        #	os.unlink(outname)
        #else:
        raise RuntimeError("Output file '%s' already exists" % outname)

    f = hdfData.createNewFile(outname)

    # Look at the metadata and come up with a list of observations.  If
    # there are no metadata, create a single "observation" that covers the
    # whole file.
    obsList = {}
    if config['metadata'] is not None:
        sdf = metabundle.getSessionDefinition(config['metadata'])

        sdfBeam = sdf.sessions[0].drxBeam
        spcSetup = sdf.sessions[0].spcSetup
        if sdfBeam != beam:
            raise RuntimeError(
                "Metadata is for beam #%i, but data is from beam #%i" %
                (sdfBeam, beam))

        for i, obs in enumerate(sdf.sessions[0].observations):
            sdfStart = mcs.mjdmpm2datetime(obs.mjd, obs.mpm)
            sdfStop = mcs.mjdmpm2datetime(obs.mjd, obs.mpm + obs.dur)
            obsDur = obs.dur / 1000.0
            obsSR = drx.filterCodes[obs.filter]

            obsList[i + 1] = (sdfStart, sdfStop, obsDur, obsSR)

        print "Observations:"
        for i in sorted(obsList.keys()):
            obs = obsList[i]
            print " #%i: %s to %s (%.3f s) at %.3f MHz" % (
                i, obs[0], obs[1], obs[2], obs[3] / 1e6)
        print " "

        hdfData.fillFromMetabundle(f, config['metadata'])
    else:
        obsList[1] = (datetime.utcfromtimestamp(t1),
                      datetime(2222, 12, 31, 23, 59, 59),
                      config['duration'], srate)

        hdfData.fillMinimum(f, 1, beam, srate)

    if config['linear']:
        dataProducts = ['XX', 'YY']
    else:
        dataProducts = ['I', 'Q', 'U', 'V']

    for o in sorted(obsList.keys()):
        for t in (1, 2):
            hdfData.createDataSets(
                f,
                o,
                t,
                numpy.arange((LFFT - 1) if float(fxc.__version__) < 0.8 else LFFT,
                             dtype=numpy.float32),
                int(round(obsList[o][2] / config['average'])),
                dataProducts,
                dataOut=config['return'])
    f.attrs['FileGenerator'] = 'hdfWaterfall.py'
    f.attrs['InputData'] = os.path.basename(filename)

    # Create the various HDF group holders
    ds = {}
    for o in sorted(obsList.keys()):
        obs = hdfData.getObservationSet(f, o)

        ds['obs%i' % o] = obs
        ds['obs%i-time' % o] = obs.create_dataset(
            'time', (int(round(obsList[o][2] / config['average'])), ), 'f8')

        for t in (1, 2):
            ds['obs%i-freq%i' % (o, t)] = hdfData.getDataSet(f, o, t, 'freq')
            for p in dataProducts:
                if (config['return'] == 'PSD'):
                    ds["obs%i-%s%i" % (o, p, t)] = hdfData.getDataSet(
                        f, o, t, p)
                else:
                    ds["obs%i-%s%imag" % (o, p, t)] = hdfData.getDataSet(
                        f, o, t, p + 'mag')
                    ds["obs%i-%s%iphase" % (o, p, t)] = hdfData.getDataSet(
                        f, o, t, p + 'phase')
            ds['obs%i-Saturation%i' % (o, t)] = hdfData.getDataSet(
                f, o, t, 'Saturation')
    # Load in the correct analysis function
    if config['linear']:
        processDataBatch = processDataBatchLinear
    else:
        processDataBatch = processDataBatchStokes

    # Go!
    for o in sorted(obsList.keys()):
        try:
            processDataBatch(fh,
                             antennas,
                             obsList[o][0],
                             obsList[o][2],
                             obsList[o][3],
                             config,
                             ds,
                             obsID=o,
                             clip1=clip1,
                             clip2=clip2)
        except RuntimeError, e:
            print "Observation #%i: %s, abandoning this observation" % (o,
                                                                        str(e))
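
The iterative offset loop above converges by stepping half of the measured time error on each pass instead of seeking the full distance at once, which stays stable even when the sample rate changes partway through the file. Its bare skeleton, with the frame reads replaced by a scalar position (a sketch only):

target = 12.000    # desired offset from the file start, s
position = 0.0     # time of the frame currently under the read pointer, s

for _ in range(30):
    tDiff = position - target    # how far off the current frame is
    if abs(tDiff) < 1e-6:
        break
    position += -tDiff / 2.0     # step half the error, as in the loop above
print("Converged to %.6f s" % position)
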
コード例 #13
0
def processDataBatchStokes(fh,
                           antennas,
                           tStart,
                           duration,
                           sampleRate,
                           config,
                           dataSets,
                           obsID=1,
                           clip1=0,
                           clip2=0):
    """
	Process a chunk of data in a raw DRX file into Stokes parameters and 
	add the contents to an HDF5 file.
	"""

    # Length of the FFT
    LFFT = config['LFFT']

    # Find the start of the observation
    junkFrame = drx.readFrame(fh)
    srate = junkFrame.getSampleRate()
    t0 = junkFrame.getTime()
    fh.seek(-drx.FrameSize, 1)

    print 'Looking for #%i at %s with sample rate %.1f Hz...' % (obsID, tStart,
                                                                 sampleRate)
    while datetime.utcfromtimestamp(t0) < tStart or srate != sampleRate:
        junkFrame = drx.readFrame(fh)
        srate = junkFrame.getSampleRate()
        t0 = junkFrame.getTime()
    print '... Found #%i at %s with sample rate %.1f Hz' % (
        obsID, datetime.utcfromtimestamp(t0), srate)
    tDiff = datetime.utcfromtimestamp(t0) - tStart
    try:
        duration = duration - tDiff.total_seconds()
    except AttributeError:
        duration = duration - (tDiff.seconds + tDiff.microseconds / 1e6)

    beam, tune, pol = junkFrame.parseID()
    beams = drx.getBeamCount(fh)
    tunepols = drx.getFramesPerObs(fh)
    tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
    beampols = tunepol

    # Make sure that the file chunk size contains is an integer multiple
    # of the FFT length so that no data gets dropped.  This needs to
    # take into account the number of beampols in the data, the FFT length,
    # and the number of samples per frame.
    maxFrames = int(1.0 * config['maxFrames'] / beampols * 4096 /
                    float(LFFT)) * LFFT / 4096 * beampols

    # Number of frames to integrate over
    print "Line 455: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols
    nFramesAvg = int(round(config['average'] * srate / 4096 * beampols))
    nFramesAvg = int(1.0 * nFramesAvg / beampols * 4096 /
                     float(LFFT)) * LFFT / 4096 * beampols
    config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
    maxFrames = nFramesAvg
    print "Line 460: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    nChunks = int(round(duration / config['average']))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks
    print "Line 468: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg, " nChunks ", nChunks

    # Date & Central Frequency
    beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
    centralFreq1 = 0.0
    centralFreq2 = 0.0
    for i in xrange(4):
        junkFrame = drx.readFrame(fh)
        b, t, p = junkFrame.parseID()
        if p == 0 and t == 1:
            try:
                centralFreq1 = junkFrame.getCentralFreq()
            except AttributeError:
                from lsl.common.dp import fS
                centralFreq1 = fS * ((junkFrame.data.flags >> 32) &
                                     (2**32 - 1)) / 2**32
        elif p == 0 and t == 2:
            try:
                centralFreq2 = junkFrame.getCentralFreq()
            except AttributeError:
                from lsl.common.dp import fS
                centralFreq2 = fS * ((junkFrame.data.flags >> 32) &
                                     (2**32 - 1)) / 2**32
        else:
            pass
    fh.seek(-4 * drx.FrameSize, 1)
    freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1 / srate))
    if float(fxc.__version__) < 0.8:
        freq = freq[1:]

    dataSets['obs%i-freq1' % obsID][:] = freq + centralFreq1
    dataSets['obs%i-freq2' % obsID][:] = freq + centralFreq2

    obs = dataSets['obs%i' % obsID]
    obs.attrs['tInt'] = config['average']
    obs.attrs['tInt_Unit'] = 's'
    obs.attrs['LFFT'] = LFFT
    obs.attrs['nChan'] = LFFT - 1 if float(fxc.__version__) < 0.8 else LFFT
    obs.attrs['RBW'] = freq[1] - freq[0]
    obs.attrs['RBW_Units'] = 'Hz'

    dataProducts = ['I', 'Q', 'U', 'V']
    done = False
    for i in xrange(nChunks):
        # Find out how many frames remain in the file.  If this number is larger
        # than the maximum of frames we can work with at a time (maxFrames),
        # only deal with that chunk
        framesRemaining = nFrames - i * maxFrames
        if framesRemaining > maxFrames:
            framesWork = maxFrames
        else:
            framesWork = framesRemaining
        print "Working on chunk %i, %i frames remaining" % (i + 1,
                                                            framesRemaining)

        count = {0: 0, 1: 0, 2: 0, 3: 0}
        data = numpy.zeros((4, framesWork * 4096 / beampols),
                           dtype=numpy.csingle)
        # If there are fewer frames than we need to fill an FFT, skip this chunk
        if data.shape[1] < LFFT:
            break

        # Inner loop that actually reads the frames into the data array
        print "Working on %.1f ms of data" % (
            (framesWork * 4096 / beampols / srate) * 1000.0)

        for j in xrange(framesWork):
            # Read in the next frame and anticipate any problems that could occur
            try:
                cFrame = drx.readFrame(fh, Verbose=False)
            except errors.eofError:
                done = True
                break
            except errors.syncError:
                continue

            beam, tune, pol = cFrame.parseID()
            aStand = 2 * (tune - 1) + pol
            if j == 0:
                cTime = cFrame.getTime()

            try:
                data[aStand, count[aStand] * 4096:(count[aStand] + 1) *
                     4096] = cFrame.data.iq
                count[aStand] += 1
            except ValueError:
                raise RuntimeError("Invalid Shape")

        # Save out some easy stuff
        dataSets['obs%i-time' % obsID][i] = cTime

        if config['countSats']:
            sats = ((data.real**2 + data.imag**2) >= 49).sum(axis=1)
            dataSets['obs%i-Saturation1' % obsID][i, :] = sats[0:2]
            dataSets['obs%i-Saturation2' % obsID][i, :] = sats[2:4]
        else:
            dataSets['obs%i-Saturation1' % obsID][i, :] = -1
            dataSets['obs%i-Saturation2' % obsID][i, :] = -1

        # Calculate the spectra for this block of data and then weight the results by
        # the total number of frames read.  This is needed to keep the averages correct.
        if clip1 == clip2:
            freq, tempSpec1 = fxc.StokesMaster(data,
                                               antennas,
                                               LFFT=LFFT,
                                               window=config['window'],
                                               verbose=config['verbose'],
                                               SampleRate=srate,
                                               ClipLevel=clip1)

            for t in (1, 2):
                for l, p in enumerate(dataProducts):
                    dataSets['obs%i-%s%i' %
                             (obsID, p, t)][i, :] = tempSpec1[l, t - 1, :]

        else:
            freq, tempSpec1 = fxc.StokesMaster(data[:2, :],
                                               antennas[:2],
                                               LFFT=LFFT,
                                               window=config['window'],
                                               verbose=config['verbose'],
                                               SampleRate=srate,
                                               ClipLevel=clip1)
            freq, tempSpec2 = fxc.StokesMaster(data[2:, :],
                                               antennas[2:],
                                               LFFT=LFFT,
                                               window=config['window'],
                                               verbose=config['verbose'],
                                               SampleRate=srate,
                                               ClipLevel=clip2)

            for l, p in enumerate(dataProducts):
                dataSets['obs%i-%s%i' % (obsID, p, 1)][i, :] = tempSpec1[l,
                                                                         0, :]
                dataSets['obs%i-%s%i' % (obsID, p, 2)][i, :] = tempSpec2[l,
                                                                         0, :]

        # We don't really need the data array anymore, so delete it
        del (data)

        # Are we done yet?
        if done:
            break

    return True
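
For reference, the Stokes products that fxc.StokesMaster returns are the standard combinations of the two linear polarizations; note that sign conventions for U and V vary between packages. A minimal sketch of the definitions on hypothetical per-channel spectra:

import numpy

Ex = numpy.array([1 + 1j, 2 + 0j], dtype=numpy.complex64)   # hypothetical X spectrum
Ey = numpy.array([0 + 1j, 1 - 1j], dtype=numpy.complex64)   # hypothetical Y spectrum

I = numpy.abs(Ex)**2 + numpy.abs(Ey)**2   # total power
Q = numpy.abs(Ex)**2 - numpy.abs(Ey)**2   # linear, X vs Y
U = 2 * (Ex * Ey.conj()).real             # linear, 45 degrees
V = -2 * (Ex * Ey.conj()).imag            # circular (sign convention varies)
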
コード例 #14
0
def main(args):
    filenames = args.filenames
    filenames.sort()

    times = []
    for filename in filenames:
        dataDict = numpy.load(filename)

        tStart = datetime.utcfromtimestamp(dataDict['tStart'])
        tInt = dataDict['tInt']
        try:
            srate = dataDict['srate']
        except KeyError:
            srate = 19.6e6
        
        freq1 = dataDict['freq1']
        freq2 = dataDict['freq2']
        stand1, stand2 = dataDict['stands']

        times.append( tStart)

    print("Got %i files from %s to %s (%s)" % (len(filenames), times[0].strftime("%Y/%m/%d %H:%M:%S"), times[-1].strftime("%Y/%m/%d %H:%M:%S"), (times[-1]-times[0])))

    iTimes = []
    for i in range(1, len(times)):
        dt = times[i] - times[i-1]
        iTimes.append(dt.days*24*3600 + dt.seconds + dt.microseconds/1e6)
    iTimes = numpy.array(iTimes)
    print(" -> Interval: %.3f +/- %.3f seconds (%.3f to %.3f seconds)" % (iTimes.mean(), iTimes.std(), iTimes.min(), iTimes.max()))
    
    print("Number of frequency channels: %i (~%.1f Hz/channel)" % (len(freq1)+1, freq1[1]-freq1[0]))

    # Build up the station
    if args.lwasv:
        site = stations.lwasv
    else:
        site = stations.lwa1
    
    rawAntennas = site.antennas
    
    antennas = []
    for ant in rawAntennas:
        if ant.stand.id == stand1 and ant.pol == 0:
            antennas.append(ant)
    for ant in rawAntennas:
        if ant.stand.id == stand2 and ant.pol == 0:
            antennas.append(ant)
    if len(antennas) != 2:
        raise RuntimeError("Expected stands %i and %i in the NPZ files but found %i matching antennas" % (stand1, stand2, len(antennas)))

    # Create the simulated array
    refJD = unix_to_utcjd(timegm(times[0].timetuple()))
    aa1 = simVis.build_sim_array(site, antennas, freq1/1e9, jd=refJD)
    aa2 = simVis.build_sim_array(site, antennas, freq2/1e9, jd=refJD)

    # Build the model times and range.
    jdList = []
    dTimes = []
    for i in range(len(times)):
        tNow = timegm(times[i].timetuple())
        jdNow = unix_to_utcjd(tNow)

        jdList.append(jdNow)
        dTimes.append((times[i]-times[0]).total_seconds())
        
    # Actually run the simulations
    simDict1 = simVis.build_sim_data(aa1, simVis.SOURCES, jd=jdList, pols=['xx',], verbose=False)
    simDict2 = simVis.build_sim_data(aa2, simVis.SOURCES, jd=jdList, pols=['xx',], verbose=False)

    # Plot
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = fig.add_subplot(2, 1, 2)

    vis1 = []
    for vis in simDict1['vis']['xx']:
        vis1.append( vis )
    vis2 = []
    for vis in simDict2['vis']['xx']:
        vis2.append( vis )
    
    vis1 = numpy.array(vis1)
    vis1 = numpy.ma.array(vis1, mask=~numpy.isfinite(vis1))
    vis2 = numpy.array(vis2)
    vis2 = numpy.ma.array(vis2, mask=~numpy.isfinite(vis2))

    data = numpy.abs(vis1)
    data = data.ravel()
    data.sort()
    vmin1 = data[int(round(0.15*len(data)))]
    vmax1 = data[int(round(0.85*len(data)))]
    print('Plot range for tuning 1:', vmin1, vmax1)
    
    data = numpy.abs(vis2)
    data = data.ravel()
    data.sort()
    vmin2 = data[int(round(0.15*len(data)))]
    vmax2 = data[int(round(0.85*len(data)))]
    print('Plot range for tuning 2:', vmin2, vmax2)

    ax1.imshow(numpy.abs(vis1), extent=(freq1[0]/1e6, freq1[-1]/1e6, dTimes[0], dTimes[-1]), origin='lower',
            vmin=vmin1, vmax=vmax1)
    ax2.imshow(numpy.abs(vis2), extent=(freq2[0]/1e6, freq2[-1]/1e6, dTimes[0], dTimes[-1]), origin='lower',
            vmin=vmin2, vmax=vmax2)

    ax1.axis('auto')
    ax2.axis('auto')

    fig.suptitle("%s to %s UTC" % (times[0].strftime("%Y/%m/%d %H:%M"), times[-1].strftime("%Y/%m/%d %H:%M")))
    ax1.set_xlabel('Frequency [MHz]')
    ax2.set_xlabel('Frequency [MHz]')
    ax1.set_ylabel('Elapsed Time [s]')
    ax2.set_ylabel('Elapsed Time [s]')

    plt.show()
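
The sort-and-index stretch above picks the 15th and 85th percentiles of the visibility amplitudes for the color scale; numpy.percentile gives the same cut points up to interpolation:

import numpy

data = numpy.abs(numpy.random.randn(1000))
vmin, vmax = numpy.percentile(data, [15, 85])

# Equivalent, up to interpolation, to the manual version used above
s = numpy.sort(data.ravel())
vmin2 = s[int(round(0.15 * len(s)))]
vmax2 = s[int(round(0.85 * len(s)))]
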
コード例 #15
0
    def jd(self):
        """
        JD as a floating point value.
        """

        return unix_to_utcjd(self)
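
The conversion underneath unix_to_utcjd is a fixed offset: the UNIX epoch (1970-01-01 00:00 UTC) falls at JD 2440587.5 and a day is 86400 s. A one-line equivalent, ignoring leap seconds:

def unix_to_jd(unix):
    """UTC Julian date from a UNIX timestamp (leap seconds ignored)."""
    return unix / 86400.0 + 2440587.5

print(unix_to_jd(0.0))   # -> 2440587.5
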
コード例 #16
0
ファイル: t02.py プロジェクト: ilikeit813/Project-Backup
def main(args):
	nChunks = 1000 # the temporal shape of a file.
	LFFT = 4096*16 # length of the FFT
	nFramesAvg = 1*4*LFFT/4096 # the integration time under LFFT; 4 = beampols = 2X + 2Y (high and low tunes)
	saveftint = 1 # if == 1, save the frequency channel and tInt info
	config = parseOptions(args) # Parse command line options
	#for offset_i in range(4306, 4309):# one offset = nChunks*nFramesAvg skiped
	for offset_i in range(0, 4):# one offset = nChunks*nFramesAvg skiped
		offset = nChunks*nFramesAvg*offset_i

	# Build the DRX file
		try:
			#drxFile = drsu.getFileByName(config['args'][0], config['args'][1])
			fh = open(config['args'][0], "rb")
			nFramesFile = os.path.getsize(config['args'][0]) / drx.FrameSize # drx.FrameSize = 4128
		except IOError:
			print config['args'], ' not found'
			sys.exit(1)
		try:
			junkFrame = drx.readFrame(fh)
			try:
				srate = junkFrame.getSampleRate()
				t0 = junkFrame.getTime()
				pass
			except ZeroDivisionError:
				print 'zero division error'
				break
		except errors.syncError:
			fh.seek(-drx.FrameSize+1, 1)
		fh.seek(-drx.FrameSize, 1)
		beam,tune,pol = junkFrame.parseID()
		beams = drx.getBeamCount(fh)
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		config['offset'] = offset/srate/beampols*4096
		if offset != 0:
			fh.seek(offset*drx.FrameSize, 1)

		config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
		maxFrames = nFramesAvg

	# Number of remaining chunks (and the correction to the number of
	# frames to read in).
#	nChunks = int(round(config['duration'] / config['average']))
		config['duration']=nChunks*config['average']
		if nChunks == 0:
			nChunks = 1
		nFrames = nFramesAvg*nChunks
	
	# Date & Central Frequency
		beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
		centralFreq1 = 0.0
		centralFreq2 = 0.0
		for i in xrange(4):
			junkFrame = drx.readFrame(fh)
			b,t,p = junkFrame.parseID()
			if p == 0 and t == 1:
				try:
					centralFreq1 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq1 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			elif p == 0 and t == 2:
				try:
					centralFreq2 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq2 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			else:
				pass
		fh.seek(-4*drx.FrameSize, 1)

		config['freq1'] = centralFreq1
		config['freq2'] = centralFreq2

	# File summary
		print "Filename: %s" % config['args']
		print "Date of First Frame: %s" % str(beginDate)
		print "Beams: %i" % beams
		print "Tune/Pols: %i %i %i %i" % tunepols
		print "Sample Rate: %i Hz" % srate
		print "Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (centralFreq1, centralFreq2)
		print "Frames: %i (%.3f s)" % (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate)
		print "---"
		print "Offset: %.3f s (%i frames)" % (config['offset'], offset)
		print "Integration: %.4f s (%i frames; %i frames per beam/tune/pol)" % (config['average'], nFramesAvg, nFramesAvg / beampols)
		print 'beampols', beampols
		print "Duration: %.4f s (%i frames; %i frames per beam/tune/pol)" % (config['average']*nChunks, nFrames, nFrames / beampols)
		#break

	# Sanity check
		if nFrames > (nFramesFile - offset):
			raise RuntimeError("Requested integration time + offset is greater than file length")

		# Master loop over all of the file chunks
		#masterSpectra = numpy.zeros((nChunks, 2, LFFT-1))
		masterSpectra = numpy.zeros((nChunks, 4, LFFT-1))
		masterTimes = numpy.zeros(nChunks)
		for i in xrange(nChunks):
			# Find out how many frames remain in the file.  If this number is larger
			# than the maximum of frames we can work with at a time (maxFrames),
			# only deal with that chunk
			framesRemaining = nFrames - i*maxFrames
			if framesRemaining > maxFrames:
				framesWork = maxFrames
			else:
				framesWork = framesRemaining
			if framesRemaining%(nFrames/10)==0:
				print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)

			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
			#print 'data.shape', data.shape

			# If there are fewer frames than we need to fill an FFT, skip this chunk
			if data.shape[1] < LFFT:
				print 'data.shape[1]< LFFT, break'
				break
	
			# Inner loop that actually reads the frames into the data array
			#if framesRemaining%(nFrames/10)==0:
			#	print "Working on %.1f ms of data" % ((framesWork*4096/beampols/srate)*1000.0)

			for j in xrange(framesWork):
				# Read in the next frame and anticipate any problems that could occur
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					print "EOF Error"
					break
				except errors.syncError:
					print "Sync Error"
					continue
	
				beam,tune,pol = cFrame.parseID()

				if tune == 0:
					tune += 1
				aStand = 2*(tune-1) + pol
				if j == 0:
					cTime = cFrame.getTime()

				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
	
			# Calculate the spectra for this block of data and then weight the results by 
			# the total number of frames read.  This is needed to keep the averages correct.

			#print 'data.shape',data.shape


			#continue
			#freq, tempSpec1 = fxc.SpecMaster(data[:2,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate)
			tempSpec1 = numpy.abs(numpy.fft.fft(data[:2,:], axis=1))[:,1:]/2.
			tempSpec1 = numpy.fft.fftshift(tempSpec1, axes=1)**2./LFFT*2
			freq = numpy.fft.fftfreq(LFFT, d = 1.0/srate)
			freq = numpy.fft.fftshift(freq)

			#freq, tempSpec2 = fxc.SpecMaster(data[2:,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate)
			tempSpec2 = numpy.abs(numpy.fft.fft(data[2:,:], axis=1))[:,1:]/2.
			tempSpec2 = numpy.fft.fftshift(tempSpec2, axes=1)**2./LFFT*2

			#print 'tempSpec.shape', tempSpec1.shape	
			# Save the results to the various master arrays
			masterTimes[i] = cTime

			masterSpectra[i,0,:] = tempSpec1[0,:]
			masterSpectra[i,1,:] = tempSpec1[1,:]
			masterSpectra[i,2,:] = tempSpec2[0,:]
			masterSpectra[i,3,:] = tempSpec2[1,:]
			
	
			# We don't really need the data array anymore, so delete it
			del(data)

		#continue

		#drxFile.close()
	
		# Now that we have read through all of the chunks, perform the final averaging by
		# dividing by all of the chunks
		outname = "%s_%i_fft_offset_%.9i_LFFT_%.6i_frames" % (config['args'][0][-16:], beam,offset,LFFT)

#		numpy.savez(outname, freq=freq, freq1=freq+config['freq1'], freq2=freq+config['freq2'], times=masterTimes, spec=masterSpectra, tInt=(maxFrames*4096/beampols/srate), srate=srate,  standMapper=[4*(beam-1) + i for i in xrange(masterSpectra.shape[1])])
		if saveftint == 1:
			numpy.save('freq1', freq+config['freq1'])
			numpy.save('freq2', freq+config['freq2'])
			numpy.save('tInt',  maxFrames*4096/beampols/srate)
			saveftint = 0

		#print 'fInt = ',(freq+config['freq1'])[1]-(freq+config['freq1'])[0]
		#print 'tInt = ',maxFrames*4096/beampols/srate
		#print 'tInt = ', 1.0*LFFT/srate

		masterSpectra[:,0,:] = masterSpectra[:,0:2,:].mean(1)
		masterSpectra[:,1,:] = masterSpectra[:,2:4,:].mean(1)

		#numpy.save(outname[-46:], masterSpectra[:,0:2,:])
		#print 'spectrogram shape', masterSpectra[:,0:2,:].shape
		numpy.save(outname, masterSpectra[:,0:2,:])
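
The spectra in the chunk loop above are one-dimensional FFTs taken along the time axis of each polarization, squared and scaled to a power spectral density. A compact stand-alone version of that step on random data (a sketch; the /LFFT*2 normalization mirrors the scaling used above):

import numpy

LFFT = 1024
srate = 19.6e6
data = (numpy.random.randn(2, LFFT) + 1j * numpy.random.randn(2, LFFT)).astype(numpy.csingle)

spec = numpy.abs(numpy.fft.fft(data, axis=1))**2 / LFFT * 2   # per-row power spectra
spec = numpy.fft.fftshift(spec, axes=1)                       # DC to the center of each row
freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1.0 / srate))
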
コード例 #17
0
def main(args):
    # Break out the files we need
    ssmif = args.ssmif
    filenames = args.filename

    # Setup the LWA station information
    station = parse_ssmif(ssmif)
    antennas = station.antennas

    # Get an observer reader for calculations
    obs = station.get_observer()

    # Setup the beamformer gain and delay variables
    course = numpy.zeros(520)
    fine = numpy.zeros(520)
    gains = numpy.zeros((260, 4))
    gains[:, 0] = 1.0
    gains[:, 3] = 1.0
    for ant in antennas:
        if ant.combined_status != 33:
            stand = (ant.digitizer - 1) // 2
            gains[stand, :] = 0.0

    # Setup the beamformer itself
    dp = SoftwareDP(mode='DRX', filter=7, central_freq=74e6)

    # Find the target azimuth/elevation to use
    idf = TBWFile(filenames[0])
    tStart = datetime.utcfromtimestamp(idf.get_info('start_time'))
    idf.close()

    obs.date = tStart.strftime("%Y/%m/%d %H:%M:%S")
    tTransit = obs.next_transit(args.source)
    obs.date = tTransit
    args.source.compute(obs)
    targetAz = args.source.az * 180 / numpy.pi
    targetEl = args.source.alt * 180 / numpy.pi

    # Preliminary report
    print("Working on %i TBW files using SSMIF '%s'" %
          (len(filenames), os.path.basename(ssmif)))
    print("  Source: '%s'" % args.source.name)
    print("    Transit time: %s" % str(tTransit))
    print("    Transit azimuth: %.2f degrees" % targetAz)
    print("    Transet elevation: %.2f degrees" % targetEl)
    print(" ")

    # Loop over input files
    unx, lst, pwrX, pwrY = [], [], [], []
    for filename in filenames:
        ## Get the file reader
        idf = TBWFile(filename)

        ## Pull out some metadata and update the observer
        jd = astro.unix_to_utcjd(idf.get_info('start_time'))
        obs.date = ephem.Date(jd - astro.DJD_OFFSET)
        sample_rate = idf.get_info('sample_rate')
        nInts = int(
            round(idf.get_info('nframe') / (30000.0 * len(antennas) / 2)))
        transitOffset = (obs.date - tTransit) * 86400.0

        ## Metadata report
        print("Filename: %s" % os.path.basename(filename))
        print("  Data type:  %s" % type(idf))
        print("  Captures in file: %i (%.3f s)" %
              (nInts, nInts * 30000 * 400 / sample_rate))
        print("  Station: %s" % station.name)
        print("  Date observed: %s" % str(obs.date))
        print("  MJD: %.5f" % (jd - astro.MJD_OFFSET, ))
        print("  LST: %s" % str(obs.sidereal_time()))
        print("    %.1f s %s transit" %
              (abs(transitOffset), 'before' if transitOffset < 0 else 'after'))
        print(" ")

        ## Load in the data
        readT, t, data = idf.read(time_in_samples=True)

        ## Build up a time array
        t = t + numpy.arange(data.shape[1], dtype=numpy.int64)

        ## Update the beamformer delays for the pointing center(s)
        unx.append(idf.get_info('start_time'))
        lst.append(obs.sidereal_time() * 12 / numpy.pi)
        pwrX.append([])
        pwrY.append([])

        for offset in (-1, 0, 1):
            ### Compute
            delays = beamformer.calc_delay(antennas,
                                           freq=74.0e6,
                                           azimuth=targetAz,
                                           elevation=targetEl + offset)
            delays *= fS * 16
            delays = delays.max() - delays
            ### Decompose into FIFO and FIR
            course = (delays // 16)
            fine = (delays % 16)

            ## Form the beams for both polarizations
            beamX, beamY = dp.form_beam(antennas, t, data, course, fine, gains)

            ## Compute the integrated spectra
            ### Convert to int16
            beam = numpy.zeros((2, beamX.size), dtype=numpy.int16)
            beam[0, :] = (numpy.round(beamX)).astype(data.dtype)
            beam[1, :] = (numpy.round(beamY)).astype(data.dtype)
            ### Move into the frequency domain
            freq, spec = fxc.SpecMaster(beam,
                                        LFFT=8192,
                                        window=fxc.null_window,
                                        verbose=False,
                                        sample_rate=fS,
                                        clip_level=0)

            ## Save
            pwrX[-1].append(spec[0, :])
            pwrY[-1].append(spec[1, :])

        ## Done
        idf.close()

    # Convert to arrays
    unx, lst = numpy.array(unx), numpy.array(lst)
    pwrX, pwrY = numpy.array(pwrX), numpy.array(pwrY)

    # Save for later (needed for debugging)
    outname = "estimateSEFD-%s-%04i%02i%02i.npz" % (os.path.splitext(
        os.path.basename(ssmif))[0], tTransit.tuple()[0], tTransit.tuple()[1],
                                                    tTransit.tuple()[2])
    print("Saving intermediate data to '%s'" % outname)
    print(" ")
    numpy.savez(outname,
                source=args.source.name,
                freq=freq,
                unx=unx,
                lst=lst,
                pwrX=pwrX,
                pwrY=pwrY)

    # Report
    print("%s" % (args.source.name, ))
    for i in range(lst.size):
        print("%s:  %s  %s" %
              (str(ephem.hours(str(lst[i]))), pwrX[i, :], pwrY[i, :]))

    # Plot
    if args.plots:
        fig = plt.figure()
        ax = fig.gca()
        ax.plot(lst, pwrX, linestyle='-', marker='+')
        ax.plot(lst, pwrY, linestyle='-', marker='x')
        plt.show()
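
The delay handling above works in sixteenths of a DP sample (the fS * 16 factor), so // 16 yields the whole-sample FIFO shift and % 16 the sub-sample residue for the FIR stage. The same decomposition in isolation (a sketch; the delay values are made up, and fS = 196 MHz is the DP sample clock):

import numpy

fS = 196e6                                       # DP sample clock, Hz
delays_s = numpy.array([51.3e-9, 12.7e-9, 0.0])  # hypothetical per-input delays, s

ticks = delays_s * fS * 16     # delays in 1/16ths of a sample
ticks = ticks.max() - ticks    # reference everything to the longest delay
coarse = ticks // 16           # whole-sample FIFO shift
fine = ticks % 16              # sub-sample part handled by the FIR filters
print(coarse, fine)
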
コード例 #18
0
ファイル: hdfWaterfallSW.py プロジェクト: kkirchhoff01/lslext
def processDataBatchStokes(fh, antennas, tStart, duration, sampleRate, config, dataSets, obsID=1, clip1=0, clip2=0):
	"""
	Process a chunk of data in a raw DRX file into Stokes parameters and 
	add the contents to an HDF5 file.
	"""
	
	# Length of the FFT
	LFFT = config['LFFT']
	
	# Find the start of the observation
	junkFrame = drx.readFrame(fh)
	srate = junkFrame.getSampleRate()
	t0 = junkFrame.getTime()
	fh.seek(-drx.FrameSize, 1)
	
	print 'Looking for #%i at %s with sample rate %.1f Hz...' % (obsID, tStart, sampleRate)
	while datetime.utcfromtimestamp(t0) < tStart or srate != sampleRate:
		junkFrame = drx.readFrame(fh)
		srate = junkFrame.getSampleRate()
		t0 = junkFrame.getTime()
	print '... Found #%i at %s with sample rate %.1f Hz' % (obsID, datetime.utcfromtimestamp(t0), srate)
	tDiff = datetime.utcfromtimestamp(t0) - tStart
	try:
		duration = duration - tDiff.total_seconds()
	except:
		duration = duration - (tDiff.seconds + tDiff.microseconds/1e6)
	
	beam,tune,pol = junkFrame.parseID()
	beams = drx.getBeamCount(fh)
	tunepols = drx.getFramesPerObs(fh)
	tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
	beampols = tunepol
	
	# Make sure that the file chunk size contains is an integer multiple
	# of the FFT length so that no data gets dropped.  This needs to
	# take into account the number of beampols in the data, the FFT length,
	# and the number of samples per frame.
	maxFrames = int(1.0*config['maxFrames']/beampols*4096/float(LFFT))*LFFT/4096*beampols
	
	# Number of frames to integrate over
	print "Line 455: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols
	nFramesAvg = int(round(config['average'] * srate / 4096 * beampols))
	nFramesAvg = int(1.0 * nFramesAvg / beampols*4096/float(LFFT))*LFFT/4096*beampols
	config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
	maxFrames = nFramesAvg
	print "Line 460: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg

	# Number of remaining chunks (and the correction to the number of
	# frames to read in).
	nChunks = int(round(duration / config['average']))
	if nChunks == 0:
		nChunks = 1
	nFrames = nFramesAvg*nChunks
	print "Line 468: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg, " nChunks ", nChunks

	# Date & Central Frequency
	beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
	centralFreq1 = 0.0
	centralFreq2 = 0.0
	for i in xrange(4):
		junkFrame = drx.readFrame(fh)
		b,t,p = junkFrame.parseID()
		if p == 0 and t == 1:
			try:
				centralFreq1 = junkFrame.getCentralFreq()
			except AttributeError:
				from lsl.common.dp import fS
				centralFreq1 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
		elif p == 0 and t == 2:
			try:
				centralFreq2 = junkFrame.getCentralFreq()
			except AttributeError:
				from lsl.common.dp import fS
				centralFreq2 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
		else:
			pass
	fh.seek(-4*drx.FrameSize, 1)
	freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1/srate))
	if float(fxc.__version__) < 0.8:
		freq = freq[1:]
		
	dataSets['obs%i-freq1' % obsID][:] = freq + centralFreq1
	dataSets['obs%i-freq2' % obsID][:] = freq + centralFreq2
	
	obs = dataSets['obs%i' % obsID]
	obs.attrs['tInt'] = config['average']
	obs.attrs['tInt_Unit'] = 's'
	obs.attrs['LFFT'] = LFFT
	obs.attrs['nChan'] = LFFT-1 if float(fxc.__version__) < 0.8 else LFFT
	obs.attrs['RBW'] = freq[1]-freq[0]
	obs.attrs['RBW_Units'] = 'Hz'
	
	dataProducts = ['I', 'Q', 'U', 'V']
	done = False
	for i in xrange(nChunks):
		# Find out how many frames remain in the file.  If this number is larger
		# than the maximum of frames we can work with at a time (maxFrames),
		# only deal with that chunk
		framesRemaining = nFrames - i*maxFrames
		if framesRemaining > maxFrames:
			framesWork = maxFrames
		else:
			framesWork = framesRemaining
		print "Working on chunk %i, %i frames remaining" % (i+1, framesRemaining)
		
		count = {0:0, 1:0, 2:0, 3:0}
		data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
		# If there are fewer frames than we need to fill an FFT, skip this chunk
		if data.shape[1] < LFFT:
			break
			
		# Inner loop that actually reads the frames into the data array
		print "Working on %.1f ms of data" % ((framesWork*4096/beampols/srate)*1000.0)
		
		for j in xrange(framesWork):
			# Read in the next frame and anticipate any problems that could occur
			try:
				cFrame = drx.readFrame(fh, Verbose=False)
			except errors.eofError:
				done = True
				break
			except errors.syncError:
				continue

			beam,tune,pol = cFrame.parseID()
			aStand = 2*(tune-1) + pol
			if j == 0:
				cTime = cFrame.getTime()
			
			try:
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
			except ValueError:
				raise RuntimeError("Invalid Shape")

		# Save out some easy stuff
		dataSets['obs%i-time' % obsID][i] = cTime
		
		if config['countSats']:
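			# DRX samples are 4-bit I/Q, so a magnitude-squared of 49 (|I| or
			# |Q| reaching 7) is used here as the saturation threshold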
			sats = ((data.real**2 + data.imag**2) >= 49).sum(axis=1)
			dataSets['obs%i-Saturation1' % obsID][i,:] = sats[0:2]
			dataSets['obs%i-Saturation2' % obsID][i,:] = sats[2:4]
		else:
			dataSets['obs%i-Saturation1' % obsID][i,:] = -1
			dataSets['obs%i-Saturation2' % obsID][i,:] = -1
			
		# Calculate the spectra for this block of data; each chunk is one
		# integration, so the results can be stored directly
		if clip1 == clip2:
			freq, tempSpec1 = fxc.StokesMaster(data, antennas, LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip1)
			
			for t in (1,2):
				for l,p in enumerate(dataProducts):
					dataSets['obs%i-%s%i' % (obsID, p, t)][i,:] = tempSpec1[l,t-1,:]
					
		else:
			freq, tempSpec1 = fxc.StokesMaster(data[:2,:], antennas[:2], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip1)
			freq, tempSpec2 = fxc.StokesMaster(data[2:,:], antennas[2:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip2)
			
			for l,p in enumerate(dataProducts):
				dataSets['obs%i-%s%i' % (obsID, p, 1)][i,:] = tempSpec1[l,0,:]
				dataSets['obs%i-%s%i' % (obsID, p, 2)][i,:] = tempSpec2[l,0,:]
				
		# We don't really need the data array anymore, so delete it
		del(data)
		
		# Are we done yet?
		if done:
			break
			
	return True
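
The frame-count rounding used above packs three constraints into one expression: the chunk must divide evenly across the beam/tuning/pol streams, each stream must hold whole 4096-sample frames, and the per-stream sample count must be a multiple of LFFT.  A standalone sketch of the same arithmetic with illustrative values (the sample rate and integration time are assumptions, not taken from a real observation):

srate = 19.6e6       # DRX sample rate in Hz (assumed)
LFFT = 4096          # FFT length
beampols = 4         # two tunings x two polarizations
tAvg = 1.0           # requested integration time in seconds (assumed)

# Raw frame count covering tAvg across all four streams
nFramesAvg = int(round(tAvg * srate / 4096 * beampols))
# Round down so each stream holds an integer number of LFFT-sample blocks
nFramesAvg = int(1.0 * nFramesAvg / beampols * 4096 / float(LFFT)) * LFFT / 4096 * beampols
# Integration time actually delivered after the rounding
print "requested %.6f s -> actual %.6f s (%i frames)" % (tAvg, 1.0 * nFramesAvg / beampols * 4096 / srate, nFramesAvg)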
コード例 #19
0
ファイル: vis.py プロジェクト: lwa-project/lsl
    def set_unixtime(self, timestamp):
        """
        Set the array time using a UNIX timestamp (epoch 1970).
        """

        self.set_jultime(astro.unix_to_utcjd(timestamp))
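
A usage sketch for the setter above; the array construction via build_sim_array() follows the pattern used elsewhere in this collection, and the stand selection and frequency are placeholder values:

import time
import numpy
from lsl import astro
from lsl.common import stations
from lsl.sim import vis

# Build a simulated LWA1 array and point it at "now" via the UNIX setter
station = stations.lwa1
aa = vis.build_sim_array(station, station.antennas[0:10],
                         numpy.array([38.0e6]) / 1e9)
aa.set_unixtime(time.time())
# Equivalent to the underlying Julian-date call:
aa.set_jultime(astro.unix_to_utcjd(time.time()))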
コード例 #20
0
ファイル: hdfWaterfallSW.py プロジェクト: kkirchhoff01/lslext
def main(args):
	# Parse command line options
	config = parseOptions(args)

	# Length of the FFT
	LFFT = config['LFFT']

	# Open the file and find good data (not spectrometer data)
	filename = config['args'][0]
	fh = open(filename, "rb")
	nFramesFile = os.path.getsize(filename) / drx.FrameSize
	
	try:
		for i in xrange(5):
			junkFrame = drspec.readFrame(fh)
		raise RuntimeError("ERROR: '%s' appears to be a DR spectrometer file, not a raw DRX file" % filename)
	except errors.syncError:
		fh.seek(0)
		
	while True:
		try:
			junkFrame = drx.readFrame(fh)
			try:
				srate = junkFrame.getSampleRate()
				t0 = junkFrame.getTime()
				break
			except ZeroDivisionError:
				pass
		except errors.syncError:
			fh.seek(-drx.FrameSize+1, 1)
			
	fh.seek(-drx.FrameSize, 1)
	
	beam,tune,pol = junkFrame.parseID()
	beams = drx.getBeamCount(fh)
	tunepols = drx.getFramesPerObs(fh)
	tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
	beampols = tunepol

	# Offset in frames for beampols beam/tuning/pol. sets
	inoffset = config['offset']
	offset = int(config['offset'] * srate / 4096 * beampols)
	offset = int(1.0 * offset / beampols) * beampols
	fh.seek(offset*drx.FrameSize, 1)
	
	# Iterate on the offsets until we reach the right point in the file.  This
	# is needed to deal with files that start with only one tuning and/or a 
	# different sample rate.  
	while True:
		## Figure out where in the file we are and what the current tuning/sample 
		## rate is
		junkFrame = drx.readFrame(fh)
		srate = junkFrame.getSampleRate()
		t1 = junkFrame.getTime()
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		fh.seek(-drx.FrameSize, 1)
		
		## See how far off the current frame is from the target
		tDiff = t1 - (t0 + config['offset'])
		
		## Half that to come up with a new seek parameter
		tCorr = -tDiff / 2.0
		cOffset = int(tCorr * srate / 4096 * beampols)
		cOffset = int(1.0 * cOffset / beampols) * beampols
		offset += cOffset
		
		## If the offset is zero, we are done.  Otherwise, apply the offset
		## and check the location in the file again.
		if cOffset == 0:
			break
		fh.seek(cOffset*drx.FrameSize, 1)
	
	# Update the offset actually used
	config['offset'] = t1 - t0
	offset = int(round(config['offset'] * srate / 4096 * beampols))
	offset = int(1.0 * offset / beampols) * beampols

	# Make sure that the file chunk size is an integer multiple
	# of the FFT length so that no data gets dropped.  This needs to
	# take into account the number of beampols in the data, the FFT length,
	# and the number of samples per frame.
	maxFrames = int(1.0*config['maxFrames']/beampols*4096/float(LFFT))*LFFT/4096*beampols

	# Number of frames to integrate over
	print "Line 673: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols
	nFramesAvg = int(config['average'] * srate / 4096 * beampols)
	if nFramesAvg == 0:
		nFramesAvg = 1 * beampols
	else:
		nFramesAvg = int(1.0 * nFramesAvg / beampols*4096/float(LFFT))*LFFT/4096*beampols
	config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
	maxFrames = nFramesAvg
	print "Line 678: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg

	# Number of remaining chunks (and the correction to the number of
	# frames to read in).
	if config['metadata'] is not None:
		config['duration'] = 0
	if config['duration'] == 0:
		config['duration'] = 1.0 * nFramesFile / beampols * 4096 / srate
	else:
		config['duration'] = int(round(config['duration'] * srate * beampols / 4096) / beampols * 4096 / srate)
	
	nChunks = int(round(config['duration'] / config['average']))
	if nChunks == 0:
		nChunks = 1
	nFrames = nFramesAvg*nChunks
	print "Line 693: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg, " nChunks ", nChunks

	# Date & Central Frequency
	t1  = junkFrame.getTime()
	beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
	centralFreq1 = 0.0
	centralFreq2 = 0.0
	for i in xrange(4):
		junkFrame = drx.readFrame(fh)
		b,t,p = junkFrame.parseID()
		if p == 0 and t == 1:
			try:
				centralFreq1 = junkFrame.getCentralFreq()
			except AttributeError:
				from lsl.common.dp import fS
				centralFreq1 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
		elif p == 0 and t == 2:
			try:
				centralFreq2 = junkFrame.getCentralFreq()
			except AttributeError:
				from lsl.common.dp import fS
				centralFreq2 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
		else:
			pass
	fh.seek(-4*drx.FrameSize, 1)
	
	config['freq1'] = centralFreq1
	config['freq2'] = centralFreq2

	# File summary
	print "Filename: %s" % filename
	print "Date of First Frame: %s" % str(beginDate)
	print "Beams: %i" % beams
	print "Tune/Pols: %i %i %i %i" % tunepols
	print "Sample Rate: %i Hz" % srate
	print "Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (centralFreq1, centralFreq2)
	print "Frames: %i (%.3f s)" % (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate)
	print "---"
	print "Offset: %.3f s (%i frames)" % (config['offset'], offset)
	print "Integration: %.6f s (%i frames; %i frames per beam/tune/pol)" % (config['average'], nFramesAvg, nFramesAvg / beampols)
	print "Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (config['average']*nChunks, nFrames, nFrames / beampols)
	print "Chunks: %i" % nChunks
	print " "
	
	# Estimate clip level (if needed)
	if config['estimate']:
		clip1, clip2 = estimateClipLevel(fh, beampols)
	else:
		clip1 = config['clip']
		clip2 = config['clip']
		
	# Make the pseudo-antennas for Stokes calculation
	antennas = []
	for i in xrange(4):
		if i / 2 == 0:
			newAnt = stations.Antenna(1)
		else:
			newAnt = stations.Antenna(2)
			
		if i % 2 == 0:
			newAnt.pol = 0
		else:
			newAnt.pol = 1
			
		antennas.append(newAnt)
		
	# Setup the output file
	outname = os.path.split(filename)[1]
	outname = os.path.splitext(outname)[0]
	if config['return'] == 'FFT':
		outname = '%s-%d-waterfall-complex.hdf5' %(outname, inoffset)
	else:
		outname = '%s-waterfall.hdf5' % outname
	
	if os.path.exists(outname):
		#yn = raw_input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
		#if yn not in ('n', 'N'):
		#	os.unlink(outname)
		#else:
		raise RuntimeError("Output file '%s' already exists" % outname)
			
	f = hdfData.createNewFile(outname)
	
	# Look at the metadata and come up with a list of observations.  If 
	# there are no metadata, create a single "observation" that covers the
	# whole file.
	obsList = {}
	if config['metadata'] is not None:
		sdf = metabundle.getSessionDefinition(config['metadata'])
		
		sdfBeam  = sdf.sessions[0].drxBeam
		spcSetup = sdf.sessions[0].spcSetup
		if sdfBeam != beam:
			raise RuntimeError("Metadata is for beam #%i, but data is from beam #%i" % (sdfBeam, beam))
			
		for i,obs in enumerate(sdf.sessions[0].observations):
			sdfStart = mcs.mjdmpm2datetime(obs.mjd, obs.mpm)
			sdfStop  = mcs.mjdmpm2datetime(obs.mjd, obs.mpm + obs.dur)
			obsDur   = obs.dur/1000.0
			obsSR    = drx.filterCodes[obs.filter]
			
			obsList[i+1] = (sdfStart, sdfStop, obsDur, obsSR)
			
		print "Observations:"
		for i in sorted(obsList.keys()):
			obs = obsList[i]
			print " #%i: %s to %s (%.3f s) at %.3f MHz" % (i, obs[0], obs[1], obs[2], obs[3]/1e6)
		print " "
			
		hdfData.fillFromMetabundle(f, config['metadata'])
	else:
		obsList[1] = (datetime.utcfromtimestamp(t1), datetime(2222,12,31,23,59,59), config['duration'], srate)
		
		hdfData.fillMinimum(f, 1, beam, srate)
		
	if config['linear']:
		dataProducts = ['XX', 'YY']
	else:
		dataProducts = ['I', 'Q', 'U', 'V']
		
	for o in sorted(obsList.keys()):
		for t in (1,2):
			hdfData.createDataSets(f, o, t, numpy.arange(LFFT-1 if float(fxc.__version__) < 0.8 else LFFT, dtype=numpy.float32), int(round(obsList[o][2]/config['average'])), dataProducts, dataOut=config['return'])
	f.attrs['FileGenerator'] = 'hdfWaterfallSW.py'
	f.attrs['InputData'] = os.path.basename(filename)
	
	# Create the various HDF group holders
	ds = {}
	for o in sorted(obsList.keys()):
		obs = hdfData.getObservationSet(f, o)
		
		ds['obs%i' % o] = obs
		ds['obs%i-time' % o] = obs.create_dataset('time', (int(round(obsList[o][2]/config['average'])),), 'f8')
		
		for t in (1,2):
			ds['obs%i-freq%i' % (o, t)] = hdfData.getDataSet(f, o, t, 'freq')
			for p in dataProducts:
				if config['return'] == 'PSD':
					ds["obs%i-%s%i" % (o, p, t)] = hdfData.getDataSet(f, o, t, p)
				else:
					ds["obs%i-%s%imag" % (o, p, t)] = hdfData.getDataSet(f, o, t, p+'mag')
					ds["obs%i-%s%iphase" % (o, p, t)] = hdfData.getDataSet(f, o, t, p+'phase')
			ds['obs%i-Saturation%i' % (o, t)] = hdfData.getDataSet(f, o, t, 'Saturation')
	# Load in the correct analysis function
	if config['linear']:
		processDataBatch = processDataBatchLinear
	else:
		processDataBatch = processDataBatchStokes
		
	# Go!
	for o in sorted(obsList.keys()):
		try:
			processDataBatch(fh, antennas, obsList[o][0], obsList[o][2], obsList[o][3], config, ds, obsID=o, clip1=clip1, clip2=clip2)
		except RuntimeError, e:
			print "Observation #%i: %s, abandoning this observation" % (o, str(e))
コード例 #21
0
ファイル: t01.py プロジェクト: ilikeit813/Project-Backup
def main(args):
	t0=time.time()
	#nChunks = 10000#10000, the size of a file.
	#nFramesAvg = 1*4#200, 50 frames per pol, the subintegration time.
	nChunks = 3000 #10000 #the size of a file.
	nFramesAvg = 4*16 #the integration time.
	fcl = 6000+7000
	fch = fcl + 3343
	for offset_i in range(0, 1409):# one offset = nChunks*nFramesAvg skipped
	#for offset_i in range(1500*2, 1500*3):# one offset = nChunks*nFramesAvg skipped
	#for offset_i in range(1500*4, 1500*5):# one offset = nChunks*nFramesAvg skipped
		offset = nChunks*nFramesAvg*offset_i
	# Parse command line options
		config = parseOptions(args)

	# Length of the FFT
		#LFFT = config['LFFT']
		LFFT = 4096*16
	# Build the DRX file
		try:
			#drxFile = drsu.getFileByName(config['args'][0], config['args'][1])
			fh = open(config['args'][0], "rb")
			nFramesFile = os.path.getsize(config['args'][0]) / drx.FrameSize
		except (IOError, OSError):
			print config['args']
			sys.exit(1)

		#drxFile.open()
		#nFramesFile = drxFile.size / drx.FrameSize
	
		while True:
			try:
				junkFrame = drx.readFrame(fh)
				try:
					srate = junkFrame.getSampleRate()
					#t0 = junkFrame.getTime()
					break
				except ZeroDivisionError:
					pass
			except errors.syncError:
				fh.seek(-drx.FrameSize+1, 1)
			
		fh.seek(-drx.FrameSize, 1)
	
		beam,tune,pol = junkFrame.parseID()
		beams = drx.getBeamCount(fh)
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		config['offset'] = offset/srate/beampols*4096
		if offset != 0:
			fh.seek(offset*drx.FrameSize, 1)

	# Make sure that the file chunk size is an integer multiple
	# of the FFT length so that no data gets dropped.  This needs to
	# take into account the number of beampols in the data, the FFT length,
	# and the number of samples per frame.
		maxFrames = int(1.0*config['maxFrames']/beampols*4096/float(LFFT))*LFFT/4096*beampols

	# Number of frames to integrate over
#	nFramesAvg = int(config['average'] * srate / 4096 * beampols)
#	nFramesAvg = int(1.0 * nFramesAvg / beampols*4096/float(LFFT))*LFFT/4096*beampols
		config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
		maxFrames = nFramesAvg

	# Number of remaining chunks (and the correction to the number of
	# frames to read in).
#	nChunks = int(round(config['duration'] / config['average']))
		config['duration']=nChunks*config['average']
		if nChunks == 0:
			nChunks = 1
		nFrames = nFramesAvg*nChunks
	
	# Date & Central Frequency
		beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
		centralFreq1 = 0.0
		centralFreq2 = 0.0
		for i in xrange(4):
			junkFrame = drx.readFrame(fh)
			b,t,p = junkFrame.parseID()
			if p == 0 and t == 1:
				try:
					centralFreq1 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq1 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
			elif p == 0 and t == 2:
				try:
					centralFreq2 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq2 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
			else:
				pass
		fh.seek(-4*drx.FrameSize, 1)
	
		config['freq1'] = centralFreq1
		config['freq2'] = centralFreq2

	# File summary
		print "Filename: %s" % config['args']
		print "Date of First Frame: %s" % str(beginDate)
		print "Beams: %i" % beams
		print "Tune/Pols: %i %i %i %i" % tunepols
		print "Sample Rate: %i Hz" % srate
		print "Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (centralFreq1, centralFreq2)
		print "Frames: %i (%.3f s)" % (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate)
		print "---"
		print "Offset: %.3f s (%i frames)" % (config['offset'], offset)
		print "Integration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (config['average'], nFramesAvg, nFramesAvg / beampols)
		print "Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (config['average']*nChunks, nFrames, nFrames / beampols)
		print "Chunks: %i" % nChunks

		#sys.exit()

	# Sanity check
		if nFrames > (nFramesFile - offset):
			raise RuntimeError("Requested integration time+offset is greater than file length")

	# Estimate clip level (if needed)
		if config['estimate']:
			filePos = fh.tell()
		
		# Read in the first 100 frames for each tuning/polarization
			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4, 4096*100), dtype=numpy.csingle)
			for i in xrange(4*100):
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					break
				except errors.syncError:
					continue
			
				beam,tune,pol = cFrame.parseID()
				aStand = 2*(tune-1) + pol
			
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
		
		# Go back to where we started
			fh.seek(filePos)
		
		# Compute the robust mean and standard deviation for I and Q for each
		# tuning/polarization
			meanI = []
			meanQ = []
			stdsI = []
			stdsQ = []
			#for i in xrange(4):
			for i in xrange(2):
				meanI.append( robust.mean(data[i,:].real) )
				meanQ.append( robust.mean(data[i,:].imag) )
				
				stdsI.append( robust.std(data[i,:].real) )
				stdsQ.append( robust.std(data[i,:].imag) )
			
			# Come up with the clip levels based on 5 sigma
			clip1 = (meanI[0] + meanI[1] + meanQ[0] + meanQ[1]) / 4.0
			#clip2 = (meanI[2] + meanI[3] + meanQ[2] + meanQ[3]) / 4.0
			clip2 = 0
			
			clip1 += 5*(stdsI[0] + stdsI[1] + stdsQ[0] + stdsQ[1]) / 4.0
			#clip2 += 5*(stdsI[2] + stdsI[3] + stdsQ[2] + stdsQ[3]) / 4.0
			clip2 += 0
			
			clip1 = int(round(clip1))
			clip2 = int(round(clip2))
			
		else:
			clip1 = config['clip']
			clip2 = config['clip']
	
		# Master loop over all of the file chunks
		#masterSpectra = numpy.zeros((nChunks, 4, LFFT-1))
		masterSpectra = numpy.zeros((nChunks, 4, fch-fcl))
		masterTimes = numpy.zeros(nChunks)
		for i in xrange(nChunks):
			# Find out how many frames remain in the file.  If this number is larger
			# than the maximum of frames we can work with at a time (maxFrames),
			# only deal with that chunk
			framesRemaining = nFrames - i*maxFrames
			if framesRemaining > maxFrames:
				framesWork = maxFrames
			else:
				framesWork = framesRemaining
			
			if framesRemaining%(nFrames/10)==0:
				print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
	
	
			
			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
			# If there are fewer frames than we need to fill an FFT, skip this chunk
			if data.shape[1] < LFFT:
				break
	
			# Inner loop that actually reads the frames into the data array
			if framesRemaining%(nFrames/10)==0:
				print "Working on %.1f ms of data" % ((framesWork*4096/beampols/srate)*1000.0)
	
			for j in xrange(framesWork):
				# Read in the next frame and anticipate any problems that could occur
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					print "EOF Error"
					break
				except errors.syncError:
					print "Sync Error"
					continue
	
				beam,tune,pol = cFrame.parseID()
				aStand = 2*(tune-1) + pol
				if j == 0:
					cTime = cFrame.getTime()
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
	
			# Calculate the spectra for this block of data; only the selected
			# channel range [fcl:fch] is kept
			#freq, tempSpec1 = fxc.SpecMaster(data[:2,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip1)
			freq, tempSpec1 = fxc.SpecMaster(data[:2,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate)
			
			#freq, tempSpec2 = fxc.SpecMaster(data[2:,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip2)
			freq, tempSpec2 = fxc.SpecMaster(data[2:,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate)
			
			# Save the results to the various master arrays
			masterTimes[i] = cTime
			
			masterSpectra[i,0,:] = tempSpec1[0,fcl:fch]
			masterSpectra[i,1,:] = tempSpec1[1,fcl:fch]
			masterSpectra[i,2,:] = tempSpec2[0,fcl:fch]
			masterSpectra[i,3,:] = tempSpec2[1,fcl:fch]
			
	
			# We don't really need the data array anymore, so delete it
			del(data)
	
		#drxFile.close()
	
		# Now that we have read through all of the chunks, average the
		# polarizations together and save the results
		outname = "%s_%i_fft_offset_%.9i_frames" % (config['args'][0], beam,offset)

#		numpy.savez(outname, freq=freq, freq1=freq+config['freq1'], freq2=freq+config['freq2'], times=masterTimes, spec=masterSpectra, tInt=(maxFrames*4096/beampols/srate), srate=srate,  standMapper=[4*(beam-1) + i for i in xrange(masterSpectra.shape[1])])

		#print 'fInt = ',(freq+config['freq1'])[1]-(freq+config['freq1'])[0]
		#print 'tInt = ',maxFrames*4096/beampols/srate

		# Average the two polarizations of each tuning in place
		masterSpectra[:,0,:] = masterSpectra[:,0:2,:].mean(1)
		masterSpectra[:,1,:] = masterSpectra[:,2:4,:].mean(1)

		numpy.save(outname[-46:], masterSpectra[:,1,:])
		#numpy.save(outname[-46:], masterSpectra[:,0:2,:])
		#numpy.save(outname[-46:], masterSpectra)
#		spec = numpy.squeeze( (masterWeight*masterSpectra).sum(axis=0) / masterWeight.sum(axis=0) )
	
#		offset_i = offset_i + 1
	
	print time.time()-t0
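
The inline clip-level estimate above reduces to a robust mean plus five robust standard deviations, averaged over the real and imaginary parts of the two tuning-1 streams (tuning 2 is simply forced to zero here).  The statistic itself, factored out using the same lsl.statistics.robust estimators:

import numpy
from lsl.statistics import robust

def estimate_clip(data2):
	"""
	Given a (2, nSamples) complex array holding one tuning's X and Y
	polarizations, return the 5-sigma clip level computed above.
	"""
	parts = [data2[0].real, data2[1].real, data2[0].imag, data2[1].imag]
	level = numpy.mean([robust.mean(p) for p in parts])
	level += 5 * numpy.mean([robust.std(p) for p in parts])
	return int(round(level))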
コード例 #22
0
ファイル: transform.py プロジェクト: lwa-project/lsl
    def utc_dp(self, value):
        if not isinstance(value, (int, float)):
            raise TypeError("value must be type int or float")

        self._time = astro.unix_to_utcjd(float(value) / fS)
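
The setter treats value as a count of ticks of the DP sample clock fS since the UNIX epoch, so the conversion is a rescale followed by the usual UNIX-to-JD step.  A minimal equivalent (the tick count is an illustrative value):

from lsl import astro
from lsl.common.dp import fS   # DP sample clock rate in Hz

ticks = int(1234567890 * fS)   # illustrative: UNIX time 1234567890 in ticks
jd = astro.unix_to_utcjd(float(ticks) / fS)
print(jd)                      # UTC Julian date for that instant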