Code Example #1

# Likely imports for this snippet (module paths follow LSL conventions and
# are assumptions, not part of the original):
import os
import sys
import h5py
import numpy
from datetime import datetime

from matplotlib import pyplot as plt

from lsl.common import stations
from lsl.common.progress import ProgressBarPlus
from lsl.reader import drx, errors, buffer
from lsl.correlator import fx as fxc

def main(args):
    LFFT = args.fft_length

    stand1 = 0
    stand2 = int(args.dipole_id_y)
    filenames = args.filename

    # Build up the station
    if args.lwasv:
        site = stations.lwasv
    else:
        site = stations.lwa1

    # Get the antennas we need (and a fake one for the beam)
    rawAntennas = site.antennas

    antennas = []

    dipole = None
    xyz = numpy.zeros((len(rawAntennas), 3))
    i = 0
    for ant in rawAntennas:
        if ant.stand.id == stand2 and ant.pol == 0:
            dipole = ant
        xyz[i, 0] = ant.stand.x
        xyz[i, 1] = ant.stand.y
        xyz[i, 2] = ant.stand.z
        i += 1
    arrayX = xyz[:, 0].mean()
    arrayY = xyz[:, 1].mean()
    arrayZ = xyz[:, 2].mean()

    ## Fake one down here...
    beamStand = stations.Stand(0, arrayX, arrayY, arrayZ)
    beamFEE = stations.FEE('Beam', 0, gain1=0, gain2=0, status=3)
    beamCable = stations.Cable('Beam', 0, vf=1.0)
    beamAntenna = stations.Antenna(0,
                                   stand=beamStand,
                                   pol=0,
                                   theta=0,
                                   phi=0,
                                   status=3)
    beamAntenna.fee = beamFEE
    beamAntenna.fee_port = 1
    beamAntenna.cable = beamCable

    antennas.append(beamAntenna)

    ## Dipole down here...
    ### NOTE
    ### Here we zero out the cable length for the dipole since the delay
    ### setup that is used for these observations already takes the
    ### cable/geometric delays into account.  We shouldn't need anything
    ### else to get good fringes.
    dipole.cable.length = 0
    antennas.append(dipole)

    # Loop over the input files...
    for filename in filenames:
        fh = open(filename, "rb")
        nFramesFile = os.path.getsize(filename) // drx.FRAME_SIZE
        #junkFrame = drx.read_frame(fh)
        #fh.seek(0)
        while True:
            try:
                junkFrame = drx.read_frame(fh)
                try:
                    srate = junkFrame.sample_rate
                    t0 = junkFrame.time
                    break
                except ZeroDivisionError:
                    pass
            except errors.SyncError:
                fh.seek(-drx.FRAME_SIZE + 1, 1)

        fh.seek(-drx.FRAME_SIZE, 1)

        beam, tune, pol = junkFrame.id
        srate = junkFrame.sample_rate

        tunepols = drx.get_frames_per_obs(fh)
        tunepols = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepols

        # Offset in frames for beampols beam/tuning/pol. sets
        offset = int(args.skip * srate / 4096 * beampols)
        offset = int(1.0 * offset / beampols) * beampols
        fh.seek(offset * drx.FRAME_SIZE, 1)

        # Iterate on the offsets until we reach the right point in the file.  This
        # is needed to deal with files that start with only one tuning and/or a
        # different sample rate.
        while True:
            ## Figure out where in the file we are and what the current tuning/sample
            ## rate is
            junkFrame = drx.read_frame(fh)
            srate = junkFrame.sample_rate
            t1 = junkFrame.time
            tunepols = drx.get_frames_per_obs(fh)
            tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
            beampols = tunepol
            fh.seek(-drx.FRAME_SIZE, 1)

            ## See how far off the current frame is from the target
            tDiff = t1 - (t0 + args.skip)

            ## Scale that down to come up with a new seek parameter
            tCorr = -tDiff / 8.0
            cOffset = int(tCorr * srate / 4096 * beampols)
            cOffset = int(1.0 * cOffset / beampols) * beampols
            offset += cOffset

            ## If the offset is zero, we are done.  Otherwise, apply the offset
            ## and check the location in the file again.
            if cOffset == 0:
                break
            fh.seek(cOffset * drx.FRAME_SIZE, 1)

        # Update the offset actually used
        args.skip = t1 - t0
        offset = int(round(args.skip * srate / 4096 * beampols))
        offset = int(1.0 * offset / beampols) * beampols

        tnom = junkFrame.header.time_offset
        tStart = junkFrame.time

        # Get the DRX frequencies
        cFreq1 = 0.0
        cFreq2 = 0.0
        for i in range(32):
            junkFrame = drx.read_frame(fh)
            b, t, p = junkFrame.id
            if p == 0 and t == 1:
                cFreq1 = junkFrame.central_freq
            elif p == 0 and t == 2:
                cFreq2 = junkFrame.central_freq
            else:
                pass
        fh.seek(-32 * drx.FRAME_SIZE, 1)

        # Align the files as close as possible by the time tags and then make sure that
        # the first frame processed is from tuning 1, pol 0.
        junkFrame = drx.read_frame(fh)
        beam, tune, pol = junkFrame.id
        pair = 2 * (tune - 1) + pol
        j = 0
        while pair != 0:
            junkFrame = drx.read_frame(fh)
            beam, tune, pol = junkFrame.id
            pair = 2 * (tune - 1) + pol
            j += 1
        fh.seek(-drx.FRAME_SIZE, 1)
        print("Shifted beam %i data by %i frames (%.4f s)" %
              (beam, j, j * 4096 / srate / 4))

        # Set integration time
        tInt = args.avg_time
        nFrames = int(round(tInt * srate / 4096))
        tInt = nFrames * 4096 / srate

        # Read in some data
        tFile = nFramesFile / 4 * 4096 / srate

        # Report
        print("Filename: %s" % filename)
        print("  Sample Rate: %i Hz" % srate)
        print("  Tuning 1: %.1f Hz" % cFreq1)
        print("  Tuning 2: %.1f Hz" % cFreq2)
        print("  ===")
        print("  Integration Time: %.3f s" % tInt)
        print("  Integrations in File: %i" % int(tFile / tInt))
        print("  Duration of File: %f" % tFile)
        print("  Offset: %f s" % offset)

        if args.duration != 0:
            nChunks = int(round(args.duration / tInt))
        else:
            nChunks = int(tFile / tInt)

        print("Processing: %i integrations" % nChunks)

        # Here we start the HDF5 file
        outname = os.path.split(filename)[1]
        outname = os.path.splitext(outname)[0]
        outname = "%s.hdf5" % outname
        outfile = h5py.File(outname, 'w')
        group1 = outfile.create_group("Time")
        group2 = outfile.create_group("Frequencies")
        group3 = outfile.create_group("Visibilities")
        out = raw_input("Target Name: ")
        outfile.attrs["OBJECT"] = out
        out = raw_input("Polarization (X/Y): ")
        outfile.attrs["POLARIZATION"] = out
        dset1 = group1.create_dataset("Timesteps", (nChunks, 3),
                                      numpy.float64,
                                      maxshape=(nChunks, 3))
        dset2 = group2.create_dataset("Tuning1", (LFFT, ),
                                      numpy.float64,
                                      maxshape=(LFFT, ))
        dset3 = group2.create_dataset("Tuning2", (LFFT, ),
                                      numpy.float64,
                                      maxshape=(LFFT, ))
        dset4 = group3.create_dataset("Tuning1", (nChunks, 3, LFFT),
                                      numpy.complex64,
                                      maxshape=(nChunks, 3, LFFT))
        dset5 = group3.create_dataset("Tuning2", (nChunks, 3, LFFT),
                                      numpy.complex64,
                                      maxshape=(nChunks, 3, LFFT))

        drxBuffer = buffer.DRXFrameBuffer(beams=[beam], tunes=[1, 2],
                                          pols=[0, 1])
        data = numpy.zeros((2, 2, 4096 * nFrames), dtype=numpy.complex64)

        pb = ProgressBarPlus(max=nChunks)
        tsec = numpy.zeros(1, dtype=numpy.float64)
        for i in range(nChunks):
            j = 0
            while j < nFrames:
                for k in range(4):
                    try:
                        cFrame = drx.read_frame(fh)
                        drxBuffer.append(cFrame)
                    except errors.SyncError:
                        pass

                cFrames = drxBuffer.get()
                if cFrames is None:
                    continue

                for cFrame in cFrames:
                    if j == 0:
                        tStart = cFrame.time
                    beam, tune, pol = cFrame.id
                    pair = 2 * (tune - 1) + pol

                    if tune == 1:
                        data[0, pol,
                             j * 4096:(j + 1) * 4096] = cFrame.payload.data
                    else:
                        data[1, pol,
                             j * 4096:(j + 1) * 4096] = cFrame.payload.data

                j += 1

            # Correlate
            blList1, freq1, vis1 = fxc.FXMaster(data[0, :, :],
                                                antennas,
                                                LFFT=LFFT,
                                                overlap=1,
                                                include_auto=True,
                                                verbose=False,
                                                sample_rate=srate,
                                                central_freq=cFreq1,
                                                pol='XX',
                                                return_baselines=True,
                                                gain_correct=False,
                                                clip_level=0)

            blList2, freq2, vis2 = fxc.FXMaster(data[1, :, :],
                                                antennas,
                                                LFFT=LFFT,
                                                overlap=1,
                                                include_auto=True,
                                                verbose=False,
                                                sample_rate=srate,
                                                central_freq=cFreq2,
                                                pol='XX',
                                                return_baselines=True,
                                                gain_correct=False,
                                                clip_level=0)

            if i == 0:
                tsec = tInt / 2
                outfile.attrs["STANDS"] = numpy.array([stand1, stand2])
                outfile.attrs["SRATE"] = srate
                date = datetime.fromtimestamp(tStart).date()
                outfile.attrs["DATE"] = str(date)
                dset2.write_direct(freq1)
                dset3.write_direct(freq2)
            else:
                tsec += tInt

            temp = numpy.zeros(3, dtype=numpy.float64)
            temp[0] = tStart
            temp[1] = tInt
            temp[2] = tsec
            dset1.write_direct(temp, dest_sel=numpy.s_[i])
            dset4.write_direct(vis1, dest_sel=numpy.s_[i])
            dset5.write_direct(vis2, dest_sel=numpy.s_[i])

            pb.inc(amount=1)
            sys.stdout.write(pb.show() + '\r')
            sys.stdout.flush()

        sys.stdout.write(pb.show() + '\r')
        sys.stdout.write('\n')
        sys.stdout.flush()
        outfile.close()

        # Plot
        fig = plt.figure()
        i = 0
        for bl, vi in zip(blList1, vis1):
            ax = fig.add_subplot(4, 3, i + 1)
            ax.plot(freq1 / 1e6, numpy.unwrap(numpy.angle(vi)))
            ax.set_title('Stand %i - Stand %i' %
                         (bl[0].stand.id, bl[1].stand.id))
            ax = fig.add_subplot(4, 3, i + 4)
            ax.plot(freq1 / 1e6, numpy.abs(vi))
            i += 1

            coeff = numpy.polyfit(freq1, numpy.unwrap(numpy.angle(vi)), 1)
            #print(coeff[0]/2/numpy.pi*1e9, coeff[1]*180/numpy.pi)

        i = 6
        for bl, vi in zip(blList2, vis2):
            ax = fig.add_subplot(4, 3, i + 1)
            ax.plot(freq2 / 1e6, numpy.unwrap(numpy.angle(vi)))
            ax.set_title('Stand %i - Stand %i' %
                         (bl[0].stand.id, bl[1].stand.id))
            ax = fig.add_subplot(4, 3, i + 4)
            ax.plot(freq2 / 1e6, numpy.abs(vi))
            i += 1

            coeff = numpy.polyfit(freq2, numpy.unwrap(numpy.angle(vi)), 1)

        plt.show()
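
As a usage note, the HDF5 file written above can be read back with h5py.  A minimal sketch, assuming the group and dataset names from the snippet (the filename is hypothetical):

import h5py

with h5py.File('myfile.hdf5', 'r') as f:
    times = f['/Time/Timesteps'][:]        # (nChunks, 3): tStart, tInt, tsec
    freq1 = f['/Frequencies/Tuning1'][:]   # (LFFT,) frequencies in Hz
    vis1 = f['/Visibilities/Tuning1'][:]   # (nChunks, 3, LFFT) complex64
    print(f.attrs['OBJECT'], f.attrs['POLARIZATION'], f.attrs['SRATE'])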
Code Example #2

# numpy is used below but was not imported in the original snippet:
import numpy

from astropy.time import Time as AstroTime

from lsl.common import stations
from lsl.sim import vis as simVis

from lwa_antpos.station import ovro


# Convert a station.Station to a lsl.common.stations.LWAStation object
ovro2 = stations.LWAStation('OVRO', ovro.lat*180/numpy.pi, ovro.lon*180/numpy.pi, ovro.elev, 'OV')
for ant in ovro.antennas:
    enz = ovro2.get_enz_offset((ant.lat*180/numpy.pi, ant.lon*180/numpy.pi, ant.elev))
    s = stations.Stand(ant.id, *enz)
    for pol in (0, 1):
        c = stations.Cable(f"Cable{ant.id:03d}-Pol{pol}", 0.0)
        a = stations.Antenna(ant.id*2+pol, stand=s, cable=c, pol=pol)
        ovro2.antennas.append(a)
ovro = ovro2

# Simulation setup
nant = len(ovro.antennas) // 2
nbl = nant*(nant+1)//2
chan0 = 1234
nchan = 192
CHAN_BW = 196e6 / 8192
jd = AstroTime.now().jd

# Simulation array
freqs = (chan0 + numpy.arange(nchan)) * CHAN_BW + CHAN_BW/2
aa = simVis.build_sim_array(ovro, ovro.antennas[0::2], freqs/1e9, jd=jd)
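
As a follow-up, the simulation array can be used to generate model visibilities.  A minimal sketch, assuming lsl.sim.vis also provides the SOURCES catalog and build_sim_data (these are assumptions based on other LSL code, not part of the original snippet):

# Simulate visibilities for the catalog sources at the chosen Julian date;
# the return value is LSL's simulated-visibility structure.
dataDict = simVis.build_sim_data(aa, simVis.SOURCES, jd=jd)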
Code Example #3

# Likely imports for this snippet (module paths follow LSL conventions and
# are assumptions; hdfData and the processDataBatch* helpers come from
# companion modules of the original script):
import os
import numpy
from datetime import datetime

from lsl.common import stations, mcs, metabundle, metabundleADP, sdf, sdfADP
from lsl.reader import drx, drspec, errors
from lsl.reader.ldp import LWA1DataFile
from lsl.correlator import fx as fxc

def main(args):
    # Length of the FFT and the window to use
    LFFT = args.fft_length
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window
    args.window = window

    # Open the file and find good data (not spectrometer data)
    fh = open(args.filename, "rb")

    try:
        for i in range(5):
            junkFrame = drspec.read_frame(fh)
        raise RuntimeError(
            "ERROR: '%s' appears to be a DR spectrometer file, not a raw DRX file"
            % args.filename)
    except errors.SyncError:
        fh.seek(0)

    # Good, we seem to have a real DRX file, switch over to the LDP interface
    fh.close()
    idf = LWA1DataFile(args.filename,
                       ignore_timetag_errors=args.ignore_time_errors)

    # Metadata
    nFramesFile = idf.get_info('nframe')
    beam = idf.get_info('beam')
    srate = idf.get_info('sample_rate')
    beampols = idf.get_info('nbeampol')
    beams = max([1, beampols // 4])

    # Number of frames to integrate over
    nFramesAvg = int(args.average * srate / 4096) * beampols
    nFramesAvg = int(1.0 * (nFramesAvg // beampols) * 4096 /
                     float(LFFT)) * LFFT // 4096 * beampols
    args.average = 1.0 * (nFramesAvg // beampols) * 4096 / srate
    maxFrames = nFramesAvg

    # Offset into the file, if needed
    offset = idf.offset(args.skip)

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    if args.metadata is not None:
        args.duration = 0
    if args.duration == 0:
        args.duration = 1.0 * nFramesFile / beampols * 4096 / srate
        args.duration -= args.skip
    else:
        args.duration = int(
            round(args.duration * srate * beampols / 4096) / beampols * 4096 /
            srate)
    nChunks = int(round(args.duration / args.average))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks

    # Date & Central Frequency
    t1 = idf.get_info('start_time')
    beginDate = t1.datetime
    central_freq1 = idf.get_info('freq1')
    central_freq2 = idf.get_info('freq2')

    # File summary
    print("Filename: %s" % args.filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Beams: %i" % beams)
    print("Tune/Pols: %i" % beampols)
    print("Sample Rate: %i Hz" % srate)
    print("Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" %
          (central_freq1, central_freq2))
    print("Frames: %i (%.3f s)" %
          (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate))
    print("---")
    print("Offset: %.3f s (%i frames)" % (args.skip, offset))
    print("Integration: %.3f s (%i frames; %i frames per beam/tune/pol)" %
          (args.average, nFramesAvg, nFramesAvg / beampols))
    print("Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" %
          (args.average * nChunks, nFrames, nFrames / beampols))
    print("Chunks: %i" % nChunks)
    print(" ")

    # Estimate clip level (if needed)
    if args.estimate_clip_level:
        estimate = idf.estimate_levels(sigma=5.0)
        clip1 = (estimate[0] + estimate[1]) / 2.0
        clip2 = (estimate[2] + estimate[3]) / 2.0
    else:
        clip1 = args.clip_level
        clip2 = args.clip_level

    # Make the pseudo-antennas for Stokes calculation
    antennas = []
    for i in range(4):
        if i // 2 == 0:
            newAnt = stations.Antenna(1)
        else:
            newAnt = stations.Antenna(2)

        if i % 2 == 0:
            newAnt.pol = 0
        else:
            newAnt.pol = 1

        antennas.append(newAnt)

    # Setup the output file
    outname = os.path.split(args.filename)[1]
    outname = os.path.splitext(outname)[0]
    outname = '%s-waterfall.hdf5' % outname

    if os.path.exists(outname):
        if not args.force:
            yn = input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
        else:
            yn = 'y'

        if yn not in ('n', 'N'):
            os.unlink(outname)
        else:
            raise RuntimeError("Output file '%s' already exists" % outname)

    f = hdfData.create_new_file(outname)

    # Look at the metadata and come up with a list of observations.  If
    # there are no metadata, create a single "observation" that covers the
    # whole file.
    obsList = {}
    if args.metadata is not None:
        try:
            project = metabundle.get_sdf(args.metadata)
        except Exception as e:
            project = metabundleADP.get_sdf(args.metadata)

        sdfBeam = project.sessions[0].drx_beam
        spcSetup = project.sessions[0].spcSetup
        if sdfBeam != beam:
            raise RuntimeError(
                "Metadata is for beam #%i, but data is from beam #%i" %
                (sdfBeam, beam))

        for i, obs in enumerate(project.sessions[0].observations):
            sdfStart = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm)
            sdfStop = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
            obsDur = obs.dur / 1000.0
            obsSR = drx.FILTER_CODES[obs.filter]

            obsList[i + 1] = (sdfStart, sdfStop, obsDur, obsSR)

        print("Observations:")
        for i in sorted(obsList.keys()):
            obs = obsList[i]
            print(" #%i: %s to %s (%.3f s) at %.3f MHz" %
                  (i, obs[0], obs[1], obs[2], obs[3] / 1e6))
        print(" ")

        hdfData.fill_from_metabundle(f, args.metadata)

    elif args.sdf is not None:
        try:
            project = sdf.parse_sdf(args.sdf)
        except Exception as e:
            project = sdfADP.parse_sdf(args.sdf)

        sdfBeam = project.sessions[0].drx_beam
        spcSetup = project.sessions[0].spcSetup
        if sdfBeam != beam:
            raise RuntimeError(
                "Metadata is for beam #%i, but data is from beam #%i" %
                (sdfBeam, beam))

        for i, obs in enumerate(project.sessions[0].observations):
            sdfStart = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm)
            sdfStop = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
            obsDur = obs.dur / 1000.0
            obsSR = drx.FILTER_CODES[obs.filter]

            obsList[i + 1] = (sdfStart, sdfStop, obsDur, obsSR)

        site = 'lwa1'
        if args.lwasv:
            site = 'lwasv'
        hdfData.fill_from_sdf(f, args.sdf, station=site)

    else:
        obsList[1] = (datetime.utcfromtimestamp(t1),
                      datetime(2222, 12, 31, 23, 59, 59), args.duration, srate)

        site = 'lwa1'
        if args.lwasv:
            site = 'lwasv'
        hdfData.fill_minimum(f, 1, beam, srate, station=site)

    if (not args.stokes):
        data_products = ['XX', 'YY']
    else:
        data_products = ['I', 'Q', 'U', 'V']

    for o in sorted(obsList.keys()):
        for t in (1, 2):
            hdfData.create_observation_set(
                f, o, t, numpy.arange(LFFT, dtype=numpy.float64),
                int(round(obsList[o][2] / args.average)), data_products)

    f.attrs['FileGenerator'] = 'hdfWaterfall.py'
    f.attrs['InputData'] = os.path.basename(args.filename)

    # Create the various HDF group holders
    ds = {}
    for o in sorted(obsList.keys()):
        obs = hdfData.get_observation_set(f, o)

        ds['obs%i' % o] = obs
        ds['obs%i-time' % o] = hdfData.get_time(f, o)

        for t in (1, 2):
            ds['obs%i-freq%i' % (o, t)] = hdfData.get_data_set(f, o, t, 'freq')
            for p in data_products:
                ds["obs%i-%s%i" % (o, p, t)] = hdfData.get_data_set(f, o, t, p)
            ds['obs%i-Saturation%i' % (o, t)] = hdfData.get_data_set(
                f, o, t, 'Saturation')

    # Load in the correct analysis function
    if (not args.stokes):
        processDataBatch = processDataBatchLinear
    else:
        processDataBatch = processDataBatchStokes

    # Go!
    for o in sorted(obsList.keys()):
        try:
            processDataBatch(idf,
                             antennas,
                             obsList[o][0],
                             obsList[o][2],
                             obsList[o][3],
                             args,
                             ds,
                             obsID=o,
                             clip1=clip1,
                             clip2=clip2)
        except RuntimeError as e:
            print("Observation #%i: %s, abandoning this observation" %
                  (o, str(e)))

    # Save the output to a HDF5 file
    f.close()

    # Close out the data file
    idf.close()
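
For reference, the LDP interface used above can also drive the read loop directly.  A minimal sketch, assuming idf.read() returns a (duration, start time, data) tuple as in LSL's LDP readers (the filename is hypothetical):

idf = LWA1DataFile('myfile.dat')
srate = idf.get_info('sample_rate')
tInt, tStart, data = idf.read(0.1)   # ~0.1 s; data is (4, nSamples) for DRX
print("Read %.3f s of data starting at %s" % (tInt, tStart))
idf.close()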
Code Example #4

File: utils.py  Project: lwa-project/eLWA

# Likely imports for this snippet (paths assumed; EnhancedFixedBody,
# EnhancedSun, EnhancedJupiter, PolyCos, parse_time_string, and _srcs come
# from companion modules of the eLWA project):
import os
import ephem

from lsl.common import stations
from lsl.reader import vdif, drx

def _read_correlator_configuration(filename):
    """
    Backend function for read_correlator_configuration.
    """

    context = None
    config = {}
    sources = []
    blocks = []

    fh = open(filename, 'r')
    for line in fh:
        if line[0] == '#':
            continue
        if len(line) < 3:
            continue

        line = line.strip()

        if line == 'Context':
            temp_context = {
                'observer': 'Unknown',
                'project': 'Unknown',
                'session': None,
                'vlaref': None
            }
        elif line[:8] == 'Observer':
            temp_context['observer'] = line.split(None, 1)[1]
        elif line[:7] == 'Project':
            temp_context['project'] = line.split(None, 1)[1]
        elif line[:7] == 'Session':
            temp_context['session'] = line.split(None, 1)[1]
        elif line[:6] == 'VLARef':
            temp_context['vlaref'] = line.split(None, 1)[1]
        elif line == 'EndContext':
            context = temp_context

        elif line == 'Configuration':
            temp_config = {'inttime': None, 'channels': None, 'basis': None}
        elif line[:8] == 'Channels':
            temp_config['channels'] = int(line.split(None, 1)[1], 10)
        elif line[:7] == 'IntTime':
            temp_config['inttime'] = float(line.split(None, 1)[1])
        elif line[:8] == 'PolBasis':
            temp_config['basis'] = line.split(None, 1)[1]
        elif line == 'EndConfiguration':
            config = temp_config

        elif line == 'Source':
            source = {'intent': 'target', 'duration': 0.0}
        elif line[:4] == 'Name':
            source['name'] = line.split(None, 1)[1]
        elif line[:6] == 'Intent':
            source['intent'] = line.split(None, 1)[1].lower()
        elif line[:6] == 'RA2000':
            source['ra'] = line.split(None, 1)[1]
        elif line[:7] == 'Dec2000':
            source['dec'] = line.split(None, 1)[1]
        elif line[:6] == 'Polyco':
            source['polyco'] = line.split(None, 1)[1]
        elif line[:8] == 'Duration':
            source['duration'] = float(line.split(None, 1)[1])
        elif line == 'SourceDone':
            sources.append(source)

        elif line == 'Input':
            block = {'fileOffset': 0.0}
        elif line[:4] == 'File' and line[4] != 'O':
            block['filename'] = line.split(None, 1)[1]
        elif line[:8] == 'MetaData':
            ## Optional
            block['metadata'] = line.split(None, 1)[1]
        elif line[:4] == 'Type':
            block['type'] = line.split(None, 1)[1].lower()
        elif line[:7] == 'Antenna':
            block['antenna'] = line.split(None, 1)[1]
        elif line[:4] == 'Pols':
            block['pols'] = [
                v.strip().rstrip() for v in line.split(None, 1)[1].split(',')
            ]
        elif line[:8] == 'Location':
            block['location'] = [
                float(v) for v in line.split(None, 1)[1].split(',')
            ]
        elif line[:16] == 'ApparentLocation':
            block['appLocation'] = [
                float(v) for v in line.split(None, 1)[1].split(',')
            ]
        elif line[:11] == 'ClockOffset':
            block['clockOffset'] = [
                parse_time_string(v) for v in line.split(None, 1)[1].split(',')
            ]
        elif line[:10] == 'FileOffset':
            block['fileOffset'] = parse_time_string(line.split(None, 1)[1])
        elif line == 'InputDone':
            ## Make sure we have a metaData key since it is optional
            if 'metadata' not in block:
                block['metadata'] = None
            blocks.append(block)
    fh.close()

    # Set the context
    config['context'] = context

    # Find the reference source
    if 'ra' in sources[0] and 'dec' in sources[0]:
        refSource = EnhancedFixedBody()
        refSource.name = sources[0]['name']
        refSource._ra = sources[0]['ra']
        refSource._dec = sources[0]['dec']
        refSource._epoch = ephem.J2000
    else:
        srcs = [EnhancedSun(), EnhancedJupiter()]
        for line in _srcs:
            srcs.append(EnhancedFixedBody(ephem.readdb(line)))

        refSource = None
        for src in srcs:
            if src.name == sources[0]['name']:
                refSource = src
                break
        if refSource is None:
            raise ValueError("Unknown source '%s'" % sources[0]['name'])
    refSource.intent = sources[0]['intent']
    refSource.duration = sources[0]['duration']
    try:
        if not os.path.exists(sources[0]['polyco']):
            # Maybe it is relative to the configuration file's path?
            sources[0]['polyco'] = os.path.join(os.path.dirname(filename),
                                                sources[0]['polyco'])
        refSource._polycos = PolyCos(sources[0]['polyco'],
                                     psrname=refSource.name.replace(
                                         'PSR', '').replace('_', ''))
    except KeyError:
        pass

    # Sort everything out so that the VDIF files come first
    order = sorted(range(len(blocks)), key=lambda x: blocks[x]['type'][-1])
    blocks = [blocks[o] for o in order]

    # Build up a list of filenames
    filenames = [block['filename'] for block in blocks]

    # Build up a list of metadata filenames
    metanames = [block['metadata'] for block in blocks]

    # Build up a list of file offsets
    offsets = [block['fileOffset'] for block in blocks]

    # Build up a list of readers
    readers = []
    for block in blocks:
        if block['type'] == 'vdif':
            readers.append(vdif)
        elif block['type'] == 'drx':
            readers.append(drx)
        else:
            readers.append(None)

    # Build up a list of antennas
    antennas = []
    i = 1
    for block in blocks:
        aid = None
        name = block['antenna']
        if name.lower() in ('lwa1', 'lwa-1'):
            aid = 51
        elif name.lower() in ('lwasv', 'lwa-sv'):
            aid = 52
        else:
            for j in range(len(name)):
                try:
                    aid = int(name[j:], 10)
                    if name[:j].lower() == 'lwa':
                        aid += 50
                    break
                except ValueError:
                    pass
        pols = block['pols']
        location = block['location']
        try:
            app_location = block['appLocation']
        except KeyError:
            app_location = None
        clock_offsets = block['clockOffset']

        if aid is None:
            raise RuntimeError("Cannot convert antenna name '%s' to a number" %
                               name)

        stand = stations.Stand(aid, *location)
        try:
            apparent_stand = stations.Stand(aid, *app_location)
        except TypeError:
            apparent_stand = None
        for pol, offset in zip(pols, clock_offsets):
            cable = stations.Cable('%s-%s' % (name, pol), 0.0, vf=1.0, dd=0.0)
            cable.clock_offset = offset

            if pol.lower() == 'x':
                antenna = stations.Antenna(i, stand=stand, cable=cable, pol=0)
            else:
                antenna = stations.Antenna(i, stand=stand, cable=cable, pol=1)
            antenna.apparent_stand = apparent_stand

            antennas.append(antenna)
            i += 1

    # Done
    return config, refSource, filenames, metanames, offsets, readers, antennas
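
The function above parses a plain-text correlator configuration file.  An illustrative example of the format it accepts, reconstructed from the keywords the parser recognizes (all field values are hypothetical):

Context
  Observer Jane Doe
  Project ABC123
  Session 1
EndContext

Configuration
  Channels 512
  IntTime 1.0
  PolBasis linear
EndConfiguration

Source
  Name CygA
  Intent calibrator
  RA2000 19:59:28.35
  Dec2000 +40:44:02.1
  Duration 3600.0
SourceDone

Input
  File /data/example.vdif
  Type VDIF
  Antenna LWA1
  Pols X, Y
  Location 0.0, 0.0, 0.0
  ClockOffset 0.0, 0.0
  FileOffset 0.0
InputDone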
Code Example #5

# This snippet uses the older Python 2 / LSL 1.x API.  Likely imports
# (paths assumed; parseOptions, estimateClipLevel, and the
# processDataBatch* helpers come from companion modules of the original
# script):
import os
import numpy
import ephem
from datetime import datetime

from lsl.common import stations, mcs, metabundle
from lsl.reader import drx, drspec, errors
from lsl.correlator import fx as fxc
from lsl.astro import unix_to_utcjd, DJD_OFFSET

def main(args):
    # Parse command line options
    config = parseOptions(args)

    # Length of the FFT
    LFFT = config['LFFT']

    # Open the file and find good data (not spectrometer data)
    filename = config['args'][0]
    fh = open(filename, "rb")
    nFramesFile = os.path.getsize(filename) / drx.FrameSize

    try:
        for i in xrange(5):
            junkFrame = drspec.readFrame(fh)
        raise RuntimeError(
            "ERROR: '%s' appears to be a DR spectrometer file, not a raw DRX file"
            % filename)
    except errors.syncError:
        fh.seek(0)

    while True:
        try:
            junkFrame = drx.readFrame(fh)
            try:
                srate = junkFrame.getSampleRate()
                t0 = junkFrame.getTime()
                break
            except ZeroDivisionError:
                pass
        except errors.syncError:
            fh.seek(-drx.FrameSize + 1, 1)

    fh.seek(-drx.FrameSize, 1)

    beam, tune, pol = junkFrame.parseID()
    beams = drx.getBeamCount(fh)
    tunepols = drx.getFramesPerObs(fh)
    tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
    beampols = tunepol

    # Offset in frames for beampols beam/tuning/pol. sets
    inoffset = config['offset']
    offset = int(config['offset'] * srate / 4096 * beampols)
    offset = int(1.0 * offset / beampols) * beampols
    fh.seek(offset * drx.FrameSize, 1)

    # Iterate on the offsets until we reach the right point in the file.  This
    # is needed to deal with files that start with only one tuning and/or a
    # different sample rate.
    while True:
        ## Figure out where in the file we are and what the current tuning/sample
        ## rate is
        junkFrame = drx.readFrame(fh)
        srate = junkFrame.getSampleRate()
        t1 = junkFrame.getTime()
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        fh.seek(-drx.FrameSize, 1)

        ## See how far off the current frame is from the target
        tDiff = t1 - (t0 + config['offset'])

        ## Half that to come up with a new seek parameter
        tCorr = -tDiff / 2.0
        cOffset = int(tCorr * srate / 4096 * beampols)
        cOffset = int(1.0 * cOffset / beampols) * beampols
        offset += cOffset

        ## If the offset is zero, we are done.  Otherwise, apply the offset
        ## and check the location in the file again.
        if cOffset == 0:
            break
        fh.seek(cOffset * drx.FrameSize, 1)

    # Update the offset actually used
    config['offset'] = t1 - t0
    offset = int(round(config['offset'] * srate / 4096 * beampols))
    offset = int(1.0 * offset / beampols) * beampols

    # Make sure that the file chunk size is an integer multiple
    # of the FFT length so that no data gets dropped.  This needs to
    # take into account the number of beampols in the data, the FFT length,
    # and the number of samples per frame.
    maxFrames = int(1.0 * config['maxFrames'] / beampols * 4096 /
                    float(LFFT)) * LFFT / 4096 * beampols

    # Number of frames to integrate over
    print "Line 673: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols
    nFramesAvg = int(config['average'] * srate / 4096 * beampols)
    if (nFramesAvg == 0):
        nFramesAvg = 1 * beampols
    else:
        nFramesAvg = int(1.0 * nFramesAvg / beampols * 4096 /
                         float(LFFT)) * LFFT / 4096 * beampols
    config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
    maxFrames = nFramesAvg
    print "Line 678: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    if config['metadata'] is not None:
        config['duration'] = 0
    if config['duration'] == 0:
        config['duration'] = 1.0 * nFramesFile / beampols * 4096 / srate
    else:
        config['duration'] = int(
            round(config['duration'] * srate * beampols / 4096) / beampols *
            4096 / srate)

    nChunks = int(round(config['duration'] / config['average']))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks
    print "Line 693: config['average']", config[
        'average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg, " nChunks ", nChunks

    # Date & Central Frequency
    t1 = junkFrame.getTime()
    beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
    centralFreq1 = 0.0
    centralFreq2 = 0.0
    for i in xrange(4):
        junkFrame = drx.readFrame(fh)
        b, t, p = junkFrame.parseID()
        if p == 0 and t == 1:
            try:
                centralFreq1 = junkFrame.getCentralFreq()
            except AttributeError:
                from lsl.common.dp import fS
                centralFreq1 = fS * ((junkFrame.data.flags >> 32) &
                                     (2**32 - 1)) / 2**32
        elif p == 0 and t == 2:
            try:
                centralFreq2 = junkFrame.getCentralFreq()
            except AttributeError:
                from lsl.common.dp import fS
                centralFreq2 = fS * ((junkFrame.data.flags >> 32) &
                                     (2**32 - 1)) / 2**32
        else:
            pass
    fh.seek(-4 * drx.FrameSize, 1)

    config['freq1'] = centralFreq1
    config['freq2'] = centralFreq2

    # File summary
    print "Filename: %s" % filename
    print "Date of First Frame: %s" % str(beginDate)
    print "Beams: %i" % beams
    print "Tune/Pols: %i %i %i %i" % tunepols
    print "Sample Rate: %i Hz" % srate
    print "Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (centralFreq1,
                                                          centralFreq2)
    print "Frames: %i (%.3f s)" % (nFramesFile,
                                   1.0 * nFramesFile / beampols * 4096 / srate)
    print "---"
    print "Offset: %.3f s (%i frames)" % (config['offset'], offset)
    print "Integration: %.6f s (%i frames; %i frames per beam/tune/pol)" % (
        config['average'], nFramesAvg, nFramesAvg / beampols)
    print "Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (
        config['average'] * nChunks, nFrames, nFrames / beampols)
    print "Chunks: %i" % nChunks
    print " "

    # Estimate clip level (if needed)
    if config['estimate']:
        clip1, clip2 = estimateClipLevel(fh, beampols)
    else:
        clip1 = config['clip']
        clip2 = config['clip']

    # Make the pseudo-antennas for Stokes calculation
    antennas = []
    for i in xrange(4):
        if i / 2 == 0:
            newAnt = stations.Antenna(1)
        else:
            newAnt = stations.Antenna(2)

        if i % 2 == 0:
            newAnt.pol = 0
        else:
            newAnt.pol = 1

        antennas.append(newAnt)

    # Setup the output file
    outname = os.path.split(filename)[1]
    outname = os.path.splitext(outname)[0]
    if (config['return'] == 'FFT'):
        outname = '%s-%d-waterfall-complex.hdf5' % (outname, inoffset)
    else:
        outname = '%s-waterfall.hdf5' % outname

    if os.path.exists(outname):
        #yn = raw_input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
        #if yn not in ('n', 'N'):
        #	os.unlink(outname)
        #else:
        raise RuntimeError("Output file '%s' already exists" % outname)

    f = hdfData.createNewFile(outname)

    # Look at the metadata and come up with a list of observations.  If
    # there are no metadata, create a single "observation" that covers the
    # whole file.
    obsList = {}
    if config['metadata'] is not None:
        sdf = metabundle.getSessionDefinition(config['metadata'])

        sdfBeam = sdf.sessions[0].drxBeam
        spcSetup = sdf.sessions[0].spcSetup
        if sdfBeam != beam:
            raise RuntimeError(
                "Metadata is for beam #%i, but data is from beam #%i" %
                (sdfBeam, beam))

        for i, obs in enumerate(sdf.sessions[0].observations):
            sdfStart = mcs.mjdmpm2datetime(obs.mjd, obs.mpm)
            sdfStop = mcs.mjdmpm2datetime(obs.mjd, obs.mpm + obs.dur)
            obsDur = obs.dur / 1000.0
            obsSR = drx.filterCodes[obs.filter]

            obsList[i + 1] = (sdfStart, sdfStop, obsDur, obsSR)

        print "Observations:"
        for i in sorted(obsList.keys()):
            obs = obsList[i]
            print " #%i: %s to %s (%.3f s) at %.3f MHz" % (
                i, obs[0], obs[1], obs[2], obs[3] / 1e6)
        print " "

        hdfData.fillFromMetabundle(f, config['metadata'])
    else:
        obsList[1] = (datetime.utcfromtimestamp(t1),
                      datetime(2222, 12, 31, 23, 59, 59),
                      config['duration'], srate)

        hdfData.fillMinimum(f, 1, beam, srate)

    if config['linear']:
        dataProducts = ['XX', 'YY']
    else:
        dataProducts = ['I', 'Q', 'U', 'V']

    for o in sorted(obsList.keys()):
        for t in (1, 2):
            hdfData.createDataSets(
                f,
                o,
                t,
                numpy.arange(LFFT -
                             1 if float(fxc.__version__) < 0.8 else LFFT,
                             dtype=numpy.float32),
                int(round(obsList[o][2] / config['average'])),
                dataProducts,
                dataOut=config['return'])
    f.attrs['FileGenerator'] = 'hdfWaterfall.py'
    f.attrs['InputData'] = os.path.basename(filename)

    # Create the various HDF group holders
    ds = {}
    for o in sorted(obsList.keys()):
        obs = hdfData.getObservationSet(f, o)

        ds['obs%i' % o] = obs
        ds['obs%i-time' % o] = obs.create_dataset(
            'time', (int(round(obsList[o][2] / config['average'])), ), 'f8')

        for t in (1, 2):
            ds['obs%i-freq%i' % (o, t)] = hdfData.getDataSet(f, o, t, 'freq')
            for p in dataProducts:
                if (config['return'] == 'PSD'):
                    ds["obs%i-%s%i" % (o, p, t)] = hdfData.getDataSet(
                        f, o, t, p)
                else:
                    ds["obs%i-%s%imag" % (o, p, t)] = hdfData.getDataSet(
                        f, o, t, p + 'mag')
                    ds["obs%i-%s%iphase" % (o, p, t)] = hdfData.getDataSet(
                        f, o, t, p + 'phase')
            ds['obs%i-Saturation%i' % (o, t)] = hdfData.getDataSet(
                f, o, t, 'Saturation')
    # Load in the correct analysis function
    if config['linear']:
        processDataBatch = processDataBatchLinear
    else:
        processDataBatch = processDataBatchStokes

    # Go!
    for o in sorted(obsList.keys()):
        try:
            processDataBatch(fh,
                             antennas,
                             obsList[o][0],
                             obsList[o][2],
                             obsList[o][3],
                             config,
                             ds,
                             obsID=o,
                             clip1=clip1,
                             clip2=clip2)
        except RuntimeError, e:
            print "Observation #%i: %s, abandoning this observation" % (o,
                                                                        str(e))
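
The frame-count arithmetic above (and in Code Example #3) rounds the requested averaging time down to a whole number of frames whose per-stream sample count is a multiple of the FFT length.  A distilled sketch of that rounding with a worked example (the function name is illustrative, not from the original scripts):

def frames_for_average(t_avg, srate, beampols, lfft, samples_per_frame=4096):
    # Whole frames per beam/tune/pol stream in the requested time...
    per_stream = int(t_avg * srate / samples_per_frame)
    # ...trimmed so each stream's sample count is a multiple of the FFT length.
    n_ffts = per_stream * samples_per_frame // lfft
    return n_ffts * lfft // samples_per_frame * beampols

# 1 s at 19.6 MS/s with 4 streams and LFFT = 65536 (16 frames per FFT):
# 4785 frames/stream -> 299 FFTs -> 4784 frames/stream -> 19136 frames total,
# so the actual integration time becomes 4784 * 4096 / 19.6e6 ~ 0.9998 s.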
Code Example #6

File: vdifWaterfall.py  Project: lwa-project/eLWA

# Likely imports for this snippet (paths assumed; hdfData and the
# processDataBatch* helpers come from companion modules of the eLWA
# project):
import os
import numpy
from datetime import datetime

from lsl.common import stations
from lsl.reader import vdif, errors
from lsl.correlator import fx as fxc

def main(args):
    # Length of the FFT
    LFFT = args.fft_length
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window
    args.window = window

    # Open the file and find good data (not spectrometer data)
    filename = args.filename
    fh = open(filename, "rb")
    header = vdif.read_guppi_header(fh)
    vdif.FRAME_SIZE = vdif.get_frame_size(fh)
    nFramesFile = os.path.getsize(filename) // vdif.FRAME_SIZE

    while True:
        try:
            junkFrame = vdif.read_frame(fh,
                                        central_freq=header['OBSFREQ'],
                                        sample_rate=header['OBSBW'] * 2.0)
            try:
                srate = junkFrame.sample_rate
                t0 = junkFrame.time
                vdif.DATA_LENGTH = junkFrame.payload.data.size
                break
            except ZeroDivisionError:
                pass
        except errors.SyncError:
            fh.seek(-vdif.FRAME_SIZE + 1, 1)

    fh.seek(-vdif.FRAME_SIZE, 1)

    beam, pol = junkFrame.id
    beams = 1
    tunepols = vdif.get_thread_count(fh)
    tunepol = tunepols
    beampols = tunepol

    # Offset in frames for beampols beam/tuning/pol. sets
    offset = int(args.skip * srate / vdif.DATA_LENGTH * beampols)
    offset = int(1.0 * offset / beampols) * beampols
    fh.seek(offset * vdif.FRAME_SIZE, 1)

    # Iterate on the offsets until we reach the right point in the file.  This
    # is needed to deal with files that start with only one tuning and/or a
    # different sample rate.
    while True:
        ## Figure out where in the file we are and what the current tuning/sample
        ## rate is
        junkFrame = vdif.read_frame(fh,
                                    central_freq=header['OBSFREQ'],
                                    sample_rate=header['OBSBW'] * 2.0)
        srate = junkFrame.sample_rate
        t1 = junkFrame.time
        tunepols = (vdif.get_thread_count(fh), )
        tunepol = tunepols[0]
        beampols = tunepol
        fh.seek(-vdif.FRAME_SIZE, 1)

        ## See how far off the current frame is from the target
        tDiff = t1 - (t0 + args.skip)

        ## Half that to come up with a new seek parameter
        tCorr = -tDiff / 2.0
        cOffset = int(tCorr * srate / vdif.DATA_LENGTH * beampols)
        cOffset = int(1.0 * cOffset / beampols) * beampols
        offset += cOffset

        ## If the offset is zero, we are done.  Otherwise, apply the offset
    ## and check the location in the file again.
    if cOffset == 0:
            break
        fh.seek(cOffset * vdif.FRAME_SIZE, 1)

    # Update the offset actually used
    args.skip = t1 - t0
    offset = int(round(args.skip * srate / vdif.DATA_LENGTH * beampols))
    offset = int(1.0 * offset / beampols) * beampols

    # Make sure that the file chunk size is an integer multiple
    # of the FFT length so that no data gets dropped.  This needs to
    # take into account the number of beampols in the data, the FFT length,
    # and the number of samples per frame.
    maxFrames = int(1.0 * 28000 / beampols * vdif.DATA_LENGTH /
                    float(2 * LFFT)) * 2 * LFFT // vdif.DATA_LENGTH * beampols

    # Number of frames to integrate over
    nFramesAvg = int(args.average * srate / vdif.DATA_LENGTH * beampols)
    nFramesAvg = int(1.0 * nFramesAvg / beampols * vdif.DATA_LENGTH /
                     float(2 * LFFT)) * 2 * LFFT // vdif.DATA_LENGTH * beampols
    args.average = 1.0 * nFramesAvg / beampols * vdif.DATA_LENGTH / srate
    maxFrames = nFramesAvg

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    if args.duration == 0:
        args.duration = 1.0 * nFramesFile / beampols * vdif.DATA_LENGTH / srate
        args.duration -= args.skip
    else:
        args.duration = int(
            round(args.duration * srate * beampols / vdif.DATA_LENGTH) /
            beampols * vdif.DATA_LENGTH / srate)
    nChunks = int(round(args.duration / args.average))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks

    # Date & Central Frequency
    t1 = junkFrame.time
    beginDate = junkFrame.time.datetime
    central_freq1 = 0.0
    central_freq2 = 0.0
    for i in range(4):
        junkFrame = vdif.read_frame(fh,
                                    central_freq=header['OBSFREQ'],
                                    sample_rate=header['OBSBW'] * 2.0)
        b, p = junkFrame.id
        if p == 0:
            central_freq1 = junkFrame.central_freq
        elif p == 1:
            central_freq2 = junkFrame.central_freq
        else:
            pass
    fh.seek(-4 * vdif.FRAME_SIZE, 1)

    # File summary
    print("Filename: %s" % filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Beams: %i" % beams)
    print("Tune/Pols: %i" % tunepols)
    print("Sample Rate: %i Hz" % srate)
    print("Bit Depth: %i" % junkFrame.header.bits_per_sample)
    print("Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" %
          (central_freq1, central_freq2))
    print(
        "Frames: %i (%.3f s)" %
        (nFramesFile, 1.0 * nFramesFile / beampols * vdif.DATA_LENGTH / srate))
    print("---")
    print("Offset: %.3f s (%i frames)" % (args.skip, offset))
    print("Integration: %.3f s (%i frames; %i frames per beam/tune/pol)" %
          (args.average, nFramesAvg, nFramesAvg // beampols))
    print("Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" %
          (args.average * nChunks, nFrames, nFrames // beampols))
    print("Chunks: %i" % nChunks)
    print(" ")

    # Get the clip levels
    clip1 = args.clip_level
    clip2 = args.clip_level

    # Make the pseudo-antennas for Stokes calculation
    antennas = []
    for i in range(4):
        if i // 2 == 0:
            newAnt = stations.Antenna(1)
        else:
            newAnt = stations.Antenna(2)

        if i % 2 == 0:
            newAnt.pol = 0
        else:
            newAnt.pol = 1

        antennas.append(newAnt)

    # Setup the output file
    outname = os.path.split(filename)[1]
    outname = os.path.splitext(outname)[0]
    outname = '%s-waterfall.hdf5' % outname

    if os.path.exists(outname):
        if not args.force:
            yn = input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
        else:
            yn = 'y'

        if yn not in ('n', 'N'):
            os.unlink(outname)
        else:
            raise RuntimeError("Output file '%s' already exists" % outname)

    f = hdfData.createNewFile(outname)

    # Look at the metadata and come up with a list of observations.  If
    # there are no metadata, create a single "observation" that covers the
    # whole file.
    obsList = {}
    obsList[1] = (datetime.utcfromtimestamp(t1),
                  datetime(2222, 12, 31, 23, 59, 59), args.duration, srate)
    hdfData.fillMinimum(f, 1, beam, srate)

    if (not args.stokes):
        data_products = ['XX', 'YY']
    else:
        data_products = ['I', 'Q', 'U', 'V']

    for o in sorted(obsList.keys()):
        for t in (1, 2):
            hdfData.createDataSets(
                f, o, t,
                numpy.arange(LFFT -
                             1 if float(fxc.__version__) < 0.8 else LFFT,
                             dtype=numpy.float32),
                int(round(obsList[o][2] / args.average)), data_products)

    f.attrs['FileGenerator'] = 'hdfWaterfall.py'
    f.attrs['InputData'] = os.path.basename(filename)

    # Create the various HDF group holders
    ds = {}
    for o in sorted(obsList.keys()):
        obs = hdfData.getObservationSet(f, o)

        ds['obs%i' % o] = obs
        ds['obs%i-time' % o] = obs.create_dataset(
            'time', (int(round(obsList[o][2] / args.average)), ), 'f8')

        for t in (1, 2):
            ds['obs%i-freq%i' % (o, t)] = hdfData.get_data_set(f, o, t, 'freq')
            for p in data_products:
                ds["obs%i-%s%i" % (o, p, t)] = hdfData.get_data_set(f, o, t, p)
            ds['obs%i-Saturation%i' % (o, t)] = hdfData.get_data_set(
                f, o, t, 'Saturation')

    # Load in the correct analysis function
    if (not args.stokes):
        processDataBatch = processDataBatchLinear
    else:
        processDataBatch = processDataBatchStokes

    # Go!
    for o in sorted(obsList.keys()):
        try:
            processDataBatch(fh,
                             header,
                             antennas,
                             obsList[o][0],
                             obsList[o][2],
                             obsList[o][3],
                             args,
                             ds,
                             obsID=o,
                             clip1=clip1,
                             clip2=clip2)
        except RuntimeError as e:
            print("Observation #%i: %s, abandoning this observation" %
                  (o, str(e)))

    # Save the output to a HDF5 file
    f.close()
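
Finally, the iterative offset search that appears in Code Examples #1, #5, and #6 can be distilled into a reusable helper.  A minimal sketch under stated assumptions (the function name and arguments are illustrative; the real scripts also round the seek to a multiple of the number of beam/tune/pol streams):

def seek_to_time(fh, reader, target, frame_size, samples_per_frame=4096):
    # Repeatedly peek at the next frame, measure how far its timestamp is
    # from the target, and seek a fraction of the difference until the
    # correction rounds to zero frames.
    while True:
        frame = reader.read_frame(fh)
        fh.seek(-frame_size, 1)              # rewind the peeked frame
        tDiff = frame.time - target
        nFrames = int(-tDiff / 2.0 * frame.sample_rate / samples_per_frame)
        if nFrames == 0:
            break
        fh.seek(nFrames * frame_size, 1)
    return frame.time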