Example #1
    def run_correlator_test_complex(self,
                                    dtype,
                                    nchan=256,
                                    window=fx.null_window):
        fakeData = numpy.random.rand(
            self.nAnt,
            nchan * 4) + 1j * numpy.random.rand(self.nAnt, nchan * 4)
        fakeData = fakeData.astype(dtype)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        freq, cps = fx.FXMaster(fakeData,
                                antennas[:self.nAnt],
                                LFFT=nchan,
                                sample_rate=1e5,
                                central_freq=38e6,
                                window=window)

        # Numpy comparison
        for i in range(self.nAnt):
            antennas[i].stand.x = 0.0
            antennas[i].stand.y = 0.0
            antennas[i].stand.z = 0.0
            antennas[i].cable.length = 0.0

        freq, cps = fx.FXMaster(fakeData,
                                antennas[:self.nAnt],
                                LFFT=nchan,
                                sample_rate=1e5,
                                central_freq=38e6,
                                window=window)

        cps2 = numpy.zeros_like(cps)
        LFFT = cps.shape[1]
        nFFT = fakeData.shape[1] // LFFT
        wndw = window(LFFT)
        blc = 0
        for i in range(0, self.nAnt):
            if antennas[i].pol != 0:
                continue
            for j in range(i + 1, self.nAnt):
                if antennas[j].pol != 0:
                    continue

                for k in range(nFFT):
                    f1 = numpy.fft.fftshift(
                        numpy.fft.fft(fakeData[i, k * LFFT:(k + 1) * LFFT] *
                                      wndw))
                    f2 = numpy.fft.fftshift(
                        numpy.fft.fft(fakeData[j, k * LFFT:(k + 1) * LFFT] *
                                      wndw))

                    cps2[blc, :] += f1 * f2.conj()
                blc += 1
        cps2 /= (LFFT * nFFT)
        lsl.testing.assert_allclose(cps, cps2)
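A hedged usage sketch: the helper above is meant to be driven by individual test methods. A hypothetical driver that exercises it across dtypes and window functions (assumes the helper lives on the same TestCase; numpy window functions such as numpy.hamming take the FFT length as their argument):

    def test_correlator_complex_windowed(self):
        """Hypothetical driver for run_correlator_test_complex()."""
        for dtype in (numpy.complex64, numpy.complex128):
            self.run_correlator_test_complex(dtype, nchan=256,
                                             window=numpy.hamming)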
Example #2
    def test_correlator_pol(self):
        """Test various correlator polarization settings."""

        fakeData = numpy.random.rand(
            self.nAnt, 1024) + 1j * numpy.random.rand(self.nAnt, 1024)
        fakeData = fakeData.astype(numpy.csingle)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        ## XX
        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True,
                                        pol='XX')
        for (ant1, ant2) in blList:
            self.assertEqual(ant1.pol, 0)
            self.assertEqual(ant2.pol, 0)

        ## YY
        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True,
                                        pol='YY')
        for (ant1, ant2) in blList:
            self.assertEqual(ant1.pol, 1)
            self.assertEqual(ant2.pol, 1)

        ## XY
        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True,
                                        pol='XY')
        for (ant1, ant2) in blList:
            self.assertEqual(ant1.pol, 0)
            self.assertEqual(ant2.pol, 1)

        ## YX
        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True,
                                        pol='YX')
        for (ant1, ant2) in blList:
            self.assertEqual(ant1.pol, 1)
            self.assertEqual(ant2.pol, 0)
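The four near-identical blocks above could be collapsed into a single parametrized loop; a minimal sketch (same FXMaster arguments, with each pol code mapped to the pol attributes it should select):

        for code, (p1, p2) in (('XX', (0, 0)), ('YY', (1, 1)),
                               ('XY', (0, 1)), ('YX', (1, 0))):
            blList, freq, cps = fx.FXMaster(fakeData,
                                            antennas[:self.nAnt],
                                            sample_rate=1e5,
                                            central_freq=38e6,
                                            return_baselines=True,
                                            pol=code)
            for (ant1, ant2) in blList:
                self.assertEqual(ant1.pol, p1)
                self.assertEqual(ant2.pol, p2)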
Example #3
    def test_correlator_complex_pfb(self):
        """Test the C-based PFB version of the correlator on complex-valued data."""

        for dtype in (numpy.complex64, numpy.complex128):
            fakeData = numpy.random.rand(
                self.nAnt,
                1024 * 4) + 1j * numpy.random.rand(self.nAnt, 1024 * 4)
            fakeData = fakeData.astype(dtype)

            station = stations.parse_ssmif(_SSMIF)
            antennas = station.antennas

            freq, cps = fx.FXMaster(fakeData,
                                    antennas[:self.nAnt],
                                    pfb=True,
                                    sample_rate=1e5,
                                    central_freq=38e6)

            # Numpy comparison
            for i in range(self.nAnt):
                antennas[i].stand.x = 0.0
                antennas[i].stand.y = 0.0
                antennas[i].stand.z = 0.0
                antennas[i].cable.length = 0.0

            freq, cps = fx.FXMaster(fakeData,
                                    antennas[:self.nAnt],
                                    pfb=True,
                                    sample_rate=1e5,
                                    central_freq=38e6)

            cps2 = numpy.zeros_like(cps)
            LFFT = cps.shape[1]
            nFFT = fakeData.shape[1] // LFFT
            blc = 0
            for i in range(0, self.nAnt):
                if antennas[i].pol != 0:
                    continue
                for j in range(i + 1, self.nAnt):
                    if antennas[j].pol != 0:
                        continue

                    for k in range(nFFT):
                        f1 = numpy.fft.fftshift(
                            _pfb(fakeData[i, :], LFFT * k, LFFT))
                        f2 = numpy.fft.fftshift(
                            _pfb(fakeData[j, :], LFFT * k, LFFT))

                        cps2[blc, :] += f1 * f2.conj()
                    blc += 1
            cps2 /= (LFFT * nFFT)
            lsl.testing.assert_allclose(cps, cps2)
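_pfb is not defined in this snippet. Below is a hedged sketch of a critically sampled, 4-tap polyphase filter bank that is consistent with how _pfb is called above; the reference implementation in the actual test suite may differ in its windowing details:

import numpy

def _pfb_reference(signal, start, LFFT, ntaps=4):
    """Sketch: one LFFT-channel PFB spectrum for the block starting at `start`."""
    # Gather ntaps consecutive LFFT-sample blocks, ending with the block
    # that starts at `start`; blocks before the start of the signal stay zero.
    sub = numpy.zeros(LFFT * ntaps, dtype=numpy.complex128)
    for i in range(ntaps):
        j = start // LFFT - (ntaps - 1) + i
        if j < 0:
            continue
        sub[i * LFFT:(i + 1) * LFFT] = signal[j * LFFT:(j + 1) * LFFT]
    # Weight all taps with a sinc prototype filter...
    t = numpy.arange(LFFT * ntaps)
    sub *= numpy.sinc((t - LFFT * ntaps / 2.0 + 0.5) / LFFT)
    # ...and sum the per-tap FFTs into a single LFFT-channel spectrum.
    spec = numpy.zeros(LFFT, dtype=numpy.complex128)
    for i in range(ntaps):
        spec += numpy.fft.fft(sub[i * LFFT:(i + 1) * LFFT])
    return spec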
Example #4
    def test_correlator_real_pfb(self):
        """Test the C-based PFB version of the correlator on real-valued data."""

        for dtype in (numpy.int8, numpy.int16, numpy.int32, numpy.int64,
                      numpy.float32, numpy.float64):
            fakeData = 10.0 * numpy.random.rand(self.nAnt, 1024 * 4) + 3.0
            fakeData = fakeData.astype(dtype)

            station = stations.parse_ssmif(_SSMIF)
            antennas = station.antennas

            freq, cps = fx.FXMaster(fakeData, antennas[:self.nAnt], pfb=True)

            # Numpy comparison
            for i in range(self.nAnt):
                antennas[i].stand.x = 0.0
                antennas[i].stand.y = 0.0
                antennas[i].stand.z = 0.0
                antennas[i].cable.length = 0.0

            freq, cps = fx.FXMaster(fakeData, antennas[:self.nAnt], pfb=True)

            cps2 = numpy.zeros_like(cps)
            LFFT = cps.shape[1]
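            # Real-valued input: each LFFT-channel output spectrum consumes
            # 2*LFFT time samples, hence the factor of two here and in the
            # normalization below.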
            nFFT = fakeData.shape[1] // 2 // LFFT
            blc = 0
            for i in range(0, self.nAnt):
                if antennas[i].pol != 0:
                    continue
                for j in range(i + 1, self.nAnt):
                    if antennas[j].pol != 0:
                        continue

                    for k in range(nFFT):
                        f1 = _pfb(fakeData[i, :], 2 * LFFT * k,
                                  2 * LFFT)[:LFFT]
                        f2 = _pfb(fakeData[j, :], 2 * LFFT * k,
                                  2 * LFFT)[:LFFT]

                        cps2[blc, :] += f1 * f2.conj()
                    blc += 1
            cps2 /= (2 * LFFT * nFFT)
            lsl.testing.assert_allclose(cps, cps2)
Example #5
    def test_correlator_baselines(self):
        """Test that the return_baselines keyword works."""

        fakeData = numpy.random.rand(
            self.nAnt, 1024) + 1j * numpy.random.rand(self.nAnt, 1024)
        fakeData = fakeData.astype(numpy.csingle)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True)
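        # Hypothetical extra assertion (not in the original test): with
        # auto-correlations excluded, the default 'XX' product should yield
        # N*(N-1)/2 baselines for N X-polarization inputs.
        nX = len([a for a in antennas[:self.nAnt] if a.pol == 0])
        self.assertEqual(len(blList), nX * (nX - 1) // 2)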
Example #6
    def test_correlator_gaincorrect(self):
        """Test appling gain correction to the correlator output."""

        fakeData = numpy.random.rand(
            self.nAnt, 1024) + 1j * numpy.random.rand(self.nAnt, 1024)
        fakeData = fakeData.astype(numpy.csingle)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        freq, cps = fx.FXMaster(fakeData,
                                antennas[:self.nAnt],
                                sample_rate=1e5,
                                central_freq=38e6,
                                gain_correct=True)
Example #7
def process_chunk(idf,
                  site,
                  good,
                  filename,
                  int_time=5.0,
                  LFFT=64,
                  overlap=1,
                  pfb=False,
                  pols=['xx'],
                  chunk_size=100):
    """
    Given a lsl.reader.ldp.TBNFile instances and various parameters for the 
    cross-correlation, write cross-correlate the data and save it to a file.
    """

    # Get antennas
    antennas = []
    for a in site.antennas:
        if a.digitizer != 0:
            antennas.append(a)

    # Get the metadata
    sample_rate = idf.get_info('sample_rate')
    central_freq = idf.get_info('freq1')

    # Create the list of good digitizers and a digitizer to Antenna instance mapping.
    # These are:
    #  toKeep  -> mapping of digitizer number to array location
    #  mapper -> mapping of Antenna instance to array location
    toKeep = [antennas[i].digitizer - 1 for i in good]
    mapper = [antennas[i] for i in good]

    # Create a list of unique stands to know what style of IDI file to create
    stands = set([antennas[i].stand.id for i in good])

    # Main loop over the input file to read in the data and organize it.  Several control
    # variables are defined for this:
    #  ref_time -> time (in seconds since the UNIX epoch) for the first data set
    #  setTime -> time (in seconds since the UNIX epoch) for the current data set
    ref_time = 0.0
    setTime = 0.0
    wallTime = time.time()
    for s in range(chunk_size):
        try:
            readT, t, data = idf.read(int_time)
        except Exception as e:
            print("Error: %s" % str(e))
            continue

        ## Prune out what we don't want
        data = data[toKeep, :]

        setTime = t
        if s == 0:
            ref_time = setTime

        # Set up the set time as a python datetime instance so that it can be easily printed
        setDT = datetime.utcfromtimestamp(setTime)
        setDT = setDT.replace(tzinfo=UTC())
        print("Working on set #%i (%.3f seconds after set #1 = %s)" %
              ((s + 1),
               (setTime - ref_time), setDT.strftime("%Y/%m/%d %H:%M:%S.%f")))

        # Loop over polarization products
        for pol in pols:
            print("->  %s" % pol)
            blList, freq, vis = fxc.FXMaster(data,
                                             mapper,
                                             LFFT=LFFT,
                                             overlap=overlap,
                                             pfb=pfb,
                                             include_auto=True,
                                             verbose=False,
                                             sample_rate=sample_rate,
                                             central_freq=central_freq,
                                             pol=pol,
                                             return_baselines=True,
                                             gain_correct=True)

            # Select the right range of channels to save
            toUse = numpy.where((freq > 5.0e6) & (freq < 93.0e6))
            toUse = toUse[0]

            # If we are in the first polarization product of the first
            # iteration, set up the FITS IDI file.
            if s == 0 and pol == pols[0]:
                pol1, pol2 = fxc.pol_to_pols(pol)

                if len(stands) > 255:
                    fits = fitsidi.ExtendedIdi(filename, ref_time=ref_time)
                else:
                    fits = fitsidi.Idi(filename, ref_time=ref_time)
                fits.set_stokes(pols)
                fits.set_frequency(freq[toUse])
                fits.set_geometry(site, [a for a in mapper if a.pol == pol1])

            # Convert the setTime to a MJD and save the visibilities to the FITS IDI file
            obsTime = astro.unix_to_taimjd(setTime)
            fits.add_data_set(obsTime, readT, blList, vis[:, toUse], pol=pol)
        print("->  Cummulative Wall Time: %.3f s (%.3f s per integration)" %
              ((time.time() - wallTime), (time.time() - wallTime) / (s + 1)))

    # Cleanup after everything is done
    fits.write()
    fits.close()
    del fits
    del data
    del vis
    return True
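A hypothetical driver for process_chunk() above. The file name and the "good antenna" criterion are placeholders; lsl.reader.ldp.TBNFile and stations.lwa1 are the LSL objects the docstring assumes:

from lsl.reader import ldp
from lsl.common import stations

idf = ldp.TBNFile('observation.tbn')  # placeholder file name
site = stations.lwa1
# Keep X-pol antennas that look healthy (status criterion is an assumption).
good = [i for i, a in enumerate(site.antennas)
        if a.pol == 0 and a.combined_status == 33]
process_chunk(idf, site, good, 'observation.FITS_1',
              int_time=5.0, LFFT=64, pols=['xx'])
idf.close()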
Example #8
def main(args):
    LFFT = args.fft_length

    stand1 = int(args.dipole_id_x)
    stand2 = int(args.dipole_id_y)
    filenames = args.filename

    # Build up the station
    if args.lwasv:
        site = stations.lwasv
    else:
        site = stations.lwa1

    # Figure out which antennas we need
    antennas = []
    for ant in site.antennas:
        if ant.stand.id == stand1 and ant.pol == 0:
            antennas.append(ant)
    for ant in site.antennas:
        if ant.stand.id == stand2 and ant.pol == 0:
            antennas.append(ant)

    # Loop through the input files...
    for filename in filenames:
        fh = open(filename, "rb")
        nFramesFile = os.path.getsize(filename) // drx.FRAME_SIZE
        #junkFrame = drx.read_frame(fh)
        #fh.seek(0)
        while True:
            try:
                junkFrame = drx.read_frame(fh)
                try:
                    srate = junkFrame.sample_rate
                    t0 = junkFrame.time
                    break
                except ZeroDivisionError:
                    pass
            except errors.SyncError:
                fh.seek(-drx.FRAME_SIZE + 1, 1)

        fh.seek(-drx.FRAME_SIZE, 1)

        beam, tune, pol = junkFrame.id
        srate = junkFrame.sample_rate

        tunepols = drx.get_frames_per_obs(fh)
        tunepols = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepols

        # Offset in frames for beampols beam/tuning/pol. sets
        offset = int(args.skip * srate / 4096 * beampols)
        offset = int(1.0 * offset / beampols) * beampols
        fh.seek(offset * drx.FRAME_SIZE, 1)

        # Iterate on the offsets until we reach the right point in the file.  This
        # is needed to deal with files that start with only one tuning and/or a
        # different sample rate.
        while True:
            ## Figure out where in the file we are and what the current tuning/sample
            ## rate is
            junkFrame = drx.read_frame(fh)
            srate = junkFrame.sample_rate
            t1 = junkFrame.time
            tunepols = drx.get_frames_per_obs(fh)
            tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
            beampols = tunepol
            fh.seek(-drx.FRAME_SIZE, 1)

            ## See how far off the current frame is from the target
            tDiff = t1 - (t0 + args.skip)

            ## Half that to come up with a new seek parameter
            tCorr = -tDiff / 8.0
            cOffset = int(tCorr * srate / 4096 * beampols)
            cOffset = int(1.0 * cOffset / beampols) * beampols
            offset += cOffset

            ## If the offset is zero, we are done.  Otherwise, apply the offset
            ## and check the location in the file again.
            if cOffset == 0:
                break
            fh.seek(cOffset * drx.FRAME_SIZE, 1)

        # Update the offset actually used
        args.skip = t1 - t0
        offset = int(round(args.skip * srate / 4096 * beampols))
        offset = int(1.0 * offset / beampols) * beampols

        tnom = junkFrame.header.time_offset
        tStart = junkFrame.time

        # Get the DRX frequencies
        cFreq1 = 0.0
        cFreq2 = 0.0
        for i in range(4):
            junkFrame = drx.read_frame(fh)
            b, t, p = junkFrame.id
            if p == 0 and t == 1:
                cFreq1 = junkFrame.central_freq
            elif p == 0 and t == 2:
                cFreq2 = junkFrame.central_freq
            else:
                pass
        fh.seek(-4 * drx.FRAME_SIZE, 1)

        # Align the files as close as possible by the time tags and then make sure that
        # the first frame processed is from tuning 1, pol 0.
        junkFrame = drx.read_frame(fh)
        beam, tune, pol = junkFrame.id
        pair = 2 * (tune - 1) + pol
        j = 0
        while pair != 0:
            junkFrame = drx.read_frame(fh)
            beam, tune, pol = junkFrame.id
            pair = 2 * (tune - 1) + pol
            j += 1
        fh.seek(-drx.FRAME_SIZE, 1)
        print("Shifted beam %i data by %i frames (%.4f s)" %
              (beam, j, j * 4096 / srate / 4))

        # Set integration time
        tInt = args.avg_time
        nFrames = int(round(tInt * srate / 4096))
        tInt = nFrames * 4096 / srate

        # Figure out the duration of the file
        tFile = nFramesFile / 4 * 4096 / srate

        # Report
        print("Filename: %s" % filename)
        print("  Sample Rate: %i Hz" % srate)
        print("  Tuning 1: %.1f Hz" % cFreq1)
        print("  Tuning 2: %.1f Hz" % cFreq2)
        print("  ===")
        print("  Integration Time: %.3f s" % tInt)
        print("  Integrations in File: %i" % int(tFile / tInt))

        nChunks = int(tFile / tInt)
        pb = ProgressBar(max=nChunks)
        for i in range(nChunks):
            junkFrame = drx.read_frame(fh)
            tStart = junkFrame.time
            fh.seek(-drx.FRAME_SIZE, 1)

            count1 = [0, 0]
            data1 = numpy.zeros((2, 4096 * nFrames), dtype=numpy.complex64)
            count2 = [0, 0]
            data2 = numpy.zeros((2, 4096 * nFrames), dtype=numpy.complex64)
            for j in range(nFrames):
                for k in range(4):
                    cFrame = drx.read_frame(fh)
                    beam, tune, pol = cFrame.id
                    pair = 2 * (tune - 1) + pol

                    if tune == 1:
                        data1[pol, count1[pol] * 4096:(count1[pol] + 1) *
                              4096] = cFrame.payload.data
                        count1[pol] += 1
                    else:
                        data2[pol, count2[pol] * 4096:(count2[pol] + 1) *
                              4096] = cFrame.payload.data
                        count2[pol] += 1

            # Correlate
            blList1, freq1, vis1 = fxc.FXMaster(data1,
                                                antennas,
                                                LFFT=LFFT,
                                                overlap=1,
                                                include_auto=True,
                                                verbose=False,
                                                sample_rate=srate,
                                                central_freq=cFreq1,
                                                pol='XX',
                                                return_baselines=True,
                                                gain_correct=False,
                                                clip_level=0)

            blList2, freq2, vis2 = fxc.FXMaster(data2,
                                                antennas,
                                                LFFT=LFFT,
                                                overlap=1,
                                                include_auto=True,
                                                verbose=False,
                                                sample_rate=srate,
                                                central_freq=cFreq2,
                                                pol='XX',
                                                return_baselines=True,
                                                gain_correct=False,
                                                clip_level=0)

            if nChunks != 1:
                outfile = os.path.split(filename)[1]
                outfile = os.path.splitext(outfile)[0]
                outfile = "%s-vis-%04i.npz" % (outfile, i + 1)
            else:
                outfile = os.path.split(filename)[1]
                outfile = os.path.splitext(outfile)[0]
                outfile = "%s-vis.npz" % outfile
            numpy.savez(outfile,
                        srate=srate,
                        freq1=freq1,
                        vis1=vis1,
                        freq2=freq2,
                        vis2=vis2,
                        tStart=tStart,
                        tInt=tInt,
                        stands=numpy.array([stand1, stand2]))

            del data1
            del data2

            pb.inc(amount=1)
            sys.stdout.write(pb.show() + '\r')
            sys.stdout.flush()

        sys.stdout.write(pb.show() + '\r')
        sys.stdout.write('\n')
        sys.stdout.flush()

        # Plot
        fig = plt.figure()
        i = 0
        for bl, vi in zip(blList1, vis1):
            ax = fig.add_subplot(4, 3, i + 1)
            ax.plot(freq1 / 1e6, numpy.unwrap(numpy.angle(vi)))
            ax.set_title('Stand %i - Stand %i' %
                         (bl[0].stand.id, bl[1].stand.id))
            ax = fig.add_subplot(4, 3, i + 4)
            ax.plot(freq1 / 1e6, numpy.abs(vi))
            i += 1

            coeff = numpy.polyfit(freq1, numpy.unwrap(numpy.angle(vi)), 1)
            #print(coeff[0]/2/numpy.pi*1e9, coeff[1]*180/numpy.pi)

        i = 6
        for bl, vi in zip(blList2, vis2):
            ax = fig.add_subplot(4, 3, i + 1)
            ax.plot(freq2 / 1e6, numpy.unwrap(numpy.angle(vi)))
            ax.set_title('Stand %i - Stand %i' %
                         (bl[0].stand.id, bl[1].stand.id))
            ax = fig.add_subplot(4, 3, i + 4)
            ax.plot(freq2 / 1e6, numpy.abs(vi))
            i += 1

            coeff = numpy.polyfit(freq2, numpy.unwrap(numpy.angle(vi)), 1)
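            # The commented-out print above hints at the interpretation of the
            # fit: the slope of unwrapped phase vs. frequency is a residual
            # delay.  A hedged sketch, since phase = 2*pi*f*tau implies
            # tau = coeff[0]/(2*pi):
            delay_ns = coeff[0] / (2 * numpy.pi) * 1e9
            print("Residual delay: %.2f ns" % delay_ns)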
Example #9
def main(args):
    LFFT = args.fft_length

    stand1 = 0
    stand2 = int(args.dipole_id_y)
    filenames = args.filename

    # Build up the station
    if args.lwasv:
        site = stations.lwasv
    else:
        site = stations.lwa1

    # Get the antennas we need (and a fake one for the beam)
    rawAntennas = site.antennas

    antennas = []

    dipole = None
    xyz = numpy.zeros((len(rawAntennas), 3))
    i = 0
    for ant in rawAntennas:
        if ant.stand.id == stand2 and ant.pol == 0:
            dipole = ant
        xyz[i, 0] = ant.stand.x
        xyz[i, 1] = ant.stand.y
        xyz[i, 2] = ant.stand.z
        i += 1
    arrayX = xyz[:, 0].mean()
    arrayY = xyz[:, 1].mean()
    arrayZ = xyz[:, 2].mean()

    ## Fake one down here...
    beamStand = stations.Stand(0, arrayX, arrayY, arrayZ)
    beamFEE = stations.FEE('Beam', 0, gain1=0, gain2=0, status=3)
    beamCable = stations.Cable('Beam', 0, vf=1.0)
    beamAntenna = stations.Antenna(0,
                                   stand=beamStand,
                                   pol=0,
                                   theta=0,
                                   phi=0,
                                   status=3)
    beamAntenna.fee = beamFEE
    beamAntenna.feePort = 1
    beamAntenna.cable = beamCable

    antennas.append(beamAntenna)

    ## Dipole down here...
    ### NOTE
    ### Here we zero out the cable length for the dipole since the delay
    ### setup that is used for these observations already takes the
    ### cable/geometric delays into account.  We shouldn't need anything
    ### else to get good fringes.
    dipole.cable.length = 0
    antennas.append(dipole)

    # Loop over the input files...
    for filename in filenames:
        fh = open(filename, "rb")
        nFramesFile = os.path.getsize(filename) // drx.FRAME_SIZE
        #junkFrame = drx.read_frame(fh)
        #fh.seek(0)
        while True:
            try:
                junkFrame = drx.read_frame(fh)
                try:
                    srate = junkFrame.sample_rate
                    t0 = junkFrame.time
                    break
                except ZeroDivisionError:
                    pass
            except errors.SyncError:
                fh.seek(-drx.FRAME_SIZE + 1, 1)

        fh.seek(-drx.FRAME_SIZE, 1)

        beam, tune, pol = junkFrame.id
        srate = junkFrame.sample_rate

        tunepols = drx.get_frames_per_obs(fh)
        tunepols = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepols

        # Offset in frames for beampols beam/tuning/pol. sets
        offset = int(args.skip * srate / 4096 * beampols)
        offset = int(1.0 * offset / beampols) * beampols
        fh.seek(offset * drx.FRAME_SIZE, 1)

        # Iterate on the offsets until we reach the right point in the file.  This
        # is needed to deal with files that start with only one tuning and/or a
        # different sample rate.
        while True:
            ## Figure out where in the file we are and what the current tuning/sample
            ## rate is
            junkFrame = drx.read_frame(fh)
            srate = junkFrame.sample_rate
            t1 = junkFrame.time
            tunepols = drx.get_frames_per_obs(fh)
            tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
            beampols = tunepol
            fh.seek(-drx.FRAME_SIZE, 1)

            ## See how far off the current frame is from the target
            tDiff = t1 - (t0 + args.skip)

            ## Half that to come up with a new seek parameter
            tCorr = -tDiff / 8.0
            cOffset = int(tCorr * srate / 4096 * beampols)
            cOffset = int(1.0 * cOffset / beampols) * beampols
            offset += cOffset

            ## If the offset is zero, we are done.  Otherwise, apply the offset
            ## and check the location in the file again.
            if cOffset == 0:
                break
            fh.seek(cOffset * drx.FRAME_SIZE, 1)

        # Update the offset actually used
        args.skip = t1 - t0
        offset = int(round(args.skip * srate / 4096 * beampols))
        offset = int(1.0 * offset / beampols) * beampols

        tnom = junkFrame.header.time_offset
        tStart = junkFrame.time

        # Get the DRX frequencies
        cFreq1 = 0.0
        cFreq2 = 0.0
        for i in range(32):
            junkFrame = drx.read_frame(fh)
            b, t, p = junkFrame.id
            if p == 0 and t == 1:
                cFreq1 = junkFrame.central_freq
            elif p == 0 and t == 2:
                cFreq2 = junkFrame.central_freq
            else:
                pass
        fh.seek(-32 * drx.FRAME_SIZE, 1)

        # Align the files as close as possible by the time tags and then make sure that
        # the first frame processed is from tuning 1, pol 0.
        junkFrame = drx.read_frame(fh)
        beam, tune, pol = junkFrame.id
        pair = 2 * (tune - 1) + pol
        j = 0
        while pair != 0:
            junkFrame = drx.read_frame(fh)
            beam, tune, pol = junkFrame.id
            pair = 2 * (tune - 1) + pol
            j += 1
        fh.seek(-drx.FRAME_SIZE, 1)
        print("Shifted beam %i data by %i frames (%.4f s)" %
              (beam, j, j * 4096 / srate / 4))

        # Set integration time
        tInt = args.avg_time
        nFrames = int(round(tInt * srate / 4096))
        tInt = nFrames * 4096 / srate

        # Figure out the duration of the file
        tFile = nFramesFile / 4 * 4096 / srate

        # Report
        print("Filename: %s" % filename)
        print("  Sample Rate: %i Hz" % srate)
        print("  Tuning 1: %.1f Hz" % cFreq1)
        print("  Tuning 2: %.1f Hz" % cFreq2)
        print("  ===")
        print("  Integration Time: %.3f s" % tInt)
        print("  Integrations in File: %i" % int(tFile / tInt))
        print("  Duration of File: %f" % tFile)
        print("  Offset: %f s" % offset)

        if args.duration != 0:
            nChunks = int(round(args.duration / tInt))
        else:
            nChunks = int(tFile / tInt)

        print("Processing: %i integrations" % nChunks)

        # Here we start the HDF5 file
        outname = os.path.split(filename)[1]
        outname = os.path.splitext(outname)[0]
        outname = "%s.hdf5" % outname
        outfile = h5py.File(outname, 'w')
        group1 = outfile.create_group("Time")
        group2 = outfile.create_group("Frequencies")
        group3 = outfile.create_group("Visibilities")
        out = raw_input("Target Name: ")
        outfile.attrs["OBJECT"] = out
        out = raw_input("Polarization (X/Y): ")
        outfile.attrs["POLARIZATION"] = out
        dset1 = group1.create_dataset("Timesteps", (nChunks, 3),
                                      numpy.float64,
                                      maxshape=(nChunks, 3))
        dset2 = group2.create_dataset("Tuning1", (LFFT, ),
                                      numpy.float64,
                                      maxshape=(LFFT, ))
        dset3 = group2.create_dataset("Tuning2", (LFFT, ),
                                      numpy.float64,
                                      maxshape=(LFFT, ))
        dset4 = group3.create_dataset("Tuning1", (nChunks, 3, LFFT),
                                      numpy.complex64,
                                      maxshape=(nChunks, 3, LFFT))
        dset5 = group3.create_dataset("Tuning2", (nChunks, 3, LFFT),
                                      numpy.complex64,
                                      maxshape=(nChunks, 3, LFFT))

        drxBuffer = buffer.DRXFrameBuffer(beams=[
            beam,
        ],
                                          tunes=[1, 2],
                                          pols=[0, 1])
        data = numpy.zeros((2, 2, 4096 * nFrames), dtype=numpy.complex64)

        pb = ProgressBarPlus(max=nChunks)
        tsec = numpy.zeros(1, dtype=numpy.float64)
        for i in range(nChunks):
            j = 0
            while j < nFrames:
                for k in range(4):
                    try:
                        cFrame = drx.read_frame(fh)
                        drxBuffer.append(cFrame)
                    except errors.SyncError:
                        pass

                cFrames = drxBuffer.get()
                if cFrames is None:
                    continue

                for cFrame in cFrames:
                    if j == 0:
                        tStart = cFrame.time
                    beam, tune, pol = cFrame.id
                    pair = 2 * (tune - 1) + pol

                    if tune == 1:
                        data[0, pol,
                             j * 4096:(j + 1) * 4096] = cFrame.payload.data
                    else:
                        data[1, pol,
                             j * 4096:(j + 1) * 4096] = cFrame.payload.data

                j += 1

            # Correlate
            blList1, freq1, vis1 = fxc.FXMaster(data[0, :, :],
                                                antennas,
                                                LFFT=LFFT,
                                                overlap=1,
                                                include_auto=True,
                                                verbose=False,
                                                sample_rate=srate,
                                                central_freq=cFreq1,
                                                pol='XX',
                                                return_baselines=True,
                                                gain_correct=False,
                                                clip_level=0)

            blList2, freq2, vis2 = fxc.FXMaster(data[1, :, :],
                                                antennas,
                                                LFFT=LFFT,
                                                overlap=1,
                                                include_auto=True,
                                                verbose=False,
                                                sample_rate=srate,
                                                central_freq=cFreq2,
                                                pol='XX',
                                                return_baselines=True,
                                                gain_correct=False,
                                                clip_level=0)

            if i == 0:
                tsec = tInt / 2
                outfile.attrs["STANDS"] = numpy.array([stand1, stand2])
                outfile.attrs["SRATE"] = srate
                date = datetime.fromtimestamp(tStart).date()
                outfile.attrs["DATE"] = str(date)
                dset2.write_direct(freq1)
                dset3.write_direct(freq2)
            else:
                tsec += tInt

            temp = numpy.zeros(3, dtype=numpy.float64)
            temp[0] = tStart
            temp[1] = tInt
            temp[2] = tsec
            dset1.write_direct(temp, dest_sel=numpy.s_[i])
            dset4.write_direct(vis1, dest_sel=numpy.s_[i])
            dset5.write_direct(vis2, dest_sel=numpy.s_[i])

            pb.inc(amount=1)
            sys.stdout.write(pb.show() + '\r')
            sys.stdout.flush()

        sys.stdout.write(pb.show() + '\r')
        sys.stdout.write('\n')
        sys.stdout.flush()
        outfile.close()

        # Plot
        fig = plt.figure()
        i = 0
        for bl, vi in zip(blList1, vis1):
            ax = fig.add_subplot(4, 3, i + 1)
            ax.plot(freq1 / 1e6, numpy.unwrap(numpy.angle(vi)))
            ax.set_title('Stand %i - Stand %i' %
                         (bl[0].stand.id, bl[1].stand.id))
            ax = fig.add_subplot(4, 3, i + 4)
            ax.plot(freq1 / 1e6, numpy.abs(vi))
            i += 1

            coeff = numpy.polyfit(freq1, numpy.unwrap(numpy.angle(vi)), 1)
            #print(coeff[0]/2/numpy.pi*1e9, coeff[1]*180/numpy.pi)

        i = 6
        for bl, vi in zip(blList2, vis2):
            ax = fig.add_subplot(4, 3, i + 1)
            ax.plot(freq2 / 1e6, numpy.unwrap(numpy.angle(vi)))
            ax.set_title('Stand %i - Stand %i' %
                         (bl[0].stand.id, bl[1].stand.id))
            ax = fig.add_subplot(4, 3, i + 4)
            ax.plot(freq2 / 1e6, numpy.abs(vi))
            i += 1

            coeff = numpy.polyfit(freq2, numpy.unwrap(numpy.angle(vi)), 1)
Example #10
def main(args):
    # this first part of the code is run by all processes

    # set up MPI environment
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if size < 2:
        raise RuntimeError(
            f"This program requires at least two MPI processes to function. Please rerun with more resources"
        )

    # designate the last process as the supervisor/file reader
    supervisor = size - 1

    # open the TBN file for reading
    tbnf = LWASVDataFile(args.tbn_filename, ignore_timetag_errors=True)

    # figure out the details of the run we want to do
    tx_coords = known_transmitters.parse_args(args)
    antennas = station.antennas
    valid_ants, n_baselines = select_antennas(antennas, args.use_pol)
    n_ants = len(valid_ants)

    sample_rate = tbnf.get_info('sample_rate')
    # some of our TBNs claim to have frame size 1024 but they are lying
    frame_size = 512
    tbn_center_freq = tbnf.get_info('freq1')

    total_integrations, _ = compute_integration_numbers(
        tbnf, args.integration_length)

    # open the output HDF5 file and create datasets
    # because of the way parallelism in h5py works all processes (even ones
    # that don't write to the file) must do this
    h5f = build_output_file(args.hdf5_file,
                            tbnf,
                            valid_ants,
                            n_baselines,
                            args.integration_length,
                            tx_freq=args.tx_freq,
                            fft_len=args.fft_len,
                            use_pfb=args.use_pfb,
                            use_pol=args.use_pol,
                            opt_method=opt_method,
                            vis_model='gaussian',
                            transmitter_coords=tx_coords,
                            mpi_comm=comm)

    if rank == supervisor:
        # the supervisor process runs this code
        print("supervisor: started")

        # state info
        reached_end = False
        workers_alive = [True for _ in range(size - 1)]
        int_no = 0

        while True:
            if not reached_end:
                # grab data for the next available worker
                try:
                    duration, start_time, data = tbnf.read(
                        args.integration_length)
                    # only use data from valid antennas
                    data = data[[a.digitizer - 1 for a in valid_ants], :]
                except EOFError:
                    reached_end = True
                    print(f"supervisor: reached EOF")

                if int_no >= total_integrations:
                    print(f"supervisor: this is the last integration")
                    reached_end = True

            # get the next "ready" message from the workers
            st = MPI.Status()
            msg = comm.recv(status=st)
            if msg == "ready":
                print(
                    f"supervisor: received 'ready' message from worker {st.source}"
                )

                # if we're done, send an exit message and mark that we've killed this worker
                # an empty array indicates that the worker should exit
                if reached_end:
                    print(
                        f"supervisor: sending exit message to worker {st.source}"
                    )
                    comm.Send(np.array([]), dest=st.source, tag=int_no)
                    workers_alive[st.source] = False

                    if not any(workers_alive):
                        print(f"supervisor: all workers told to exit, goodbye")
                        break
                # otherwise, send the data to the worker for processing
                else:
                    print(
                        f"supervisor: sending data for integration {int_no}/{total_integrations} to worker {st.source}"
                    )
                    # Send with a capital S is optimized to send numpy arrays
                    comm.Send(data, dest=st.source, tag=int_no)
                    int_no += 1
            else:
                raise ValueError(
                    f"Supervisor received unrecognized message '{msg}' from worker {st.source}"
                )

        tbnf.close()

    else:
        # the worker processes run this code
        print(f"worker {rank} started")

        # workers don't need access to the TBN file
        tbnf.close()

        # figure out the size of the incoming data buffer
        samples_per_integration = int(
            round(args.integration_length * sample_rate /
                  frame_size)) * frame_size
        buffer_shape = (n_ants, samples_per_integration)

        while True:
            # send with a lowercase s can send any pickle-able python object
            # this is a synchronous send - it will block until the message is read by the supervisor
            # the other sends (e.g. comm.Send) only block until the message is safely taken by MPI, which might happen before the receiver actually reads it
            comm.ssend("ready", dest=supervisor)

            # build a buffer to be filled with data
            data = np.empty(buffer_shape, np.complex64)

            # receive the data from the supervisor
            st = MPI.Status()
            comm.Recv(data, source=supervisor, status=st)

            int_no = st.tag

            # if the buffer is empty, we're done
            if st.count == 0:
                print(f"worker {rank}: received exit message, exiting")
                break

            # otherwise process the data we've received
            print(
                f"worker {rank}: received data for integration {int_no}, starting processing"
            )

            # run the correlator
            bl, freqs, vis = fxc.FXMaster(
                data,
                valid_ants,
                LFFT=args.fft_len,
                pfb=args.use_pfb,
                sample_rate=sample_rate,
                central_freq=tbn_center_freq,
                pol='xx' if args.use_pol == 0 else 'yy',
                return_baselines=True,
                gain_correct=True)

            # extract the frequency bin we want
            target_bin = np.argmin([abs(args.tx_freq - f) for f in freqs])
            vis_tbin = vis[:, target_bin]

            # baselines in wavelengths
            uvw = uvw_from_antenna_pairs(bl, wavelength=3e8 / args.tx_freq)

            # model fitting
            l_out, m_out, opt_result = fit_model_to_vis(uvw,
                                                        vis_tbin,
                                                        residual_function,
                                                        l_init,
                                                        m_init,
                                                        verbose=False)

            # convert direction cosines to sky coords
            src_elev, src_az = lm_to_ea(l_out, m_out)

            # write data to h5 file
            h5f['l_start'][int_no] = l_init
            h5f['m_start'][int_no] = m_init
            h5f['l_est'][int_no] = l_out
            h5f['m_est'][int_no] = m_out
            h5f['elevation'][int_no] = src_elev
            h5f['azimuth'][int_no] = src_az
            h5f['cost'][int_no] = opt_result['cost']
            h5f['nfev'][int_no] = opt_result['nfev']

            # compute the bin power and save it to the file
            # arbitrarily picking the tenth antenna in this list
            power_calc_data = data[10, :]
            h5f['snr_est'][int_no] = estimate_snr(power_calc_data,
                                                  args.fft_len, args.tx_freq,
                                                  sample_rate, tbn_center_freq)

            print(f"worker {rank}: done processing integration {int_no}")

    # back to common code for both supervisor and workers

    h5f.attrs['total_integrations'] = int_no
    h5f.close()
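A distilled, self-contained sketch of the ready/data/exit handshake used above (mpi4py; the names and toy payloads are illustrative, run with e.g. mpirun -n 3):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
supervisor = comm.Get_size() - 1

if rank == supervisor:
    chunks = [np.arange(4.0), np.arange(4.0) * 2]   # stand-in "integrations"
    for tag, chunk in enumerate(chunks):
        st = MPI.Status()
        comm.recv(status=st)                        # wait for a "ready"
        comm.Send(chunk, dest=st.source, tag=tag)   # hand out one work unit
    for _ in range(comm.Get_size() - 1):            # then tell everyone to exit
        st = MPI.Status()
        comm.recv(status=st)
        comm.Send(np.array([]), dest=st.source)     # empty array == exit
else:
    while True:
        comm.ssend("ready", dest=supervisor)
        buf = np.empty(4, np.float64)
        st = MPI.Status()
        comm.Recv(buf, source=supervisor, status=st)
        if st.count == 0:                           # empty message, so exit
            break
        print(f"rank {rank} processed tag {st.tag}: sum={buf.sum()}")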
Example #11
def main(args):
    # this first part of the code is run by all processes

    # set up MPI environment
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if size < 2:
        raise RuntimeError(
            f"This program requires at least two MPI processes to function. Please rerun with more resources"
        )

    # designate the last process as the supervisor/file reader
    supervisor = size - 1

    # open the TBN file for reading
    tbnf = LWASVDataFile(args.tbn_filename, ignore_timetag_errors=True)

    # figure out the details of the run we want to do
    tx_coords = known_transmitters.parse_args(args)
    antennas = station.antennas
    valid_ants, n_baselines = select_antennas(antennas, args.use_pol)
    n_ants = len(valid_ants)
    total_integrations, _ = compute_integration_numbers(
        tbnf, args.integration_length)

    sample_rate = tbnf.get_info('sample_rate')
    # some of our TBNs claim to have frame size 1024 but they are lying
    frame_size = 512
    tbn_center_freq = tbnf.get_info('freq1')

    # open the output HDF5 file and create datasets
    # because of the way parallelism in h5py works all processes (even ones
    # that don't write to the file) must do this
    h5f = build_output_file(args.hdf5_file,
                            tbnf,
                            valid_ants,
                            n_baselines,
                            args.integration_length,
                            tx_freq=args.tx_freq,
                            fft_len=args.fft_len,
                            use_pfb=args.use_pfb,
                            use_pol=args.use_pol,
                            transmitter_coords=tx_coords,
                            mpi_comm=comm)

    if args.point_finding_alg in ('all', 'peak'):
        h5f.create_dataset_like('l_peak', h5f['l_est'])
        h5f.create_dataset_like('m_peak', h5f['m_est'])
        h5f.create_dataset_like('elevation_peak', h5f['elevation'])
        h5f.create_dataset_like('azimuth_peak', h5f['azimuth'])
    if args.point_finding_alg in ('all', 'CoM'):
        h5f.create_dataset_like('l_CoM', h5f['l_est'])
        h5f.create_dataset_like('m_CoM', h5f['m_est'])
        h5f.create_dataset_like('elevation_CoM', h5f['elevation'])
        h5f.create_dataset_like('azimuth_CoM', h5f['azimuth'])
    if args.point_finding_alg not in ('all', 'peak', 'CoM'):
        raise NotImplementedError(
            f"Unrecognized point finding algorithm: {args.point_finding_alg}")
    del h5f['l_est']
    del h5f['m_est']
    del h5f['elevation']
    del h5f['azimuth']

    if rank == supervisor:
        # the supervisor process runs this code
        print("supervisor: started")

        # state info
        reached_end = False
        workers_alive = [True for _ in range(size - 1)]
        int_no = 0

        while True:
            if not reached_end:
                # grab data for the next available worker
                try:
                    duration, start_time, data = tbnf.read(
                        args.integration_length)
                    # only use data from valid antennas
                    data = data[[a.digitizer - 1 for a in valid_ants], :]
                except EOFError:
                    reached_end = True
                    print(f"supervisor: reached EOF")
                if int_no >= total_integrations:
                    print(f"supervisor: this is the last integration")
                    reached_end = True

            # get the next "ready" message from the workers
            st = MPI.Status()
            msg = comm.recv(status=st)
            if msg == "ready":
                print(
                    f"supervisor: received 'ready' message from worker {st.source}"
                )

                # if we're done, send an exit message and mark that we've killed this worker
                # an empty array indicates that the worker should exit
                if reached_end:
                    print(
                        f"supervisor: sending exit message to worker {st.source}"
                    )
                    comm.Send(np.array([]), dest=st.source, tag=int_no)
                    workers_alive[st.source] = False

                    if not any(workers_alive):
                        print(f"supervisor: all workers told to exit, goodbye")
                        break
                # otherwise, send the data to the worker for processing
                else:
                    print(
                        f"supervisor: sending data for integration {int_no}/{total_integrations} to worker {st.source}"
                    )
                    # Send with a capital S is optimized to send numpy arrays
                    comm.Send(data, dest=st.source, tag=int_no)
                    int_no += 1
            else:
                raise ValueError(
                    f"Supervisor received unrecognized message '{msg}' from worker {st.source}"
                )

        tbnf.close()

    else:
        # the worker processes run this code
        print(f"worker {rank} started")

        # workers don't need access to the TBN file
        tbnf.close()

        # figure out the size of the incoming data buffer
        samples_per_integration = int(
            round(args.integration_length * sample_rate /
                  frame_size)) * frame_size
        buffer_shape = (n_ants, samples_per_integration)

        while True:
            # send with a lowercase s can send any pickle-able python object
            # this is a synchronous send - it will block until the message is read by the supervisor
            # the other sends (e.g. comm.Send) only block until the message is safely taken by MPI, which might happen before the receiver actually reads it
            comm.ssend("ready", dest=supervisor)

            # build a buffer to be filled with data
            data = np.empty(buffer_shape, np.complex64)

            # receive the data from the supervisor
            st = MPI.Status()
            comm.Recv(data, source=supervisor, status=st)

            int_no = st.tag

            # if the buffer is empty, we're done
            if st.count == 0:
                print(f"worker {rank}: received exit message, exiting")
                break

            # otherwise process the data we've received
            print(
                f"worker {rank}: received data for integration {int_no}, starting processing"
            )

            # run the correlator
            bl, freqs, vis = fxc.FXMaster(
                data,
                valid_ants,
                LFFT=args.fft_len,
                pfb=args.use_pfb,
                sample_rate=sample_rate,
                central_freq=tbn_center_freq,
                pol='xx' if args.use_pol == 0 else 'yy',
                return_baselines=True,
                gain_correct=True)

            gridded_image = grid_visibilities(bl, freqs, vis, args.tx_freq,
                                              station)

            save_all_sky = (args.all_sky and int_no in args.all_sky) or (
                args.all_sky_every and int_no % args.all_sky_every == 0)

            if args.point_finding_alg in ('all', 'peak'):
                result = get_gimg_max(gridded_image, return_img=save_all_sky)
                l = result[0]
                m = result[1]
                src_elev, src_az = lm_to_ea(l, m)
                h5f['l_peak'][int_no] = l
                h5f['m_peak'][int_no] = m
                h5f['elevation_peak'][int_no] = src_elev
                h5f['azimuth_peak'][int_no] = src_az

            if args.point_finding_alg in ('all', 'CoM'):
                result = get_gimg_center_of_mass(gridded_image,
                                                 return_img=save_all_sky)
                l = result[0]
                m = result[1]
                src_elev, src_az = lm_to_ea(l, m)
                h5f['l_CoM'][int_no] = l
                h5f['m_CoM'][int_no] = m
                h5f['elevation_CoM'][int_no] = src_elev
                h5f['azimuth_CoM'][int_no] = src_az

            if save_all_sky:
                img = result[2]
                extent = result[3]
                fig, ax = plt.subplots()
                ax.imshow(img,
                          extent=extent,
                          origin='lower',
                          interpolation='nearest')
                plt.savefig('allsky_int_{}.png'.format(int_no))

            # compute the bin power and save it to the file
            # arbitrarily picking the tenth antenna in this list
            power_calc_data = data[10, :]
            h5f['snr_est'][int_no] = estimate_snr(power_calc_data,
                                                  args.fft_len, args.tx_freq,
                                                  sample_rate, tbn_center_freq)

            print(f"worker {rank}: done processing integration {int_no}")

    # back to common code for both supervisor and workers
    h5f.attrs['total_integrations'] = int_no
    h5f.close()
Example #12
def compute_visibilities(tbn_file,
                         ants,
                         target_freq,
                         station=stations.lwasv,
                         integration_length=1,
                         fft_length=16,
                         use_pol=0,
                         use_pfb=False):
    '''
    Integrates and correlates a TBN file to create an array of visibilities.

    Parameters:
        - tbn_file: TBN file object opened using lsl.reader.ldp.LWASVDataFile
        - ants: a list of antenna objects that should be used
        - target_freq: frequency of interest, in Hz (currently unused here;
          see the commented-out bin selection below)
        - station: LSL station object (default: LWASV)
        - integration_length: each integration is this many seconds long (default: 1)
        - fft_length: length of the FFT used in the FX correlator (default: 16)
        - use_pol: currently only supports 0 (X polarization) and 1 (Y polarization) (default: 0)
        - use_pfb: configures the method that the FX correlator uses  (default: False)
    Returns:
        (baseline_pairs, visibilities)
        baseline_pairs is a list of pairs of antenna objects indicating which visibility is from where.
        visibilities is a numpy array of visibility vectors, one for each integration. Visibilities within the vectors correspond to the antenna pairs in baselines.

    '''
    print("Extracting visibilities")
    print("| Station: {}".format(station))

    sample_rate, center_freq, n_samples, samples_per_integration, n_integrations = extract_tbn_metadata(
        tbn_file, station.antennas, integration_length)

    #sometimes strings are used to indicate polarizations
    pol_string = 'xx' if use_pol == 0 else 'yy'

    n_baselines = len(ants) * (len(ants) - 1) // 2  # thanks, Gauss

    print("\nComputing Visibilities:")

    vis_data = np.zeros((n_integrations, n_baselines, fft_length),
                        dtype=complex)

    for i in range(0, n_integrations):
        print("| Integration {}/{}".format(i, n_integrations - 1))
        #get one integration length of data
        duration, start_time, data = tbn_file.read(integration_length)

        # only use data from the valid antennas
        data = data[[a.digitizer - 1 for a in ants], :]

        baseline_pairs, freqs, visibilities = fxc.FXMaster(
            data,
            ants,
            LFFT=fft_length,
            pfb=use_pfb,
            include_auto=False,
            verbose=True,
            sample_rate=sample_rate,
            central_freq=center_freq,
            pol=pol_string,
            return_baselines=True,
            gain_correct=True)

        # # we only want the bin nearest to our target frequency
        # target_bin = np.argmin([abs(target_freq - f) for f in freqs])

        # visibilities = visibilities[:, target_bin]

        vis_data[i, :, :] = visibilities

    return (baseline_pairs, vis_data)
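A hypothetical call, following the docstring (the file name and antenna subset are placeholders):

from lsl.common import stations
from lsl.reader import ldp

tbnf = ldp.LWASVDataFile('capture.tbn')  # placeholder file name
ants = [a for a in stations.lwasv.antennas if a.pol == 0][:32]
baseline_pairs, vis_data = compute_visibilities(tbnf, ants,
                                                target_freq=10.0e6,
                                                fft_length=16)
tbnf.close()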
Example #13
def compute_visibilities_gen(tbn_file,
                             ants,
                             station=stations.lwasv,
                             integration_length=1,
                             fft_length=16,
                             use_pol=0,
                             use_pfb=False,
                             include_auto=False):
    '''
    Returns a generator that integrates and correlates a TBN file.  Each iteration of the generator returns the baselines and the visibilities for one integration.

    Parameters:
        - tbn_file: TBN file object opened using lsl.reader.ldp.LWASVDataFile
        - ants: a list of antenna objects that should be used
        - station: LSL station object (default: LWASV)
        - integration_length: each integration is this many seconds long (default: 1)
        - fft_length: length of the FFT used in the FX correlator (default: 16)
        - use_pol: currently only supports 0 (X polarization) and 1 (Y polarization) (default: 0)
        - use_pfb: configures the method that the FX correlator uses  (default: False)
    Returns:
        A generator that yields (baseline_pairs, freqs, visibilities).
        baseline_pairs is a list of pairs of antenna objects indicating which
        visibility is from where.
        freqs is a list of frequency bin centers.
        visibilities is a numpy array of visibility samples corresponding to
        the antenna pairs in baselines for each frequency bin.
    '''

    print('Generating visibilities')
    print('| Station: {}'.format(station))
    antennas = station.antennas

    sample_rate, center_freq, n_samples, samples_per_integration, n_integrations = extract_tbn_metadata(
        tbn_file, antennas, integration_length)

    #sometimes strings are used to indicate polarizations
    pol_string = 'xx' if use_pol == 0 else 'yy'

    # n_baselines = len(ants) * (len(ants) - 1) / 2

    print("\nComputing Visibilities:")

    for i in range(0, n_integrations):
        print("| Integration {}/{}".format(i, n_integrations - 1))
        # get one integration length of data
        try:
            duration, start_time, data = tbn_file.read(integration_length)
        except EOFError:
            print("Reached the end of the TBN file.")
            print("Looks like we calculated the frame numbers wrong. Oops.")
            return

        #only use data from the valid antennas
        data = data[[a.digitizer - 1 for a in ants], :]

        # correlate
        baseline_pairs, freqs, visibilities = fxc.FXMaster(
            data,
            ants,
            LFFT=fft_length,
            pfb=use_pfb,
            include_auto=include_auto,
            verbose=True,
            sample_rate=sample_rate,
            central_freq=center_freq,
            pol=pol_string,
            return_baselines=True,
            gain_correct=True)

        yield (baseline_pairs, freqs, visibilities)

    return
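Because this variant is a generator, integrations can be consumed lazily without holding every visibility set in memory; a hypothetical loop (tbnf and ants as in the previous example):

for baseline_pairs, freqs, visibilities in compute_visibilities_gen(
        tbnf, ants, fft_length=16, include_auto=True):
    # each yield is one integration; process or accumulate here
    print(visibilities.shape)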
Example #14
def process_chunk(idf,
                  site,
                  good,
                  filename,
                  LFFT=64,
                  overlap=1,
                  pfb=False,
                  pols=['xx', 'yy']):
    """
    Given an lsl.reader.ldp.TBWFile instances and various parameters for the 
    cross-correlation, write cross-correlate the data and save it to a file.
    """

    # Get antennas
    antennas = site.antennas

    # Get the metadata
    sample_rate = idf.get_info('sample_rate')

    # Create the list of good digitizers and a digitizer to Antenna instance mapping.
    # These are:
    #  toKeep  -> mapping of digitizer number to array location
    #  mapper -> mapping of Antenna instance to array location
    toKeep = [antennas[i].digitizer - 1 for i in good]
    mapper = [antennas[i] for i in good]

    # Create a list of unique stands to know what style of IDI file to create
    stands = set([antennas[i].stand.id for i in good])

    # Figure out the output mode
    if os.path.splitext(filename)[1].find('.ms_') != -1:
        writer_class = measurementset.Ms
    else:
        if len(stands) > 255:
            writer_class = fitsidi.ExtendedIdi
        else:
            writer_class = fitsidi.Idi

    wallTime = time.time()
    readT, t, data = idf.read()
    setTime = t
    ref_time = t

    # Set up the set time as a python datetime instance so that it can be easily printed
    setDT = setTime.datetime
    print("Working on set #1 (%.3f seconds after set #1 = %s)" %
          ((setTime - ref_time), setDT.strftime("%Y/%m/%d %H:%M:%S.%f")))

    # In order for the TBW stuff to actually run, we need to run it with sub-
    # integrations.  8 sub-integrations (61.2 ms / 8 = 7.7 ms per section)
    # seems to work ok with a "reasonable" number of channels.
    nSec = 8
    secSize = data.shape[1] // nSec

    # Loop over polarizations (there should be only 1)
    for pol in pols:
        print("-> %s" % pol)
        try:
            tempVis *= 0  # pylint:disable=undefined-variable
        except NameError:
            pass

        # Set up the progress bar so we can keep up with how the sub-integrations
        # are progressing
        pb = ProgressBar(max=nSec)
        sys.stdout.write(pb.show() + '\r')
        sys.stdout.flush()

        # Loop over sub-integrations (set by nSec)
        for k in range(nSec):
            blList, freq, vis = fxc.FXMaster(data[toKeep, k * secSize:(k + 1) *
                                                  secSize],
                                             mapper,
                                             LFFT=LFFT,
                                             overlap=overlap,
                                             pfb=pfb,
                                             include_auto=True,
                                             verbose=False,
                                             sample_rate=sample_rate,
                                             central_freq=0.0,
                                             pol=pol,
                                             return_baselines=True,
                                             gain_correct=True)

            toUse = numpy.where((freq >= 5.0e6) & (freq <= 93.0e6))
            toUse = toUse[0]

            try:
                tempVis += vis
            except NameError:
                tempVis = vis

            pb.inc(amount=1)
            sys.stdout.write(pb.show() + '\r')
            sys.stdout.flush()

        # Average the sub-integrations together
        vis = tempVis / float(nSec)

        # Set up the FITS IDI file if we need to
        if pol == pols[0]:
            pol1, pol2 = fxc.pol_to_pols(pol)

            fits = writer_class(filename, ref_time=ref_time)
            fits.set_stokes(pols)
            fits.set_frequency(freq[toUse])
            fits.set_geometry(site, [a for a in mapper if a.pol == pol1])

        # Add the visibilities
        fits.add_data_set(setTime, readT, blList, vis[:, toUse], pol=pol)
        sys.stdout.write(pb.show() + '\r')
        sys.stdout.write('\n')
        sys.stdout.flush()
    print("->  Cummulative Wall Time: %.3f s (%.3f s per integration)" %
          ((time.time() - wallTime), (time.time() - wallTime)))

    fits.write()
    fits.close()
    del fits
    del data
    del vis
    return True