Example #1
    def run_stokesmaster_test_real(self,
                                   dtype,
                                   nchan=256,
                                   window=fx.null_window):
        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        fakeData = 10.0 * numpy.random.rand(self.nAnt, nchan * 8) + 3.0
        fakeData = fakeData.astype(dtype)
        freq, spectra = fx.StokesMaster(fakeData,
                                        antennas[:self.nAnt],
                                        LFFT=nchan,
                                        window=window)

        # Numpy comparison
        spectra2 = numpy.zeros_like(spectra)
        LFFT = spectra2.shape[2]
        nFFT = fakeData.shape[1] // 2 // LFFT
        wndw = window(2 * LFFT)
        for i in range(self.nAnt // 2):
            for j in range(nFFT):
                xF = numpy.fft.fft(
                    fakeData[2 * i + 0, j * 2 * LFFT:(j + 1) * 2 * LFFT] *
                    wndw)[:LFFT]
                yF = numpy.fft.fft(
                    fakeData[2 * i + 1, j * 2 * LFFT:(j + 1) * 2 * LFFT] *
                    wndw)[:LFFT]

                spectra2[0, i, :] += numpy.abs(xF)**2 + numpy.abs(yF)**2
                spectra2[1, i, :] += numpy.abs(xF)**2 - numpy.abs(yF)**2
                spectra2[2, i, :] += 2 * (xF * yF.conj()).real
                spectra2[3, i, :] += 2 * (xF * yF.conj()).imag
        spectra2 /= (2 * LFFT * nFFT)
        lsl.testing.assert_allclose(spectra, spectra2)
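
The four rows accumulated in the comparison above are the Stokes-style combinations of the per-polarization FFTs. As a reading aid, here is a minimal sketch restating that mapping (the helper name is illustrative, not part of the test suite):

import numpy

def stokes_from_xy(xF, yF):
    # Combinations used in the NumPy comparison above:
    # row 0: |X|^2 + |Y|^2, row 1: |X|^2 - |Y|^2,
    # row 2: 2*Re(X Y*),    row 3: 2*Im(X Y*)
    stokesI = numpy.abs(xF)**2 + numpy.abs(yF)**2
    stokesQ = numpy.abs(xF)**2 - numpy.abs(yF)**2
    stokesU = 2 * (xF * yF.conj()).real
    stokesV = 2 * (xF * yF.conj()).imag
    return stokesI, stokesQ, stokesU, stokesV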
Example #2
    def test_spectra_complex_pfb(self):
        """Test the PFB version of the StokesMaster function on complex-valued data."""

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        for dtype in (numpy.complex64, numpy.complex128):
            fakeData = numpy.random.rand(
                self.nAnt, 1024 *
                4) + 1j * numpy.random.rand(self.nAnt, 1024 * 4) + 3.0 + 3.0j
            fakeData = fakeData.astype(dtype)
            freq, spectra = fx.StokesMaster(fakeData,
                                            antennas[:self.nAnt],
                                            pfb=True,
                                            sample_rate=1e5,
                                            central_freq=38e6)

            # Numpy comparison
            spectra2 = numpy.zeros_like(spectra)
            LFFT = spectra2.shape[2]
            nFFT = fakeData.shape[1] // LFFT
            for i in range(self.nAnt // 2):
                for j in range(nFFT):
                    xF = numpy.fft.fftshift(
                        _pfb(fakeData[2 * i + 0, :], j * LFFT, LFFT))
                    yF = numpy.fft.fftshift(
                        _pfb(fakeData[2 * i + 1, :], j * LFFT, LFFT))

                    spectra2[0, i, :] += numpy.abs(xF)**2 + numpy.abs(yF)**2
                    spectra2[1, i, :] += numpy.abs(xF)**2 - numpy.abs(yF)**2
                    spectra2[2, i, :] += 2 * (xF * yF.conj()).real
                    spectra2[3, i, :] += 2 * (xF * yF.conj()).imag
            spectra2 /= (LFFT * nFFT)
            lsl.testing.assert_allclose(spectra, spectra2)
Example #3
def main(args):
    # Get a list of stands to work on
    stands = args.stand

    # Set the station
    if args.metadata is not None:
        station = stations.parse_ssmif(args.metadata)
    else:
        station = stations.lwa1

    # Match the stands to ASP channels
    ants = []
    antennas = station.antennas
    for stand in stands:
        for antenna in antennas:
            if antenna.stand.id == stand:
                ants.append(antenna)

    # Report
    for ant in ants:
        c = ant.arx.asp_channel
        print("Stand %i, pol. %i" % (ant.stand.id, ant.pol))
        print("  Antenna: %i" % ant.id)
        print("  ARX Board: %i" % (c / 16 + 1, ))
        print("      SN: %s" % ant.arx.id)
        print("      Channel: %s" % ant.arx.channel)
        print("      Control: %s" % ((c + c % 2) / 2, ))
Example #4
    def _retrieve(self):
        """
        Pull the file from the archive, parse it, and save the various results.
        """

        # Pull the data from the archive
        ah = urlopen("https://lda10g.alliance.unm.edu/metadata/lwa1/ssmif/%s" %
                     self.filename)
        contents = ah.read()
        ah.close()

        # Save it to a file
        _, filename = tempfile.mkstemp(suffix='.txt', prefix='SSMIF')
        fh = open(filename, 'wb')
        fh.write(contents)
        fh.close()

        # Parse the SSMIF
        station = parse_ssmif(filename)

        # Cleanup
        os.unlink(filename)

        # Save and done
        self.contents = contents
        self.station = station
        return True
Example #5
    def test_spectra_real_pfb(self):
        """Test the PFB version of the StokesMaster function on real-valued data."""

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        for dtype in (numpy.int8, numpy.int16, numpy.int32, numpy.int64,
                      numpy.float32, numpy.float64):
            fakeData = 10.0 * numpy.random.rand(self.nAnt, 1024 * 4) + 3.0
            fakeData = fakeData.astype(dtype)
            freq, spectra = fx.StokesMaster(fakeData,
                                            antennas[:self.nAnt],
                                            pfb=True)

            # Numpy comparison
            spectra2 = numpy.zeros_like(spectra)
            LFFT = spectra2.shape[2]
            nFFT = fakeData.shape[1] // 2 // LFFT
            for i in range(self.nAnt // 2):
                for j in range(nFFT):
                    xF = _pfb(fakeData[2 * i + 0, :], 2 * j * LFFT,
                              2 * LFFT)[:LFFT]
                    yF = _pfb(fakeData[2 * i + 1, :], 2 * j * LFFT,
                              2 * LFFT)[:LFFT]

                    spectra2[0, i, :] += numpy.abs(xF)**2 + numpy.abs(yF)**2
                    spectra2[1, i, :] += numpy.abs(xF)**2 - numpy.abs(yF)**2
                    spectra2[2, i, :] += 2 * (xF * yF.conj()).real
                    spectra2[3, i, :] += 2 * (xF * yF.conj()).imag
            spectra2 /= (2 * LFFT * nFFT)
            lsl.testing.assert_allclose(spectra, spectra2)
Example #6
    def run_correlator_test_complex(self,
                                    dtype,
                                    nchan=256,
                                    window=fx.null_window):
        fakeData = numpy.random.rand(
            self.nAnt,
            nchan * 4) + 1j * numpy.random.rand(self.nAnt, nchan * 4)
        fakeData = fakeData.astype(dtype)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        freq, cps = fx.FXMaster(fakeData,
                                antennas[:self.nAnt],
                                LFFT=nchan,
                                sample_rate=1e5,
                                central_freq=38e6,
                                window=window)

        # Numpy comparison
        for i in range(self.nAnt):
            antennas[i].stand.x = 0.0
            antennas[i].stand.y = 0.0
            antennas[i].stand.z = 0.0
            antennas[i].cable.length = 0.0

        freq, cps = fx.FXMaster(fakeData,
                                antennas[:self.nAnt],
                                LFFT=nchan,
                                sample_rate=1e5,
                                central_freq=38e6,
                                window=window)

        cps2 = numpy.zeros_like(cps)
        LFFT = cps.shape[1]
        nFFT = fakeData.shape[1] // LFFT
        wndw = window(LFFT)
        blc = 0
        for i in range(0, self.nAnt):
            if antennas[i].pol != 0:
                continue
            for j in range(i + 1, self.nAnt):
                if antennas[j].pol != 0:
                    continue

                for k in range(nFFT):
                    f1 = numpy.fft.fftshift(
                        numpy.fft.fft(fakeData[i, k * LFFT:(k + 1) * LFFT] *
                                      wndw))
                    f2 = numpy.fft.fftshift(
                        numpy.fft.fft(fakeData[j, k * LFFT:(k + 1) * LFFT] *
                                      wndw))

                    cps2[blc, :] += f1 * f2.conj()
                blc += 1
        cps2 /= (LFFT * nFFT)
        lsl.testing.assert_allclose(cps, cps2)
Example #7
    def run_correlator_test_real(self,
                                 dtype,
                                 nchan=256,
                                 window=fx.null_window):
        fakeData = 10.0 * numpy.random.rand(self.nAnt, nchan * 8) + 3.0
        fakeData = fakeData.astype(dtype)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        freq, cps = fx.FXStokes(fakeData,
                                antennas[:self.nAnt],
                                LFFT=nchan,
                                window=window)

        # Numpy comparison
        for i in range(self.nAnt):
            antennas[i].stand.x = 0.0
            antennas[i].stand.y = 0.0
            antennas[i].stand.z = 0.0
            antennas[i].cable.length = 0.0

        freq, cps = fx.FXStokes(fakeData,
                                antennas[:self.nAnt],
                                LFFT=nchan,
                                window=window)

        cps2 = numpy.zeros_like(cps)
        LFFT = cps.shape[2]
        nFFT = fakeData.shape[1] // 2 // LFFT
        wndw = window(2 * LFFT)
        blc = 0
        for i in range(0, self.nAnt // 2):
            for j in range(i + 1, self.nAnt // 2):
                for k in range(nFFT):
                    f1X = numpy.fft.fft(
                        fakeData[2 * i + 0, k * 2 * LFFT:(k + 1) * 2 * LFFT] *
                        wndw)[:LFFT]
                    f1Y = numpy.fft.fft(
                        fakeData[2 * i + 1, k * 2 * LFFT:(k + 1) * 2 * LFFT] *
                        wndw)[:LFFT]
                    f2X = numpy.fft.fft(
                        fakeData[2 * j + 0, k * 2 * LFFT:(k + 1) * 2 * LFFT] *
                        wndw)[:LFFT]
                    f2Y = numpy.fft.fft(
                        fakeData[2 * j + 1, k * 2 * LFFT:(k + 1) * 2 * LFFT] *
                        wndw)[:LFFT]

                    cps2[0, blc, :] += f1X * f2X.conj() + f1Y * f2Y.conj()
                    cps2[1, blc, :] += f1X * f2X.conj() - f1Y * f2Y.conj()
                    cps2[2, blc, :] += f1X * f2Y.conj() + f1X.conj() * f2Y
                    cps2[3, blc, :] += (f1X * f2Y.conj() -
                                        f1X.conj() * f2Y) / 1j
                blc += 1
        cps2 /= (2 * LFFT * nFFT)
        lsl.testing.assert_allclose(cps, cps2)
Example #8
    def test_correlator_pol(self):
        """Test various correlator polarization settings."""

        fakeData = numpy.random.rand(
            self.nAnt, 1024) + 1j * numpy.random.rand(self.nAnt, 1024)
        fakeData = fakeData.astype(numpy.csingle)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        ## XX
        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True,
                                        pol='XX')
        for (ant1, ant2) in blList:
            self.assertEqual(ant1.pol, 0)
            self.assertEqual(ant2.pol, 0)

        ## YY
        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True,
                                        pol='YY')
        for (ant1, ant2) in blList:
            self.assertEqual(ant1.pol, 1)
            self.assertEqual(ant2.pol, 1)

        ## XY
        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True,
                                        pol='XY')
        for (ant1, ant2) in blList:
            self.assertEqual(ant1.pol, 0)
            self.assertEqual(ant2.pol, 1)

        ## YX
        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True,
                                        pol='YX')
        for (ant1, ant2) in blList:
            self.assertEqual(ant1.pol, 1)
            self.assertEqual(ant2.pol, 0)
Example #9
    def test_ssmif(self):
        """Test the SSMIF parser."""

        filenames = [os.path.join(DATA_BUILD, 'lwa1-ssmif.txt'),
                     os.path.join(DATA_BUILD, 'lwasv-ssmif.txt'),
                     os.path.join(DATA_BUILD, 'tests', 'ssmif.dat'),
                     os.path.join(DATA_BUILD, 'tests', 'ssmif-adp.dat')]
        sites = ['LWA1', 'LWA-SV', 'LWA1', 'LWA-SV']
        types = ['text', 'text', 'binary', 'binary']
        for filename, site, type in zip(filenames, sites, types):
            with self.subTest(station=site, type=type):
                out = stations.parse_ssmif(filename)
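
For reference, a minimal sketch of calling parse_ssmif directly (the file path is illustrative); the attributes touched here are the ones these examples rely on:

from lsl.common import stations

station = stations.parse_ssmif('lwa1-ssmif.txt')  # illustrative path
print(station.name, station.lat, station.lon, station.elev)
for ant in station.antennas[:4]:
    print(ant.id, ant.stand.id, ant.pol, ant.digitizer)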
Example #10
    def test_correlator_complex_pfb(self):
        """Test the C-based PFB version of the correlator on complex-valued data."""

        fakeData = numpy.random.rand(
            self.nAnt, 1024 * 4) + 1j * numpy.random.rand(self.nAnt, 1024 * 4)
        fakeData = fakeData.astype(numpy.csingle)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        freq, cps = fx.FXStokes(fakeData,
                                antennas[:self.nAnt],
                                pfb=True,
                                sample_rate=1e5,
                                central_freq=38e6)

        # Numpy comparison
        for i in range(self.nAnt):
            antennas[i].stand.x = 0.0
            antennas[i].stand.y = 0.0
            antennas[i].stand.z = 0.0
            antennas[i].cable.length = 0.0

        freq, cps = fx.FXStokes(fakeData,
                                antennas[:self.nAnt],
                                pfb=True,
                                sample_rate=1e5,
                                central_freq=38e6)

        cps2 = numpy.zeros_like(cps)
        LFFT = cps.shape[2]
        nFFT = fakeData.shape[1] // LFFT
        blc = 0
        for i in range(0, self.nAnt // 2):
            for j in range(i + 1, self.nAnt // 2):
                for k in range(nFFT):
                    f1X = numpy.fft.fftshift(
                        _pfb(fakeData[2 * i + 0, :], k * LFFT, LFFT))
                    f1Y = numpy.fft.fftshift(
                        _pfb(fakeData[2 * i + 1, :], k * LFFT, LFFT))
                    f2X = numpy.fft.fftshift(
                        _pfb(fakeData[2 * j + 0, :], k * LFFT, LFFT))
                    f2Y = numpy.fft.fftshift(
                        _pfb(fakeData[2 * j + 1, :], k * LFFT, LFFT))

                    cps2[0, blc, :] += f1X * f2X.conj() + f1Y * f2Y.conj()
                    cps2[1, blc, :] += f1X * f2X.conj() - f1Y * f2Y.conj()
                    cps2[2, blc, :] += f1X * f2Y.conj() + f1X.conj() * f2Y
                    cps2[3, blc, :] += (f1X * f2Y.conj() -
                                        f1X.conj() * f2Y) / 1j
                blc += 1
        cps2 /= (LFFT * nFFT)
        lsl.testing.assert_allclose(cps, cps2)
Example #11
def main(args):
    #
    # Load in the data
    #
    ssmifContents = open(args.ssmif, 'r').readlines()
    site     = parse_ssmif(args.ssmif)
    dataFile = numpy.loadtxt(args.filename)
    
    #
    # Gather the station meta-data from its various sources
    #
    observer = site.get_observer()
    antennas = site.antennas
    
    #
    # Match the new stretch factors to the antennas
    #
    factors = [1.0 for i in xrange(len(antennas))]
    for i in xrange(dataFile.shape[0]):
        dig, stretch, addDelay, rms, chi2 = dataFile[i,:]
        dig = int(dig)
        antenna = antennas[dig-1]
        if antenna.stand.id in args.exclude:
            continue
            
        factors[antenna.id-1] = stretch
        
    #
    # Final results
    #
    if args.output is not None:
        fh = open(args.output, 'w')
    else:
        fh = sys.stdout
        
    for line in ssmifContents:
        if line[0:8] == 'RPD_STR[':
            start = line.find('[')
            stop  = line.find(']')
            try:
                junk, toSave = line.split('#', 1)
                toSave = " # %s" % toSave
            except ValueError:
                toSave = "\n"
            
            antID = int(line[start+1:stop])
            fh.write("RPD_STR[%i]  %.4f%s" % (antID, factors[antID-1], toSave))
        else:
            fh.write(line)
            
    if args.output is not None:
        fh.close()
Example #12
    def test_correlator_complex_pfb(self):
        """Test the C-based PFB version of the correlator on complex-valued data."""

        for dtype in (numpy.complex64, numpy.complex128):
            fakeData = numpy.random.rand(
                self.nAnt,
                1024 * 4) + 1j * numpy.random.rand(self.nAnt, 1024 * 4)
            fakeData = fakeData.astype(dtype)

            station = stations.parse_ssmif(_SSMIF)
            antennas = station.antennas

            freq, cps = fx.FXMaster(fakeData,
                                    antennas[:self.nAnt],
                                    pfb=True,
                                    sample_rate=1e5,
                                    central_freq=38e6)

            # Numpy comparison
            for i in range(self.nAnt):
                antennas[i].stand.x = 0.0
                antennas[i].stand.y = 0.0
                antennas[i].stand.z = 0.0
                antennas[i].cable.length = 0.0

            freq, cps = fx.FXMaster(fakeData,
                                    antennas[:self.nAnt],
                                    pfb=True,
                                    sample_rate=1e5,
                                    central_freq=38e6)

            cps2 = numpy.zeros_like(cps)
            LFFT = cps.shape[1]
            nFFT = fakeData.shape[1] // LFFT
            blc = 0
            for i in range(0, self.nAnt):
                if antennas[i].pol != 0:
                    continue
                for j in range(i + 1, self.nAnt):
                    if antennas[j].pol != 0:
                        continue

                    for k in range(nFFT):
                        f1 = numpy.fft.fftshift(
                            _pfb(fakeData[i, :], LFFT * k, LFFT))
                        f2 = numpy.fft.fftshift(
                            _pfb(fakeData[j, :], LFFT * k, LFFT))

                        cps2[blc, :] += f1 * f2.conj()
                    blc += 1
            cps2 /= (LFFT * nFFT)
            lsl.testing.assert_allclose(cps, cps2)
Example #13
    def test_correlator_baselines(self):
        """Test that the return_baselines keyword works."""

        fakeData = numpy.random.rand(
            self.nAnt, 1024) + 1j * numpy.random.rand(self.nAnt, 1024)
        fakeData = fakeData.astype(numpy.csingle)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        blList, freq, cps = fx.FXMaster(fakeData,
                                        antennas[:self.nAnt],
                                        sample_rate=1e5,
                                        central_freq=38e6,
                                        return_baselines=True)
Example #14
    def test_correlator_gaincorrect(self):
        """Test appling gain correction to the correlator output."""

        fakeData = numpy.random.rand(
            self.nAnt, 1024) + 1j * numpy.random.rand(self.nAnt, 1024)
        fakeData = fakeData.astype(numpy.csingle)

        station = stations.parse_ssmif(_SSMIF)
        antennas = station.antennas

        freq, cps = fx.FXMaster(fakeData,
                                antennas[:self.nAnt],
                                sample_rate=1e5,
                                central_freq=38e6,
                                gain_correct=True)
Example #15
    def test_correlator_real_pfb(self):
        """Test the C-based PFB version of the correlator on real-valued data."""

        for dtype in (numpy.int8, numpy.int16, numpy.int32, numpy.int64,
                      numpy.float32, numpy.float64):
            fakeData = 10.0 * numpy.random.rand(self.nAnt, 1024 * 4) + 3.0
            fakeData = fakeData.astype(dtype)

            station = stations.parse_ssmif(_SSMIF)
            antennas = station.antennas

            freq, cps = fx.FXStokes(fakeData, antennas[:self.nAnt], pfb=True)

            # Numpy comparison
            for i in range(self.nAnt):
                antennas[i].stand.x = 0.0
                antennas[i].stand.y = 0.0
                antennas[i].stand.z = 0.0
                antennas[i].cable.length = 0.0

            freq, cps = fx.FXStokes(fakeData, antennas[:self.nAnt], pfb=True)

            cps2 = numpy.zeros_like(cps)
            LFFT = cps.shape[2]
            nFFT = fakeData.shape[1] // 2 // LFFT
            blc = 0
            for i in range(0, self.nAnt // 2):
                for j in range(i + 1, self.nAnt // 2):
                    for k in range(nFFT):
                        f1X = _pfb(fakeData[2 * i + 0, :], k * 2 * LFFT,
                                   2 * LFFT)[:LFFT]
                        f1Y = _pfb(fakeData[2 * i + 1, :], k * 2 * LFFT,
                                   2 * LFFT)[:LFFT]
                        f2X = _pfb(fakeData[2 * j + 0, :], k * 2 * LFFT,
                                   2 * LFFT)[:LFFT]
                        f2Y = _pfb(fakeData[2 * j + 1, :], k * 2 * LFFT,
                                   2 * LFFT)[:LFFT]

                        cps2[0, blc, :] += f1X * f2X.conj() + f1Y * f2Y.conj()
                        cps2[1, blc, :] += f1X * f2X.conj() - f1Y * f2Y.conj()
                        cps2[2, blc, :] += f1X * f2Y.conj() + f1X.conj() * f2Y
                        cps2[3, blc, :] += (f1X * f2Y.conj() -
                                            f1X.conj() * f2Y) / 1j
                    blc += 1
            cps2 /= (2 * LFFT * nFFT)
            lsl.testing.assert_allclose(cps, cps2)
Example #16
def get_station(tarname, apply_sdm=True):
    """
    Given an MCS meta-data tarball, extract the information stored in the ssmif.dat
    file and return a :class:`lsl.common.stations.LWAStation` object.  Optionally,
    update the :class:`lsl.common.stations.Antenna` instances associated with the
    LWAStation object using the included SDM file.
    
    If an ssmif.dat file cannot be found in the tarball, None is returned.
    """

    with managed_mkdtemp(prefix='metadata-bundle-') as tempDir:
        # Extract the SSMIF and SDM files.  If the ssmif.dat file cannot be found, None
        # is returned via the try...except block
        tf = _open_tarball(tarname)
        try:
            ti = tf.getmember('ssmif.dat')
        except KeyError:
            return None
        tf.extractall(path=tempDir, members=[
            ti,
        ])

        # Read in the SSMIF
        station = stations.parse_ssmif(os.path.join(tempDir, 'ssmif.dat'))

        # Get the beamformer minimum delay, if found
        mindelay = get_beamformer_min_delay(tarname)
        if mindelay is not None:
            station.beamformer_min_delay_samples = mindelay
            station.beamformer_min_delay = mindelay / fS

        # Get the SDM (if we need to)
        if apply_sdm:
            dynamic = get_sdm(tarname)
        else:
            dynamic = None

        # Update the SSMIF entries
        if dynamic is not None:
            newAnts = dynamic.update_antennas(station.antennas)
            station.antennas = newAnts

    # Return
    return station
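
A minimal usage sketch for get_station (the tarball name is hypothetical), assuming the function above is in scope and the bundle contains an ssmif.dat entry as described in the docstring:

station = get_station('lwa1-metadata.tgz', apply_sdm=True)  # hypothetical tarball
if station is None:
    print("No ssmif.dat found in the tarball")
else:
    print(station.name, len(station.antennas))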
Example #17
    def test_correlator_real_pfb(self):
        """Test the C-based PFB version of the correlator on real-valued data."""

        for dtype in (numpy.int8, numpy.int16, numpy.int32, numpy.int64,
                      numpy.float32, numpy.float64):
            fakeData = 10.0 * numpy.random.rand(self.nAnt, 1024 * 4) + 3.0
            fakeData = fakeData.astype(dtype)

            station = stations.parse_ssmif(_SSMIF)
            antennas = station.antennas

            freq, cps = fx.FXMaster(fakeData, antennas[:self.nAnt], pfb=True)

            # Numpy comparison
            for i in range(self.nAnt):
                antennas[i].stand.x = 0.0
                antennas[i].stand.y = 0.0
                antennas[i].stand.z = 0.0
                antennas[i].cable.length = 0.0

            freq, cps = fx.FXMaster(fakeData, antennas[:self.nAnt], pfb=True)

            cps2 = numpy.zeros_like(cps)
            LFFT = cps.shape[1]
            nFFT = fakeData.shape[1] // 2 // LFFT
            blc = 0
            for i in range(0, self.nAnt):
                if antennas[i].pol != 0:
                    continue
                for j in range(i + 1, self.nAnt):
                    if antennas[j].pol != 0:
                        continue

                    for k in range(nFFT):
                        f1 = _pfb(fakeData[i, :], 2 * LFFT * k,
                                  2 * LFFT)[:LFFT]
                        f2 = _pfb(fakeData[j, :], 2 * LFFT * k,
                                  2 * LFFT)[:LFFT]

                        cps2[blc, :] += f1 * f2.conj()
                    blc += 1
            cps2 /= (2 * LFFT * nFFT)
            lsl.testing.assert_allclose(cps, cps2)
Example #18
def main(args):
    filename = args.filename

    station = parse_ssmif(filename)
    antennas = station.antennas

    digs    = numpy.array([ant.digitizer  for ant in antennas])
    ants    = numpy.array([ant.id         for ant in antennas])
    stands  = numpy.array([ant.stand.id   for ant in antennas])
    pols    = numpy.array([ant.pol        for ant in antennas])
    antStat = numpy.array([ant.status     for ant in antennas])
    feeStat = numpy.array([ant.fee.status for ant in antennas])

    badStands = numpy.where( antStat != 3 )[0]
    badFees   = numpy.where( feeStat != 3 )[0]
    bad = numpy.where( (stands > 256) | (antStat != 3) | (feeStat != 3) )[0]
    print("Number of bad stands:   %3i" % len(badStands))
    print("Number of bad FEEs:     %3i" % len(badFees))
    print("---------------------------")
    print("Total number bad inuts: %3i" % len(bad))
    print(" ")

    dftBase = 'beams_%iMHz_%iaz_%iel_%03ibg' % (args.frequency/1e6, args.azimuth, args.elevation, args.gain*100)
    gftBase = 'beams_%iMHz_%iaz_%iel_%03ibg' % (args.frequency/1e6, args.azimuth, args.elevation, args.gain*100)

    print("Calculating delays for az. %.2f, el. %.2f at %.2f MHz" % (args.azimuth, args.elevation, args.frequency/1e6))
    delays = beamformer.calc_delay(antennas, freq=args.frequency, azimuth=args.azimuth, elevation=args.elevation)
    delays *= 1e9
    delays = delays.max() - delays
    junk = delay.list2delayfile('.', dftBase, delays)

    print("Setting gains for %i good inputs, %i bad inputs" % (len(antennas)-len(bad), len(bad)))
    bgain = args.gain
    bgain_cross = 0.0000
    gains = [[bgain, bgain_cross, bgain_cross, bgain]]*260 # initialize gain list
    for d in digs[bad]:
        # Digitizers start at 1, list indices at 0
        i = d - 1
        gains[i//2] = [0,0,0,0]
    junk = gain.list2gainfile('.', gftBase, gains)

    print("\nDelay and gain files are:\n %s.dft\n %s.gft" % (dftBase, gftBase))
Example #19
    def test_CorrelatedDataIDI_AltArrayGeometry(self):
        """Test the utils.CorrelatedDataIDI class on determing array geometry."""

        # Open the FITS IDI files
        idi1 = utils.CorrelatedData(idiFile)
        idi2 = utils.CorrelatedData(idiAltFile)

        # Dates
        self.assertEqual(idi1.date_obs.strftime("%Y-%m-%dT%H:%M:%S"),
                         idi2.date_obs.strftime("%Y-%m-%dT%H:%M:%S"))

        # Stand and baseline counts
        self.assertEqual(len(idi1.stands), len(idi2.stands))
        self.assertEqual(idi1.total_baseline_count, idi2.total_baseline_count)
        self.assertEqual(idi1.integration_count, idi2.integration_count)

        # Check stands
        for s1, s2 in zip(idi1.stands, idi2.stands):
            self.assertEqual(s1, s2)

        # Check stations
        station1 = parse_ssmif(idiSSMIFFile)
        station2 = idi2.station
        self.assertAlmostEqual(station1.lat, station2.lat, 3)
        self.assertAlmostEqual(station1.lon, station2.lon, 3)
        self.assertAlmostEqual(station1.elev, station2.elev, 1)

        # Check antennas
        ants1 = [a for a in station1.antennas if a.pol == 0]
        ants2 = station2.antennas
        for a1, a2 in zip(ants1, ants2):
            self.assertEqual(a1.id, a2.id)
            self.assertEqual(a1.stand.id, a2.stand.id)
            self.assertAlmostEqual(a1.stand.x, a2.stand.x, 2)
            self.assertAlmostEqual(a1.stand.y, a2.stand.y, 2)
            self.assertAlmostEqual(a1.stand.z, a2.stand.z, 2)

        idi1.close()
        idi2.close()
Example #20
def main(args):
    # Set the station
    if args.metadata is not None:
        station = stations.parse_ssmif(args.metadata)
        ssmifContents = open(args.metadata).readlines()
    else:
        station = stations.lwa1
        ssmifContents = open(os.path.join(dataPath,
                                          'lwa1-ssmif.txt')).readlines()
    antennas = station.antennas

    # Length of the FFT
    LFFT = args.fft_length

    # Make sure that the file chunk size is an integer multiple
    # of the FFT length so that no data gets dropped
    maxFrames = int((30000 * 260) / float(LFFT)) * LFFT
    # It seems like that would be a good idea, however...  TBW data comes one
    # capture at a time so doing something like this actually truncates data
    # from the last set of stands for the first integration.  So, we really
    # should stick with
    maxFrames = (30000 * 260)

    fh = open(args.filename, "rb")
    nFrames = os.path.getsize(args.filename) // tbw.FRAME_SIZE
    dataBits = tbw.get_data_bits(fh)
    # The number of ant/pols in the file is hard coded because I cannot figure out
    # a way to get this number in a systematic fashion
    antpols = len(antennas)
    nChunks = int(math.ceil(1.0 * nFrames / maxFrames))
    if dataBits == 12:
        nSamples = 400
    else:
        nSamples = 1200

    # Read in the first frame and get the date/time of the first sample
    # of the frame.  This is needed to get the list of stands.
    junkFrame = tbw.read_frame(fh)
    fh.seek(0)
    beginDate = junkFrame.time.datetime

    # File summary
    print("Filename: %s" % args.filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Ant/Pols: %i" % antpols)
    print("Sample Length: %i-bit" % dataBits)
    print("Frames: %i" % nFrames)
    print("Chunks: %i" % nChunks)
    print("===")

    nChunks = 1

    # Skip over any non-TBW frames at the beginning of the file
    i = 0
    junkFrame = tbw.read_frame(fh)
    while not junkFrame.header.is_tbw:
        try:
            junkFrame = tbw.read_frame(fh)
        except errors.SyncError:
            fh.seek(0)
            while True:
                try:
                    junkFrame = tbn.read_frame(fh)
                    i += 1
                except errors.SyncError:
                    break
            fh.seek(-2 * tbn.FRAME_SIZE, 1)
            junkFrame = tbw.read_frame(fh)
        i += 1
    fh.seek(-tbw.FRAME_SIZE, 1)
    print("Skipped %i non-TBW frames at the beginning of the file" % i)

    # Setup the window function to use
    if args.pfb:
        window = fxc.null_window
    elif args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window

    base, ext = os.path.splitext(args.filename)
    base = os.path.basename(base)
    if (not os.path.exists("%s.npz" % base)) or args.force:
        # Master loop over all of the file chunks
        masterSpectra = numpy.zeros((nChunks, antpols, LFFT))
        for i in range(nChunks):
            # Find out how many frames remain in the file.  If this number is larger
            # than the maximum number of frames we can work with at a time (maxFrames),
            # only deal with that chunk
            framesRemaining = nFrames - i * maxFrames
            if framesRemaining > maxFrames:
                framesWork = maxFrames
            else:
                framesWork = framesRemaining
            print("Working on chunk %i, %i frames remaining" %
                  ((i + 1), framesRemaining))

            data = numpy.memmap('temp.mmap',
                                dtype=numpy.int16,
                                mode='w+',
                                shape=(antpols,
                                       2 * 30000 * 260 * nSamples // antpols))
            # If there are fewer frames than we need to fill an FFT, skip this chunk
            if data.shape[1] < 2 * LFFT:
                break
            # Inner loop that actually reads the frames into the data array
            for j in range(framesWork):
                # Read in the next frame and anticipate any problems that could occur
                try:
                    cFrame = tbw.read_frame(fh)
                except errors.EOFError:
                    break
                except errors.SyncError:
                    print("WARNING: Mark 5C sync error on frame #%i" %
                          (int(fh.tell()) // tbw.FRAME_SIZE - 1))
                    continue
                if not cFrame.header.is_tbw:
                    continue

                stand = cFrame.header.id
                # In the current configuration, stands start at 1 and go up to 10.  So, we
                # can use this little trick to populate the data array
                aStand = 2 * (stand - 1)
                if cFrame.header.frame_count % 10000 == 0 and args.verbose:
                    print("%3i -> %3i  %6.3f  %5i  %i" %
                          (stand, aStand, cFrame.time,
                           cFrame.header.frame_count, cFrame.payload.timetag))

                # Actually load the data.  x pol goes into the even numbers, y pol into the
                # odd numbers
                count = cFrame.header.frame_count - 1
                data[aStand, count * nSamples:(count + 1) *
                     nSamples] = 1 * cFrame.payload.data[0, :]
                data[aStand + 1, count * nSamples:(count + 1) *
                     nSamples] = 1 * cFrame.payload.data[1, :]
                del cFrame

            # Calculate the spectra for this block of data and then weight the results by
            # the total number of frames read.  This is needed to keep the averages correct.
            # NB:  The weighting is the same for the x and y polarizations because of how
            # the data are packed in TBW
            for j in xrange(0, masterSpectra.shape[1], 4):
                tempData = numpy.zeros((4, data.shape[1]), dtype=data.dtype)
                tempData = data[j:j + 4, :]

                freq, tempSpec = fxc.SpecMaster(tempData,
                                                LFFT=LFFT,
                                                window=window,
                                                verbose=args.verbose,
                                                clip_level=args.clip_level)
                masterSpectra[i, j:j + 4, :] = tempSpec

            # Compute the 1 ms average power and the data range within each 1 ms window
            subSize = 1960
            nsegments = data.shape[1] // subSize

            print(
                "Computing average power and data range in %i-sample intervals"
                % subSize)
            pb = ProgressBar(max=data.shape[0])
            avgPower = numpy.zeros((antpols, nsegments), dtype=numpy.float32)
            dataRange = numpy.zeros((antpols, nsegments, 3), dtype=numpy.int16)
            for s in xrange(data.shape[0]):
                for p in xrange(nsegments):
                    subData = data[s, (p * subSize):((p + 1) * subSize)]
                    avgPower[s, p] = numpy.mean(numpy.abs(subData))
                    dataRange[s, p, 0] = subData.min()
                    dataRange[s, p, 1] = subData.mean()
                    dataRange[s, p, 2] = subData.max()

                    ### This little block here looks for likely saturation events and save
                    ### the raw time series around them into individual NPZ files for stand
                    ### number 14.
                    #if (dataRange[s,p,0] < -1000 or dataRange[s,p,0] > 1000) and antennas[s].stand.id == 14:
                    #subData = data[s,((p-1)*1960):((p+2)*1960)]
                    #satFileName = 'stand-14-pol-%i-%i.npz' % (antennas[s].pol, (p-1)*1960)
                    #print(satFileName)
                    #numpy.savez(satFileName, start=(p-1)*1960, data=subData)
                pb.inc(amount=1)
                if pb.amount != 0 and pb.amount % 10 == 0:
                    sys.stdout.write(pb.show() + '\r')
                    sys.stdout.flush()
            sys.stdout.write(pb.show() + '\r')
            sys.stdout.write('\n')
            sys.stdout.flush()

            # We don't really need the data array anymore, so delete it
            del (data)
            os.unlink('temp.mmap')

        # Apply the cable loss corrections, if requested
        if True:
            for s in xrange(masterSpectra.shape[1]):
                currGain = antennas[s].cable.gain(freq)
                for c in xrange(masterSpectra.shape[0]):
                    masterSpectra[c, s, :] /= currGain

        # Now that we have read through all of the chunks, perform the final averaging by
        # dividing by all of the chunks
        spec = masterSpectra.mean(axis=0)

        # Estimate the dipole resonance frequencies
        print("Computing dipole resonance frequencies")
        pb = ProgressBar(max=spec.shape[0])
        resFreq = numpy.zeros(spec.shape[0])
        toCompare = numpy.where((freq > 31e6) & (freq < 70e6))[0]
        for i in xrange(spec.shape[0]):
            bestOrder = 0
            bestRMS = 1e34
            for j in xrange(3, 12):
                coeff = numpy.polyfit(freq[toCompare] / 1e6,
                                      numpy.log10(spec[i, toCompare]) * 10, j)
                fit = numpy.polyval(coeff, freq[toCompare] / 1e6)
                rms = ((fit - numpy.log10(spec[i, toCompare]) * 10)**2).sum()
                if rms < bestRMS:
                    bestOrder = j
                    bestRMS = rms

            coeff = numpy.polyfit(freq[toCompare] / 1e6,
                                  numpy.log10(spec[i, toCompare]) * 10,
                                  bestOrder)
            fit = numpy.polyval(coeff, freq[toCompare] / 1e6)
            try:
                resFreq[i] = freq[toCompare[numpy.where(
                    fit == fit.max())[0][0]]] / 1e6
            except:
                pass

            pb.inc(amount=1)
            if pb.amount != 0 and pb.amount % 10 == 0:
                sys.stdout.write(pb.show() + '\r')
                sys.stdout.flush()
        sys.stdout.write(pb.show() + '\r')
        sys.stdout.write('\n')
        sys.stdout.flush()

        numpy.savez("%s.npz" % base,
                    date=str(beginDate),
                    freq=freq,
                    masterSpectra=masterSpectra,
                    resFreq=resFreq,
                    avgPower=avgPower,
                    dataRange=dataRange,
                    ssmifContents=ssmifContents)
    else:
        dataDict = numpy.load("%s.npz" % base)
        freq = dataDict['freq']
        masterSpectra = dataDict['masterSpectra']
        resFreq = dataDict['resFreq']

        # Now that we have read through all of the chunks, perform the final averaging by
        # dividing by all of the chunks
        spec = masterSpectra.mean(axis=0)

    # Create a good template spectra
    specTemplate = numpy.median(spec, axis=0)
    specDiff = numpy.zeros(spec.shape[0])
    toCompare = numpy.where((freq > 32e6) & (freq < 50e6))[0]
    print(len(toCompare))
    for i in xrange(spec.shape[0]):
        specDiff[i] = (spec[i, toCompare] / specTemplate[toCompare]).mean()
    specDiff = numpy.where(specDiff < 2, specDiff, 2)

    # Get the station
    standPos = numpy.array([[ant.stand.x, ant.stand.y, ant.stand.z]
                            for ant in antennas if ant.pol == 0])

    # Plots
    if args.verbose:
        fig = plt.figure()
        ax1 = fig.add_subplot(1, 2, 1)
        ax1.scatter(standPos[:, 0],
                    standPos[:, 1],
                    c=specDiff[0::2],
                    s=40.0,
                    alpha=0.50)
        ## Add the fence as a dashed line
        ax1.plot([-59.827, 59.771, 60.148, -59.700, -59.827],
                 [59.752, 59.864, -59.618, -59.948, 59.752],
                 linestyle='--',
                 color='k')
        ## Add the shelter
        ax1.plot([55.863, 58.144, 58.062, 55.791, 55.863],
                 [45.946, 45.999, 51.849, 51.838, 45.946],
                 linestyle='-',
                 color='k')
        ## Set the limits to just zoom in on the main stations
        ax1.set_xlim([-65, 65])
        ax1.set_ylim([-65, 65])

        ax2 = fig.add_subplot(1, 2, 2)
        ax2.plot(freq / 1e6, numpy.log10(specTemplate) * 10, alpha=0.50)

        print("RBW: %.1f Hz" % (freq[1] - freq[0]))
        plt.show()
Example #21
def main(args):
    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            station = metabundle.get_station(args.metadata, apply_sdm=True)
    else:
        station = stations.lwana
    antennas = []
    for a in station.antennas:
        if a.digitizer != 0:
            antennas.append(a)

    # Length of the FFT
    LFFT = args.fft_length

    idf = LWA1DataFile(args.filename)

    nFramesFile = idf.get_info('nframe')
    srate = idf.get_info('sample_rate')
    antpols = len(antennas)

    # Offset in frames for beampols beam/tuning/pol. sets
    args.skip = idf.offset(args.skip)

    # Make sure that the file chunk size is an integer multiple
    # of the FFT length so that no data gets dropped.  This needs to
    # take into account the number of antpols in the data, the FFT length,
    # and the number of samples per frame.
    maxFrames = int(
        (2 * 10 * 750) / antpols * 512 / float(LFFT)) * LFFT / 512 * antpols

    # Number of frames to integrate over
    nFrames = int(args.average * srate / 512 * antpols)
    nFrames = int(
        1.0 * nFrames / antpols * 512 / float(LFFT)) * LFFT / 512 * antpols
    args.average = 1.0 * nFrames / antpols * 512 / srate

    # Number of remaining chunks
    nChunks = int(math.ceil(1.0 * (nFrames) / maxFrames))

    # Read in the first frame and get the date/time of the first sample
    # of the frame.  This is needed to get the list of stands.
    beginDate = ephem.Date(
        unix_to_utcjd(idf.get_info('start_time')) - DJD_OFFSET)
    central_freq = idf.get_info('freq1')

    # File summary
    print("Filename: %s" % args.filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Ant/Pols: %i" % antpols)
    print("Sample Rate: %i Hz" % srate)
    print("Tuning Frequency: %.3f Hz" % central_freq)
    print("Frames: %i (%.3f s)" %
          (nFramesFile, 1.0 * nFramesFile / antpols * 512 / srate))
    print("---")
    print("Offset: %.3f s (%i frames)" %
          (args.skip, args.skip * srate * antpols / 512))
    print("Integration: %.3f s (%i frames; %i frames per stand/pol)" %
          (args.average, nFrames, nFrames / antpols))
    print("Chunks: %i" % nChunks)

    # Sanity check
    if args.skip * srate * antpols / 512 > nFramesFile:
        raise RuntimeError("Requested offset is greater than file length")
    if nFrames > (nFramesFile - args.skip * srate * antpols / 512):
        raise RuntimeError(
            "Requested integration time+offset is greater than file length")

    # Setup the window function to use
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window

    # Master loop over all of the file chunks
    masterWeight = numpy.zeros((nChunks, antpols, LFFT))
    masterSpectra = numpy.zeros((nChunks, antpols, LFFT))

    for i in range(nChunks):
        print("Working on chunk #%i of %i" % (i + 1, nChunks))

        try:
            readT, t, data = idf.read(args.average / nChunks)
        except Exception as e:
            print("Error: %s" % str(e))
            continue

        # Calculate the spectra for this block of data and then weight the results by
        # the total number of frames read.  This is needed to keep the averages correct.

        freq, tempSpec = fxc.SpecMaster(data,
                                        LFFT=LFFT,
                                        window=window,
                                        pfb=args.pfb,
                                        verbose=args.verbose,
                                        sample_rate=srate)
        for stand in range(tempSpec.shape[0]):
            masterSpectra[i, stand, :] = tempSpec[stand, :]
            masterWeight[i, stand, :] = int(readT * srate / LFFT)

    # Apply the cable loss corrections, if requested
    if False:
        for s in range(masterSpectra.shape[1]):
            currGain = antennas[s].cable.gain(freq)
            for c in range(masterSpectra.shape[0]):
                masterSpectra[c, s, :] /= currGain

    # Now that we have read through all of the chunks, perform the final averaging by
    # dividing by all of the chunks
    spec = numpy.squeeze(
        (masterWeight * masterSpectra).sum(axis=0) / masterWeight.sum(axis=0))

    # Put the frequencies in the best units possible
    freq += central_freq
    freq, units = _best_freq_units(freq)

    # Deal with the `keep` options
    if args.keep == 'all':
        antpolsDisp = int(numpy.ceil(antpols / 20))
        js = [i for i in range(antpols)]
    else:
        antpolsDisp = int(numpy.ceil(len(args.keep) * 2 / 20))
        if antpolsDisp < 1:
            antpolsDisp = 1

        js = []
        for k in args.keep:
            for i, ant in enumerate(antennas):
                if ant.stand.id == k:
                    js.append(i)

    nPlot = len(js)
    if nPlot < 20:
        if nPlot % 4 == 0 and nPlot != 4:
            figsY = 4
        else:
            figsY = 2
        figsX = int(numpy.ceil(1.0 * nPlot / figsY))
    else:
        figsY = 4
        figsX = 5
    figsN = figsX * figsY
    for i in range(antpolsDisp):
        # Normal plotting
        fig = plt.figure()
        for k in range(i * figsN, i * figsN + figsN):
            try:
                j = js[k]
                currSpectra = numpy.squeeze(numpy.log10(spec[j, :]) * 10.0)
            except IndexError:
                break
            ax = fig.add_subplot(figsX, figsY, (k % figsN) + 1)
            ax.plot(
                freq,
                currSpectra,
                label='Stand: %i, Pol: %i (Dig: %i)' %
                (antennas[j].stand.id, antennas[j].pol, antennas[j].digitizer))

            # If there is more than one chunk, plot the difference between the global
            # average and each chunk
            if nChunks > 1 and not args.disable_chunks:
                for k in range(nChunks):
                    # Some files are padded by zeros at the end and, thus, carry no
                    # weight in the average spectra.  Skip over those.
                    if masterWeight[k, j, :].sum() == 0:
                        continue

                    # Calculate the difference between the spectra and plot
                    subspectra = numpy.squeeze(
                        numpy.log10(masterSpectra[k, j, :]) * 10.0)
                    diff = subspectra - currSpectra
                    ax.plot(freq, diff)

            ax.set_title('Stand: %i (%i); Dig: %i [%i]' %
                         (antennas[j].stand.id, antennas[j].pol,
                          antennas[j].digitizer, antennas[j].combined_status))
            ax.set_xlabel('Frequency [%s]' % units)
            ax.set_ylabel('P.S.D. [dB/RBW]')
            ax.set_ylim([-10, 30])

        # Save spectra image if requested
        if args.output is not None:
            base, ext = os.path.splitext(args.output)
            outFigure = "%s-%02i%s" % (base, i + 1, ext)
            fig.savefig(outFigure)

        plt.draw()

    print("RBW: %.4f %s" % ((freq[1] - freq[0]), units))
    plt.show()
Example #22
def main(args):
    # The task at hand
    filename = args.filename

    # The station
    if args.metadata is not None:
        site = parse_ssmif(args.metadata)
        ssmifContents = open(args.metadata).readlines()
    else:
        site = lwa1
        ssmifContents = open(os.path.join(dataPath,
                                          'lwa1-ssmif.txt')).readlines()
    observer = site.get_observer()
    antennas = site.antennas

    # The file's parameters
    fh = open(filename, 'rb')

    nFramesFile = os.path.getsize(filename) // tbn.FRAME_SIZE
    srate = tbn.get_sample_rate(fh)
    antpols = len(antennas)
    fh.seek(0)
    if srate < 1000:
        fh.seek(len(antennas) * 4 * tbn.FRAME_SIZE)
        srate = tbn.get_sample_rate(fh)
        antpols = len(antennas)
        fh.seek(len(antennas) * 4 * tbn.FRAME_SIZE)

    # Reference antenna
    ref = args.reference
    foundRef = False
    for i, a in enumerate(antennas):
        if a.stand.id == ref and a.pol == 0:
            refX = i
            foundRef = True
        elif a.stand.id == ref and a.pol == 1:
            refY = i
        else:
            pass
    if not foundRef:
        raise RuntimeError("Cannot file Stand #%i" % ref)

    # Integration time (seconds and frames)
    tInt = args.average
    nFrames = int(round(tInt * srate / 512 * antpols))
    tInt = nFrames / antpols * 512 / srate

    # Total run length
    nChunks = int(1.0 * nFramesFile / antpols * 512 / srate / tInt)

    # Read in the first frame and get the date/time of the first sample
    # of the frame.  This is needed to get the list of stands.
    junkFrame = tbn.read_frame(fh)
    fh.seek(-tbn.FRAME_SIZE, 1)
    startFC = junkFrame.header.frame_count
    try:
        central_freq = junkFrame.central_freq
    except AttributeError:
        from lsl.common.dp import fS
        central_freq = fS * junkFrame.header.second_count / 2**32
    beginDate = junkFrame.time.datetime

    observer.date = beginDate
    srcs = [
        ephem.Sun(),
    ]
    for line in _srcs:
        srcs.append(ephem.readdb(line))

    for i in xrange(len(srcs)):
        srcs[i].compute(observer)

        if srcs[i].alt > 0:
            print("source %s: alt %.1f degrees, az %.1f degrees" %
                  (srcs[i].name, srcs[i].alt * 180 / numpy.pi,
                   srcs[i].az * 180 / numpy.pi))

    # File summary
    print("Filename: %s" % filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Ant/Pols: %i" % antpols)
    print("Sample Rate: %i Hz" % srate)
    print("Tuning Frequency: %.3f Hz" % central_freq)
    print("Frames: %i (%.3f s)" %
          (nFramesFile, 1.0 * nFramesFile / antpols * 512 / srate))
    print("---")
    print("Integration: %.3f s (%i frames; %i frames per stand/pol)" %
          (tInt, nFrames, nFrames // antpols))
    print("Chunks: %i" % nChunks)

    # Create the FrameBuffer instance
    buffer = TBNFrameBuffer(stands=range(1, antpols // 2 + 1), pols=[0, 1])

    # Create the phase average and times
    LFFT = 512
    times = numpy.zeros(nChunks, dtype=numpy.float64)
    simpleVis = numpy.zeros((nChunks, antpols), dtype=numpy.complex64)
    central_freqs = numpy.zeros(nChunks, dtype=numpy.float64)

    # Go!
    k = 0
    for i in xrange(nChunks):
        # Find out how many frames remain in the file.  If this number is larger
        # than the maximum number of frames we can work with at a time (maxFrames),
        # only deal with that chunk
        framesRemaining = nFramesFile - k
        if framesRemaining > nFrames:
            framesWork = nFrames
            data = numpy.zeros((antpols, framesWork // antpols * 512),
                               dtype=numpy.complex64)
        else:
            framesWork = framesRemaining + antpols * buffer.nsegments
            data = numpy.zeros((antpols, framesWork // antpols * 512),
                               dtype=numpy.complex64)
        print("Working on chunk %i, %i frames remaining" %
              (i + 1, framesRemaining))

        count = [0 for a in xrange(len(antennas))]

        j = 0
        fillsWork = framesWork // antpols
        # Inner loop that actually reads the frames into the data array
        done = False
        while j < fillsWork:
            cFrames = deque()
            for l in xrange(len(antennas)):
                try:
                    cFrames.append(tbn.read_frame(fh))
                    k = k + 1
                except errors.EOFError:
                    ## Exit at the EOF
                    done = True
                    break
                except errors.SyncError:
                    #print("WARNING: Mark 5C sync error on frame #%i" % (int(fh.tell())/tbn.FRAME_SIZE-1))
                    ## Exit at the first sync error
                    done = True
                    break

            buffer.append(cFrames)
            cFrames = buffer.get()

            if cFrames is None:
                continue

            for cFrame in cFrames:
                stand, pol = cFrame.header.id

                # In the current configuration, stands start at 1 and go up to 260.  So, we
                # can use this little trick to populate the data array
                aStand = 2 * (stand - 1) + pol

                # Save the time
                if j == 0 and aStand == 0:
                    times[i] = cFrame.time
                    try:
                        central_freqs[i] = cFrame.central_freq
                    except AttributeError:
                        central_freqs[
                            i] = fS * cFrame.header.second_count / 2**32
                    if i > 0:
                        if central_freqs[i] != central_freqs[i - 1]:
                            print(
                                "Frequency change from %.3f to %.3f MHz at chunk %i"
                                % (central_freqs[i - 1] / 1e6,
                                   central_freqs[i] / 1e6, i + 1))

                data[aStand, count[aStand] * 512:(count[aStand] + 1) *
                     512] = cFrame.payload.data

                # Update the counters so that we can average properly later on
                count[aStand] = count[aStand] + 1

            j += 1

            if done:
                break

        if done:
            break

        # Time-domain blanking and cross-correlation with the outlier
        simpleVis[i, :] = fringe.Simple(data, refX, refY, args.clip)

    fh.close()

    # Save the data
    outname = os.path.split(filename)[1]
    outname = os.path.splitext(outname)[0]
    outname = "%s-ref%03i-multi-vis.npz" % (outname, args.reference)
    numpy.savez(outname,
                ref=ref,
                refX=refX,
                refY=refY,
                tInt=tInt,
                central_freqs=central_freqs,
                times=times,
                simpleVis=simpleVis,
                ssmifContents=ssmifContents)
Example #23
def main(args):
    reference = args.ref_source
    filenames = args.filename
    
    #
    # Gather the station meta-data from its various sources
    #
    dataDict = numpy.load(filenames[0])
    ssmifContents = dataDict['ssmifContents']
    if ssmifContents.shape == ():
        site = lwa1
    else:
        fd, tempSSMIF = tempfile.mkstemp(suffix='.txt', prefix='ssmif-')
        fh = os.fdopen(fd, 'w')
        for line in ssmifContents:
            fh.write('%s\n' % line)
        fh.close()
        
        site = parse_ssmif(tempSSMIF)
        os.unlink(tempSSMIF)
    print(site.name)
    observer = site.get_observer()
    antennas = site.antennas
    nAnts = len(antennas)
    
    #
    # Find the reference source
    #
    srcs = [ephem.Sun(),]
    for line in _srcs:
        srcs.append( ephem.readdb(line) )
        
    refSrc = None
    for i in range(len(srcs)):
        if srcs[i].name == reference:
            refSrc = srcs[i]
            
    if refSrc is None:
        print("Cannot find reference source '%s' in source list, aborting." % reference)
        sys.exit(1)
        
    #
    # Parse the input files
    #
    data = []
    time = []
    freq = []
    oldRef = None
    oldMD5 = None
    maxTime = -1
    for filename in filenames:
        dataDict = numpy.load(filename)
        
        ref_ant = dataDict['ref'].item()
        refX   = dataDict['refX'].item()
        refY   = dataDict['refY'].item()
        tInt = dataDict['tInt'].item()
        
        times = dataDict['times']
        phase = dataDict['simpleVis']
        
        central_freq = dataDict['central_freq'].item()
        
        ssmifContents = dataDict['ssmifContents']
        
        beginDate = datetime.utcfromtimestamp(times[0])
        observer.date = beginDate.strftime("%Y/%m/%d %H:%M:%S")
        
        # Make sure we aren't mixing reference antennas
        if oldRef is None:
            oldRef = ref_ant
        if ref_ant != oldRef:
            raise RuntimeError("Dataset has different reference antennas than previous (%i != %i)" % (ref_ant, oldRef))
            
        # Make sure we aren't mixing SSMIFs
        ssmifMD5 = md5sum(ssmifContents)
        if oldMD5 is None:
            oldMD5 = ssmifMD5
        if ssmifMD5 != oldMD5:
            raise RuntimeError("Dataset has different SSMIF than previous (%s != %s)" % (ssmifMD5, oldMD5))
            
        print("Central Frequency: %.3f Hz" % central_freq)
        print("Start date/time: %s" % beginDate.strftime("%Y/%m/%d %H:%M:%S"))
        print("Integration Time: %.3f s" % tInt)
        print("Number of time samples: %i (%.3f s)" % (phase.shape[0], phase.shape[0]*tInt))
        
        # Calculate the fringe rates of all sources - for display purposes only
        allRates = {}
        for src in srcs:
            src.compute(observer)
            if src.alt > 0:
                fRate = getFringeRate(antennas[0], antennas[refX], observer, src, central_freq)
                allRates[src.name] = fRate
        print("Starting Fringe Rates:")
        for name in allRates.keys():
            fRate = allRates[name]
            print(" %-4s: %+6.3f mHz" % (name, fRate*1e3))
            
        freq.append( central_freq )
        time.append( numpy.array([unix_to_utcjd(t) for t in times]) )
        data.append( phase )
        
        ## Track the maximum `time` length so that we can pad them
        ## all out to the same size later
        if time[-1].size > maxTime:
            maxTime = time[-1].size
            
    # Pad with NaNs to the same length
    for i in range(len(filenames)):
        nTimes = time[i].size
        
        if nTimes < maxTime:
            ## Pad 'time'
            newTime = numpy.zeros(maxTime, dtype=time[i].dtype)
            newTime += numpy.nan
            newTime[0:nTimes] = time[i][:]
            time[i] = newTime
            
            ## Pad 'data'
            newData = numpy.zeros((maxTime, data[i].shape[1]), dtype=data[i].dtype)
            newData += numpy.nan
            newData[0:nTimes,:] = data[i][:,:]
            data[i] = newData
            
    # Convert to 2-D and 3-D numpy arrays
    freq = numpy.array(freq)
    time = numpy.array(time)
    data = numpy.array(data)
    
    #
    # Sort the data by frequency
    #
    order = numpy.argsort(freq)
    freq = numpy.take(freq, order)
    time = numpy.take(time, order, axis=0)
    data = numpy.take(data, order, axis=0)
    
    # 
    # Find the fringe stopping averaging times
    #
    ls = {}
    for fStart in range(20, 90, 5):
        fStop = fStart + 5
        l = numpy.where( (freq >= fStart*1e6) & (freq < fStop*1e6) )[0]
        if len(l) > 0:
            ls[fStart] = l
            
    ms = {}
    for fStart in ls.keys():
        m = 1e6
        for l in ls[fStart]:
            good = numpy.where( numpy.isfinite(time[l,:]) == 1 )[0]
            if len(good) < m:
                m = len(good)
        ms[fStart] = m
        
    print("Minimum fringe stopping times:")
    for fStart in sorted(ls.keys()):
        fStop = fStart + 5
        m = ms[fStart]
        print("  >=%i Mhz and <%i MHz: %.3f s" % (fStart, fStop, m*tInt,))
        
    #
    # Report on progress and data coverage
    #
    nFreq = len(freq)
    
    print("Reference stand #%i (X: %i, Y: %i)" % (ref_ant, refX, refY))
    print("-> X: %s" % str(antennas[refX]))
    print("-> Y: %s" % str(antennas[refY]))
    
    print("Using a set of %i frequencies" % nFreq)
    print("->", freq/1e6)
    
    #
    # Compute source positions/fringe stop and remove the source
    #
    print("Fringe stopping on '%s':" % refSrc.name)
    pbar = ProgressBar(max=freq.size*520)
    
    for i in range(freq.size):
        fq = freq[i]
        
        for j in range(data.shape[2]):
            # Compute the times in seconds relative to the beginning
            times  = time[i,:] - time[i,0]
            times *= 24.0
            times *= 3600.0
            
            # Compute the fringe rates across all time
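            # (a fringe rate is the time derivative of the source's geometric
            # delay scaled by the observing frequency; removing it as a phase
            # ramp "stops" the reference source's fringes)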
            fRate = [None,]*data.shape[1]
            for k in range(data.shape[1]):
                jd = time[i,k]
                
                try:
                    currDate = datetime.utcfromtimestamp(utcjd_to_unix(jd))
                except ValueError:
                    pass
                observer.date = currDate.strftime("%Y/%m/%d %H:%M:%S")
                refSrc.compute(observer)
        
                if j % 2 == 0:
                    fRate[k] = getFringeRate(antennas[j], antennas[refX], observer, refSrc, fq)
                else:
                    fRate[k] = getFringeRate(antennas[j], antennas[refY], observer, refSrc, fq)
                    
            # Create the basis rate and the residual rates
            baseRate = fRate[0]
            residRate = numpy.array(fRate) - baseRate
        
            # Fringe stop to move the source of interest to the DC component
            data[i,:,j] *= numpy.exp(-2j*numpy.pi* baseRate*(times - times[0]))
            data[i,:,j] *= numpy.exp(-2j*numpy.pi*residRate*(times - times[0]))
            
            # Calculate the geometric delay term across all time
            gDelay = [None,]*data.shape[1]
            for k in range(data.shape[1]):
                jd = time[i,k]
                
                try:
                    currDate = datetime.utcfromtimestamp(utcjd_to_unix(jd))
                except ValueError:
                    pass
                observer.date = currDate.strftime("%Y/%m/%d %H:%M:%S")
                refSrc.compute(observer)
                
                az = refSrc.az
                el = refSrc.alt
                if j % 2 == 0:
                    gDelay[k] = getGeoDelay(antennas[j], antennas[refX], az, el, Degrees=False)
                else:
                    gDelay[k] = getGeoDelay(antennas[j], antennas[refY], az, el, Degrees=False)
                    
            # Create the basis delay and the residual delays
            baseDelay = gDelay[0]
            residDelay = numpy.array(gDelay) - baseDelay
            
            # Remove the array geometry
            data[i,:,j] *= numpy.exp(-2j*numpy.pi*fq* baseDelay)
            data[i,:,j] *= numpy.exp(-2j*numpy.pi*fq*residDelay)
            
            pbar.inc()
            sys.stdout.write("%s\r" % pbar.show())
            sys.stdout.flush()
    sys.stdout.write('\n')
    
    # Average down to remove other sources/the correlated sky
    print("Input (pre-averaging) data shapes:")
    print("  time:", time.shape)
    print("  data:", data.shape)
    time = time[:,0]
    
    data2 = numpy.zeros((data.shape[0], data.shape[2]), dtype=data.dtype)
    for j in range(data2.shape[1]):
        for fStart in ls.keys():
            l = ls[fStart]
            m = ms[fStart]
            data2[l,j] = data[l,:m,j].mean(axis=1)
    data = data2
    print("Output (post-averaging) data shapes:")
    print("  time:", time.shape)
    print("  data:", data.shape)

    #
    # Save
    #
    outname = args.output
    outname, ext = os.path.splitext(outname)
    outname = "%s-ref%03i%s" % (outname, ref_ant, ext)
    numpy.savez(outname, ref_ant=ref_ant, refX=refX, refY=refY, freq=freq, time=time, data=data, ssmifContents=ssmifContents)
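
The heart of the fringe-stopping step above is just a complex phase ramp at the computed fringe rate; a minimal, self-contained sketch with synthetic numbers (both `tInt` and `rate` below are assumptions, not values from any dataset):

import numpy

# Minimal sketch of fringe stopping: a visibility oscillating at a known fringe
# rate is multiplied by exp(-2*pi*i*rate*t), which shifts that oscillation to
# the DC (zero-frequency) component of the time series.
tInt = 1.0                                    # integration time in seconds (assumed)
t = numpy.arange(512) * tInt                  # time axis in seconds
rate = 0.015                                  # fringe rate in Hz (assumed)
vis = numpy.exp(2j*numpy.pi*rate*t)           # synthetic fringing visibility
stopped = vis * numpy.exp(-2j*numpy.pi*rate*t)
print(numpy.allclose(stopped, 1.0))           # True - the fringe has been stopped
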
Example #24
def main(args):
    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            station = metabundle.get_station(args.metadata, apply_sdm=True)
    else:
        station = stations.lwa1
    antennas = station.antennas

    # Length of the FFT
    LFFT = args.fft_length

    # Make sure that the file chunk size is an integer multiple
    # of the FFT length so that no data gets dropped
    maxFrames = int((30000 * 260) / float(LFFT)) * LFFT
    # It seems like that would be a good idea, however...  TBW data comes one
    # capture at a time so doing something like this actually truncates data
    # from the last set of stands for the first integration.  So, we really
    # should stick with
    maxFrames = (30000 * 260)

    idf = LWA1DataFile(args.filename)
    if not isinstance(idf, TBWFile):
        raise RuntimeError("File '%s' does not appear to be a valid TBW file" %
                           os.path.basename(args.filename))

    nFrames = idf.get_info('nframe')
    srate = idf.get_info('sample_rate')
    dataBits = idf.get_info('data_bits')
    # The number of ant/pols comes from the station metadata since there is no
    # systematic way to pull it out of the file itself
    antpols = len(antennas)
    nChunks = int(math.ceil(1.0 * nFrames / maxFrames))
    if dataBits == 12:
        nSamples = 400
    else:
        nSamples = 1200

    # Read in the first frame and get the date/time of the first sample
    # of the frame.  This is needed to get the list of stands.
    beginDate = idf.get_info('start_time').datetime

    # File summary
    print("Filename: %s" % args.filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Ant/Pols: %i" % antpols)
    print("Sample Length: %i-bit" % dataBits)
    print("Frames: %i" % nFrames)
    print("Chunks: %i" % nChunks)
    print("===")

    # Setup the window function to use
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window

    # Process the file as a single chunk
    nChunks = 1
    masterSpectra = numpy.zeros((nChunks, antpols, LFFT))
    masterWeight = numpy.zeros((nChunks, antpols, LFFT))

    readT, t, data = idf.read(0.061)

    # Calculate the spectra for this block of data and then weight the results by
    # the total number of frames read.  This is needed to keep the averages correct.
    # NB:  The weighting is the same for the x and y polarizations because of how
    # the data are packed in TBW
    freq, tempSpec = fxc.SpecMaster(data,
                                    LFFT=LFFT,
                                    window=window,
                                    pfb=args.pfb,
                                    verbose=args.verbose)
    for stand in range(masterSpectra.shape[1]):
        masterSpectra[0, stand, :] = tempSpec[stand, :]
        masterWeight[0, stand, :] = int(readT * srate / LFFT)

    # We don't really need the data array anymore, so delete it
    del (data)

    # Apply the cable loss corrections, if requested
    if args.gain_correct:
        for s in range(masterSpectra.shape[1]):
            currGain = antennas[s].cable.gain(freq)
            for c in range(masterSpectra.shape[0]):
                masterSpectra[c, s, :] /= currGain

    # Now that we have read through all of the chunks, perform the final averaging by
    # dividing by all of the chunks
    spec = masterSpectra.mean(axis=0)

    # The plots:  This is set up to show 20 antpols per figure
    if args.gain_correct and args.stack:
        # Stacked spectra - only if cable loss corrections are to be applied
        colors = [
            'blue', 'green', 'red', 'cyan', 'magenta', 'black', 'purple',
            'salmon', 'olive', 'maroon', 'saddlebrown', 'yellowgreen', 'teal',
            'steelblue', 'seagreen', 'slategray', 'mediumorchid', 'lime',
            'dodgerblue', 'darkorange'
        ]

        for f in range(int(numpy.ceil(antpols / 20.))):
            fig = plt.figure()
            ax1 = fig.add_subplot(1, 1, 1)
            for i in range(f * 20, f * 20 + 20):
                currSpectra = numpy.squeeze(numpy.log10(spec[i, :]) * 10.0)
                ax1.plot(freq / 1e6,
                         currSpectra,
                         label='%i,%i' %
                         (antennas[i].stand.id, antennas[i].pol),
                         color=colors[i % 20])

            ax1.set_xlabel('Frequency [MHz]')
            ax1.set_ylabel('P.S.D. [dB/RBW]')
            ax1.set_xlim([20, 88])
            #ax1.set_ylim([10,90])
            leg = ax1.legend(loc=0, ncol=3)
            for l in leg.get_lines():
                l.set_linewidth(1.7)  # the legend line width
    else:
        for f in range(int(numpy.ceil(antpols / 20))):
            # Normal plotting
            fig = plt.figure()
            figsY = 4
            figsX = 5
            fig.subplots_adjust(left=0.06,
                                bottom=0.06,
                                right=0.94,
                                top=0.94,
                                wspace=0.20,
                                hspace=0.50)
            for i in range(f * 20, f * 20 + 20):
                ax = fig.add_subplot(figsX, figsY, (i % 20) + 1)
                try:
                    currSpectra = numpy.squeeze(numpy.log10(spec[i, :]) * 10.0)
                except IndexError:
                    break
                ax.plot(freq / 1e6,
                        currSpectra,
                        label='Stand: %i, Pol: %i (Dig: %i)' %
                        (antennas[i].stand.id, antennas[i].pol,
                         antennas[i].digitizer))

                # If there is more than one chunk, plot the difference between the global
                # average and each chunk
                if nChunks > 1:
                    for j in range(nChunks):
                        # Some files are padded by zeros at the end and, thus, carry no
                        # weight in the average spectra.  Skip over those.
                        if masterWeight[j, i, :].sum() == 0:
                            continue

                        # Calculate the difference between the spectra and plot
                        subspectra = numpy.squeeze(
                            numpy.log10(masterSpectra[j, i, :]) * 10.0)
                        diff = subspectra - currSpectra
                        ax.plot(freq / 1e6, diff)

                ax.set_title(
                    'Stand: %i (%i); Dig: %i [%i]' %
                    (antennas[i].stand.id, antennas[i].pol,
                     antennas[i].digitizer, antennas[i].combined_status))
                ax.set_xlabel('Frequency [MHz]')
                ax.set_ylabel('P.S.D. [dB/RBW]')
                ax.set_xlim([10, 90])
                ax.set_ylim([10, 80])

            # Save spectra image if requested
            if args.output is not None:
                base, ext = os.path.splitext(args.output)
                outFigure = "%s-%02i%s" % (base, f + 1, ext)
                fig.savefig(outFigure)

        plt.draw()

    print("RBW: %.1f Hz" % (freq[1] - freq[0]))
    plt.show()
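
The RBW printed above is simply the channel spacing `freq[1] - freq[0]`; under the real-input convention used in these examples (a 2*LFFT-point transform keeping LFFT positive-frequency channels), that works out to sample_rate / (2*LFFT). A minimal sketch of the arithmetic, with the 196 MHz TBW sampling rate taken as an assumption:

# Minimal sketch: resolution bandwidth for a real-input spectrum with LFFT
# channels spanning 0 .. sample_rate/2.
sample_rate = 196e6        # Hz, assumed TBW sampling rate
LFFT = 4096                # example FFT length
rbw = (sample_rate / 2.0) / LFFT
print("RBW: %.1f Hz" % rbw)   # 23925.8 Hz
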
Example #25
def main(args):
    #
    # Load in the data
    #
    site = parse_ssmif(args.ssmif)
    dataFile = numpy.loadtxt(args.filename)

    #
    # Gather the station meta-data from its various sources
    #
    observer = site.get_observer()
    antennas = site.antennas

    #
    # Calculate the new stretch factors
    #
    output = [[None, None, None, None, None] for ant in antennas]
    for i in range(dataFile.shape[0]):
        ## Parse the line
        stand, ampX, addDelayX, ampY, addDelayY = dataFile[i, :]
        stand = int(stand)

        ## Pull out the cables
        digX, digY = None, None
        cableX, cableY = None, None
        for ant in antennas:
            if ant.stand.id == stand and ant.pol == 0:
                digX = ant.digitizer
                cableX = ant.cable
            elif ant.stand.id == stand and ant.pol == 1:
                digY = ant.digitizer
                cableY = ant.cable

        ## Find the current stretch factor/delay
        origX = cableX.stretch * 1.0
        origY = cableY.stretch * 1.0
        freq = numpy.linspace(35e6, 85e6, 101)
        baseDelayX = cableX.delay(freq, ns=True)
        baseDelayY = cableY.delay(freq, ns=True)

        bestX, bestY = 1e9, 1e9
        stretchX, stretchY = 0.90, 0.90
        for stretch in numpy.linspace(0.90, 1.10, 2001):
            cableX.stretch = stretch
            cableY.stretch = stretch

            newDelayX = cableX.delay(freq, ns=True)
            newDelayY = cableY.delay(freq, ns=True)

            diffX = (newDelayX - baseDelayX).mean() - addDelayX
            diffY = (newDelayY - baseDelayY).mean() - addDelayY

            if numpy.abs(diffX) < bestX:
                bestX = numpy.abs(diffX)
                stretchX = stretch
            if numpy.abs(diffY) < bestY:
                bestY = numpy.abs(diffY)
                stretchY = stretch

        output[digX - 1] = [digX, stretchX, addDelayX, bestX, 9.0]
        output[digY - 1] = [digY, stretchY, addDelayY, bestY, 9.0]

    #
    # Report
    #
    if args.output is not None:
        fh = open(args.output, 'w')
    else:
        fh = sys.stdout

    fh.write("##########################################\n")
    fh.write("#                                        #\n")
    fh.write("# Columns:                               #\n")
    fh.write("# 1) Digitizer                           #\n")
    fh.write("# 2) Cable length stretch factor         #\n")
    fh.write("# 3) Additional delay over previous (ns) #\n")
    fh.write("# 4) Mean delay error in new stretch     #\n")
    fh.write("# 5) The number 9                        #\n")
    fh.write("#                                        #\n")
    fh.write("##########################################\n")
    for entry in output:
        fh.write("%3i  %6.4f  %6.4f  %6.4f  %6.4f\n" % tuple(entry))

    if args.output is not None:
        fh.close()
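
The fit above is a brute-force 1-D search over stretch factors; the same logic can be packaged as a small helper built on `numpy.argmin`. A minimal sketch, assuming a cable object exposing the same `stretch` attribute and `delay(freq, ns=True)` method used in the script (and, unlike the script, restoring the original stretch afterwards):

import numpy

def fit_stretch(cable, add_delay_ns, freq=None):
    """Brute-force search for the stretch factor whose mean delay change
    (relative to the cable's current stretch) best matches add_delay_ns."""
    if freq is None:
        freq = numpy.linspace(35e6, 85e6, 101)
    orig = cable.stretch * 1.0
    base = cable.delay(freq, ns=True)

    trials = numpy.linspace(0.90, 1.10, 2001)
    errors = numpy.zeros(trials.size)
    for i, stretch in enumerate(trials):
        cable.stretch = stretch
        errors[i] = numpy.abs((cable.delay(freq, ns=True) - base).mean() - add_delay_ns)
    cable.stretch = orig          # restore the cable's original stretch factor

    best = numpy.argmin(errors)
    return trials[best], errors[best]
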
Example #26
def main(args):
    # Parse command line
    toMark = numpy.array(args.stand)-1
    
    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            try:
                station = metabundle.get_station(args.metadata, apply_sdm=True)
            except:
                station = metabundleADP.get_station(args.metadata, apply_sdm=True)
    elif args.lwasv:
        station = stations.lwasv
    else:
        station = stations.lwa1
    stands = station.stands
    stands.sort()

    # Load in the stand position data
    data = numpy.zeros((len(stands)//2,3))
    
    i = 0
    for stand in stands[::2]:
        data[i,0] = stand.x
        data[i,1] = stand.y
        data[i,2] = stand.z
        i += 1
        
    # Color-code the stands by their elevation
    color = data[:,2]
    
    # Plot the stands as colored circles
    fig = plt.figure(figsize=(8,8))
    
    ax1 = plt.axes([0.30, 0.30, 0.60, 0.60])
    ax2 = plt.axes([0.30, 0.05, 0.60, 0.15])
    ax3 = plt.axes([0.05, 0.30, 0.15, 0.60])
    ax4 = plt.axes([0.05, 0.05, 0.15, 0.15])
    c = ax1.scatter(data[:,0], data[:,1], c=color, s=40.0, alpha=0.50)	
    ax1.set_xlabel('$\Delta$X [E-W; m]')
    ax1.set_xlim([-80, 80])
    ax1.set_ylabel('$\Delta$Y [N-S; m]')
    ax1.set_ylim([-80, 80])
    ax1.set_title('%s Site:  %.3f$^\circ$N, %.3f$^\circ$W' % (station.name, station.lat*180.0/numpy.pi, -station.long*180.0/numpy.pi))
    
    ax2.scatter(data[:,0], data[:,2], c=color, s=40.0)
    ax2.xaxis.set_major_formatter( NullFormatter() )
    ax2.set_ylabel('$\Delta$Z [m]')
    ax3.scatter(data[:,2], data[:,1], c=color, s=40.0)
    ax3.yaxis.set_major_formatter( NullFormatter() )
    ax3.set_xlabel('$\Delta$Z [m]')
    
    # Explicitly mark those that need to be marked
    if toMark.size != 0:
        for i in range(toMark.size):
            ax1.plot(data[toMark[i],0], data[toMark[i],1], marker='x', linestyle=' ', color='black')
            ax2.plot(data[toMark[i],0], data[toMark[i],2], marker='x', linestyle=' ', color='black')
            ax3.plot(data[toMark[i],2], data[toMark[i],1], marker='x', linestyle=' ', color='black')
            
            if args.label:
                ax1.annotate('%i' % (toMark[i]+1), xy=(data[toMark[i],0], data[toMark[i],1]), xytext=(data[toMark[i],0]+1, data[toMark[i],1]+1))
                
    # Add an elevation colorbar to the right-hand side of the figure
    plt.colorbar(c, cax=ax4, orientation='vertical', ticks=[-2, -1, 0, 1, 2])
    
    # Set the axis limits
    ax1.set_xlim([-60, 60])
    ax1.set_ylim([-60, 60])
    ax2.set_xlim( ax1.get_xlim() )
    ax3.set_ylim( ax1.get_ylim() )
    
    # Show n' save
    plt.show()
    if args.output is not None:
        fig.savefig(args.output)
Example #27
def main(args):
    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            try:
                station = metabundle.get_station(args.metadata, apply_sdm=True)
            except:
                station = metabundleADP.get_station(args.metadata,
                                                    apply_sdm=True)
    elif args.lwasv:
        station = stations.lwasv
    else:
        station = stations.lwa1

    antennas = []
    for ant in station.antennas[0::2]:
        if ant.combined_status == 33:
            antennas.append(ant)
    print("Displaying uv coverage for %i good stands" % len(antennas))

    HA = 0.0
    dec = station.lat * 180.0 / math.pi

    uvw = uvutils.compute_uvw(antennas, HA=HA, dec=dec, freq=args.frequency)
    uvw = numpy.squeeze(uvw[:, :, 0])

    # Coarsely grid the uv data to come up with a rough beam
    grid = numpy.zeros((1 * 240, 1 * 240))
    for i in range(uvw.shape[0]):
        u = round((uvw[i, 0] + 120) * 1)
        v = round((uvw[i, 1] + 120) * 1)
        try:
            grid[u, v] += 1
        except IndexError:
            pass

    # Plot
    # Part 1 - Setup
    fig = plt.figure(figsize=(8, 8))
    ax1 = plt.axes([0.30, 0.30, 0.60, 0.60])
    ax2 = plt.axes([0.30, 0.05, 0.60, 0.15])
    ax3 = plt.axes([0.05, 0.30, 0.15, 0.60])
    ax4 = plt.axes([0.08, 0.08, 0.15, 0.15])
    ax5 = plt.axes([0.32, 0.32, 0.15, 0.15])

    # Part 2 - Beam response (in dB)
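    # (the 2-D FFT of the gridded uv sampling function gives the array's
    # point-spread function; taking |.|**2 and 10*log10() turns it into a
    # power pattern in dB for display)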
    beam = numpy.fft.fft2(grid)
    beam = numpy.fft.fftshift(beam)
    beam = numpy.abs(beam)**2
    beam = numpy.log10(beam) * 10.0
    ax5.imshow(beam[40:200, 40:200],
               interpolation="nearest",
               vmin=numpy.median(beam),
               vmax=beam.max())
    ax5.xaxis.set_major_formatter(NullFormatter())
    ax5.yaxis.set_major_formatter(NullFormatter())

    # Part 3 - uv plane plot
    c = ax1.scatter(uvw[:, 0], uvw[:, 1], c=uvw[:, 2], s=10.0, alpha=0.75)
    d = ax1.scatter(-uvw[:, 0], -uvw[:, 1], c=-uvw[:, 2], s=10.0, alpha=0.75)
    ax1.set_xlabel('u [$\\lambda$]')
    ax1.set_ylabel('v [$\\lambda$]')
    ax1.set_title(
        'UV Coverage for HA=%+.3f$^h$, $\delta$=%+.3f$^\circ$ at %s' %
        (HA, dec, station.name))

    # Part 4 - uw plane plot
    ax2.scatter(uvw[:, 0], uvw[:, 2], c=uvw[:, 2], s=10.0)
    ax2.scatter(-uvw[:, 0], -uvw[:, 2], c=-uvw[:, 2], s=10.0)
    ax2.xaxis.set_major_formatter(NullFormatter())
    ax2.set_ylabel('w [$\\lambda$]')

    # Part 5 - wv plane plot
    ax3.scatter(uvw[:, 2], uvw[:, 1], c=uvw[:, 2], s=10.0)
    ax3.scatter(-uvw[:, 2], -uvw[:, 1], c=-uvw[:, 2], s=10.0)
    ax3.yaxis.set_major_formatter(NullFormatter())
    ax3.set_xlabel('w [$\\lambda$]')

    # Part 6 - Histogram of uvw distances in lambda
    rad = numpy.zeros(uvw.shape[0])
    for i in range(rad.shape[0]):
        rad[i] = math.sqrt(uvw[i, 0]**2.0 + uvw[i, 1]**2.0 + uvw[i, 2]**2.0)
    try:
        ax4.hist(rad, 20)
    except TypeError:
        ## I don't know why this happens
        pass
    ax4.set_xlabel('uvw Radius [$\lambda$]')
    ax4.set_ylabel('Baselines')

    # Plot adjustment
    xlim = ax1.get_xlim()
    ylim = ax1.get_ylim()
    ax1.set_xlim([
        numpy.floor(xlim[0] / 25.0) * 25.0,
        numpy.ceil(xlim[1] / 25.0) * 25.0
    ])
    ax1.set_ylim([
        numpy.floor(ylim[0] / 25.0) * 25.0,
        numpy.ceil(ylim[1] / 25.0) * 25.0
    ])

    ax2.set_xlim(ax1.get_xlim())
    ax2.yaxis.set_major_locator(MaxNLocator(nbins=4))

    ax3.set_ylim(ax1.get_ylim())
    ax3.xaxis.set_major_locator(MaxNLocator(nbins=4))

    xlim = ax4.get_xlim()
    ylim = ax4.get_ylim()
    ax4.set_xlim([
        numpy.floor(xlim[0] / 25.0) * 25.0,
        numpy.ceil(xlim[1] / 25.0) * 25.0
    ])
    ax4.set_ylim([
        numpy.floor(ylim[0] / 5.e3) * 5.e3,
        numpy.ceil(ylim[1] / 5.e3) * 5.e3
    ])
    ax4.xaxis.set_major_locator(MaxNLocator(nbins=4))
    ax4.yaxis.set_major_locator(MaxNLocator(nbins=4))

    # Show n' save
    plt.show()
    if args.output is not None:
        fig.savefig(args.output)
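
The coarse gridding followed by a 2-D FFT above is a quick way to preview the array's dirty beam; a minimal standalone sketch of the same idea using `numpy.histogram2d` on synthetic uv points (all numbers below are arbitrary assumptions):

import numpy

# Minimal sketch: grid a set of uv samples onto a regular grid and FFT the
# sampling function to get a rough point-spread function (dirty beam).
rng = numpy.random.default_rng(42)
u = rng.uniform(-100, 100, size=500)          # u coordinates [wavelengths]
v = rng.uniform(-100, 100, size=500)          # v coordinates [wavelengths]

grid, _, _ = numpy.histogram2d(u, v, bins=240, range=[[-120, 120], [-120, 120]])

beam = numpy.abs(numpy.fft.fftshift(numpy.fft.fft2(grid)))**2
beam_dB = 10.0 * numpy.log10(beam + 1e-12)    # small offset avoids log10(0)
print(beam_dB.shape)                          # (240, 240)
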
Example #28
def main(args):
    # Parse command line options
    filename = args.filename

    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            station = metabundleADP.get_station(args.metadata, apply_sdm=True)
    else:
        station = stations.lwasv
    antennas = station.antennas

    idf = LWASVDataFile(filename)
    if not isinstance(idf, TBFFile):
        raise RuntimeError("File '%s' does not appear to be a valid TBF file" %
                           os.path.basename(filename))

    jd = idf.get_info('start_time').jd
    date = idf.get_info('start_time').datetime
    nFpO = idf.get_info('nchan') // 12
    sample_rate = idf.get_info('sample_rate')
    nInts = idf.get_info('nframe') // nFpO

    # Get valid stands for both polarizations
    goodX = []
    goodY = []
    for i in range(len(antennas)):
        ant = antennas[i]
        if ant.combined_status != 33 and not args.all:
            pass
        else:
            if ant.pol == 0:
                goodX.append(ant)
            else:
                goodY.append(ant)

    # Now combine both lists to come up with stands that
    # are in both so we can form the cross-polarization
    # products if we need to
    good = []
    for antX in goodX:
        for antY in goodY:
            if antX.stand.id == antY.stand.id:
                good.append(antX.digitizer - 1)
                good.append(antY.digitizer - 1)

    # Report on the valid stands found.  This is a little verbose,
    # but nice to see.
    print("Found %i good stands to use" % (len(good) // 2, ))
    for i in good:
        print("%3i, %i" % (antennas[i].stand.id, antennas[i].pol))

    # Number of frames to read in at once and average
    nFrames = min([int(args.avg_time * sample_rate), nInts])
    args.offset = idf.offset(args.offset)
    nSets = idf.get_info('nframe') // nFpO // nFrames
    nSets = nSets - int(args.offset * sample_rate) // nFrames

    central_freq = idf.get_info('freq1')
    central_freq = central_freq[len(central_freq) // 2]

    print("Data type:  %s" % type(idf))
    print("Samples per observations: %i" % nFpO)
    print("Sampling rate: %i Hz" % sample_rate)
    print("Tuning frequency: %.3f Hz" % central_freq)
    print("Captures in file: %i (%.3f s)" % (nInts, nInts / sample_rate))
    print("==")
    print("Station: %s" % station.name)
    print("Date observed: %s" % date)
    print("Julian day: %.5f" % jd)
    print("Offset: %.3f s (%i frames)" %
          (args.offset, args.offset * sample_rate))
    print("Integration Time: %.3f s" % (nFrames / sample_rate))
    print("Number of integrations in file: %i" % nSets)

    # Make sure we don't try to do too many sets
    if args.samples > nSets:
        args.samples = nSets

    # Loop over chunks of 100 integrations to make sure that we don't overflow
    # the FITS IDI memory buffer
    s = 0
    leftToDo = args.samples
    basename = os.path.split(filename)[1]
    basename, ext = os.path.splitext(basename)
    while leftToDo > 0:
        fitsFilename = "%s.FITS_%i" % (
            basename,
            (s + 1),
        )

        if leftToDo > 100:
            chunk = 100
        else:
            chunk = leftToDo

        process_chunk(idf,
                      station,
                      good,
                      fitsFilename,
                      int_time=args.avg_time,
                      pols=args.products,
                      chunk_size=chunk)

        s += 1
        leftToDo = leftToDo - chunk

    idf.close()
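
The `while leftToDo > 0` loop above implements a simple chunking pattern; a minimal standalone sketch of just that bookkeeping (the 100-integration limit mirrors the script, everything else is illustrative):

# Minimal sketch: split `total` integrations into groups of at most `chunk_size`
# so that no single FITS-IDI file gets too large.
def chunk_counts(total, chunk_size=100):
    counts = []
    while total > 0:
        chunk = min(total, chunk_size)
        counts.append(chunk)
        total -= chunk
    return counts

print(chunk_counts(250))   # [100, 100, 50]
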
Example #29
def main(args):
    # Divy up the command line arguments
    filename = args[0]
    source = args[1]
    startDate = args[2]
    startTime = args[3]
    duration  = float(args[4])
    
    year, month, day = startDate.split('/', 2)
    year = int(year)
    month = int(month)
    day = int(day)
    
    hour, minute, second = startTime.split(':', 2)
    hour = int(hour)
    minute = int(minute)
    second = int(second)
        
    tStart = _MST.localize(datetime(year, month, day, hour, minute, second))
    tStart = tStart.astimezone(_UTC)
    
    # Load the SSMIF
    station = stations.parse_ssmif(filename)

    # Gather the necessary information to figure out where things are
    observer = station.get_observer()
    antennas = station.antennas

    # Find the "good" antennas to use
    digs    = numpy.array([ant.digitizer  for ant in antennas])
    ants    = numpy.array([ant.id         for ant in antennas])
    stands  = numpy.array([ant.stand.id   for ant in antennas])
    pols    = numpy.array([ant.pol        for ant in antennas])
    antStat = numpy.array([ant.status     for ant in antennas])
    feeStat = numpy.array([ant.fee.status for ant in antennas])

    badStands = numpy.where( antStat != 3 )[0]
    badFees   = numpy.where( feeStat != 3 )[0]
    bad = numpy.where( (stands > 256) | (antStat != 3) | (feeStat != 3) )[0]
    ## print("Number of bad stands:   %3i" % len(badStands))
    ## print("Number of bad FEEs:     %3i" % len(badFees))
    ## print("---------------------------")
    ## print("Total number bad inuts: %3i" % len(bad))
    ## print(" ")
    
    # Build the source list
    srcs = [ephem.Sun(), ephem.Jupiter(),]
    for line in _srcs:
        srcs.append( ephem.readdb(line) )
        
    # Identify the source to track
    refSource  = None
    for i in range(len(srcs)):
        if srcs[i].name.lower() == source.lower():
            refSource = srcs[i]
            source = refSource.name
    
    # Make sure we have a source to track
    if refSource is None:
        print("Unknown source '%s', quitting" % source)
        sys.exit(1)
    
    print("""#!/bin/bash
    
#
# Source tracking script for %s starting at %s
# -> tuning frequency is %.3f Hz
# -> track duration is %.3f hours
# -> update interval is %.3f minutes
#

""" % (source, tStart.astimezone(_MST), central_freq, duration, tStep))
    
    # Create the DFT files and build the script
    nSteps = int(numpy.ceil(duration * 60 / 4))
    stepSize = timedelta(0, int(tStep*60), int((tStep*60*1000000) % 1000000))
    for s in range(nSteps):
        # Compute the source location half-way into the step
        tBeam = tStart + timedelta(0, int(tStep*60/2), int((tStep*60/2*1000000) % 1000000))
        observer.date = tBeam.strftime("%Y/%m/%d %H:%M:%S")
        refSource.compute(observer)
        
        pointingAz = refSource.az  * 180.0 / numpy.pi
        pointingEl = refSource.alt * 180.0 / numpy.pi
        
        # Compute the delays
        delays = calc_delay(antennas, freq=central_freq, azimuth=pointingAz, elevation=pointingEl)
        delays *= 1e9
        delays = delays.max() - delays
        
        # Save - delays
        import delay
        dftBase = 'delay_beam_%s_%03i_%.0fMHz' % (source, (s+1), central_freq/1e6,)
        junk = delay.list2delayfile('.', dftBase, delays)

        # Compute gains
        gains = [[1.0, 0.0, 0.0, 1.0]]*260 # initialize gain list
        for d in digs[bad]:
            # Digitizers start at 1, list indices at 0
            i = d - 1
            gains[i//2] = [0,0,0,0]

        # Save - gains
        import gain
        gftBase = 'delay_beam_%s_%03i_%.0fMHz' % (source, (s+1), central_freq/1e6,)
        junk = gain.list2gainfile('.', gftBase, gains)
        
        # Output script command - step start
        print("""
#
# Begin step #%i at %s
# -> %s at %.3f az, %.3f el
#
tNow=`date -u +%%s `
tNow=$(($tNow*1))

## Wait for the right time
while [ $tNow -lt %s ]; do
    sleep 5
    tNow=`date -u +%%s `
    tNow=$(($tNow*1))
done

## Send BAM commands
tString=`date `
echo "Sending BAM commands for step #%i at $tString"
""" % ((s+1), tStart.astimezone(_MST), source, pointingAz, pointingEl, tStart.astimezone(_MST).strftime('%s'), (s+1)))

        # Output script command - BAM commands
        for beam in beamsToUse:
            print("""/home/joecraig/MCS/exec/mesix DP_ BAM "%i %s.df %s.gf 1"
sleep 1""" % (beam, dftBase, gftBase))

        # Output script command - step end
        print("""
#
# End step #%i
#
""" % ((s+1),))
        
        # Update time
        tStart = tStart + stepSize
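
The `delays.max() - delays` step above converts geometric delays into the non-negative "delay to apply" convention used by the delay files: the antenna the wavefront reaches last gets zero added delay. A minimal numeric sketch with assumed values:

import numpy

# Minimal sketch of the delays.max() - delays convention: geometric delays (ns)
# for three hypothetical antennas become non-negative delays-to-apply.
geo = numpy.array([120.0, 95.0, 150.0])   # ns, assumed values
to_apply = geo.max() - geo
print(to_apply)                           # [30. 55.  0.]
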
Example #30
def main(args):
    # Break out the files we need
    ssmif = args.ssmif
    filenames = args.filename

    # Setup the LWA station information
    station = parse_ssmif(ssmif)
    antennas = station.antennas

    # Get an observer reader for calculations
    obs = station.get_observer()

    # Setup the beamformer gain and delay variables
    course = numpy.zeros(520)
    fine = numpy.zeros(520)
    gains = numpy.zeros((260, 4))
    gains[:, 0] = 1.0
    gains[:, 3] = 1.0
    for ant in antennas:
        if ant.combined_status != 33:
            stand = (ant.digitizer - 1) // 2
            gains[stand, :] = 0.0

    # Setup the beamformer itself
    dp = SoftwareDP(mode='DRX', filter=7, central_freq=74e6)

    # Find the target azimuth/elevation to use
    idf = TBWFile(filenames[0])
    tStart = datetime.utcfromtimestamp(idf.get_info('start_time'))
    idf.close()

    obs.date = tStart.strftime("%Y/%m/%d %H:%M:%S")
    tTransit = obs.next_transit(args.source)
    obs.date = tTransit
    args.source.compute(obs)
    targetAz = args.source.az * 180 / numpy.pi
    targetEl = args.source.alt * 180 / numpy.pi

    # Preliminary report
    print("Working on %i TBW files using SSMIF '%s'" %
          (len(filenames), os.path.basename(ssmif)))
    print("  Source: '%s'" % args.source.name)
    print("    Transit time: %s" % str(tTransit))
    print("    Transit azimuth: %.2f degrees" % targetAz)
    print("    Transet elevation: %.2f degrees" % targetEl)
    print(" ")

    # Loop over input files
    unx, lst, pwrX, pwrY = [], [], [], []
    for filename in filenames:
        ## Get the file reader
        idf = TBWFile(filename)

        ## Pull out some metadata and update the observer
        jd = astro.unix_to_utcjd(idf.get_info('start_time'))
        obs.date = ephem.Date(jd - astro.DJD_OFFSET)
        sample_rate = idf.get_info('sample_rate')
        nInts = int(
            round(idf.get_info('nframe') / (30000.0 * len(antennas) / 2)))
        transitOffset = (obs.date - tTransit) * 86400.0

        ## Metadata report
        print("Filename: %s" % os.path.basename(filename))
        print("  Data type:  %s" % type(idf))
        print("  Captures in file: %i (%.3f s)" %
              (nInts, nInts * 30000 * 400 / sample_rate))
        print("  Station: %s" % station.name)
        print("  Date observed: %s" % str(obs.date))
        print("  MJD: %.5f" % (jd - astro.MJD_OFFSET, ))
        print("  LST: %s" % str(obs.sidereal_time()))
        print("    %.1f s %s transit" %
              (abs(transitOffset), 'before' if transitOffset < 0 else 'after'))
        print(" ")

        ## Load in the data
        readT, t, data = idf.read(time_in_samples=True)

        ## Build up a time array
        t = t + numpy.arange(data.shape[1], dtype=numpy.int64)

        ## Update the beamformer delays for the pointing center(s)
        unx.append(idf.get_info('start_time'))
        lst.append(obs.sidereal_time() * 12 / numpy.pi)
        pwrX.append([])
        pwrY.append([])

        for offset in (-1, 0, 1):
            ### Compute
            delays = beamformer.calc_delay(antennas,
                                           freq=74.0e6,
                                           azimuth=targetAz,
                                           elevation=targetEl + offset)
            delays *= fS * 16
            delays = delays.max() - delays
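            # (after the fS*16 scaling the delays are in sixteenths of a DP
            # sample and, referenced to the maximum, are all non-negative; the
            # whole-sample part goes to the FIFO and the remainder to the FIR)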
            ### Decompose into FIFO and FIR
            course = (delays // 16)
            fine = (delays % 16)

            ## Form the beams for both polarizations
            beamX, beamY = dp.form_beam(antennas, t, data, course, fine, gains)

            ## Compute the integrated spectra
            ### Convert to int16
            beam = numpy.zeros((2, beamX.size), dtype=numpy.int16)
            beam[0, :] = (numpy.round(beamX)).astype(data.dtype)
            beam[1, :] = (numpy.round(beamY)).astype(data.dtype)
            ### Move into the frequency domain
            freq, spec = fxc.SpecMaster(beam,
                                        LFFT=8192,
                                        window=fxc.null_window,
                                        verbose=False,
                                        sample_rate=fS,
                                        clip_level=0)

            ## Save
            pwrX[-1].append(spec[0, :])
            pwrY[-1].append(spec[1, :])

        ## Done
        idf.close()

    # Convert to arrays
    unx, lst = numpy.array(unx), numpy.array(lst)
    pwrX, pwrY = numpy.array(pwrX), numpy.array(pwrY)

    # Save for later (needed for debugging)
    outname = "estimateSEFD-%s-%04i%02i%02i.npz" % (os.path.splitext(
        os.path.basename(ssmif))[0], tTransit.tuple()[0], tTransit.tuple()[1],
                                                    tTransit.tuple()[2])
    print("Saving intermediate data to '%s'" % outname)
    print(" ")
    numpy.savez(outname,
                source=args.source.name,
                freq=freq,
                unx=unx,
                lst=lst,
                pwrX=pwrX,
                pwrY=pwrY)

    # Report
    print("%s" % (args.source.name, ))
    for i in range(lst.size):
        print("%s:  %s  %s" %
              (str(ephem.hours(str(lst[i]))), pwrX[i, :], pwrY[i, :]))

    # Plot
    if args.plots:
        fig = plt.figure()
        ax = fig.gca()
        ax.plot(lst, pwrX, linestyle='-', marker='+')
        ax.plot(lst, pwrY, linestyle='-', marker='x')
        plt.show()