Example #1
def main(args):
    otz = MST
    if args.utc:
        otz = UTC

    if not args.pairs:
        for arg in args.mjd:
            mjd1 = int(arg)
            mjd2 = float(mjd1) + 0.99999

            d1 = mjdmpm_to_datetime(mjd1, 0)
            d1 = UTC.localize(d1)
            d1 = d1.astimezone(otz)

            d2 = mjdmpm_to_datetime(mjd2, 0)
            d2 = UTC.localize(d2)
            d2 = d2.astimezone(otz)

            tzname = d1.strftime('%Z')

            print("MJD: %i" % mjd1)
            print("%s: %s to %s" %
                  (tzname, d1.strftime("%B %d, %Y at %H:%M:%S %Z"),
                   d2.strftime("%B %d, %Y at %H:%M:%S %Z")))
    else:
        for arg in zip(args.mjd[0::2], args.mjd[1::2]):
            mjd, mpm = [int(i) for i in arg]
            d = mjdmpm_to_datetime(mjd, mpm)
            d = UTC.localize(d)
            d = d.astimezone(otz)

            tzname = d.strftime('%Z')

            print("MJD: %i, MPM: %i" % (mjd, mpm))
            print("%s: %s" % (tzname, d.strftime("%B %d, %Y at %H:%M:%S %Z")))
Example #2
def _obs_comp(x, y):
    """
    Function to help sort observations in time.
    """
    
    tX = mjdmpm_to_datetime(x['mjd'], x['mpm'])
    tY = mjdmpm_to_datetime(y['mjd'], y['mpm'])
    if tX < tY:
        return -1
    elif tX > tY:
        return 1
    else:
        return 0
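_obs_comp() is a Python 2 style three-way comparator. Under Python 3, where list.sort() only accepts a key function, it can be adapted with the standard functools.cmp_to_key, as Example #13 below needs. A short sketch, assuming _obs_comp and its mjdmpm_to_datetime import are in scope, with two hypothetical observation dicts:

from functools import cmp_to_key

# Each dict carries the 'mjd' and 'mpm' keys that _obs_comp expects
obs = [{'mjd': 56093, 'mpm': 23649000}, {'mjd': 56093, 'mpm': 0}]
obs.sort(key=cmp_to_key(_obs_comp))   # earliest observation first
print([o['mpm'] for o in obs])        # [0, 23649000]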
Example #3
def _download_code(mjd, type='final'):
    """
    Given an MJD value, download the corresponding CODE final data product 
    for that day.
    
    .. note::
        The 'type' keyword is ignored in the call.  It is included for 
        compatibility with _download_igs().
    """

    # Convert the MJD to a datetime instance so that we can pull out the year
    # and the day-of-year
    mpm = int((mjd - int(mjd)) * 24.0 * 3600.0 * 1000)
    dt = mjdmpm_to_datetime(int(mjd), mpm)

    year = dt.year
    dayOfYear = int(dt.strftime('%j'), 10)

    # Figure out which file we need to download
    filename = 'codg%03i0.%02ii.Z' % (dayOfYear, year % 100)

    # Attempt to download the data
    status = _download_worker(
        '%s/%04i/%03i/%s' %
        (IONO_CONFIG.get('code_url'), year, dayOfYear, filename), filename)
    if not status:
        status = _download_worker(
            '%s/%04i/%03i/%s' %
            (IONO_CONFIG.get('code_mirror'), year, dayOfYear, filename),
            filename)
    return status
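Several of the downloaders here accept a fractional MJD and recover the MPM (milliseconds past midnight) from the fractional part. A standalone sketch of that split; split_mjd() is a hypothetical helper name, not part of the library:

def split_mjd(mjd):
    """Hypothetical helper: split a fractional MJD into an integer MJD
    and an MPM count, mirroring the in-line conversion above."""
    mpm = int((mjd - int(mjd)) * 24.0 * 3600.0 * 1000)
    return int(mjd), mpm

print(split_mjd(56093.5))   # (56093, 43200000), i.e. noon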
Example #4
def _download_ustec(mjd):
    """
    Given an MJD value, download the corresponding USTEC data product 
    for that day.
    """

    # Convert the MJD to a datetime instance so that we can pull out the year
    # and the day-of-year
    mpm = int((mjd - int(mjd)) * 24.0 * 3600.0 * 1000)
    dt = mjdmpm_to_datetime(int(mjd), mpm)

    year = dt.year
    month = dt.month
    dateStr = dt.strftime("%Y%m%d")
    # Build up the filename
    filename = '%s_ustec.tar.gz' % dateStr

    # Attempt to download the data
    return _download_worker(
        '%s/%04i/%02i/%s' %
        (IONO_CONFIG.get('ustec_url'), year, month, filename), filename)
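For reference, the filename logic can be reproduced without the library, using only the standard MJD epoch of 1858-11-17; ustec_filename() is a hypothetical name for this sketch:

from datetime import datetime, timedelta

def ustec_filename(mjd):
    # Hypothetical standalone recreation of the filename built above
    dt = datetime(1858, 11, 17) + timedelta(days=mjd)   # MJD epoch
    return '%s_ustec.tar.gz' % dt.strftime("%Y%m%d")

print(ustec_filename(56093))   # '20120615_ustec.tar.gz'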
Example #5
def test_mjdmpm(self):
    """Test the MJD, MPM to datetime conversion"""
    
    mjd, mpm = 56093, 23649000
    dt = mcs.mjdmpm_to_datetime(mjd, mpm)
    
    self.assertEqual(dt.strftime("%Y-%m-%d %H:%M:%S"), "2012-06-15 06:34:09")
Example #6
def main(args):
    # Loop over the input files
    for filename in args.filename:
        ## Is this file valid?
        try:
            db = PasiImageDB(filename, 'r')
        except Exception as e:
            print("ERROR: %s" % str(e))
            continue
            
        ## Report - overall
        print("Filename: %s" % os.path.basename(filename))
        print("  Correlator: %s" % db.header.corrVersion)
        print("  Imager: %s" % db.header.imagerVersion)
        print("  Station: %s" % db.header.station)
        print("  Stokes Parameters: %s" % db.header.stokesParams)
        print("  Image Size: %i by %i with %.3f deg/px" % (db.header.xSize, db.header.ySize, db.header.xPixelSize))
        print("  Number of Integrations: %i" % db.nIntegrations)
        
        ## Report - first image
        db.seek(0)
        hdr, data, spc = db.readImage()
        mjd = int(hdr.startTime)
        mpm = int((hdr.startTime - mjd)*86400*1000.0)
        tStart = mjdmpm_to_datetime(mjd, mpm)
        print("    First Image:")
        print("      Start Time: %s" % tStart.strftime("%Y/%m/%d %H:%M:%S.%f"))
        print("      Integration Time: %.3f s" % (hdr.intLen*86400.0,))
        print("      Tuning: %.3f MHz" % (hdr.freq/1e6,))
        print("      Bandwidth: %.3f kHz" % (hdr.bandwidth/1e3,))
        
        ## Report - last image
        db.seek(db.nIntegrations-1)
        hdr, data, spc = db.readImage()
        mjd = int(hdr.startTime)
        mpm = int((hdr.startTime - mjd)*86400*1000.0)
        tStart = mjdmpm_to_datetime(mjd, mpm)
        print("    Last Image:")
        print("      Start Time: %s" % tStart.strftime("%Y/%m/%d %H:%M:%S.%f"))
        print("      Integration Time: %.3f s" % (hdr.intLen*86400.0,))
        print("      Tuning: %.3f MHz" % (hdr.freq/1e6,))
        print("      Bandwidth: %.3f kHz" % (hdr.bandwidth/1e3,))
        
        ## Done
        print(" ")
        db.close()
Example #7
def main(args):
    # Sort out the filenames
    npfile, msfile = args[:2]
    if npfile[-4:] != '.npy':
        npfile, msfile = msfile, npfile
    assert(npfile[-4:] == '.npy')
    assert((msfile[-4:] == '.tar') or (msfile[-3:] == '.ms'))
    print(f"Comparing {os.path.basename(npfile)} with {os.path.basename(msfile)}")
    
    # Make sure we have simultaneous data
    np_mjd, np_hms, _ = os.path.basename(npfile).split('_', 2)
    np_ymd = mjdmpm_to_datetime(int(np_mjd, 10), 0).strftime("%Y%m%d")
    ms_ymd, ms_hms, _ = os.path.basename(msfile).split('_', 2)
    assert(np_ymd == ms_ymd)
    assert(np_hms == ms_hms)
    
    # Load in the "everything" file that contains, well, everything
    everything = numpy.load(npfile)
    everything = everything[...,0] + 1j*everything[...,1]
    everything = everything.astype(numpy.complex64)
    
    # Parse out the shape to figure out what we have
    nint, nbl, nchan, npol = everything.shape
    nant = int(numpy.sqrt(nbl*2))
    assert(nant*(nant+1)//2 == nbl)
    
    # CorrelatedDataMS only returns the baselines and not the auto-correlations.
    # Figure out where those are in "everything"
    bl = []
    k = 0
    for i in range(nant):
        for j in range(i, nant):
            if i != j:
                bl.append(k)
            k += 1
            
    # Load in the measurement set
    ms = CorrelatedDataMS(msfile)
    
    # Loop over integrations
    for i in range(nint):
        ## Everything's bit
        e = everything[i,...]
        
        ## Measurement set's bit - converted to a numpy array
        m = ms.get_data_set(i)
        m = [getattr(m, p, None).data for p in ('XX', 'XY', 'YX', 'YY')]
        m = numpy.array(m)
        m = m.transpose(1,2,0)
        
        # Compare
        print(f"  Integration {i+1}:")
        for p,pol in enumerate(('XX', 'XY', 'YX', 'YY')):
            diff = e[bl,:,p] - m[:,:,p]
            print(f"    {pol} -> min={diff.min()}, mean={diff.mean()}, max={diff.max()}")
Example #8
File: parser.py Project: lwa-project/lsl
def date(string):
    """
    Convert a date as either a YYYY[-/]MM[-/]DD or MJD string into a 
    YYYY/MM/DD string.
    """

    try:
        mjd = int(string, 10)
        dt = mjdmpm_to_datetime(mjd, 0)
    except ValueError:
        cstring = string.replace('-', '/')
        try:
            dt = datetime.strptime("%s 00:00:00" % cstring,
                                   "%Y/%m/%d %H:%M:%S")
        except ValueError:
            msg = "%r cannot be interpretted as an MJD or date string" % string
            raise ArgumentTypeError(msg)

    date = dt.strftime('%Y/%m/%d')
    return date
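A short usage sketch exercising both accepted forms; the MJD value matches the unit test in Example #5:

print(date('56093'))        # '2012/06/15' - interpreted as an MJD
print(date('2012-06-15'))   # '2012/06/15' - interpreted as a date string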
Example #9
def _download_uqr(mjd, type='final'):
    """
    Given an MJD value, download the corresponding JPL final data product 
    for that day.
    
    .. note::
        By default the "final" product is downloaded.  However, the "rapid" 
        data product may be downloaded if the 'type' keyword is set to 
        "rapid".
    """

    # Convert the MJD to a datetime instance so that we can pull out the year
    # and the day-of-year
    mpm = int((mjd - int(mjd)) * 24.0 * 3600.0 * 1000)
    dt = mjdmpm_to_datetime(int(mjd), mpm)

    year = dt.year
    dayOfYear = int(dt.strftime('%j'), 10)

    # Figure out which file we need to download
    if type == 'final':
        ## Final
        filename = 'uqrg%03i0.%02ii.Z' % (dayOfYear, year % 100)
    elif type == 'rapid':
        ## Rapid - note that the rapid product uses the same 'uqrg' filename
        ## as the final product
        filename = 'uqrg%03i0.%02ii.Z' % (dayOfYear, year % 100)
    else:
        ## ???
        raise ValueError("Unknown TEC file type '%s'" % type)

    # Attempt to download the data
    status = _download_worker(
        '%s/%04i/%03i/%s' %
        (IONO_CONFIG.get('uqr_url'), year, dayOfYear, filename), filename)
    if not status:
        status = _download_worker(
            '%s/%04i/%03i/%s' %
            (IONO_CONFIG.get('uqr_mirror'), year, dayOfYear, filename),
            filename)
    return status
Example #10
def main(args):
    # Set the site
    site = None
    if args.lwa1:
        site = 'lwa1'
    elif args.lwasv:
        site = 'lwasv'
        
    # Open the file and find good data (not raw DRX data)
    fh = open(args.filename, 'rb')

    try:
        for i in range(5):
            junkFrame = drx.read_frame(fh)
        raise RuntimeError("ERROR: '%s' appears to be a raw DRX file, not a DR spectrometer file" % args.filename)
    except errors.SyncError:
        fh.seek(0)
        
    # Interrogate the file to figure out what frame sizes to expect, how many 
    # frames there are, and what the transform length is
    FRAME_SIZE = drspec.get_frame_size(fh)
    nFrames = os.path.getsize(args.filename) // FRAME_SIZE
    nChunks = nFrames
    LFFT = drspec.get_transform_size(fh)

    # Read in the first frame to figure out the DP information
    junkFrame = drspec.read_frame(fh)
    fh.seek(-FRAME_SIZE, 1)
    srate = junkFrame.sample_rate
    t0 = junkFrame.time
    tInt = junkFrame.header.nints*LFFT/srate
    
    # Offset in frames for beampols beam/tuning/pol. sets
    offset = int(round(args.skip / tInt))
    fh.seek(offset*FRAME_SIZE, 1)
    
    # Iterate on the offsets until we reach the right point in the file.  This
    # is needed to deal with files that start with only one tuning and/or a 
    # different sample rate.  
    while True:
        ## Figure out where in the file we are and what the current tuning/sample 
        ## rate is
        junkFrame = drspec.read_frame(fh)
        srate = junkFrame.sample_rate
        t1 = junkFrame.time
        tInt = junkFrame.header.nints*LFFT/srate
        fh.seek(-FRAME_SIZE, 1)
        
        ## See how far off the current frame is from the target
        tDiff = t1 - (t0 + args.skip)
        
        ## Half that to come up with a new seek parameter
        tCorr = -tDiff / 2.0
        cOffset = int(round(tCorr / tInt))
        offset += cOffset
        
        ## If the offset is zero, we are done.  Otherwise, apply the offset
        ## and check the location in the file again.
        if cOffset == 0:
            break
        fh.seek(cOffset*FRAME_SIZE, 1)
        
    # Update the offset actually used
    args.skip = t1 - t0
    nChunks = (os.path.getsize(args.filename) - fh.tell()) // FRAME_SIZE
    
    # Update the file contents
    beam = junkFrame.id
    central_freq1, central_freq2 = junkFrame.central_freq
    srate = junkFrame.sample_rate
    data_products = junkFrame.data_products
    t0 = junkFrame.time
    tInt = junkFrame.header.nints*LFFT/srate
    beginDate = junkFrame.time.datetime
        
    # Report
    print("Filename: %s" % args.filename)
    if args.metadata is not None:
        print("Metadata: %s" % args.metadata)
    elif args.sdf is not None:
        print("SDF: %s" % args.sdf)
    print("Date of First Frame: %s" % beginDate)
    print("Beam: %i" % beam)
    print("Sample Rate: %i Hz" % srate)
    print("Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (central_freq1, central_freq2))
    print("Data Products: %s" % ','.join(data_products))
    print("Frames: %i (%.3f s)" % (nFrames, nFrames*tInt))
    print("---")
    print("Offset: %.3f s (%i frames)" % (args.skip, offset))
    print("Transform Length: %i" % LFFT)
    print("Integration: %.3f s" % tInt)
    
    # Setup the output file
    outname = os.path.split(args.filename)[1]
    outname = os.path.splitext(outname)[0]
    outname = '%s-waterfall.hdf5' % outname
    
    if os.path.exists(outname):
        if not args.force:
            yn = input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
        else:
            yn = 'y'
            
        if yn not in ('n', 'N'):
            os.unlink(outname)
        else:
            raise RuntimeError("Output file '%s' already exists" % outname)
            
    f = hdfData.create_new_file(outname)
    obsList = {}
    if args.metadata is not None:
        try:
            project = metabundle.get_sdf(args.metadata)
        except Exception as e:
            if adpReady:
                project = metabundleADP.get_sdf(args.metadata)
            else:
                raise e
                
        sdfBeam  = project.sessions[0].drx_beam
        spcSetup = project.sessions[0].spcSetup
        if sdfBeam != beam:
            raise RuntimeError("Metadata is for beam #%i, but data is from beam #%i" % (sdfBeam, beam))
            
        for i,obs in enumerate(project.sessions[0].observations):
            sdfStart = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm)
            sdfStop  = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
            obsChunks = int(numpy.ceil(obs.dur/1000.0 * drx.FILTER_CODES[obs.filter] / (spcSetup[0]*spcSetup[1])))
            
            obsList[i+1] = (sdfStart, sdfStop, obsChunks)
            
        hdfData.fill_from_metabundle(f, args.metadata)
        
    elif args.sdf is not None:
        try:
            project = sdf.parse_sdf(args.sdf)
        except Exception as e:
            if adpReady:
                project = sdfADP.parse_sdf(args.sdf)
            else:
                raise e
                
        sdfBeam  = project.sessions[0].drx_beam
        spcSetup = project.sessions[0].spcSetup
        if sdfBeam != beam:
            raise RuntimeError("Metadata is for beam #%i, but data is from beam #%i" % (sdfBeam, beam))
            
        for i,obs in enumerate(project.sessions[0].observations):
            sdfStart = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm)
            sdfStop  = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
            obsChunks = int(numpy.ceil(obs.dur/1000.0 * drx.FILTER_CODES[obs.filter] / (spcSetup[0]*spcSetup[1])))
            
            obsList[i+1] = (sdfStart, sdfStop, obsChunks)
            
        hdfData.fill_from_sdf(f, args.sdf, station=site)
        
    else:
        obsList[1] = (beginDate, datetime(2222,12,31,23,59,59), nChunks)
        
        hdfData.fill_minimum(f, 1, beam, srate, station=site)
        
    data_products = junkFrame.data_products
    for o in sorted(obsList.keys()):
        for t in (1,2):
            hdfData.create_observation_set(f, o, t, numpy.arange(LFFT, dtype=numpy.float64), obsList[o][2], data_products)
            
    f.attrs['FileGenerator'] = 'drspec2hdf.py'
    f.attrs['InputData'] = os.path.basename(args.filename)
    
    # Create the various HDF group holders
    ds = {}
    for o in sorted(obsList.keys()):
        obs = hdfData.get_observation_set(f, o)
        
        ds['obs%i' % o] = obs
        ds['obs%i-time' % o] = hdfData.get_time(f, o)
        
        for t in (1,2):
            ds['obs%i-freq%i' % (o, t)] = hdfData.get_data_set(f, o, t, 'freq')
            for p in data_products:
                ds["obs%i-%s%i" % (o, p, t)] = hdfData.get_data_set(f, o, t, p)
            ds['obs%i-Saturation%i' % (o, t)] = hdfData.get_data_set(f, o, t, 'Saturation')
            
    # Loop over DR spectrometer frames to fill in the HDF5 file
    pbar = progress.ProgressBar(max=nChunks)
    o = 1
    j = 0
    
    firstPass = True
    for i in range(nChunks):
        frame = drspec.read_frame(fh)
        
        cTime = frame.time.datetime
        if cTime > obsList[o][1]:
            # Increment to the next observation
            o += 1
            
            # If we have reached the end, exit...
            try:
                obsList[o]
                
                firstPass = True
            except KeyError:
                sys.stdout.write('%s\r' % (' '*pbar.span))
                sys.stdout.flush()
                print("End of observing block according to SDF, exiting")
                break
                
        if cTime < obsList[o][0]:
            # Skip over data that occurs before the start of the observation
            continue
            
        try:
            if frame.time > oTime + 1.001*tInt:
                print('Warning: Time tag error at frame %i; %.3f > %.3f + %.3f' % (i, frame.time, oTime, tInt))
        except NameError:
            pass
        oTime = frame.time
        
        if firstPass:
            # Update the observation metadata using the current frame
            central_freq1, central_freq2 = frame.central_freq
            srate = frame.sample_rate
            tInt  = frame.header.nints*LFFT/srate
            
            freq = numpy.fft.fftshift( numpy.fft.fftfreq(LFFT, d=1.0/srate) )
            freq = freq.astype(numpy.float64)
            
            sys.stdout.write('%s\r' % (' '*pbar.span))
            sys.stdout.flush()
            print("Switching to Obs. #%i" % o)
            print("-> Tunings: %.1f Hz, %.1f Hz" % (central_freq1, central_freq2))
            print("-> Sample Rate: %.1f Hz" % srate)
            print("-> Integration Time: %.3f s" % tInt)
            sys.stdout.write(pbar.show()+'\r')
            sys.stdout.flush()
            
            j = 0
            ds['obs%i-freq1' % o][:] = freq + central_freq1
            ds['obs%i-freq2' % o][:] = freq + central_freq2
            
            obs = ds['obs%i' % o]
            obs.attrs['tInt'] = tInt
            obs.attrs['tInt_Units'] = 's'
            obs.attrs['LFFT'] = LFFT
            obs.attrs['nChan'] = LFFT
            obs.attrs['RBW'] = freq[1]-freq[0]
            obs.attrs['RBW_Units'] = 'Hz'
            
            firstPass = False
            
        # Load the data from the spectrometer frame into the HDF5 group
        ds['obs%i-time' % o][j] = (frame.time[0], frame.time[1])
        
        ds['obs%i-Saturation1' % o][j,:] = frame.payload.saturations[0:2]
        ds['obs%i-Saturation2' % o][j,:] = frame.payload.saturations[2:4]
        
        for t in (1,2):
            for p in data_products:
                ds['obs%i-%s%i' % (o, p, t)][j,:] = getattr(frame.payload, "%s%i" % (p, t-1), None)
        j += 1
        
        # Update the progress bar
        pbar.inc()
        if i % 10 == 0:
            sys.stdout.write(pbar.show()+'\r')
            sys.stdout.flush()
            
    sys.stdout.write(pbar.show()+'\n')
    sys.stdout.flush()
    
    # Done
    fh.close()

    # Save the output to a HDF5 file
    f.close()
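The offset loop above converges by repeatedly seeking half of the remaining time error. A toy standalone sketch of the same idea, with a plain list standing in for the file; all names here are illustrative:

frames = [0.5 * i for i in range(100)]   # frame start times; tInt = 0.5 s
tInt, target = 0.5, 12.3
pos = 0
while True:
    tDiff = frames[pos] - target         # how far off the current frame is
    cOffset = int(round(-tDiff / 2.0 / tInt))
    if cOffset == 0:                     # within one tInt of the target
        break
    pos += cOffset                       # half-step toward the target
print(pos, frames[pos])                  # 24 12.0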
Example #11
def main(args):
    # Loop over the input files
    for filename in args.filename:
        ## Is this file valid?
        try:
            db = PasiImageDB(filename, 'r')
        except Exception as e:
            print("ERROR: %s" % str(e))
            continue

        ## Setup the array
        if db.header.station == 'LWA1':
            aa = simVis.build_sim_array(lwa1, lwa1.antennas[0::2],
                                        numpy.array([
                                            38e6,
                                        ]))
        else:
            aa = None

        ## Go!
        for i, (hdr, img, spec) in enumerate(db):
            if args.dataset != 0 and args.dataset != (i + 1):
                continue

            mjd = int(hdr.startTime)
            mpm = int((hdr.startTime - mjd) * 86400 * 1000.0)
            tStart = mjdmpm_to_datetime(mjd, mpm)
            if aa is not None:
                aa.set_jultime(hdr.centroidTime + astro.MJD_OFFSET)

            stokes = hdr.stokesParams.split(',')

            ### Save the image size for later
            imSize = img.shape[-1]

            ### Zero outside of the horizon to avoid problems
            pCntr = imSize / 2 + 1 + 0.5 * ((imSize + 1) % 2)
            pScale = hdr['xPixelSize']
            sRad = 360.0 / pScale / numpy.pi / 2
            x = numpy.arange(1, img.shape[-2] + 1, dtype=numpy.float32) - pCntr
            y = numpy.arange(1, img.shape[-1] + 1, dtype=numpy.float32) - pCntr
            x /= -sRad
            y /= sRad
            x, y = numpy.meshgrid(x, y)
            invalid = numpy.where((x**2 + y**2) > 1)
            img[:, invalid[0], invalid[1]] = 0.0

            ### Try and set the image scale correctly for the display
            x2 = x - 1 / sRad
            y2 = y - 1 / sRad
            extent = (x2.max(), x2.min(), y.min(), y.max())

            ### Loop over Stokes parameters
            fig = plt.figure()
            for j, label in enumerate(stokes):
                ax = fig.add_subplot(2, 2, j + 1)
                ax.imshow(img[j, :, :].T,
                          origin='lower',
                          interpolation='nearest',
                          extent=extent)
                ax.set_title(label)
                ax.set_xlim((1, -1))
                ax.set_ylim((-1, 1))

                ## Turn off tick marks
                ax.xaxis.set_major_formatter(NullFormatter())
                ax.yaxis.set_major_formatter(NullFormatter())

                ## If we know where we are, overlay some stuff
                if aa is not None:
                    # Horizon
                    overlay.horizon(ax, aa)
                    # RA/Dec graticule
                    if not args.no_grid:
                        overlay.graticule_radec(ax, aa)
                    # Source positions
                    overlay.sources(ax,
                                    aa,
                                    simVis.SOURCES,
                                    label=(not args.no_labels))

            fig.suptitle(
                '%.3f MHz @ %s' %
                (hdr.freq / 1e6, tStart.strftime("%Y/%m/%d %H:%M:%S")))
            plt.show()

        ## Done
        db.close()
Example #12
def main(args):
    # Length of the FFT and the window to use
    LFFT = args.fft_length
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window
    args.window = window

    # Open the file and find good data (not spectrometer data)
    fh = open(args.filename, "rb")

    try:
        for i in range(5):
            junkFrame = drspec.read_frame(fh)
        raise RuntimeError(
            "ERROR: '%s' appears to be a DR spectrometer file, not a raw DRX file"
            % args.filename)
    except errors.SyncError:
        fh.seek(0)

    # Good, we seem to have a real DRX file, switch over to the LDP interface
    fh.close()
    idf = LWA1DataFile(args.filename,
                       ignore_timetag_errors=args.ignore_time_errors)

    # Metadata
    nFramesFile = idf.get_info('nframe')
    beam = idf.get_info('beam')
    srate = idf.get_info('sample_rate')
    beampols = idf.get_info('nbeampol')
    beams = max([1, beampols // 4])

    # Number of frames to integrate over
    nFramesAvg = int(args.average * srate / 4096) * beampols
    nFramesAvg = int(1.0 * (nFramesAvg // beampols) * 4096 /
                     float(LFFT)) * LFFT // 4096 * beampols
    args.average = 1.0 * (nFramesAvg // beampols) * 4096 / srate
    maxFrames = nFramesAvg

    # Offset into the file, if needed
    offset = idf.offset(args.skip)

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    if args.metadata is not None:
        args.duration = 0
    if args.duration == 0:
        args.duration = 1.0 * nFramesFile / beampols * 4096 / srate
        args.duration -= args.skip
    else:
        args.duration = int(
            round(args.duration * srate * beampols / 4096) / beampols * 4096 /
            srate)
    nChunks = int(round(args.duration / args.average))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks

    # Date & Central Frequency
    t1 = idf.get_info('start_time')
    beginDate = t1.datetime
    central_freq1 = idf.get_info('freq1')
    central_freq2 = idf.get_info('freq2')

    # File summary
    print("Filename: %s" % args.filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Beams: %i" % beams)
    print("Tune/Pols: %i" % beampols)
    print("Sample Rate: %i Hz" % srate)
    print("Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" %
          (central_freq1, central_freq2))
    print("Frames: %i (%.3f s)" %
          (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate))
    print("---")
    print("Offset: %.3f s (%i frames)" % (args.skip, offset))
    print("Integration: %.3f s (%i frames; %i frames per beam/tune/pol)" %
          (args.average, nFramesAvg, nFramesAvg / beampols))
    print("Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" %
          (args.average * nChunks, nFrames, nFrames / beampols))
    print("Chunks: %i" % nChunks)
    print(" ")

    # Estimate clip level (if needed)
    if args.estimate_clip_level:
        estimate = idf.estimate_levels(sigma=5.0)
        clip1 = (estimate[0] + estimate[1]) / 2.0
        clip2 = (estimate[2] + estimate[3]) / 2.0
    else:
        clip1 = args.clip_level
        clip2 = args.clip_level

    # Make the pseudo-antennas for Stokes calculation
    antennas = []
    for i in range(4):
        if i // 2 == 0:
            newAnt = stations.Antenna(1)
        else:
            newAnt = stations.Antenna(2)

        if i % 2 == 0:
            newAnt.pol = 0
        else:
            newAnt.pol = 1

        antennas.append(newAnt)

    # Setup the output file
    outname = os.path.split(args.filename)[1]
    outname = os.path.splitext(outname)[0]
    outname = '%s-waterfall.hdf5' % outname

    if os.path.exists(outname):
        if not args.force:
            yn = input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
        else:
            yn = 'y'

        if yn not in ('n', 'N'):
            os.unlink(outname)
        else:
            raise RuntimeError("Output file '%s' already exists" % outname)

    f = hdfData.create_new_file(outname)

    # Look at the metadata and come up with a list of observations.  If
    # there are no metadata, create a single "observation" that covers the
    # whole file.
    obsList = {}
    if args.metadata is not None:
        try:
            project = metabundle.get_sdf(args.metadata)
        except Exception as e:
            project = metabundleADP.get_sdf(args.metadata)

        sdfBeam = project.sessions[0].drx_beam
        spcSetup = project.sessions[0].spcSetup
        if sdfBeam != beam:
            raise RuntimeError(
                "Metadata is for beam #%i, but data is from beam #%i" %
                (sdfBeam, beam))

        for i, obs in enumerate(project.sessions[0].observations):
            sdfStart = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm)
            sdfStop = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
            obsDur = obs.dur / 1000.0
            obsSR = drx.FILTER_CODES[obs.filter]

            obsList[i + 1] = (sdfStart, sdfStop, obsDur, obsSR)

        print("Observations:")
        for i in sorted(obsList.keys()):
            obs = obsList[i]
            print(" #%i: %s to %s (%.3f s) at %.3f MHz" %
                  (i, obs[0], obs[1], obs[2], obs[3] / 1e6))
        print(" ")

        hdfData.fill_from_metabundle(f, args.metadata)

    elif args.sdf is not None:
        try:
            project = sdf.parse_sdf(args.sdf)
        except Exception as e:
            project = sdfADP.parse_sdf(args.sdf)

        sdfBeam = project.sessions[0].drx_beam
        spcSetup = project.sessions[0].spcSetup
        if sdfBeam != beam:
            raise RuntimeError(
                "Metadata is for beam #%i, but data is from beam #%i" %
                (sdfBeam, beam))

        for i, obs in enumerate(project.sessions[0].observations):
            sdfStart = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm)
            sdfStop = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
            obsDur = obs.dur / 1000.0
            obsSR = drx.FILTER_CODES[obs.filter]

            obsList[i + 1] = (sdfStart, sdfStop, obsDur, obsSR)

        site = 'lwa1'
        if args.lwasv:
            site = 'lwasv'
        hdfData.fill_from_sdf(f, args.sdf, station=site)

    else:
        obsList[1] = (datetime.utcfromtimestamp(t1),
                      datetime(2222, 12, 31, 23, 59, 59), args.duration, srate)

        site = 'lwa1'
        if args.lwasv:
            site = 'lwasv'
        hdfData.fill_minimum(f, 1, beam, srate, station=site)

    if (not args.stokes):
        data_products = ['XX', 'YY']
    else:
        data_products = ['I', 'Q', 'U', 'V']

    for o in sorted(obsList.keys()):
        for t in (1, 2):
            hdfData.create_observation_set(
                f, o, t, numpy.arange(LFFT, dtype=numpy.float64),
                int(round(obsList[o][2] / args.average)), data_products)

    f.attrs['FileGenerator'] = 'hdfWaterfall.py'
    f.attrs['InputData'] = os.path.basename(args.filename)

    # Create the various HDF group holders
    ds = {}
    for o in sorted(obsList.keys()):
        obs = hdfData.get_observation_set(f, o)

        ds['obs%i' % o] = obs
        ds['obs%i-time' % o] = hdfData.get_time(f, o)

        for t in (1, 2):
            ds['obs%i-freq%i' % (o, t)] = hdfData.get_data_set(f, o, t, 'freq')
            for p in data_products:
                ds["obs%i-%s%i" % (o, p, t)] = hdfData.get_data_set(f, o, t, p)
            ds['obs%i-Saturation%i' % (o, t)] = hdfData.get_data_set(
                f, o, t, 'Saturation')

    # Load in the correct analysis function
    if (not args.stokes):
        processDataBatch = processDataBatchLinear
    else:
        processDataBatch = processDataBatchStokes

    # Go!
    for o in sorted(obsList.keys()):
        try:
            processDataBatch(idf,
                             antennas,
                             obsList[o][0],
                             obsList[o][2],
                             obsList[o][3],
                             args,
                             ds,
                             obsID=o,
                             clip1=clip1,
                             clip2=clip2)
        except RuntimeError as e:
            print("Observation #%i: %s, abandoning this observation" %
                  (o, str(e)))

    # Save the output to a HDF5 file
    f.close()

    # Close out the data file
    idf.close()
Example #13
def main(args):
    # Get the file names
    meta = args.metadata
    data = args.filename

    # Get all observations and their start times
    try:
        ## LWA-1
        sdf = metabundle.get_sdf(meta)
        ses = metabundle.get_session_spec(meta)
        obs = metabundle.get_observation_spec(meta)
    except Exception:
        ## LWA-SV
        ### Try again
        sdf = metabundleADP.get_sdf(meta)
        ses = metabundleADP.get_session_spec(meta)
        obs = metabundleADP.get_observation_spec(meta)
    obs.sort(key=cmp_to_key(_obs_comp))   # Python 3: cmp_to_key comes from functools
    tStart = []
    oDetails = []
    for i,o in enumerate(obs):
        tStart.append( mjdmpm_to_datetime(o['mjd'], o['mpm']) )
        oDetails.append( {'m': o['mode'], 'd': o['dur'] / 1000.0, 'f': o['bw'], 
                          'p': o['project_id'], 's': o['session_id'], 'o': o['obs_id'], 
                          't': sdf.sessions[0].observations[o['obs_id']-1].target} )

        print("Observation #%i" % (o['obs_id']))
        print(" Start: %i, %i -> %s" % (o['mjd'], o['mpm'], tStart[-1]))
        print(" Mode: %s" % mode_to_string(o['mode']))
        print(" BW: %i" % o['bw'])
        print(" Target: %s" % sdf.sessions[0].observations[o['obs_id']-1].target)
    print(" ")

    # Figure out where in the file the various bits are.
    fh = open(data, 'rb')
    lf = drx.read_frame(fh)
    beam, j, k = lf.id
    if beam != obs[0]['drx_beam']:
        print('ERROR: Beam mis-match, metadata is for #%i, file is for #%i' % (obs[0]['drx_beam'], beam))
        sys.exit()
    firstFrame = lf.time.datetime
    if abs(firstFrame - min(tStart)) > timedelta(seconds=30):
        print('ERROR: Time mis-match, metadata is for %s, file is for %s' % (min(tStart), firstFrame))
        sys.exit()
    fh.seek(0)

    for i in range(len(tStart)):
        eof = False

        ## Get observation properties
        oStart = tStart[i]
        oMode = mode_to_string(oDetails[i]['m'])
        oDur  = oDetails[i]['d']
        oBW   = oDetails[i]['f']
        print("Seeking %s observation of %.3f seconds at %s" % (oMode, oDur, oStart))

        ## Get the correct reader to use
        if oMode == 'TBW':
            reader = tbw
            bwKey = None
            bwMult = 520.0 / 400
            fCount = 400
        elif oMode == 'TBN':
            reader = tbn
            bwKey = tbn.FILTER_CODES
            bwMult = 520.0 / 512
            fCount = 512
        else:
            reader = drx
            bwKey = drx.FILTER_CODES
            bwMult = 4.0 / 4096
            fCount = 4096

        ## Jump ahead to where the next frame should be, if needed
        if i != 0:
            pDur  = oDetails[i-1]['d']
            pBW   = oDetails[i-1]['f']

            nFramesSkip = int(pDur*bwKey[pBW]*bwMult)
            fh.seek(nFramesSkip*reader.FRAME_SIZE, 1)
            if fh.tell() >= os.path.getsize(data):
                fh.seek(-10*reader.FRAME_SIZE, 2)
                
        ## Figure out where we are and make sure we line up on a frame
        ## NOTE: This should never be needed
        fail = True
        while fail:
            try:
                frame = reader.read_frame(fh)
                fail = False
            except errors.SyncError:
                fh.seek(1, 1)
            except errors.EOFError:
                break
        fh.seek(-reader.FRAME_SIZE, 1)	

        ## Go in search of the start of the observation
        if frame.time.datetime < oStart:
            ### We aren't at the beginning yet, seek forwards
            print("-> At byte %i, time is %s < %s" % (fh.tell(), frame.time.datetime, oStart))

            while frame.time.datetime < oStart:
                try:
                    frame = reader.read_frame(fh)
                except errors.SyncError:		
                    fh.seek(1, 1)
                except errors.EOFError:
                    break
                #print(frame.time.datetime, oStart)

        elif frame.time.datetime > oStart:
            ### We've gone too far, seek backwards
            print("-> At byte %i, time is %s > %s" % (fh.tell(), frame.time.datetime, oStart))

            while frame.time.datetime > oStart:
                if fh.tell() == 0:
                    break
                fh.seek(-2*reader.FRAME_SIZE, 1)
                try:
                    frame = reader.read_frame(fh)
                except errors.SyncError:		
                    fh.seek(-1, 1)
                except errors.EOFError:
                    break
                #print(frame.time.datetime, oStart)
                
        else:
            ### We're there already
            print("-> At byte %i, time is %s = %s" % (fh.tell(), frame.time.datetime, oStart))
            
        ## Jump back exactly one frame so that the filehandle is in a position 
        ## to read the first frame that is part of the observation
        try:
            frame = reader.read_frame(fh)
            print("-> At byte %i, time is %s = %s" % (fh.tell(), frame.time.datetime, oStart))
            fh.seek(-reader.FRAME_SIZE, 1)
        except errors.EOFError:
            pass
            
        ## Update the bytes ranges
        if fh.tell() < os.path.getsize(data):
            oDetails[i]['b'] = fh.tell()
            oDetails[i]['e'] = -1
        else:
            oDetails[i]['b'] = -1
            oDetails[i]['e'] = -1

        if i != 0:
            oDetails[i-1]['e'] = fh.tell()

        ## Progress report
        if oDetails[i]['b'] >= 0:
            print('-> Obs.', oDetails[i]['o'], 'starts at byte', oDetails[i]['b'])
        else:
            print('-> Obs.', oDetails[i]['o'], 'starts after the end of the file')
    print(" ")

    # Report
    for i in range(len(tStart)):
        if oDetails[i]['b'] < 0:
            print("%s, Session %i, Observation %i: not found" % (oDetails[i]['p'], oDetails[i]['s'], oDetails[i]['o']))

        else:
            print("%s, Session %i, Observation %i: %i to %i (%i bytes)" % (oDetails[i]['p'], oDetails[i]['s'], oDetails[i]['o'], oDetails[i]['b'], oDetails[i]['e'], (oDetails[i]['e'] - oDetails[i]['b'])))
    print(" ")

    # Split
    if not args.list:
        for i in range(len(tStart)):
            if oDetails[i]['b'] < 0:
                continue
                
            ## Report
            print("Working on Observation %i" % (i+1,))
            
            ## Create the output name
            if args.source:
                outname = '%s_%i_%s.dat' % (oDetails[i]['p'], oDetails[i]['s'], oDetails[i]['t'].replace(' ', '').replace('/','').replace('&','and'))
            else:
                outname = '%s_%i_%i.dat' % (oDetails[i]['p'], oDetails[i]['s'], oDetails[i]['o'])
                
            oMode = mode_to_string(oDetails[i]['m'])

            ## Get the correct reader to use
            if oMode == 'TBW':
                reader = tbw

            elif oMode == 'TBN':
                reader = tbn
            else:
                reader = drx

            ## Get the number of frames
            if oDetails[i]['e'] > 0:
                nFramesRead = (oDetails[i]['e'] - oDetails[i]['b']) // reader.FRAME_SIZE
            else:
                nFramesRead = (os.path.getsize(data) - oDetails[i]['b']) // reader.FRAME_SIZE

            ## Split
            if os.path.exists(outname):
                if not args.force:
                    yn = input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
                else:
                    yn = 'y'
                    
                if yn not in ('n', 'N'):
                    os.unlink(outname)
                else:
                    print("WARNING: output file '%s' already exists, skipping" % outname)
                    continue
                    
            fh.seek(oDetails[i]['b'])
            
            t0 = time.time()
            oh = open(outname, 'wb')
            for sl in [2**i for i in range(17)[::-1]]:
                while nFramesRead >= sl:
                    temp = fh.read(sl*reader.FRAME_SIZE)
                    oh.write(temp)
                    nFramesRead -= sl
            oh.close()
            t1 = time.time()
            print("  Copied %i bytes in %.3f s (%.3f MB/s)" % (os.path.getsize(outname), t1-t0, os.path.getsize(outname)/1024.0**2/(t1-t0)))
    print(" ")
Example #14
def main(args):
    # Length of the FFT and the window to use
    LFFT = args.fft_length
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window
    args.window = window

    # Open the file and find good data
    idf = LWA1DataFile(args.filename,
                       ignore_timetag_errors=args.ignore_time_errors)

    # Metadata
    nFramesFile = idf.get_info('nframe')
    srate = idf.get_info('sample_rate')
    antpols = idf.get_info('nantenna')

    # Number of frames to integrate over
    nFramesAvg = int(args.average * srate / 512) * antpols
    nFramesAvg = int(1.0 * (nFramesAvg // antpols) * 512 /
                     float(LFFT)) * LFFT // 512 * antpols
    args.average = 1.0 * (nFramesAvg // antpols) * 512 / srate
    maxFrames = nFramesAvg

    # Offset into the file, if needed
    offset = idf.offset(args.skip)

    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    if args.metadata is not None:
        args.duration = 0
    if args.duration == 0:
        args.duration = 1.0 * nFramesFile / antpols * 512 / srate
        args.duration -= args.skip
    else:
        args.duration = int(
            round(args.duration * srate * antpols / 512) // antpols * 512 //
            srate)
    nChunks = int(round(args.duration / args.average))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks

    # Date & Central Frequency
    t1 = idf.get_info('start_time')
    beginDate = t1.datetime
    central_freq1 = idf.get_info('freq1')

    # File summary
    print("Filename: %s" % args.filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Antenna/Pols: %i" % antpols)
    print("Sample Rate: %i Hz" % srate)
    print("Tuning Frequency: %.3f Hz" % (central_freq1, ))
    print("Frames: %i (%.3f s)" %
          (nFramesFile, 1.0 * nFramesFile / antpols * 512 / srate))
    print("---")
    print("Offset: %.3f s (%i frames)" % (args.skip, offset))
    print("Integration: %.3f s (%i frames; %i frames per antenna/pol)" %
          (args.average, nFramesAvg, nFramesAvg // antpols))
    print("Duration: %.3f s (%i frames; %i frames per antenna/pol)" %
          (args.average * nChunks, nFrames, nFrames // antpols))
    print("Chunks: %i" % nChunks)
    print(" ")

    # Estimate clip level (if needed)
    if args.estimate_clip_level:
        estimate = idf.estimate_levels(sigma=5.0)
        clip1 = 1.0 * sum(estimate) / len(estimate)
    else:
        clip1 = args.clip_level

    # Get the antennas for Stokes calculation
    if args.metadata is not None:
        try:
            project = metabundle.get_sdf(args.metadata)
            station = stations.lwa1
        except Exception as e:
            project = metabundleADP.get_sdf(args.metadata)
            station = stations.lwasv
    elif args.lwasv:
        station = stations.lwasv
    else:
        station = stations.lwa1
    antennas = station.antennas

    # Setup the output file
    outname = os.path.split(args.filename)[1]
    outname = os.path.splitext(outname)[0]
    outname = '%s-tbn-waterfall.hdf5' % outname

    if os.path.exists(outname):
        if not args.force:
            yn = input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
        else:
            yn = 'y'

        if yn not in ('n', 'N'):
            os.unlink(outname)
        else:
            raise RuntimeError("Output file '%s' already exists" % outname)

    f = hdfData.create_new_file(outname)

    # Look at the metadata and come up with a list of observations.  If
    # there are no metadata, create a single "observation" that covers the
    # whole file.
    obsList = {}
    if args.metadata is not None:
        try:
            project = metabundle.get_sdf(args.metadata)
        except Exception as e:
            project = metabundleADP.get_sdf(args.metadata)

        sdfBeam = project.sessions[0].drx_beam
        if sdfBeam != 5:
            raise RuntimeError(
                "Metadata is for beam #%i, but data is from beam #%i" %
                (sdfBeam, 5))

        for i, obs in enumerate(project.sessions[0].observations):
            sdfStart = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm)
            sdfStop = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
            obsDur = obs.dur / 1000.0
            obsSR = tbn.FILTER_CODES[obs.filter]

            obsList[i + 1] = (sdfStart, sdfStop, obsDur, obsSR)

        print("Observations:")
        for i in sorted(obsList.keys()):
            obs = obsList[i]
            print(" #%i: %s to %s (%.3f s) at %.3f MHz" %
                  (i, obs[0], obs[1], obs[2], obs[3] / 1e6))
        print(" ")

        hdfData.fill_from_metabundle(f, args.metadata)

    elif args.sdf is not None:
        try:
            project = sdf.parse_sdf(args.sdf)
        except Exception as e:
            project = sdfADP.parse_sdf(args.sdf)

        sdfBeam = project.sessions[0].drx_beam
        if sdfBeam != 5:
            raise RuntimeError(
                "Metadata is for beam #%i, but data is from beam #%i" %
                (sdfBeam, 5))

        for i, obs in enumerate(project.sessions[0].observations):
            sdfStart = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm)
            sdfStop = mcs.mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
            obsDur = obs.dur / 1000.0
            obsSR = tbn.FILTER_CODES[obs.filter]

            obsList[i + 1] = (sdfStart, sdfStop, obsDur, obsSR)

        site = 'lwa1'
        if args.lwasv:
            site = 'lwasv'
        hdfData.fill_from_sdf(f, args.sdf, station=site)

    else:
        obsList[1] = (datetime.utcfromtimestamp(t1),
                      datetime(2222, 12, 31, 23, 59, 59), args.duration, srate)

        site = 'lwa1'
        if args.lwasv:
            site = 'lwasv'
        hdfData.fill_minimum(f, 1, 5, srate, station=site)

    if (not args.stokes):
        data_products = ['XX', 'YY']
    else:
        data_products = ['I', 'Q', 'U', 'V']

    for o in sorted(obsList.keys()):
        for t in range(len(antennas) // 2):
            hdfData.create_observation_set(
                f, o, t + 1, numpy.arange(LFFT, dtype=numpy.float64),
                int(round(obsList[o][2] / args.average)), data_products)

    f.attrs['FileGenerator'] = 'tbnWaterfall.py'
    f.attrs['InputData'] = os.path.basename(args.filename)

    # Create the various HDF group holders
    ds = {}
    for o in sorted(obsList.keys()):
        obs = hdfData.get_observation_set(f, o)

        ds['obs%i' % o] = obs
        ds['obs%i-time' % o] = hdfData.get_time(f, o)

        for t in range(len(antennas) // 2):
            ds['obs%i-freq%i' % (o, t + 1)] = hdfData.get_data_set(
                f, o, t + 1, 'freq')
            for p in data_products:
                ds["obs%i-%s%i" % (o, p, t + 1)] = hdfData.get_data_set(
                    f, o, t + 1, p)
            ds['obs%i-Saturation%i' % (o, t + 1)] = hdfData.get_data_set(
                f, o, t + 1, 'Saturation')

    # Load in the correct analysis function
    if (not args.stokes):
        process_data = process_data_to_linear
    else:
        process_data = process_data_to_stokes

    # Go!
    for o in sorted(obsList.keys()):
        try:
            process_data(idf,
                         antennas,
                         obsList[o][0],
                         obsList[o][2],
                         obsList[o][3],
                         args,
                         ds,
                         obsID=o,
                         clip1=clip1)
        except RuntimeError as e:
            print("Observation #%i: %s, abandoning this observation" %
                  (o, str(e)))

    # Save the output to a HDF5 file
    f.close()

    # Close out the data file
    idf.close()
Example #15
def get_magnetic_field(lat, lng, elev, mjd=None, ecef=False):
    """
    Given a geodetic location described by a latitude in degrees (North 
    positive), a longitude in degrees (West negative), an elevation 
    in meters and an MJD value, compute the Earth's magnetic field in that 
    location and return a three-element tuple of the magnetic field's 
    components in nT.  By default these are in topocentric coordinates of
    (North, East, Up).  To return values in ECEF, set the 'ecef' keyword to
    True.  If the MJD value is None, the current time is used.
    
    .. note::
        The convention used for the topocentric coordinates differs
        from what the IGRF uses in the sense that the zenith direction
        points up rather than down.
    """

    # Get the current time if mjd is None
    if mjd is None:
        mjd, mpm = datetime_to_mjdmpm(datetime.utcnow())
        mjd = mjd + mpm / 1000.0 / 3600.0 / 24.0

    # Convert the MJD to a decimal year.  This is a bit tricky
    ## Break the MJD into an integer MJD and an MPM in order to build a datetime instance
    mpm = int((mjd - int(mjd)) * 24.0 * 3600.0 * 1000.0)
    mjd0 = mjdmpm_to_datetime(int(mjd), mpm)
    ## Convert the datetime instance to January 1 of that year
    mjd0 = mjd0.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
    ## Figure out January 1 for the following year
    mjd1 = mjd0.replace(year=mjd0.year + 1)
    ## Figure out how long the year is in days
    diffDays = mjd1 - mjd0
    diffDays = diffDays.days + diffDays.seconds / 86400.0 + diffDays.microseconds / 1e6 / 86400.0
    ## Convert the January 1 date back to an MJD
    mjd0, mpm0 = datetime_to_mjdmpm(mjd0)
    mjd0 = mjd0 + mpm0 / 1000.0 / 3600.0 / 24.0
    year = (mjd1.year - 1) + (mjd - mjd0) / diffDays

    # Convert the geodetic position provided to a geocentric one for calculation
    ## Deal with the poles
    if 90.0 - lat < 0.001:
        xyz = numpy.array(
            geo_to_ecef(89.999 * numpy.pi / 180, lng * numpy.pi / 180, elev))
    elif 90.0 + lat < 0.001:
        xyz = numpy.array(
            geo_to_ecef(-89.999 * numpy.pi / 180, lng * numpy.pi / 180, elev))
    else:
        xyz = numpy.array(
            geo_to_ecef(lat * numpy.pi / 180, lng * numpy.pi / 180, elev))
    ## To geocentric
    r = numpy.sqrt((xyz**2).sum())
    lt = numpy.arcsin(xyz[2] / r)
    ln = numpy.arctan2(xyz[1], xyz[0])

    # Load in the coefficients
    try:
        coeffs = _ONLINE_CACHE['IGRF']
    except KeyError:
        filename = os.path.join(dataPath, 'igrf13coeffs.txt')
        _ONLINE_CACHE['IGRF'] = _load_igrf(filename)

        coeffs = _ONLINE_CACHE['IGRF']

    # Compute the coefficients for the epoch
    coeffs = _compute_igrf_coefficents(year, coeffs)

    # Compute the field strength in spherical coordinates
    Br, Bth, Bph = 0.0, 0.0, 0.0
    for n in coeffs['g'].keys():
        for m in range(0, n + 1):
            Br += (n + 1.0) * (_RADIUS_EARTH / r)**(n + 2) * _Snm(
                n, m) * coeffs['g'][n][m] * numpy.cos(m * ln) * _Pnm(
                    n, m, numpy.sin(lt))
            Br += (n + 1.0) * (_RADIUS_EARTH / r)**(n + 2) * _Snm(
                n, m) * coeffs['h'][n][m] * numpy.sin(m * ln) * _Pnm(
                    n, m, numpy.sin(lt))

            Bth -= (_RADIUS_EARTH / r)**(n + 2) * _Snm(
                n, m) * coeffs['g'][n][m] * numpy.cos(m * ln) * _dPnm(
                    n, m, numpy.sin(lt))
            Bth -= (_RADIUS_EARTH / r)**(n + 2) * _Snm(
                n, m) * coeffs['h'][n][m] * numpy.sin(m * ln) * _dPnm(
                    n, m, numpy.sin(lt))

            Bph += (_RADIUS_EARTH / r)**(n + 2) / numpy.cos(lt) * _Snm(
                n, m) * coeffs['g'][n][m] * m * numpy.sin(m * ln) * _Pnm(
                    n, m, numpy.sin(lt))
            Bph -= (_RADIUS_EARTH / r)**(n + 2) / numpy.cos(lt) * _Snm(
                n, m) * coeffs['h'][n][m] * m * numpy.cos(m * ln) * _Pnm(
                    n, m, numpy.sin(lt))
    ## And deal with NaNs
    if numpy.isnan(Br):
        Br = 0.0
    if numpy.isnan(Bth):
        Bth = 0.0
    if numpy.isnan(Bph):
        Bph = 0.0

    # Convert from spherical to ECEF
    Bx = Br * numpy.cos(lt) * numpy.cos(ln) + Bth * numpy.sin(lt) * numpy.cos(
        ln) - Bph * numpy.sin(ln)
    By = Br * numpy.cos(lt) * numpy.sin(ln) + Bth * numpy.sin(lt) * numpy.sin(
        ln) + Bph * numpy.cos(ln)
    Bz = Br * numpy.sin(lt) - Bth * numpy.cos(lt)

    # Are we done?
    if ecef:
        # For ECEF we don't need to do anything else
        outputField = Bx, By, Bz

    else:
        # Convert from ECEF to topocentric (geodetic)
        ## Update the coordinates for geodetic
        if 90.0 - lat < 0.001:
            lt = 89.999 * numpy.pi / 180.0
        elif 90.0 + lat < 0.001:
            lt = -89.999 * numpy.pi / 180.0
        else:
            lt = lat * numpy.pi / 180.0
        ln = lng * numpy.pi / 180.0

        ## Build the rotation matrix for ECEF to SEZ
        rot = numpy.array([[
            numpy.sin(lt) * numpy.cos(ln),
            numpy.sin(lt) * numpy.sin(ln), -numpy.cos(lt)
        ], [-numpy.sin(ln), numpy.cos(ln), 0],
                           [
                               numpy.cos(lt) * numpy.cos(ln),
                               numpy.cos(lt) * numpy.sin(ln),
                               numpy.sin(lt)
                           ]])

        ## Apply and extract
        sez = numpy.dot(rot, numpy.array([Bx, By, Bz]))
        Bn, Be, Bz = -sez[0], sez[1], sez[2]

        outputField = Bn, Be, Bz

    # Done
    return outputField
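A hedged usage sketch; the coordinates below are approximate LWA1 values included purely for illustration:

# Approximate LWA1 geodetic position (assumed values: degrees N, degrees E, meters)
lat, lng, elev = 34.070, -107.628, 2133.0
Bn, Be, Bu = get_magnetic_field(lat, lng, elev, mjd=56093)
print("North %.1f nT, East %.1f nT, Up %.1f nT" % (Bn, Be, Bu))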
Example #16
def _load_map(mjd, type='IGS'):
    """
    Given an MJD value, load the corresponding TEC map.  If the map is not
    available on disk, download it.
    """

    # Figure out which map to use
    if type.upper() == 'IGS':
        ## Cache entry name
        cacheName = 'TEC-IGS-%i' % mjd

        ## Download helper
        downloader = _download_igs

        ## Filename templates
        filenameTemplate = 'igsg%03i0.%02ii.Z'
        filenameAltTemplate = 'igrg%03i0.%02ii.Z'

    elif type.upper() == 'JPL':
        ## Cache entry name
        cacheName = 'TEC-JPL-%i' % mjd

        ## Download helper
        downloader = _download_jpl

        ## Filename templates
        filenameTemplate = 'jplg%03i0.%02ii.Z'
        filenameAltTemplate = 'jprg%03i0.%02ii.Z'

    elif type.upper() == 'UQR':
        ## Cache entry name
        cacheName = 'TEC-UQR-%i' % mjd

        ## Download helper
        downloader = _download_uqr

        ## Filename templates
        filenameTemplate = 'uqrg%03i0.%02ii.Z'
        filenameAltTemplate = 'uqrg%03i0.%02ii.Z'

    elif type.upper() == 'CODE':
        ## Cache entry name
        cacheName = 'TEC-CODE-%i' % mjd

        ## Download helper
        downloader = _download_code

        ## Filename templates
        filenameTemplate = 'codg%03i0.%02ii.Z'
        filenameAltTemplate = 'codg%03i0.%02ii.Z'

    elif type.upper() == 'USTEC':
        ## Cache entry name
        cacheName = 'TEC-USTEC-%i' % mjd

        ## Download helper
        downloader = _download_ustec

        ## Filename templates
        filenameTemplate = '%s_ustec.tar.gz'
        filenameAltTemplate = '%s_ustec.tar.gz'

    else:
        raise ValueError("Unknown data source '%s'" % type)

    try:
        # Is it already in the on-line cache?
        tecMap = _ONLINE_CACHE[cacheName]
    except KeyError:
        # Nope, we need to fetch it

        # Convert the MJD to a datetime instance so that we can pull out the year
        # and the day-of-year
        mpm = int((mjd - int(mjd)) * 24.0 * 3600.0 * 1000)
        dt = mjdmpm_to_datetime(int(mjd), mpm)

        if type.upper() == 'USTEC':
            # Pull out a YMD string
            dateStr = dt.strftime("%Y%m%d")

            # Figure out the filenames in order of preference.  We'd rather have
            # final values than rapid values
            filename = filenameTemplate % (dateStr)

            # Is the primary file in the disk cache?
            if filename not in _CACHE_DIR:
                ## Can we download it?
                status = downloader(mjd)

            else:
                ## Good we have the primary file
                pass

            # Parse it
            with _CACHE_DIR.open(filename, 'rb') as fh:
                _ONLINE_CACHE[cacheName] = _parse_ustec_map(fh)

        else:

            # Pull out the year and the day-of-year
            year = dt.year
            dayOfYear = int(dt.strftime('%j'), 10)

            # Figure out the filenames in order of preference.  We'd rather have
            # final values than rapid values
            filename = filenameTemplate % (dayOfYear, year % 100)
            filenameAlt = filenameAltTemplate % (dayOfYear, year % 100)

            # Is the primary file in the disk cache?
            if filename not in _CACHE_DIR:
                ## Can we download it?
                status = downloader(mjd, type='final')
                if not status:
                    ## Nope, now check for the secondary file on disk
                    if filenameAlt not in _CACHE_DIR:
                        ## Can we download it?
                        status = downloader(mjd, type='rapid')
                        if status:
                            ### Good, we have the secondary file
                            filename = filenameAlt
                        else:
                            ### Neither product could be downloaded
                            raise RuntimeError("Cannot download %s TEC data for MJD %i" % (type.upper(), int(mjd)))
                    else:
                        ### Good, we have the secondary file
                        filename = filenameAlt

            # Parse it
            with _CACHE_DIR.open(filename, 'rb') as fh:
                _ONLINE_CACHE[cacheName] = _parse_tec_map(fh)

        tecMap = _ONLINE_CACHE[cacheName]

    # Done
    return tecMap
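A minimal usage sketch for the loader above, assuming the enclosing function
is exposed as load_map(mjd, type=...) (the actual name is defined earlier in
this module, so 'load_map' here is an assumption) and that the cache and
download configuration is in place:

# Hedged sketch: 'load_map' is an assumed name for the function above
for product in ('IGS', 'JPL', 'CODE', 'USTEC'):
    try:
        tec_map = load_map(58000, type=product)
        print("Loaded the %s TEC map for MJD 58000" % product)
        break
    except (ValueError, RuntimeError) as e:
        # ValueError for an unknown source, RuntimeError for a failed download
        print("%s failed: %s" % (product, str(e)))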
Example #17
def main(args):
    # Parse the command line
    filenames = args.filename

    # Check if the first argument on the command line is a directory.  If so,
    # use what is in that directory
    if os.path.isdir(filenames[0]):
        filenames = [
            os.path.join(filenames[0], filename)
            for filename in os.listdir(filenames[0])
        ]
        filenames.sort()

    # Convert the filenames to absolute paths
    filenames = [os.path.abspath(filename) for filename in filenames]

    # Open the database connection to NRAO to find the antenna locations
    try:
        db = database('params')
    except Exception as e:
        sys.stderr.write("WARNING: %s" % str(e))
        sys.stderr.flush()
        db = None

    # Pass 1 - Get the LWA metadata so we know where we are pointed
    context = {
        'observer': 'Unknown',
        'project': 'Unknown',
        'session': None,
        'vlaref': None
    }
    setup = None
    sources = []
    metadata = {}
    lwasite = {}
    for filename in filenames:
        # Figure out what to do with the file
        ext = os.path.splitext(filename)[1]
        if ext == '.tgz':
            ## LWA Metadata
            try:
                ## Extract the SDF
                if len(sources) == 0:
                    try:
                        sdf = metabundle.get_sdf(filename)
                    except Exception:
                        sdf = metabundleADP.get_sdf(filename)

                    context['observer'] = sdf.observer.name
                    context['project'] = sdf.id
                    context['session'] = sdf.sessions[0].id

                    comments = sdf.project_office.sessions[0]
                    mtch = CORR_CHANNELS.search(comments)
                    if mtch is not None:
                        corr_channels = int(mtch.group('channels'), 10)
                    else:
                        corr_channels = None
                    mtch = CORR_INTTIME.search(comments)
                    if mtch is not None:
                        corr_inttime = float(mtch.group('inttime'))
                    else:
                        corr_inttime = None
                    mtch = CORR_BASIS.search(comments)
                    if mtch is not None:
                        corr_basis = mtch.group('basis')
                    else:
                        sys.stderr.write(
                            "WARNING: No output correlation polarization basis defined, assuming 'linear'.\n"
                        )
                        corr_basis = 'linear'
                    if corr_channels is not None and corr_inttime is not None:
                        setup = {
                            'channels': corr_channels,
                            'inttime': corr_inttime,
                            'basis': corr_basis
                        }
                    else:
                        sys.stderr.write(
                            "WARNING: No or incomplete correlation configuration defined, setting to be defined at correlation time.\n"
                        )

                    for o, obs in enumerate(sdf.sessions[0].observations):
                        if type(obs).__name__ == 'Solar':
                            name = 'Sun'
                            intent = 'target'
                            ra = None
                            dec = None
                        elif type(obs).__name__ == 'Jovian':
                            name = 'Jupiter'
                            intent = 'target'
                            ra = None
                            dec = None
                        else:
                            name = obs.target
                            intent = obs.name
                            ra = ephem.hours(str(obs.ra))
                            dec = ephem.degrees(str(obs.dec))
                        tStart = mjdmpm_to_datetime(obs.mjd, obs.mpm)
                        tStop = mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
                        sources.append({
                            'name': name,
                            'intent': intent,
                            'ra2000': ra,
                            'dec2000': dec,
                            'start': tStart,
                            'stop': tStop
                        })

                        ### Alternate phase centers
                        comments = sdf.project_office.observations[0][o]

                        alts = {}
                        for mtch in ALT_TARGET.finditer(comments):
                            alt_id = int(mtch.group('id'), 10)
                            alt_name = mtch.group('target')
                            try:
                                alts[alt_id]['name'] = alt_name
                            except KeyError:
                                alts[alt_id] = {
                                    'name': alt_name,
                                    'intent': 'dummy',
                                    'ra': None,
                                    'dec': None
                                }
                        for mtch in ALT_INTENT.finditer(comments):
                            alt_id = int(mtch.group('id'), 10)
                            alt_intent = mtch.group('intent')
                            try:
                                alts[alt_id]['intent'] = alt_intent
                            except KeyError:
                                alts[alt_id] = {
                                    'name': None,
                                    'intent': alt_intent,
                                    'ra': None,
                                    'dec': None
                                }
                        for mtch in ALT_RA.finditer(comments):
                            alt_id = int(mtch.group('id'), 10)
                            alt_ra = ephem.hours(mtch.group('ra'))
                            try:
                                alts[alt_id]['ra'] = alt_ra
                            except KeyError:
                                alts[alt_id] = {
                                    'name': None,
                                    'intent': 'dummy',
                                    'ra': alt_ra,
                                    'dec': None
                                }
                        for mtch in ALT_DEC.finditer(comments):
                            alt_id = int(mtch.group('id'), 10)
                            alt_dec = ephem.degrees(mtch.group('dec'))
                            try:
                                alts[alt_id]['dec'] = alt_dec
                            except KeyError:
                                alts[alt_id] = {
                                    'name': None,
                                    'intent': 'dummy',
                                    'ra': None,
                                    'dec': alt_dec
                                }
                        for alt_id in sorted(alts.keys()):
                            alt_name = alts[alt_id]['name']
                            alt_intent = alts[alt_id]['intent']
                            alt_ra = alts[alt_id]['ra']
                            alt_dec = alts[alt_id]['dec']
                            if alt_name is None or alt_ra is None or alt_dec is None:
                                sys.stderr.write(
                                    "WARNING: Incomplete alternate phase center %i, skipping.\n"
                                    % alt_id)
                            else:
                                sources.append({
                                    'name': alt_name,
                                    'intent': alt_intent,
                                    'ra2000': alt_ra,
                                    'dec2000': alt_dec,
                                    'start': tStart,
                                    'stop': tStop
                                })

                ## Extract the file information so that we can pair things together
                fileInfo = metabundle.get_session_metadata(filename)
                for obsID in fileInfo.keys():
                    metadata[fileInfo[obsID]['tag']] = filename

                ## Figure out LWA1 vs LWA-SV - default to LWA-SV unless the
                ## command script shows a DP subsystem
                site = 'LWA-SV'
                try:
                    cs = metabundle.get_command_script(filename)
                    for c in cs:
                        if c['subsystem_id'] == 'DP':
                            site = 'LWA1'
                            break
                        elif c['subsystem_id'] == 'ADP':
                            site = 'LWA-SV'
                            break
                except (RuntimeError, ValueError):
                    pass
                for obsID in fileInfo.keys():
                    lwasite[fileInfo[obsID]['tag']] = site

            except Exception as e:
                sys.stderr.write("ERROR reading metadata file: %s\n" % str(e))
                sys.stderr.flush()

    # Setup what we need to write out a configuration file
    corrConfig = {
        'context': context,
        'setup': setup,
        'source': {
            'name': '',
            'ra2000': '',
            'dec2000': ''
        },
        'inputs': []
    }

    metadata = {}
    for filename in filenames:
        #print("%s:" % os.path.basename(filename))

        # Skip over empty files
        if os.path.getsize(filename) == 0:
            continue

        # Open the file
        fh = open(filename, 'rb')

        # Figure out what to do with the file
        ext = os.path.splitext(filename)[1]
        if ext == '':
            ## DRX
            try:
                ## Get the site
                try:
                    sitename = lwasite[os.path.basename(filename)]
                except KeyError:
                    sitename = 'LWA1'

                ## Get the location so that we can set site-specific parameters
                if sitename == 'LWA1':
                    xyz = LWA1_ECEF
                    off = args.lwa1_offset
                elif sitename == 'LWA-SV':
                    xyz = LWASV_ECEF
                    off = args.lwasv_offset
                else:
                    raise RuntimeError("Unknown LWA site '%s'" % sitename)

                ## Move into the LWA1 coordinate system
                ### ECEF to LWA1
                rho = xyz - LWA1_ECEF
                sez = numpy.dot(LWA1_ROT, rho)
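                ### The rotation yields south-east-zenith (SEZ) coordinates;
                ### reordering and negating the former south axis converts
                ### them to east-north-zenith (ENZ)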
                enz = sez[[1, 0, 2]]  # pylint: disable=invalid-sequence-index
                enz[1] *= -1

                ## Read in the first few frames to get the start time
                frames = [drx.read_frame(fh) for i in range(1024)]
                streams = []
                freq1, freq2 = 0.0, 0.0
                for frame in frames:
                    beam, tune, pol = frame.id
                    if tune == 1:
                        freq1 = frame.central_freq
                    else:
                        freq2 = frame.central_freq
                    if (beam, tune, pol) not in streams:
                        streams.append((beam, tune, pol))
                tStart = frames[0].time.datetime
                tStartAlt = (frames[-1].time - 1023 // len(streams) * 4096 /
                             frames[-1].sample_rate).datetime
                tStartDiff = tStart - tStartAlt
                if abs(tStartDiff) > timedelta(microseconds=10000):
                    sys.stderr.write(
                        "WARNING: Stale data found at the start of '%s', ignoring\n"
                        % os.path.basename(filename))
                    sys.stderr.flush()
                    tStart = tStartAlt
                ### ^ Adjustment to the start time to deal with occasional problems
                ###   with stale data in the DR buffers at LWA-SV

                ## Read in the last few frames to find the end time
                fh.seek(os.path.getsize(filename) - 1024 * drx.FRAME_SIZE)
                backed = 0
                while backed < 2 * drx.FRAME_SIZE:
                    try:
                        drx.read_frame(fh)
                        fh.seek(-drx.FRAME_SIZE, 1)
                        break
                    except errors.SyncError:
                        backed += 1
                        fh.seek(-drx.FRAME_SIZE - 1, 1)
                for i in range(32):
                    try:
                        frame = drx.read_frame(fh)
                        beam, tune, _ = frame.id
                        if tune == 1:
                            freq1 = frame.central_freq
                        else:
                            freq2 = frame.central_freq
                    except errors.SyncError:
                        continue
                tStop = frame.time.datetime

                ## Save
                corrConfig['inputs'].append({
                    'file': filename,
                    'type': 'DRX',
                    'antenna': sitename,
                    'pols': 'X, Y',
                    'location': (enz[0], enz[1], enz[2]),
                    'clockoffset': (off, off),
                    'fileoffset': 0,
                    'beam': beam,
                    'tstart': tStart,
                    'tstop': tStop,
                    'freq': (freq1, freq2)
                })

            except Exception as e:
                sys.stderr.write("ERROR reading DRX file: %s\n" % str(e))
                sys.stderr.flush()

        elif ext == '.vdif':
            ## VDIF
            try:
                ## Read in the GUPPI header
                header = vdif.read_guppi_header(fh)

                ## Read in the first frame
                vdif.FRAME_SIZE = vdif.get_frame_size(fh)
                frame = vdif.read_frame(fh)
                antID = frame.id[0] - 12300
                tStart = frame.time.datetime
                nThread = vdif.get_thread_count(fh)

                ## Jump to near the end of the file and read frames until we
                ## run out of data in order to find the end time
                nJump = int(os.path.getsize(filename) / vdif.FRAME_SIZE)
                nJump -= 30
                fh.seek(nJump * vdif.FRAME_SIZE, 1)
                tStop = tStart
                while True:
                    try:
                        frame = vdif.read_frame(fh)
                        tStop = frame.time.datetime
                    except Exception:
                        break

                ## Find the antenna location
                pad, edate = db.get_pad('EA%02i' % antID, tStart)
                x, y, z = db.get_xyz(pad, tStart)
                #print("  Pad: %s" % pad)
                #print("  VLA relative XYZ: %.3f, %.3f, %.3f" % (x,y,z))

                ## Move into the LWA1 coordinate system
                ### relative to ECEF
                xyz = numpy.array([x, y, z])
                xyz += VLA_ECEF
                ### ECEF to LWA1
                rho = xyz - LWA1_ECEF
                sez = numpy.dot(LWA1_ROT, rho)
                enz = sez[[1, 0, 2]]  # pylint: disable=invalid-sequence-index
                enz[1] *= -1

                ## Set an apparent position if WiDAR is already applying a delay model
                apparent_enz = (None, None, None)
                if args.no_vla_delay_model:
                    apparent_xyz = VLA_ECEF
                    apparent_rho = apparent_xyz - LWA1_ECEF
                    apparent_sez = numpy.dot(LWA1_ROT, apparent_rho)
                    apparent_enz = apparent_sez[[1, 0, 2]]  # pylint: disable=invalid-sequence-index
                    apparent_enz[1] *= -1

                ## VLA time offset
                off = args.vla_offset

                ## Save
                corrConfig['context']['observer'] = header['OBSERVER']
                try:
                    corrConfig['context']['project'] = header['BASENAME'].split('_')[0]
                    corrConfig['context']['session'] = header['BASENAME'].split('_')[1].replace('sb', '')
                except IndexError:
                    corrConfig['context']['project'] = header['BASENAME'].split('.')[0]
                    corrConfig['context']['session'] = header['BASENAME'].split('.')[1].replace('sb', '')
                corrConfig['context']['vlaref'] = re.sub(
                    r'\.[0-9]+\.[0-9]+\.[AB][CD]-.*', '', header['BASENAME'])
                corrConfig['source']['name'] = header['SRC_NAME']
                corrConfig['source']['intent'] = 'target'
                corrConfig['source']['ra2000'] = header['RA_STR']
                corrConfig['source']['dec2000'] = header['DEC_STR']
                corrConfig['inputs'].append({
                    'file': filename,
                    'type': 'VDIF',
                    'antenna': 'EA%02i' % antID,
                    'pols': 'Y, X',
                    'location': (enz[0], enz[1], enz[2]),
                    'apparent_location': (apparent_enz[0], apparent_enz[1], apparent_enz[2]),
                    'clockoffset': (off, off),
                    'fileoffset': 0,
                    'pad': pad,
                    'tstart': tStart,
                    'tstop': tStop,
                    'freq': header['OBSFREQ']
                })

            except Exception as e:
                sys.stderr.write("ERROR reading VDIF file: %s\n" % str(e))
                sys.stderr.flush()

        elif ext == '.tgz':
            ## LWA Metadata
            try:
                ## Extract the file information so that we can pair things together
                fileInfo = metabundle.get_session_metadata(filename)
                for obsID in fileInfo.keys():
                    metadata[fileInfo[obsID]['tag']] = filename

            except Exception as e:
                sys.stderr.write("ERROR reading metadata file: %s\n" % str(e))
                sys.stderr.flush()

        # Done
        fh.close()

    # Close out the connection to NRAO
    try:
        db.close()
    except AttributeError:
        pass

    # Choose a VDIF reference file, if there is one, and mark whether or
    # not DRX files were found
    vdifRefFile = None
    isDRX = False
    for cinp in corrConfig['inputs']:
        if cinp['type'] == 'VDIF':
            if vdifRefFile is None:
                vdifRefFile = cinp
        elif cinp['type'] == 'DRX':
            isDRX = True

    # Purge DRX files that don't make sense, keeping track of whether any
    # DRX data overlapped with the VDIF reference and whether LWA-SV was used
    toPurge = []
    drxFound = False
    lwasvFound = False
    for cinp in corrConfig['inputs']:
        ### Sort out multiple DRX files - this only works if we have only one LWA station
        if cinp['type'] == 'DRX':
            if vdifRefFile is not None:
                l0, l1 = cinp['tstart'], cinp['tstop']
                v0, v1 = vdifRefFile['tstart'], vdifRefFile['tstop']
                ve = (v1 - v0).total_seconds()
                overlapWithVDIF = (l0 <= v0 < l1) or (v0 <= l0 < v1)
                lvo = (min([v1, l1]) - max([v0, l0])).total_seconds()
                if not overlapWithVDIF or lvo < 0.25 * ve:
                    toPurge.append(cinp)
                else:
                    drxFound = True
            if cinp['antenna'] == 'LWA-SV':
                lwasvFound = True
    for cinp in toPurge:
        corrConfig['inputs'].remove(cinp)

    # Sort the inputs based on the antenna name - this puts LWA1 first,
    # LWA-SV second, and the VLA at the end in 'EA' antenna order, i.e.,
    # EA01, EA02, etc.
    corrConfig['inputs'].sort(key=lambda x: 0 if x['antenna'] == 'LWA1' else (
        1 if x['antenna'] == 'LWA-SV' else 1 + int(x['antenna'][2:], 10)))

    # VDIF/DRX warning check/report
    if vdifRefFile is not None and isDRX and not drxFound:
        sys.stderr.write(
            "WARNING: DRX files provided but none overlapped with VDIF data\n")

    # Duplicate antenna check
    antCounts = {}
    for cinp in corrConfig['inputs']:
        try:
            antCounts[cinp['antenna']] += 1
        except KeyError:
            antCounts[cinp['antenna']] = 1
    for ant in antCounts.keys():
        if antCounts[ant] != 1:
            sys.stderr.write("WARNING: Antenna '%s' is defined %i times" %
                             (ant, antCounts[ant]))

    # Update the file offsets to get things lined up better
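    # by skipping each input forward to the latest start time among them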
    tMax = max([cinp['tstart'] for cinp in corrConfig['inputs']])
    for cinp in corrConfig['inputs']:
        diff = tMax - cinp['tstart']
        offset = diff.days * 86400 + diff.seconds + diff.microseconds / 1e6
        cinp['fileoffset'] = max([0, offset])

    # Reconcile the source lists for when we have eLWA data.  This is needed so
    # that we use the source information contained in the VDIF files rather than
    # the stub information contained in the SDFs
    if len(sources) <= 1:
        if corrConfig['source']['name'] != '':
            ## Update the source information with what comes from the VLA
            try:
                sources[0] = corrConfig['source']
            except IndexError:
                sources.append(corrConfig['source'])
    # Update the dwell time using the minimum on-source time for all inputs if
    # there is only one source, i.e., for full eLWA runs
    if len(sources) == 1:
        sources[0]['start'] = max(
            [cinp['tstart'] for cinp in corrConfig['inputs']])
        sources[0]['stop'] = min(
            [cinp['tstop'] for cinp in corrConfig['inputs']])

    # Render the configuration
    startRef = sources[0]['start']
    s = 0
    for source in sources:
        startOffset = source['start'] - startRef
        startOffset = startOffset.total_seconds()

        dur = source['stop'] - source['start']
        dur = dur.total_seconds()

        ## Skip over dummy scans and scans that start after the files end
        if source['intent'] in (None, 'dummy'):
            continue
        if source['start'] > max(
            [cinp['tstop'] for cinp in corrConfig['inputs']]):
            print(
                "Skipping scan of %s which starts at %s, %.3f s after the data end"
                % (source['name'], source['start'],
                   (source['start'] -
                    max([cinp['tstop']
                         for cinp in corrConfig['inputs']])).total_seconds()))
            continue

        ## Small correction for the first scan to compensate for stale data at LWA-SV
        if lwasvFound and s == 0:
            startOffset += 10.0
            dur -= 10.0

        ## Skip over scans that are too short
        if dur < args.minimum_scan_length:
            continue

        ## Setup
        if args.output is None:
            fh = sys.stdout
        else:
            outname = args.output
            if len(sources) > 1:
                outname += str(s + 1)
            fh = open(outname, 'w')

        try:
            repo = git.Repo(os.path.dirname(os.path.abspath(__file__)))
            try:
                branch = repo.active_branch.name
                hexsha = repo.active_branch.commit.hexsha
            except TypeError:
                branch = '<detached>'
                hexsha = repo.head.commit.hexsha
            shortsha = hexsha[-7:]
            dirty = ' (dirty)' if repo.is_dirty() else ''
        except git.exc.GitError:
            branch = 'unknown'
            hexsha = 'unknown'
            shortsha = 'unknown'
            dirty = ''

        ## Preamble
        fh.write("# Created\n")
        fh.write("#  on %s\n" % datetime.now())
        fh.write("#  using %s, revision %s.%s%s\n" %
                 (os.path.basename(__file__), branch, shortsha, dirty))
        fh.write("\n")
        ## Observation context
        fh.write("Context\n")
        fh.write("  Observer  %s\n" % corrConfig['context']['observer'])
        fh.write("  Project   %s\n" % corrConfig['context']['project'])
        if corrConfig['context']['session'] is not None:
            fh.write("  Session   %s\n" % corrConfig['context']['session'])
        if corrConfig['context']['vlaref'] is not None:
            fh.write("  VLARef    %s\n" % corrConfig['context']['vlaref'])
        fh.write("EndContext\n")
        fh.write("\n")
        ## Configuration, if present
        if corrConfig['setup'] is not None:
            fh.write("Configuration\n")
            fh.write("  Channels     %i\n" % corrConfig['setup']['channels'])
            fh.write("  IntTime      %.3f\n" % corrConfig['setup']['inttime'])
            fh.write("  PolBasis     %s\n" % corrConfig['setup']['basis'])
            fh.write("EndConfiguration\n")
            fh.write("\n")
        ## Source
        fh.write("Source\n")
        fh.write("# Observation start is %s\n" % source['start'])
        fh.write("# Duration is %s\n" % (source['stop'] - source['start'], ))
        fh.write("  Name     %s\n" % source['name'])
        fh.write("  Intent   %s\n" % source['intent'].lower())
        if source['name'] not in ('Sun', 'Jupiter'):
            fh.write("  RA2000   %s\n" % source['ra2000'])
            fh.write("  Dec2000  %s\n" % source['dec2000'])
        fh.write("  Duration %.3f\n" % dur)
        fh.write("SourceDone\n")
        fh.write("\n")
        ## Input files
        for cinp in corrConfig['inputs']:
            fh.write("Input\n")
            fh.write("# Start time is %s\n" % cinp['tstart'])
            fh.write("# Stop time is %s\n" % cinp['tstop'])
            try:
                fh.write("# Beam is %i\n" % cinp['beam'])
            except KeyError:
                pass
            try:
                fh.write("# VLA pad is %s\n" % cinp['pad'])
            except KeyError:
                pass
            try:
                fh.write("# Frequency tuning 1 is %.3f Hz\n" % cinp['freq'][0])
                fh.write("# Frequency tuning 2 is %.3f Hz\n" % cinp['freq'][1])
            except TypeError:
                fh.write("# Frequency tuning is %.3f Hz\n" % cinp['freq'])
            fh.write("  File             %s\n" % cinp['file'])
            try:
                metaname = metadata[os.path.basename(cinp['file'])]
                fh.write("  MetaData         %s\n" % metaname)
            except KeyError:
                if cinp['type'] == 'DRX':
                    sys.stderr.write(
                        "WARNING: No metadata found for '%s', source %i\n" %
                        (os.path.basename(cinp['file']), s + 1))
                    sys.stderr.flush()
            fh.write("  Type             %s\n" % cinp['type'])
            fh.write("  Antenna          %s\n" % cinp['antenna'])
            fh.write("  Pols             %s\n" % cinp['pols'])
            fh.write("  Location         %.6f, %.6f, %.6f\n" %
                     cinp['location'])
            try:
                if cinp['apparent_location'][0] is not None:
                    fh.write("  ApparentLocation %.6f, %.6f, %.6f\n" %
                             cinp['apparent_location'])
            except KeyError:
                pass
            fh.write("  ClockOffset      %s, %s\n" % cinp['clockoffset'])
            fh.write("  FileOffset       %.3f\n" %
                     (startOffset + cinp['fileoffset'], ))
            fh.write("InputDone\n")
            fh.write("\n")
        if fh != sys.stdout:
            fh.close()

        # Increment the source/file counter
        s += 1
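For reference, each file rendered by the loop above follows the stanza layout
below; the values shown are illustrative placeholders only, not real output,
and the script name in the header is elided:

# Created
#  on 2020-01-01 12:34:56.789012
#  using <this script>, revision master.0abc123
Context
  Observer  Unknown
  Project   Unknown
  Session   1
EndContext

Configuration
  Channels     256
  IntTime      1.000
  PolBasis     linear
EndConfiguration

Source
  Name     TargetName
  Intent   target
  RA2000   12:34:56.78
  Dec2000  +12:34:56.7
  Duration 3600.000
SourceDone

Input
  File             /path/to/drx_file
  Type             DRX
  Antenna          LWA1
  Pols             X, Y
  Location         0.000000, 0.000000, 0.000000
  ClockOffset      0.0, 0.0
  FileOffset       0.000
InputDone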
Example #18
def main(args):
    # Loop over input .pims files
    for filename in args.filename:
        print("Working on '%s'..." % os.path.basename(filename))

        ## Open the image database
        try:
            db = PasiImageDB(filename, mode='r')
        except Exception as e:
            print("ERROR: %s" % str(e))
            continue

        ## Loop over the images contained in it
        fitsCounter = 0
        for i, (header, data, spec) in enumerate(db):
            if args.verbose:
                print("  working on integration #%i" % (i + 1))

            ## Reverse the axis order so we can get it right in the FITS file
            data = numpy.transpose(data, [0, 2, 1])

            ## Save the image size for later
            imSize = data.shape[-1]

            ## Zero outside of the horizon to avoid problems
            pScale = header['xPixelSize']
            sRad = 360.0 / pScale / numpy.pi / 2
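            # (sRad is the horizon radius in pixels: the horizon of a SIN
            #  projection lies 180/pi degrees from zenith in intermediate
            #  coordinates, and pScale is in degrees per pixel)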
            x = numpy.arange(data.shape[-2]) - 0.5
            y = numpy.arange(data.shape[-1]) - 0.5
            x, y = numpy.meshgrid(x, y)
            invalid = numpy.where(((x - imSize / 2.0)**2 +
                                   (y - imSize / 2.0)**2) > (sRad**2))
            data[:, invalid[0], invalid[1]] = 0.0
            ext = imSize / (2 * sRad)

            ## Convert the start MJD into a datetime instance and then use
            ## that to come up with a stop time
            mjd = int(header['startTime'])
            mpm = int((header['startTime'] - mjd) * 86400.0 * 1000.0)
            tInt = header['intLen'] * 86400.0
            dateObs = mjdmpm_to_datetime(mjd, mpm)
            dateEnd = dateObs + timedelta(seconds=int(tInt),
                                          microseconds=int(
                                              (tInt - int(tInt)) * 1000000))
            if args.verbose:
                print("    start time: %s" % dateObs)
                print("    end time: %s" % dateEnd)
                print("    integration time: %.3f s" % tInt)
                print("    frequency: %.3f MHz" % header['freq'])

            ## Create the FITS HDU and fill in the header information
            hdu = astrofits.PrimaryHDU(data=data)
            hdu.header['TELESCOP'] = 'LWA1'
            ### Date and time
            hdu.header['DATE-OBS'] = dateObs.strftime("%Y-%m-%dT%H:%M:%S")
            hdu.header['END_UTC'] = dateEnd.strftime("%Y-%m-%dT%H:%M:%S")
            hdu.header['EXPTIME'] = tInt
            ### Coordinates - sky
            hdu.header['CTYPE1'] = 'RA---SIN'
            hdu.header['CRPIX1'] = imSize / 2 + 1 + 0.5 * ((imSize + 1) % 2)
            hdu.header['CDELT1'] = -360.0 / (2 * sRad) / numpy.pi
            hdu.header['CRVAL1'] = header['zenithRA']
            hdu.header['CUNIT1'] = 'deg'
            hdu.header['CTYPE2'] = 'DEC--SIN'
            hdu.header['CRPIX2'] = imSize / 2 + 1 + 0.5 * ((imSize + 1) % 2)
            hdu.header['CDELT2'] = 360.0 / (2 * sRad) / numpy.pi
            hdu.header['CRVAL2'] = header['zenithDec']
            hdu.header['CUNIT2'] = 'deg'
            ### Coordinates - Stokes parameters
            hdu.header['CTYPE3'] = 'STOKES'
            hdu.header['CRPIX3'] = 1
            hdu.header['CDELT3'] = 1
            hdu.header['CRVAL3'] = 1
            hdu.header['LONPOLE'] = 180.0
            hdu.header['LATPOLE'] = 90.0
            ### LWA1 approximate beam size
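            ### (about 2.2 degrees at 74 MHz, scaling as 1/frequency)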
            beamSize = 2.2 * 74e6 / header['freq']
            hdu.header['BMAJ'] = beamSize / header['xPixelSize']
            hdu.header['BMIN'] = beamSize / header['xPixelSize']
            hdu.header['BPA'] = 0.0
            ### Frequency
            hdu.header['RESTFREQ'] = header['freq']

            ## Write it to disk
            outName = "pasi_%.3fMHz_%s.fits" % (
                header['freq'] / 1e6, dateObs.strftime("%Y-%m-%dT%H-%M-%S"))
            hdulist = astrofits.HDUList([
                hdu,
            ])
            hdulist.writeto(outName, overwrite=args.force)

            ## Update the counter
            fitsCounter += 1

        ## Done with this collection
        db.close()

        ## Report
        print("-> wrote %i FITS files" % fitsCounter)