Code example #1
File: UVW.py Project: nodarai/acdc
    def addPhasePerAntenna(self,phase,antennaId,dataType="DATA"):
        "Add a phase (degree) per antennaId. Careful with the order !"
        
        tb.open(self.visName,nomodify=False)
        
        ant1 = tb.getcol("ANTENNA1")
        ant2 = tb.getcol("ANTENNA2")
        
        data = tb.getcol(dataType)

        phase = DEGREE2RAD*phase

        print "Phase rotation: %f"%(phase)

        nDim = data.shape
        
        for i in range(nDim[0]):
            for j in range(nDim[1]):
                for k in range(nDim[2]):
                    antenna1 = ant1[k]
                    antenna2 = ant2[k]
                    
                    
                    if antenna1 == antennaId:
                        value = data[i][j][k]
                        real = value.real
                        image = value.imag
                        cosRot = math.cos(phase)
                        sinRot = math.sin(phase)
                        
                        xRot = cosRot*real-sinRot*image
                        yRot = sinRot*real+cosRot*image
                        
                        data[i][j][k] = complex(xRot,yRot)
                        
                        # print "Amplitude:%f"%(math.sqrt(xRot*xRot+yRot*yRot))
 
                        
                    if antenna2 == antennaId:
                        value = data[i][j][k]
                        real = value.real
                        image = value.imag
                        cosRot = math.cos(-phase)
                        sinRot = math.sin(-phase)
                        
                        xRot = cosRot*real-sinRot*image
                        yRot = sinRot*real+cosRot*image
                        
                        data[i][j][k] = complex(xRot,yRot)
                        # print "Amplitude:%f"%(math.sqrt(xRot*xRot+yRot*yRot))
                        
                    
        tb.putcol(dataType,data)
        tb.close()
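For reference, the per-sample rotation in the triple loop above is just multiplication by a unit phasor, so the same update can be done in one vectorized step. A minimal sketch, not from the original source, assuming numpy is imported as np and phase is already in radians:

# hypothetical vectorized equivalent of the triple loop
rot = np.exp(1j * phase)
data[:, :, ant1 == antennaId] *= rot           # rows where the first antenna matches
data[:, :, ant2 == antennaId] *= np.conj(rot)  # rows where the second antenna matches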
Code example #2
def goodenough_field_solutions(tablename,
                               minsnr=5,
                               maxphasenoise=np.pi / 4.,
                               pols=[0]):
    """
    After an initial self-calibration run, determine which fields have good
    enough solutions.  This only inspects the *phase* component of the
    solutions.

    Parameters
    ----------
    tablename : str
        The name of the calibration table (e.g., phase.cal)
    minsnr : float
        The minimum *average* signal to noise ratio for a given field
    maxphasenoise : float
        The maximum average phase noise permissible for a given field in
        radians
    pols : list
        The list of polarizations to include in the heuristics

    Returns
    -------
    An array of field IDs.  This will need to be converted to a list of strings
    for use in CASA tasks
    """
    tb.open(tablename)
    solns = tb.getcol('CPARAM')
    fields = tb.getcol('FIELD_ID')
    snr = tb.getcol('SNR')
    tb.close()

    all_angles = []
    all_snrs = []

    ufields = np.unique(fields)

    for field in ufields:
        sel = fields == field
        angles = np.angle(solns[:, :, sel])
        all_angles.append(angles)
        all_snrs.append(snr[:, :, sel])

    all_angles = np.array(all_angles)
    all_snrs = np.array(all_snrs)
    # not sure what 2nd column of these is...

    # first is mean across pols, then mean across all data points
    good_enough = (
        (all_snrs[:, pols, 0, :].mean(axis=1).mean(axis=1) > minsnr) &
        (all_angles[:, pols, 0, :].std(axis=2).mean(axis=1) < maxphasenoise))

    return ufields[good_enough]
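As the docstring notes, the returned field IDs must be converted to strings before being passed to CASA tasks. A hedged usage sketch (the table name is hypothetical):

okfields = goodenough_field_solutions('phase.cal', minsnr=3)
field_str = ','.join([str(f) for f in okfields])   # e.g. '0,2,5' for a CASA 'field' argument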
Code example #3
File: pixelmask2cleanmask.py Project: keflavich/casa
def pixelmask2cleanmask(imagename='',
                        maskname='mask0',
                        maskimage='',
                        usemasked=False):
    """
    convert pixel(T/F) mask (in a CASA image) to a mask image (1/0)
    used for clean
    imagename - input imagename that contain a mask to be used
    maskname - mask name in the image (default: mask0)
    maskimage - output mask image name
    usemasked - if True use masked region as a valid region
    """
    ia.open(imagename)
    masks = ia.maskhandler('get')
    ia.close()

    inmaskname = ''
    if type(masks) != list:
        masks = [masks]
    for msk in masks:
        if maskname == msk:
            inmaskname = msk
            break
    if inmaskname == '':
        raise Exception, "mask %s does not exist. Available masks are: %s" % (
            maskname, masks)

    tb.open(imagename + '/' + maskname)
    dat0 = tb.getcol('PagedArray')
    tb.close()

    #os.system('cp -r %s %s' % (imagename, maskimage))
    shutil.copytree(imagename, maskimage)
    ia.open(maskimage)
    # to unset mask
    ia.maskhandler('set', [''])
    # make all valid
    if (usemasked):
        ia.set(1)
    else:
        ia.set(0)
    ia.close()
    #
    tb.open(maskimage, nomodify=False)
    imd = tb.getcol('map')
    # maybe shape check here
    #by default use True part of bool mask
    masked = 1
    if (usemasked): masked = 0
    imd[dat0] = masked
    tb.putcol('map', imd)
    tb.close()
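A hedged usage sketch (image names are hypothetical), producing a 1/0 clean mask from the internal T/F mask of a CASA image:

pixelmask2cleanmask(imagename='target.im', maskname='mask0',
                    maskimage='target_clean.mask', usemasked=False)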
Code example #4
def goodenough_field_solutions(tablename,
                               minsnr=5,
                               maxphasenoise=np.pi / 4.,
                               pols=[0]):
    """
    After an initial self-calibration run, determine which fields have good
    enough solutions.  This only inspects the *phase* component of the
    solutions.

    Parameters
    ----------
    tablename : str
        The name of the calibration table (e.g., phase.cal)
    minsnr : float
        The minimum *average* signal to noise ratio for a given field
    maxphasenoise : float
        The maximum average phase noise permissible for a given field in
        radians
    pols : list
        The list of polarizations to include in the heuristics

    Returns
    -------
    An array of field IDs.  This will need to be converted to a list of strings
    for use in CASA tasks
    """
    tb.open(tablename)
    solns = tb.getcol('CPARAM')
    fields = tb.getcol('FIELD_ID')
    snr = tb.getcol('SNR')
    tb.close()

    okfields = []
    not_ok_fields = []

    ufields = np.unique(fields)

    for field in ufields:
        sel = fields == field
        angles = np.angle(solns[:, :, sel])
        field_ok = (angles.std() < maxphasenoise) & (snr[:, :, sel].mean() >
                                                     minsnr)
        if field_ok:
            okfields.append(field)
        else:
            not_ok_fields.append(field)

    return okfields, not_ok_fields
Code example #5
File: applycals.py Project: caseyjlaw/misc
    def parseflux(self, gainfile):
        """Takes CASA cal table and scales self.gain to it per spw,pol
        """

        tb.open(gainfile)
        mjd = tb.getcol('TIME')/(24*3600)     # mjd days, as for telcal
        spw = tb.getcol('SPECTRAL_WINDOW_ID')
        antnum = tb.getcol('ANTENNA1')
        gain = tb.getcol('CPARAM')    # dimensions of (npol, 1?, ntimes*nants)
        flagged = tb.getcol('FLAG')
        tb.close()
                   
        nants = len(n.unique(antnum))
        nspw = len(n.unique(spw))
        spwlist = n.unique(spw)
        npol = len(gain)

        # merge times less than 2s
        nsol = 0
        newmjd = [n.unique(mjd)[0]]
        for i in range(1, len(n.unique(mjd))):
            if 24*3600*(n.unique(mjd)[i] - n.unique(mjd)[i-1]) < 2.:
                print 'Flux solution at %.5f closer than 2s to previous. Skipping.' % (n.unique(mjd)[i])
                continue
            else:
                newmjd.append(n.unique(mjd)[i])
        
        uniquemjd = n.array(newmjd)
        nsol = len(uniquemjd)

        print 'Parsed flux table solutions for %d solutions, %d ants, %d spw, and %d pols' % (nsol, nants, nspw, npol)

        flux = n.zeros( (nsol, nants, nspw, npol), dtype='complex' )
        flags = n.zeros( (nsol, nants, nspw, npol), dtype='bool' )   # mask must be boolean
        for sol in range(nsol):
            for ant in range(nants):
                for spw in range(nspw):
                    for pol in range(npol):
                        flux[sol, ant, spw, pol] = gain[pol,0,spw*nsol*nants+sol*nants+ant]
                        flags[sol, ant, spw, pol] = flagged[pol,0,spw*nsol*nants+sol*nants+ant]
        flux = n.ma.masked_array(flux, flags)
        ampref = n.abs(flux).mean(axis=1)
        scale = ampref/n.abs(self.gain).mean(axis=1)
        self.gain = self.gain * scale[:,None,:,:]
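The flat index spw*nsol*nants + sol*nants + ant assumes the table rows are ordered with antenna varying fastest, then solution time, then spectral window. Under that assumption (and it is only an assumption about this table's layout), the triple loop is equivalent to a single reshape; a sketch for one hypothetical polarization index pol:

# (nspw, nsol, nants) -> (nsol, nants, nspw), matching flux[sol, ant, spw, pol]
flux_pol = gain[pol, 0, :].reshape(nspw, nsol, nants).transpose(1, 2, 0)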
Code example #6
File: band.py Project: plaplant/paperdata
def bandpass_normalize(bandpass_table, bandpass_table_inv):
    shutil.copytree(bandpass_table, bandpass_table_inv)
    tb.open(bandpass_table_inv, nomodify=False)
    gain = tb.getcol('CPARAM')
    gain_norm = gain.copy()   # work on a copy so the original gains are not modified in place

    # for each antenna and each channel of the bandpass, divide out the modulus of the complex gain
    for i in range(0, gain.shape[2]):
        for j in range(0, gain.shape[1]):
            a = gain[0, j, i]

            # if the real part of the antenna gain is set to 1, that antenna and/or channel is flagged, so skip it
            if a.real != 1:
                gain_norm[0, j, i] = gain_norm[0, j, i] / abs(gain_norm[0, j, i]) 
                gain_norm[1, j, i] = gain_norm[1, j, i] / abs(gain_norm[1, j, i])
    #           print abs(gain_norm[i,j,0]),abs(gain[i,j,0]),i,j

    # put back the normalized bandpass
    tb.putcol('CPARAM', gain_norm)
    tb.close()
    tb.done()
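A hedged usage sketch (table names are hypothetical); the normalized copy keeps the bandpass phase structure while flattening its amplitude:

bandpass_normalize('cal.bandpass', 'cal.bandpass.norm')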
Code example #7
def flag_extreme_amplitudes(tablename, maxpctchange=50, pols=[0], channels=[0]):
    """
    Flag out all gain amplitudes with >``maxpctchange``% change (e.g., for the
    default 50%, flag everything outside the range 0.5 < G < 1.5).  This is a
    *very simple* cut, but it cannot be easily applied with existing tools
    since it is cutting on the value of the amplitude correction, not on any
    particular normal data selection type.  It is still highly advisable to
    plot the amp vs snr or similar diagnostics after running this to make sure
    you understand what's going on.  For example, after I ran this on a data
    set, I discovered that one antenna had high gain corrections even in the
    high SNR regime, which probably indicates a problem with that antenna.

    Parameters
    ----------
    maxpctchange : float
        The maximum percent change permitted in an amplitude
    pols : list
        The list of polarizations to include in the heuristics
    channels : list
        The list of channels to include in the heuristics

    Returns
    -------
    None
    """

    tb.open(tablename)
    solns = tb.getcol('CPARAM')
    snr = tb.getcol('SNR')
    # true flag = flagged out, bad data
    flags = tb.getcol('FLAG')
    tb.close()

    amp = np.abs(solns)
    maxfrac = maxpctchange / 100.

    bad = ((amp[pols, channels] > (1+maxfrac)) |
           (amp[pols, channels] < (1-maxfrac)))

    bad_snr = snr[pols, channels, :][bad]

    print("Found {0} bad amplitudes with mean snr={1}".format(bad.sum(), bad_snr.mean()))
    print("Total flags in tb.flag: {0}".format(flags.sum()))

    flags[pols, channels, :] = bad | flags[pols, channels, :]
    assert all(flags[pols, channels, :][bad]), "Failed to modify array"

    tb.open(tablename, nomodify=False)
    tb.putcol(columnname='FLAG', value=flags)
    tb.flush()
    tb.close()

    tb.open(tablename, nomodify=True)
    flags = tb.getcol('FLAG')
    print("Total flags in tb.flag after: {0}".format(flags.sum()))

    assert all(flags[pols, channels, :][bad]), "Failed to modify table"

    tb.close()
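A hedged usage sketch (table name hypothetical), flagging gain amplitudes outside 0.7 < |G| < 1.3:

flag_extreme_amplitudes('amp_selfcal.gcal', maxpctchange=30)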
Code example #8
File: mstools.py Project: seanandrews/p484
def LSRKvel_to_chan(msfile, field, spw, restfreq, LSRKvelocity):
    """
    Identifies the channel(s) corresponding to input LSRK velocities. 
    Useful for choosing which channels to split out or flag if a line is expected to be present

    Parameters:
    msfile: Name of measurement set (string)
    field: Field name (string)
    spw: Spectral window number (int)
    restfreq: Rest frequency in Hz (float)
    LSRKvelocity: input velocity in LSRK frame in km/s (float or array of floats)

    Returns: 
    Channel number most closely corresponding to input LSRK velocity 
    """
    tb.open(msfile + "/SPECTRAL_WINDOW")
    chanfreqs = tb.getcol("CHAN_FREQ", startrow=0, nrow=1)
    tb.close()
    tb.open(msfile + "/FIELD")
    fieldnames = tb.getcol("NAME")
    tb.close()
    nchan = len(chanfreqs)
    ms.open(msfile)
    lsrkfreqs = ms.cvelfreqs(spwids=[spw],
                             fieldids=np.where(fieldnames == field)[0][0],
                             mode='channel',
                             nchan=nchan,
                             start=0,
                             outframe='LSRK')
    ms.close()
    chanvelocities = (
        restfreq - lsrkfreqs
    ) / restfreq * cc / 1.e3  #converted to LSRK velocities in km/s
    if type(LSRKvelocity) == np.ndarray:
        outchans = np.zeros_like(LSRKvelocity)
        for i in range(len(LSRKvelocity)):
            outchans[i] = np.argmin(np.abs(chanvelocities - LSRKvelocity[i]))
        return outchans
    else:
        return np.argmin(np.abs(chanvelocities - LSRKvelocity))
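A hedged usage sketch (measurement set, field, and line parameters are hypothetical; assumes cc is the speed of light in m/s, as used above), selecting channels spanning +/-5 km/s around a CO J=2-1 line at 230.538 GHz:

vels = np.arange(-5., 5.5, 0.5)                    # km/s, LSRK
chans = LSRKvel_to_chan('target.ms', 'MyField', 0, 230.538e9, vels)
spw_sel = '0:%d~%d' % (chans.min(), chans.max())   # channel range for a CASA 'spw' argument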
Code example #9
File: applycals.py Project: caseyjlaw/misc
    def parsegain(self, gainfile):
        """Takes .g1 CASA cal table and places values in numpy arrays.
        """

        tb.open(gainfile)
        mjd = tb.getcol('TIME')/(24*3600)     # mjd days, as for telcal
        spw = tb.getcol('SPECTRAL_WINDOW_ID')
        antnum = tb.getcol('ANTENNA1')
        gain = tb.getcol('CPARAM')    # dimensions of (npol, 1?, ntimes*nants)
        snr = tb.getcol('SNR')
        flagged = tb.getcol('FLAG')
        tb.close()

        # # need to find parent data MS to get some metadata
        # mslist = glob.glob(gainfile[:-3] + '*.ms')
        # try:
        #     msfile = mslist[0]
        #     print 'Found parent data MS %s' % msfile
        # except IndexError:
        #     print 'Could not find parent data MS for metadata...'

        # tb.open(msfile + '/ANTENNA')
        # antname = tb.getcol('NAME')      # one name per ant
        # tb.close()
        # tb.open(msfile + '/SPECTRAL_WINDOW')
        # reffreq = 1e-6*(tb.getcol('REF_FREQUENCY')+tb.getcol('TOTAL_BANDWIDTH')/2)   # similar to telcal "skyfreq"
        # specname = tb.getcol('NAME')
        # tb.close()
        # tb.open(msfile + '/SOURCE')
        # source = [name for name in tb.getcol('NAME') if 'J' in name][0]          # should return single cal name **hack**
        # tb.close()
        # nsol = len(gain[0,0])

        # ifid0R = specname[0][7] + '-' + specname[0][8]       # one value
        # ifid0L = specname[0][9] + '-' + specname[0][10]       # one value
        # ifid1R = specname[1][7] + '-' + specname[1][8]       # one value
        # ifid1L = specname[1][9] + '-' + specname[1][10]       # one value

        # # paste R,L end to end, so first loop over time, then spw, then pol
        # mjd = n.concatenate( (time, time), axis=0)
        # ifid = [ifid0R]*(nsol/2) + [ifid1R]*(nsol/2) + [ifid0L]*(nsol/2) + [ifid1L]*(nsol/2)   # first quarter is spw0,pol0, then spw1,pol0, ...
        # skyfreq = n.concatenate( (reffreq[0]*n.ones(nsol/2), reffreq[1]*n.ones(nsol/2), reffreq[0]*n.ones(nsol/2), reffreq[1]*n.ones(nsol/2)), axis=0)
        # gain = n.concatenate( (gain[0,0],gain[1,0]), axis=0)
        # amp = n.abs(gain)
        # phase = n.degrees(n.angle(gain))
        # source = [source]*nsol*2
        # flagged = n.concatenate( (flag[0,0],flag[1,0]), axis=0)
                   
        nants = len(n.unique(antnum))
        nspw = len(n.unique(spw))
        self.spwlist = n.unique(spw)
        npol = len(gain)

        # merge times less than 2s
        nsol = 0
        newmjd = [n.unique(mjd)[0]]
        for i in range(1, len(n.unique(mjd))):
            if 24*3600*(n.unique(mjd)[i] - n.unique(mjd)[i-1]) < 2.:
                print 'Gain solution at %.5f closer than 2s to previous. Skipping.' % (n.unique(mjd)[i])
                continue
            else:
                newmjd.append(n.unique(mjd)[i])
        
        self.uniquemjd = n.array(newmjd)
        nsol = len(self.uniquemjd)

        print 'Parsed gain table solutions for %d solutions, %d ants, %d spw, and %d pols' % (nsol, nants, nspw, npol)
        print 'Unique solution times', self.uniquemjd

        self.gain = n.zeros( (nsol, nants, nspw, npol), dtype='complex' )
        flags = n.zeros( (nsol, nants, nspw, npol), dtype='bool' )   # mask must be boolean
        for sol in range(nsol):
            for ant in range(nants):
                for spw in range(nspw):
                    for pol in range(npol):
                        self.gain[sol, ant, spw, pol] = gain[pol,0,spw*nsol*nants+sol*nants+ant]
                        flags[sol, ant, spw, pol] = flagged[pol,0,spw*nsol*nants+sol*nants+ant]
        self.gain = n.ma.masked_array(self.gain, flags)

#        gain = n.concatenate( (n.concatenate( (gain[0,0,:nants*nsol].reshape(nsol,nants,1,1), gain[1,0,:nants*nsol].reshape(nsol,nants,1,1)), axis=3), n.concatenate( (gain[0,0,nants*nsol:].reshape(nsol,nants,1,1), gain[1,0,nants*nsol:].reshape(nsol,nants,1,1)), axis=3)), axis=2)
#        flagged = n.concatenate( (n.concatenate( (flagged[0,0,:nants*nsol].reshape(nsol,nants,1,1), flagged[1,0,:nants*nsol].reshape(nsol,nants,1,1)), axis=3), n.concatenate( (flagged[0,0,nants*nsol:].reshape(nsol,nants,1,1), flagged[1,0,nants*nsol:].reshape(nsol,nants,1,1)), axis=3)), axis=2)
#        self.gain = n.ma.masked_array(gain, flagged == True)        

        self.mjd = n.array(mjd); self.antnum = antnum
Code example #10
# specific PAPER options
#
#cmd = 'rm -rf '+filename
#os.system(cmd)
#cmd = 'python /home/gianni/Python_code/swap_column.py /home/gianni/PAPER/psa32/data/Original/'+filename+' '+filename 
#os.system(cmd)
#ft(vis=filename,model='First_model/2455819.50285.model',usescratch=True)
#
  cmd = 'rm -rf ' + filename
  os.system(cmd)
  cmd = 'cp -r /home/gianni/PAPER/psa32/data/Original/'+ filename + ' ' + filename
  os.system(cmd)
  tb.open(filename,nomodify=False)
  data = tb.getcol('DATA')
  tb.putcol('CORRECTED_DATA',data)				# the CORRECTED_DATA column is initialized to DATA
  tb.close()
  tb.done()
#
  tflagdata(vis=filename,mode='manual',spw='0:167~202',action='apply',datacolumn='DATA')
  flagdata(vis=filename,mode='clip',clipminmax=[0,300],correlation='ABS_XY,YX',action='apply',datacolumn='DATA')
  flagdata(vis=filename,mode='clip',clipminmax=[0,1000],correlation='ABS_XX,YY',action='apply',datacolumn='DATA')
#
  out_fits  = lst+'.fits'
  out_fits_res  = lst+'_residual.fits'
  out_fits_model  = lst+'_model.fits'
#
  cmd = 'rm -rf '+ lst + '.image ' + lst + '.model ' + lst + '.residual ' + lst + '.flux ' + lst + '.psf'
  os.system(cmd)
#
# make an image
#
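Note that hand-copying DATA into CORRECTED_DATA via tb.putcol, as done above, should be equivalent to running the stock CASA task clearcal, which (re)initializes the scratch columns:

clearcal(vis=filename, addmodel=True)   # initializes CORRECTED_DATA (and MODEL_DATA)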
Code example #11
def make_primary_beams(image_name,lst,stokes_choice,beam_filename):
#
# define the frequencies of the simulated beams
# 
 freq_beams = np.zeros(11)
 freq_beams[0] = 100e6
 for j in range(1,11):
  freq_beams[j] = freq_beams[j - 1] + 10e6
#
# read the input cube
#
 tb.open(image_name)
 q_cube = tb.getcol('map')
 tb.close()
#  ra = np.ndarray(shape=(image.shape[0],image.shape[1]),dtype=float)
#  dec = np.ndarray(shape=(image.shape[0],image.shape[1]),dtype=float)
 ia.open(image_name)
 summary = ia.summary()		# read the image summary
 cube_shape = summary['shape']		# read the image shape: RA, DEC, Stokes, Freq
 ra = np.ndarray(shape=(cube_shape[0],cube_shape[1]),dtype=float)
 dec = np.ndarray(shape=(cube_shape[0],cube_shape[1]),dtype=float)
 nchan = cube_shape[3]			# number of channels in the cube
 start_freq = summary['refval'][3]	# start frequencies in Hz
 df = summary['incr'][3]		# frequency increment in Hz
#
# ra and dec will contain the RA and DEC corresponding to the pixel values in the image
#
#
 for j in range(0,cube_shape[0]):
  for k in range(0,cube_shape[1]):
   a=ia.toworld([j,k,0,0])	# a dictionary is returned with the world coordinates of that pixel
   b=a['numeric']		# the array is extracted from the dictionary a 
   ra[j,k] = b[0]		# save the RA for pixel j,k
   dec[j,k] = b[1]		# save the DEC for pixel j,k
#   print ra[j,k] * 12/np.pi,dec[j,k] * 180/np.pi,j,k
#
 print 'RA and DEC calculated'
 ia.close()
#
#
#
# read the beams
#  
 fekoX=fmt.FEKO('/home/gianni/PAPER/beams/fitBeam/data/PAPER_FF_X.ffe')
 fekoY=fmt.FEKO('/home/gianni/PAPER/beams/fitBeam/data/PAPER_FF_Y.ffe')   
 feko_xpol=fekoX.fields[0]
 feko_ypol=fekoY.fields[0]
 phi=feko_xpol.phi*np.pi/180.					# phi is the azimuth
 theta=feko_xpol.theta*np.pi/180.				# theta is the zenith angle
 theta = np.pi/2 - theta					# pyephem wants the elevation rather than the zenith angle
 ra_beams = np.ndarray(shape=(phi.shape[0]),dtype=float)	# array of RA corresponding to (phi,theta)
 dec_beams = np.ndarray(shape=(phi.shape[0]),dtype=float)	# array of DEC corresponding to (phi,theta)
#
# compute complex beams
#
 gxx=feko_xpol.etheta*np.conj(feko_xpol.etheta)+feko_xpol.ephi*np.conj(feko_xpol.ephi)
#
# define an array that will contain all the simulated beams
#
 beams = np.ndarray(shape=(gxx.shape[0],4,freq_beams.shape[0]),dtype=float)
#
# read all the beam models and save them in the beams array
#
 for j in range(0,freq_beams.shape[0]):
  feko_xpol = fekoX.fields[j]		# these two lines give an error, I need to check with Griffin how to fix it
  feko_ypol = fekoY.fields[j]
#
  gxx = feko_xpol.etheta*np.conj(feko_xpol.etheta)+feko_xpol.ephi*np.conj(feko_xpol.ephi)
  gyy = feko_ypol.etheta*np.conj(feko_ypol.etheta)+feko_ypol.ephi*np.conj(feko_ypol.ephi)
  gxy = feko_xpol.etheta*np.conj(feko_ypol.etheta)+feko_xpol.ephi*np.conj(feko_ypol.ephi)
  gyx = feko_ypol.etheta*np.conj(feko_xpol.etheta)+feko_ypol.ephi*np.conj(feko_xpol.ephi)
#
# make the stokes beams
#
  beams[:,0,j] = (gxx+gyy).real
#  beams[:,0,j] = beams[:,0,j] / np.max(beams[:,0,j])	# normalize the beams to be 1 at zenith
  beams[:,1,j] = (gxx-gyy).real
  beams[:,2,j] = (gxy+gyx).real
  beams[:,3,j] = (gxy-gyx).imag
#
 norm_beam = np.max(beams[:,0,5]) 			# beam peak at 150 MHz
 beams[:,0,:] = beams[:,0,:] / norm_beam		# normalize the beams to be 1 at zenith at 150 MHz
#
 print norm_beam,np.max(beams[:,0,:])
 print 'Beams read'
#
# bring the beam to RA,DEC coordinates
#
# Create an observer 
#
 paper = Observer()
#
# Set the observer at the Karoo
#
 paper.lat, paper.long, paper.elevation = '-30:43:17', '21:25:40.08', 0.0
# j0 = ephem.julian_date(0)			# invoked this way if    import ephem
 j0 = julian_date(0)				# invoked this way if    from ephem import *
# paper.date = float(lst)
 paper.date = float(lst) - j0 + 5./60/24	# I think I need this. At http://stackoverflow.com/questions/8962426/convert-topocentric-coordinates-azimuth-elevation-to-equatorial-coordinates
  						# they seem to suggest that this is needed in order to get the right date and I seem to get the right 
						# RA if I include this. The factor 5./60/24 needs to be added as the lst reported in the filename refers to
						# the beginning of the integration which is ~10 min long
 for j in range(0,ra_beams.shape[0]):
  a = paper.radec_of(phi[j],theta[j])
  ra_beams[j] = a[0]				# RA is in radians
  dec_beams[j] = a[1]				# DEC is in radians
#
#
# now interpolate the beams in frequency
#
 interp_beam = np.ndarray(shape=(beams.shape[0],beams.shape[1],nchan),dtype=float)
 cube_freq = start_freq
 for chan in range(0,nchan):
  a = np.max(q_cube[:,:,0,chan,0])
  b = np.min(q_cube[:,:,0,chan,0])
  if (a != 0 and b != 0):			# if the image is not empty, then proceed
   freq_dist = np.abs(cube_freq - freq_beams)
   freq_dist_s = np.sort(freq_dist)
   w = np.where(freq_dist == freq_dist_s[0])
   if freq_dist_s[0] == 0:
#
# if the beam is simulated at the exact frequency channel, then do not interpolate
#
    for j in range(0,4):
     interp_beam[:,j,chan] = beams[:,j,w[0][0]]
#
# if they are not, perform a weighted average of the two closest beams in frequency. The weights are the inverse of the frequency distance squared
#
   else:
    w1 = np.where(freq_dist == freq_dist_s[1])
    for j in range(0,4):
     interp_beam[:,j,chan] = (beams[:,j,w[0][0]] * freq_dist_s[0]**(-2) + beams[:,j,w1[0][0]] * freq_dist_s[1]**(-2)) / (freq_dist_s[0]**(-2) + freq_dist_s[1]**(-2))
#
  cube_freq = cube_freq + df
#
 print 'Beams interpolated in frequency'
#
# now interpolate the beam at the observed RA,DEC
#
 interp_beam_maps_q = np.ndarray(shape=(ra.shape[0],ra.shape[1],1,nchan,1),dtype=float) 
#
 for j in range(0,ra.shape[0]):
  for k in range(0,ra.shape[1]):
#
# interpolating amongst the three closest points
#
#   x = np.cos(ra[j,k])*np.cos(ra_beams)*np.cos(dec[j,k])*np.cos(dec_beams)
#   y = np.sin(ra[j,k])*np.sin(ra_beams)*np.cos(dec[j,k])*np.cos(dec_beams)
#   z = np.sin(dec[j,k])*np.sin(dec_beams)
#   dist = np.sqrt(x**2 + y**2 + z**2)
#
   dist = np.sqrt((ra[j,k] - ra_beams)**2 + (dec[j,k] - dec_beams)**2)
   dist_s = np.sort(dist)
   w0 = np.where(dist == dist_s[0])
   w1 = np.where(dist == dist_s[1])
   w2 = np.where(dist == dist_s[2])
#
   interp_beam_maps_q[j,k,0,:,0] = interp_beam[w0[0][0],stokes_choice,:] / dist_s[0] + interp_beam[w1[0][0],stokes_choice,:] / dist_s[1] + interp_beam[w2[0][0],stokes_choice,:] / dist_s[2]
   interp_beam_maps_q[j,k,0,:,0] = interp_beam_maps_q[j,k,0,:,0] / (dist_s[0]**(-1) + dist_s[1]**(-1) + dist_s[2]**(-1))
#
# nearest neighbour interpolation
#
#   dist = np.sqrt((ra[j,k] - ra_beams)**2 + (dec[j,k] - dec_beams)**2)
#   dist_s = np.sort(dist)
#   w0 = np.where(dist == dist_s[0])
#   interp_beam_maps_q[j,k,0,:,0] = interp_beam[w0[0][0],stokes_choice,:]
#
 print 'Beams interpolated in angle'
# 
# store the beams into an image
#
# beam_filename = image_name.strip('.image')
 cmd = 'rm -rf ' + beam_filename + '.beams'
 os.system(cmd)
 cmd = 'cp -r ' + image_name + ' ' + beam_filename + '.beams'
 os.system(cmd)
#
 tb.open(beam_filename + '.beams',nomodify=False)
 tb.putcol('map',interp_beam_maps_q)
 tb.close()
 tb.done()
 ia.open(beam_filename + '.beams')
 ia.tofits(beam_filename + '_beams.fits',overwrite=True)
 ia.close()
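The angular interpolation above is inverse-distance weighting over the three nearest beam samples. A minimal standalone restatement of the same scheme (function name and arguments are illustrative, not from the original):

import numpy as np

def idw3(ra0, dec0, ra_s, dec_s, values):
    # weight the 3 nearest samples by 1/distance, as in the loop above
    dist = np.sqrt((ra0 - ra_s)**2 + (dec0 - dec_s)**2)
    nearest = np.argsort(dist)[:3]
    w = 1.0 / dist[nearest]
    return np.sum(values[nearest] * w) / np.sum(w)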
Code example #12
def ms2ripples(vis,
               savepath,
               timebinsec,
               Nsam=None,
               timebin=False,
               overwrite=True,
               verbose=True,
               debug=False):

    import astropy.constants as c

    if timebin:
        print("Performing time binning, by {:}s:".format(timebinsec))
        prefix = vis.replace('.ms', '_timebin' + '{:}s.ms'.format(timebinsec))
        prefix = os.path.basename(prefix)
        timebinVis = os.path.join(savepath, prefix)
        if overwrite:
            rmtables(timebinVis)
        default(split2)
        split2(vis=vis,
               timebin=str(timebinsec) + 's',
               outputvis=timebinVis,
               datacolumn='data',
               keepflags=False)
        if overwrite:
            plotms(vis=timebinVis,
                   xaxis='uvdist',
                   yaxis='amp',
                   coloraxis='spw')
            if debug and not os.path.exists(
                    timebinVis[:timebinVis.find('.ms')] + '.image'):
                clean(vis=timebinVis,
                      imagename=timebinVis[:timebinVis.find('.ms')],
                      spw='',
                      mode='mfs',
                      nchan=-1,
                      imsize=800,
                      cell='0.0300arcsec',
                      niter=0,
                      interactive=False,
                      stokes='I')
        oldvis = vis
        vis = timebinVis
    else:
        raw_input(
            "No time binning...proceed with caution. Press Enter to continue.")
        # without binning, later code still references these names
        oldvis = vis
        timebinVis = vis

    # check that the MS has only 1 science target
    tb.open(timebinVis + '/FIELD')
    src = tb.getcell('NAME')
    print("Source in {}: {}\n").format(timebinVis, src)
    tb.close()

    ms.open(timebinVis)
    spwInfo = ms.getspectralwindowinfo()
    nchan = spwInfo["0"]["NumChan"]
    npol = spwInfo["0"]["NumCorr"]
    ms.close()

    tb.open(oldvis)
    _dataShape = tb.getcol('DATA').shape
    tb.close()

    tb.open(timebinVis)
    # tb.colnames
    data = tb.getcol('DATA')
    uvw = tb.getcol('UVW')
    uvwShape = uvw.shape
    nvis = len(uvw[1, :])  # before dropping flagged data
    flagRow = tb.getcol('FLAG_ROW')
    assert not flagRow.any()  # expect no flagged rows
    data_desc_id = tb.getcol("DATA_DESC_ID")
    sigma = tb.getcol('SIGMA')
    # weight = tb.getcol('WEIGHT')
    weight = 1. / sigma**2
    # del sigma

    if Nsam is not None:
        from ripples_utils import pick_highSNR
        idx = pick_highSNR(weight, Nsam, plothist=verbose)
        uvw = uvw[:, idx]

    with open(os.path.join(savepath, 'u.bin'), 'wb') as f:
        f.write(uvw[0, :])
    with open(os.path.join(savepath, 'v.bin'), 'wb') as f:
        f.write(uvw[1, :])

    if debug:
        print uvw[0, :].max()
        print uvw[0, :].min()
        print uvw[1, :].max()
        print uvw[1, :].min()
        print uvw.dtype  # float64
    # del uvw

    ant1 = tb.getcol('ANTENNA1')
    ant2 = tb.getcol('ANTENNA2')
    assert len(ant1) == nvis
    assert len(ant2) == nvis
    tb.done()

    if Nsam is not None:
        ant1 = ant1[idx]
        ant2 = ant2[idx]
        assert len(ant1) == len(uvw[0, :])
    with open(os.path.join(savepath, 'ant1.bin'), 'wb') as f:
        ant1 = np.asarray(ant1, dtype=np.float_)
        # print ant1.dtype
        f.write(ant1)
    with open(os.path.join(savepath, 'ant2.bin'), 'wb') as f:
        ant2 = np.asarray(ant2, dtype=np.float_)
        f.write(ant2)

    # ant1.tofile(os.path.join(savepath, 'ant1.bin'))

    if debug:
        print ant1.max()
        print ant1.min()
        print ant2.max()
        print ant2.min()
        xx = np.fromfile(os.path.join(savepath, 'ant1.bin'))
        if Nsam is None:
            assert len(xx) == nvis
        else:
            assert len(xx) == len(idx)
        assert (xx == ant1).all()
        xx = np.fromfile(os.path.join(savepath, 'ant2.bin'))
        if Nsam is None:
            assert len(xx) == nvis
        else:
            assert len(xx) == len(idx)
        assert (xx == ant2).all()
    # del ant1, ant2

    if verbose:
        print("Number of coorelation: ", npol)
        print("data shape", data.shape)
        print("data shape before time binning", _dataShape)
        print("uvw shape", uvwShape)
        print("weight shpae", weight.shape)  # (npol, nvis)

    tb.open(vis + '/SPECTRAL_WINDOW')
    SPWFreqs = np.squeeze(tb.getcol("CHAN_FREQ"))
    tb.done()

    freq_per_vis = np.array([SPWFreqs[fff] for fff in data_desc_id])
    # freqs = np.mean(SPWFreqs)
    assert len(freq_per_vis) == nvis

    if Nsam is not None:
        freq_per_vis = freq_per_vis[idx]
    with open(os.path.join(savepath, 'frequencies.bin'), 'wb') as f:
        f.write(freq_per_vis)
    del data_desc_id, SPWFreqs

    if Nsam is not None:
        data = data[:, :, idx]
        weight = weight[:, idx]

    if debug:
        # randomly sample 3000 visibilities to image
        from ripples_utils import calc_img_from_vis
        _idx = np.random.choice(len(weight[0, :]), size=3000)
        if npol == 2:
            if data.shape[1] == 1:
                # expand the channel axis for weight
                __weight = weight[:, np.newaxis, :]
            _data = np.average(data, weights=__weight, axis=0)
        _weight = np.average(weight, axis=0)  # npol, nvis

        print _weight.shape
        print _data.shape
        _real = _data[0, _idx].real  # nchan, nvis
        _imag = _data[0, _idx].imag
        visOut = np.array(zip(_real, _imag)).flatten()
        _weight = _weight[_idx]
        __weight_factor = len(_idx) / np.sum(_weight * _real**2 +
                                             _weight * _imag**2)
        print __weight_factor
        _weight *= __weight_factor
        test_img = calc_img_from_vis(uvw[0, _idx],
                                     uvw[1, _idx],
                                     _weight,
                                     visOut,
                                     freq_per_vis[_idx],
                                     800,
                                     pixsize=0.01)
        import pdb
        pdb.set_trace()

    if npol == 1:
        real = data.real
        imag = data.imag
    elif npol == 2:
        print "Real and Imag shape before averaging two hands (npol, nchan, nvis): ", data.real.shape
        if data.shape[1] == 1:
            # expand the channel axis for weight
            weight = weight[:, np.newaxis, :]
        else:
            print weight.shape
            print("We shouldn't have to enter this condition.")
            import pdb
            pdb.set_trace()

        # average the two hands
        data = np.average(data, weights=weight, axis=0)
        # print data.shape      (nchan, nvis)
        weight = np.average(weight, axis=0)
        print weight.shape  # should be (nchan, nvis)

        real = data.real
        imag = data.imag
        del data
        print "Shape after averaging two hands: ", real.shape
    elif npol > 2:
        raise NotImplementedError("more than 2 hands..")

    # rescale weight
    # uvmcmcfit way
    if Nsam is None:
        _factor = nvis / np.sum(weight * real**2 + weight * imag**2)
    else:
        _factor = len(idx) / np.sum(weight * real**2 + weight * imag**2)
    _sigmas = (weight**-0.5) * _factor
    if verbose:
        print "simple rescale, factor of: ", _factor
        print _sigmas.min(), _sigmas.max(), _sigmas.std()
        print "New sigma in [mjy/beam]", (_sigmas**-2).sum()**-0.5 * 1.e3
        del _sigmas

    # Yashar way
    # first grouping the visibilities into bins that probe the same signal
    # take differences btw visibilities that null the sky
    # then, we simply assume that the variance in the subtracted visibilities is equal to twice the noise variance
    plotms(timebinVis, xaxis='V', yaxis='U', coloraxis='baseline')

    scaling = []
    for a1 in np.unique(ant1):
        for a2 in np.unique(ant2):
            if a1 < a2:
                baselineX = (ant1 == a1) & (ant2 == a2)

                if debug:
                    print a1, a2
                    print ant1, ant2
                    print ""
                    print a1 in ant1
                    print a2 in ant2
                    print ""

                    print np.where(a1 == ant1)
                    print np.where(a2 == ant2)
                    print np.where((ant1 == a1) & (ant2 == a2) == True)

                # needed when sub-sampling with Nsam: some baselines may be absent
                if baselineX.any():
                    if nchan == 1:
                        print real.shape
                        reals = real[0, baselineX]
                        imags = imag[0, baselineX]
                        sigs = weight[0, baselineX]**-0.5
                    else:
                        raise NotImplementedError(
                            "Not implemented for MS files with more than 1 channel per spw..."
                        )

                    # randomly split points into "two sets"
                    # subtract from "two set"
                    # subtract from neighboring
                    diffrs = reals - np.roll(reals, -1)
                    diffis = imags - np.roll(imags, -1)
                    std = np.mean([diffrs.std(), diffis.std()])

                    if debug:
                        print diffrs.min(), diffis.min()
                        print diffrs.max(), diffis.max()
                        print diffrs.std(), diffis.std()
                        print std / sigs.mean() / np.sqrt(2)
                    scaling.append(std / sigs.mean() / np.sqrt(2))
    del ant1, ant2
    sigma = weight**-0.5
    scaling = np.asarray(scaling).mean()
    sigma *= scaling
    print 'Scaling factor: ', scaling
    print 'Sigma after scaling [mJy/beam]: ', ((sigma**-2).sum())**-0.5 * 1E+3

    # real_1, imag_1, real_2, imag_2, etc
    visOut = np.array(zip(real, imag)).flatten()
    if Nsam is None:
        assert len(visOut) == int(nvis * 2)
    weight = sigma**-2
    weight = np.array(zip(
        weight,
        weight)).flatten()  # TODO: want to make sure this works for nchan > 1
    assert len(weight) == len(visOut)

    if Nsam is not None:
        assert len(visOut) == Nsam * 2
    if Nsam is not None:
        assert len(weight) == Nsam * 2
    with open(os.path.join(savepath, 'vis_chan_0.bin'), 'wb') as f:
        f.write(visOut)
    with open(os.path.join(savepath, 'sigma_squared_inv.bin'), 'wb') as f:
        f.write(weight)

    if Nsam is None:
        blah = np.zeros((nvis))
    else:
        blah = np.zeros((len(idx)))
    with open(os.path.join(savepath, 'chan.bin'), 'wb') as f:
        f.write(blah)

    if debug:
        # image the SAVED visibilities
        # np.fromfile takes a dtype as its second argument, not a file mode
        uu = np.fromfile(os.path.join(savepath, 'u.bin'))
        vv = np.fromfile(os.path.join(savepath, 'v.bin'))
        weight = np.fromfile(os.path.join(savepath, 'sigma_squared_inv.bin'))
        visout = np.fromfile(os.path.join(savepath, 'vis_chan_0.bin'))
        calc_img_from_vis(uu, vv, weight, visout, freq_per_vis, 1000,
                          pixsize=0.05)

    return None
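A hedged usage sketch (paths are hypothetical); the function writes u.bin, v.bin, ant1.bin, ant2.bin, frequencies.bin, vis_chan_0.bin, sigma_squared_inv.bin, and chan.bin under savepath:

ms2ripples('target.ms', savepath='./ripples_input', timebinsec=30,
           timebin=True, Nsam=None, verbose=True)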
Code example #13
File: applycals.py Project: caseyjlaw/misc
def openBpolyFile(caltable, debug=False):
#    mytb = au.createCasaTool(tbtool)    # from analysisutilities by corder
    tb.open(caltable)
    desc = tb.getdesc()
    if ('POLY_MODE' in desc):
        polyMode = tb.getcol('POLY_MODE')
        polyType = tb.getcol('POLY_TYPE')
        scaleFactor = tb.getcol('SCALE_FACTOR')
        antenna1 = tb.getcol('ANTENNA1')
        times = tb.getcol('TIME')
        cal_desc_id = tb.getcol('CAL_DESC_ID')
        nRows = len(polyType)
        for pType in polyType:
            if (pType != 'CHEBYSHEV'):
                print "I do not recognized polynomial type = %s" % (pType)
                return
        # Here we assume that all spws have been solved with the same mode
        uniqueTimesBP = n.unique(tb.getcol('TIME'))
        nUniqueTimesBP = len(uniqueTimesBP)
        if (nUniqueTimesBP == 2):
            print "Two BP sols found with times differing by %g seconds. Using first." % (uniqueTimesBP[1]-uniqueTimesBP[0])
            nUniqueTimesBP = 1
            uniqueTimesBP = uniqueTimesBP[0]
        mystring = ''
        nPolyAmp = tb.getcol('N_POLY_AMP')
        nPolyPhase = tb.getcol('N_POLY_PHASE')
        frequencyLimits = tb.getcol('VALID_DOMAIN')
        increments = 0.001*(frequencyLimits[1,:]-frequencyLimits[0,:])
        frequenciesGHz = []
        for i in range(len(frequencyLimits[0])):
           freqs = (1e-9)*n.arange(frequencyLimits[0,i],frequencyLimits[1,i],increments[i])       # **for some reason this is nch-1 long?**
           frequenciesGHz.append(freqs)
        polynomialAmplitude = []
        polynomialPhase = []
        for i in range(len(polyMode)):
            polynomialAmplitude.append([1])
            polynomialPhase.append([0])
            if (polyMode[i] == 'A&P' or polyMode[i] == 'A'):
                polynomialAmplitude[i]  = tb.getcell('POLY_COEFF_AMP',i)[0][0][0]
            if (polyMode[i] == 'A&P' or polyMode[i] == 'P'):
                polynomialPhase[i] = tb.getcell('POLY_COEFF_PHASE',i)[0][0][0]
  
        tb.close()
        tb.open(caltable+'/CAL_DESC')
        nSpws = len(tb.getcol('NUM_SPW'))
        spws = tb.getcol('SPECTRAL_WINDOW_ID')
        spwBP = []
        for c in cal_desc_id:
            spwBP.append(spws[0][c])
        tb.close()
        nPolarizations = len(polynomialAmplitude[0]) / nPolyAmp[0]
        if (debug):
            mystring += '%.3f, ' % (uniqueTimesBP/(24*3600))
            print 'BP solution has unique time(s) %s and %d pols' % (mystring, nPolarizations)
        
        # This value is overridden by the new function doPolarizations in ValueMapping.
        # print "Inferring %d polarizations from size of polynomial array" % (nPolarizations)
        return([polyMode, polyType, nPolyAmp, nPolyPhase, scaleFactor, nRows, nSpws, nUniqueTimesBP,
                uniqueTimesBP, nPolarizations, frequencyLimits, increments, frequenciesGHz,
                polynomialPhase, polynomialAmplitude, times, antenna1, cal_desc_id, spwBP])
    else:
        tb.close()
        return([])
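The returned coefficients describe Chebyshev series over each row's VALID_DOMAIN. A sketch of evaluating one row's amplitude solution on its frequency grid, assuming standard Chebyshev conventions with the domain mapped to [-1, 1] (the actual CASA BPOLY convention may differ):

from numpy.polynomial import chebyshev

row = 0
f = frequenciesGHz[row] * 1e9                  # back to Hz
lo, hi = frequencyLimits[0, row], frequencyLimits[1, row]
x = 2.0 * (f - lo) / (hi - lo) - 1.0           # map VALID_DOMAIN onto [-1, 1]
amp = chebyshev.chebval(x, polynomialAmplitude[row])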