Example 1
def compareSubTables(input,reference,order=None,excluded_cols=[]):
    
    tbinput = tbtool()
    tbinput.open(input)
    if order is not None:
        tbinput_sorted = tbinput.taql("SELECT * from " + input + " order by " + order)
    else:
        tbinput_sorted = tbinput
    
    tbreference = tbtool()
    tbreference.open(reference)
    if order is not None:
        tbreference_sorted = tbreference.taql("SELECT * from " + reference + " order by " + order)
    else:
        tbreference_sorted = tbreference
    
    columns = tbinput.colnames()
    for col in columns:
        if not col in excluded_cols:
            col_input = tbinput_sorted.getcol(col)
            col_reference = tbreference_sorted.getcol(col)
            if not (col_input == col_reference).all():
                tbinput.close()
                tbreference.close()
                del tbinput
                del tbreference
                return (False,col)
    
    tbinput.close()
    tbreference.close()
    del tbinput
    del tbreference
    
    return (True,"OK")
Example 2
    def test_vla_mixed_polarizations_mms2(self):
        
        self.outputms = 'test_vla_mixed_polarizations_2.mms'
        
        mstransform(vis=self.vis,outputvis=self.outputms,scan='16',datacolumn='DATA', createmms=True,
                    separationaxis='spw',spw='16~18',correlation='XX')
        
        # Check that DDI sub-table is consistent with POLARIZATION sub-table
        mytb = tbtool()
        mytb.open(self.outputms + '/POLARIZATION')
        npols = mytb.nrows()
        mytb.close()
        
        mytb = tbtool()
        mytb.open(self.outputms + '/DATA_DESCRIPTION')
        polIds = mytb.getcol('POLARIZATION_ID')
        mytb.close()    
        for id in polIds:
            self.assertTrue(id in range(npols),'PolarizationId in DATA_DESCRIPTION not consistent with POLARIZATION table')
        
#        self.assertTrue(all(polIds < npols), 'PolarizationId in DATA_DESCRIPTION not consistent with POLARIZATION table') 
        
        # Check that flagdata can run properly with output MS
        summary = flagdata(vis=self.outputms,mode='summary')
        self.assertTrue(summary.has_key('correlation'), 'Flagdata failure due to misformatted MS')
Example 3
def fixfeedpa(vis, defband='', forceband=''):

    mytb = taskinit.tbtool()

    mytb.open(vis + '/SPECTRAL_WINDOW')
    spwnames = mytb.getcol('NAME')
    mytb.close()
    if len(forceband) > 0:
        print 'Forcing band = ', forceband
        spwnames[:] = forceband
        defband = forceband
    mytb.open(vis + '/FEED', nomodify=False)
    spwids = mytb.getcol('SPECTRAL_WINDOW_ID')
    ra = mytb.getcol('RECEPTOR_ANGLE')
    ra[:, :] = 0.0
    spwmask = (spwids > -1)
    ra[0, spwmask] = [bandpa(spwnames[ispw]) for ispw in spwids[spwmask]]
    spwmask = pl.logical_not(spwmask)
    if (sum(spwmask) > 0):
        if (len(defband) > 0):
            print 'NB: Setting spwid=-1 rows in FEED table to RECEPTOR_ANGLE for band=' + str(
                defband)
            ra[0, spwmask] = bandpa(defband)
        else:
            print 'NB: Setting spwid=-1 rows in FEED table to RECEPTOR_ANGLE=(0,pi/2)'
    ra[1, :] = ra[0, :] + (pi / 2.)
    mytb.putcol('RECEPTOR_ANGLE', ra)
    mytb.close()
Example 4
def scanbystate(vis, undo=False):

    mytb = taskinit.tbtool()

    mytb.open(vis, nomodify=False)
    scans = mytb.getcol('SCAN_NUMBER')
    states = mytb.getcol('STATE_ID')
    print 'Unique STATE_IDs = ', str(pl.unique(states))
    maxstate = states.max()

    if undo:
        d = 10**int(floor(log10(scans.min())))
        if d < 10:
            mytb.close()
            raise Exception, 'Apparently, nothing to undo'
        scans -= states
        scans /= d
        print 'New SCAN_NUMBER = (SCAN_NUMBER - STATE_ID) / ' + str(d)
    else:
        m = 10**int(floor(log10(states.max()) + 1.0))
        scans *= m
        scans += states
        print 'New SCAN_NUMBER = SCAN_NUMBER * ' + str(m) + ' + STATE_ID'

    mytb.putcol('SCAN_NUMBER', scans)
    mytb.close()
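
A quick worked example of the renumbering above (hypothetical values): if the STATE_IDs run 0..23, then m = 10**int(floor(log10(23) + 1.0)) = 100, so scan 7 with STATE_ID 13 becomes SCAN_NUMBER 7*100 + 13 = 713; the undo branch recovers 7 as (713 - 13) / 100.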
Example 5
def abschanwidth(vis="", spw=""):

    """
    Usage: abschanwidth(vis, spw)
           Get the absolute channel width for the given spw.
           Returns 0 upon error.
    """

    if vis == "" or spw == "":
        print "Usage: abschanwidth(vis, spw)"
        return 0

    myvis = vis
    myspw = spw
    mytb = taskinit.tbtool()

    mytb.open(myvis + "/SPECTRAL_WINDOW")
    if spw >= mytb.nrows() or spw < 0:
        print "Error: spw out of range. Min is 0. Max is ", mytb.nrows() - 1
        mytb.close()
        return 0

    mychw = mytb.getcell("CHAN_WIDTH", spw)[0]
    mytb.close()

    return numpy.fabs(mychw)
Example 6
def getnch(vis="", spw=""):

    """
    Usage: getnch(vis, spw)
           Get the nchan for the given spw.
           Returns 0 upon error.
    """

    if vis == "" or spw == "":
        print "Usage: abschanwidth(vis, spw)"
        return 0

    myvis = vis
    myspw = spw
    mytb = taskinit.tbtool()

    mytb.open(myvis + "/SPECTRAL_WINDOW")
    if spw >= mytb.nrows() or spw < 0:
        print "Error: spw out of range. Min is 0. Max is ", mytb.nrows() - 1
        mytb.close()
        return 0

    mynch = mytb.getcell("NUM_CHAN", spw)
    mytb.close()

    return numpy.abs(mynch)
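
A minimal usage sketch for the two helpers above (hypothetical MS name), assuming a CASA session where taskinit and numpy are importable:

width_hz = abschanwidth("my_data.ms", 3)   # absolute width of one channel of spw 3, in Hz
nchan = getnch("my_data.ms", 3)            # number of channels in spw 3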
Example 7
def dxy(dtab, xytab, dout):

    mytb = taskinit.tbtool()

    os.system('cp -r ' + dtab + ' ' + dout)

    # How many spws
    mytb.open(dtab + '/SPECTRAL_WINDOW')
    nspw = mytb.nrows()
    mytb.close()

    for ispw in range(nspw):
        mytb.open(xytab)
        st = mytb.query('SPECTRAL_WINDOW_ID==' + str(ispw))
        x = st.getcol('CPARAM')
        st.close()
        mytb.close()

        mytb.open(dout, nomodify=False)
        st = mytb.query('SPECTRAL_WINDOW_ID==' + str(ispw))
        d = st.getcol('CPARAM')

        # the following assumes all antennas and chans same in both tables.

        # Xinv.D.X:
        d[0, :, :] *= pl.conj(x[0, :, :])
        d[1, :, :] *= x[0, :, :]

        st.putcol('CPARAM', d)
        st.close()
        mytb.close()
Example 8
def effectiveResolutionAtFreq(vis, spw, freq, kms=True):
    """
    Returns the effective resolution of a channel (in Hz or km/s)
    of the specified measurement set and spw ID.
    Note: For ALMA, this will only be correct for cycle 3 data onward.
    freq: frequency as a quantity (with units)
    kms: if True, then return the value in km/s (otherwise Hz)
    To see this information for an ASDM, use
       printLOsFromASDM(showEffective=True)
    -Todd Hunter
    """
    if (not os.path.exists(vis + '/SPECTRAL_WINDOW')):
        raise ValueError("Could not find ms (or its SPECTRAL_WINDOW table).")
    mytb = tbtool()
    mytb.open(vis + '/SPECTRAL_WINDOW')
    if (type(spw) != list and type(spw) != np.ndarray):
        spws = [int(spw)]
    else:
        spws = [int(s) for s in spw]
    bws = []
    for spw in spws:
        chfreq = mytb.getcell('CHAN_FREQ', spw)  # Hz
        sepfreq = np.abs(chfreq - freq.to(u.Hz).value)
        ind = np.where(sepfreq == sepfreq.min())
        bwarr = mytb.getcell('RESOLUTION', spw)  # Hz
        bw = bwarr[ind]
        if kms:
            bw = constants.c.to(u.km / u.s).value * bw / freq.to(u.Hz).value
        bws.append(bw)
    mytb.close()
    if (len(bws) == 1):
        bws = bws[0]
    return bws
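
A hedged usage sketch (hypothetical MS name and frequency, not from the source), assuming astropy units are available as u, since freq must be a quantity that supports .to(u.Hz):

import astropy.units as u
res_kms = effectiveResolutionAtFreq('target.ms', spw=19, freq=230.538 * u.GHz, kms=True)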
Example 9
def clearcal(
    vis=None,
    field=None,
    spw=None,
    intent=None,
    addmodel=None,
):

    casalog.origin('clearcal')

    # Do the trivial parallelization
    if ParallelTaskHelper.isParallelMS(vis):
        helper = ParallelTaskHelper('clearcal', locals())
        helper.go()
        return

    # Local versions of the tools
    tblocal = tbtool()
    cblocal = cbtool()
    mslocal = mstool()

    try:

        # we will initialize scr cols only if we don't create them
        doinit = False

        if (type(vis) == str) & os.path.exists(vis):
            tblocal.open(vis)
            doinit = tblocal.colnames().count('CORRECTED_DATA') > 0
            tblocal.close()

            # We ignore selection if creating the scratch columns
            if not doinit:
                casalog.post(
                    'Need to create scratch columns; ignoring selection.')

            cblocal.setvi(old=True, quiet=False)
            # Old VI for now
            cblocal.open(vis, addmodel=addmodel)
        else:
            raise Exception, \
                'Visibility data set not found - please verify the name'

        # If necessary (scr col not just created), initialize scr cols
        if doinit:
            cblocal.selectvis(field=field, spw=spw, intent=intent)
            cblocal.initcalset(1)
        cblocal.close()

        # Write history to the MS
        param_names = clearcal.func_code.co_varnames[:clearcal.func_code.
                                                     co_argcount]
        param_vals = [eval(p) for p in param_names]
        casalog.post('Updating the history in the output', 'DEBUG1')
        write_history(mslocal, vis, 'clearcal', param_names, param_vals,
                      casalog)

    except Exception, instance:

        print '*** Error ***', instance
Example 10
def dxy(dtab,xytab,dout):

    mytb=taskinit.tbtool()

    os.system('cp -r '+dtab+' '+dout)

    # How many spws
    mytb.open(dtab+'/SPECTRAL_WINDOW')
    nspw=mytb.nrows()
    mytb.close()


    for ispw in range(nspw):
        mytb.open(xytab)
        st=mytb.query('SPECTRAL_WINDOW_ID=='+str(ispw))
        x=st.getcol('CPARAM')
        st.close()
        mytb.close()

        mytb.open(dout,nomodify=False)
        st=mytb.query('SPECTRAL_WINDOW_ID=='+str(ispw))
        d=st.getcol('CPARAM')

        # the following assumes all antennas and chans same in both tables.

        # Xinv.D.X:
        d[0,:,:]*=pl.conj(x[0,:,:])
        d[1,:,:]*=x[0,:,:]

        st.putcol('CPARAM',d)
        st.close()
        mytb.close()
Example 11
def getColShape(table, col, start_row=0, nrow=1, row_inc=1):
    """ Get the shape of the given column.
    Keyword arguments:
        table      --    input table or MS
        col        --    column to get the shape
        start_row  --    start row (default 0)
        nrow       --    number of rows to read (default 1)
        row_inc    --    increment of rows to read (default 1)
        
        Return a list of strings with the shape of each row in the column.
    
    """

    col_shape = []
    try:
        try:
            tblocal = tbtool()
            tblocal.open(table)
            col_shape = tblocal.getcolshapestring(col, start_row, nrow,
                                                  row_inc)
        except:
            print 'Cannot get shape of col %s from table %s ' % (col, table)

    finally:
        tblocal.close()

    return col_shape
Example 12
def getColShape(table,col,start_row=0,nrow=1,row_inc=1):
    """ Get the shape of the given column.
    Keyword arguments:
        table      --    input table or MS
        col        --    column to get the shape
        start_row  --    start row (default 0)
        nrow       --    number of rows to read (default 1)
        row_inc    --    increment of rows to read (default 1)
        
        Return a list of strings with the shape of each row in the column.
    
    """

    col_shape = []
    try:
        try:
            tblocal = tbtool()
            tblocal.open(table)
            col_shape = tblocal.getcolshapestring(col,start_row,nrow,row_inc)
        except:
            print 'Cannot get shape of col %s from table %s '%(col,table)

    finally:
        tblocal.close()
            
    return col_shape
Example 13
def fixfeedpa(vis,defband='',forceband=''):

    mytb=taskinit.tbtool()

    mytb.open(vis+'/SPECTRAL_WINDOW')
    spwnames=mytb.getcol('NAME')
    mytb.close()
    if len(forceband)>0:
        print 'Forcing band = ',forceband
        spwnames[:]=forceband
        defband=forceband
    mytb.open(vis+'/FEED',nomodify=False)
    spwids=mytb.getcol('SPECTRAL_WINDOW_ID')
    ra=mytb.getcol('RECEPTOR_ANGLE')
    ra[:,:]=0.0
    spwmask=(spwids>-1)
    ra[0,spwmask]=[bandpa(spwnames[ispw]) for ispw in spwids[spwmask]]
    spwmask=pl.logical_not(spwmask)
    if (sum(spwmask)>0):
        if (len(defband)>0):
            print 'NB: Setting spwid=-1 rows in FEED table to RECEPTOR_ANGLE for band='+str(defband)
            ra[0,spwmask]=bandpa(defband)
        else:
            print 'NB: Setting spwid=-1 rows in FEED table to RECEPTOR_ANGLE=(0,pi/2)'
    ra[1,:]=ra[0,:]+(pi/2.)
    mytb.putcol('RECEPTOR_ANGLE',ra)
    mytb.close()
Example 14
    def test2_setjy_scratchless_mode_multiple_model(self):
        """Test 2: Set vis model header in one multiple fields """

        retval = setjy(vis=self.vis, field='1331+305*',fluxdensity=[1331.,0.,0.,0.], 
                       scalebychan=False, usescratch=False,standard='manual')
        self.assertTrue(retval, "setjy run failed")
        retval = setjy(vis=self.vis, field='1445+099*',fluxdensity=[1445.,0.,0.,0.], 
                       scalebychan=False, usescratch=False,standard='manual')
        self.assertTrue(retval, "setjy run failed")
                   
        mslocal = mstool()
        mslocal.open(self.vis)
        listSubMSs = mslocal.getreferencedtables()
        mslocal.close()
        #listSubMSs.append(self.vis)
        for subMS in listSubMSs:
            tblocal = tbtool()
            tblocal.open(subMS + '/SOURCE')
            nrows = tblocal.nrows()
            for row_i in range(0,nrows):
                try:
                    model_i = tblocal.getcell('SOURCE_MODEL',row_i)
                    if (row_i == 0):
                        self.assertEqual(model_i['cl_0']['fields'][0],row_i)
                        self.assertEqual(model_i['cl_0']['container']['component0']['flux']['value'][0],1331.)
                    elif (row_i == 1):
                        self.assertEqual(model_i['cl_0']['fields'][0],row_i)
                        self.assertEqual(model_i['cl_0']['container']['component0']['flux']['value'][0],1445.)                    
                    else:
                        self.assertEqual(len(model_i),0)
                except:
                    casalog.post("Problem accesing SOURCE_MODEL col from subMS %s" % subMS ,
                                 "SEVERE","test2_setjy_scratchless_mode_multiple_model")                        
            tblocal.close()            
Example 15
    def __init__(self, **keyval):
        linefinder.__init__(self)
        self.name = "hlinefinder.tmp.asap"
        self.spec = None
        self.vals = [
            "threshold", "min_nchan", "avg_limit", "box_size", "noise_box",
            "noise_stat"
        ]
        for v in ["name", "spec"]:
            try:
                setattr(self, v, keyval[v])
            except KeyError:
                pass
        self.nchan = 0
        if self.spec is not None:
            self.nchan = len(self.spec)
        self.scantab = None
        self.spectrum = []
        self.freq = []
        self.lines_merged = []
        self.tb = taskinit.tbtool()
        self.tb.create()

        # create dummy scantable
        self.init()
        if self.spec is not None:
            self.set_spectrum()
        self.set_options(keyval)
Example 16
def scanbystate(vis,undo=False):

    mytb=taskinit.tbtool()

    mytb.open(vis,nomodify=False)
    scans=mytb.getcol('SCAN_NUMBER')
    states=mytb.getcol('STATE_ID')
    print 'Unique STATE_IDs = ',str(pl.unique(states))
    maxstate=states.max()

    if undo:
        d=10**int(floor(log10(scans.min())))
        if d<10:
            mytb.close()
            raise Exception, 'Apparently, nothing to undo'
        scans-=states
        scans/=d
        print 'New SCAN_NUMBER = (SCAN_NUMBER - STATE_ID) / '+str(d)
    else:
        m=10**int(floor(log10(states.max())+1.0))
        scans*=m
        scans+=states
        print 'New SCAN_NUMBER = SCAN_NUMBER * '+str(m)+' + STATE_ID'

    mytb.putcol('SCAN_NUMBER',scans)
    mytb.close()
Example 17
def getdata_raw(imgname):
    """Return data as a 2D numpy array
       For more detailed access to cubes, don't use this method,
       as it will use all the memory. Instead, use ia.open()
       and getslice/putslice() to cycle through the cube.
       Note we're not getting the mask this way... see Moment_AT for
       a version that returns a numpy.ma array with masking.

       See putdata(imgname,data) for the reverse operation, but you
       will need to create a clone of the image to ensure the header.

       Parameters
       ----------
       imgname : str
           The (absolute) CASA image filename 
  
       Returns
       -------
       array 
           data in a 2D numpy array
    """
    tb = taskinit.tbtool()
    tb.open(imgname)
    data=tb.getcol('map')
    tb.close()
    shp=data.shape
    nx = shp[0]
    ny = shp[1]
    d = data.reshape(nx,ny)
    return d
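
A minimal usage sketch (hypothetical image path), assuming taskinit is importable as the function above requires:

plane = getdata_raw('/data/myimage.im')   # 2D CASA image, hypothetical path
nx, ny = plane.shape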
Example 18
 def _column_exists(self, tbname, colname):
     """Returns True if the column exists in the table"""
     self._check_file(tbname)
     tb = tbtool()
     tb.open(tbname)
     cols = tb.colnames()
     tb.close()
     return (colname in cols)
Example 19
    def test_unapply_clip_and_unset_flagrow(self):
        '''flagcmd: Check that FLAG_ROW is unset after un-applying a clip agent'''
        # Remove any cmd from table
        flagcmd(vis=self.vis, action='clear', clearall=True)

        # Flag using manual agent
        myinput = "scan='4'"
        filename = create_input(myinput)
        flagcmd(vis=self.vis, inpmode='list', inpfile=filename, action='apply', savepars=False)
        
        # Check FLAG_ROW is all set to true
        mytb = tbtool()
        mytb.open(self.vis)
        selectedtb = mytb.query('SCAN_NUMBER in [4]')
        FLAG_ROW = selectedtb.getcol('FLAG_ROW')
        mytb.close()        
        selectedtb.close()
        self.assertEqual(FLAG_ROW.sum(), FLAG_ROW.size)
        
        # Flag using clip agent from file
        myinput = "scan='4' mode=clip "
        filename = create_input(myinput)
        flagcmd(vis=self.vis, inpmode='list', inpfile=filename, action='apply', savepars=True,
                flagbackup=False)
        
        # Check FLAG_ROW is all set to true
        mytb = tbtool()
        mytb.open(self.vis)
        selectedtb = mytb.query('SCAN_NUMBER in [4]')
        FLAG_ROW = selectedtb.getcol('FLAG_ROW')
        mytb.close()           
        selectedtb.close()
        self.assertEqual(FLAG_ROW.sum(), FLAG_ROW.size)
        
        # Unapply only the clip line
        flagcmd(vis=self.vis, action='unapply', useapplied=True, tablerows=0, savepars=False)
       
        # Check FLAG_ROW is now all set to false
        mytb = tbtool()
        mytb.open(self.vis)
        selectedtb = mytb.query('SCAN_NUMBER in [4]')
        FLAG_ROW = selectedtb.getcol('FLAG_ROW')
        mytb.close()        
        selectedtb.close()
        self.assertEqual(FLAG_ROW.sum(), 0)
Example 20
def clearcal(
    vis=None,
    field=None,
    spw=None,
    intent=None,
    addmodel=None,
    ):

    casalog.origin('clearcal')

    # Do the trivial parallelization
    if ParallelTaskHelper.isParallelMS(vis):
        helper = ParallelTaskHelper('clearcal', locals())
        helper.go()
        return

    # Local versions of the tools
    tblocal = tbtool()
    cblocal = cbtool()
    mslocal = mstool()

    try:

        # we will initialize scr cols only if we don't create them
        doinit = False

        if (type(vis) == str) & os.path.exists(vis):
            tblocal.open(vis)
            doinit = tblocal.colnames().count('CORRECTED_DATA') > 0
            tblocal.close()

            # We ignore selection if creating the scratch columns
            if not doinit:
                casalog.post('Need to create scratch columns; ignoring selection.'
                             )

            cblocal.open(vis, addmodel=addmodel)
        else:
            raise Exception, \
                'Visibility data set not found - please verify the name'

        # If necessary (scr col not just created), initialize scr cols
        if doinit:
            cblocal.selectvis(field=field, spw=spw, intent=intent)
            cblocal.initcalset(1)
        cblocal.close()

        # Write history to the MS
        param_names = clearcal.func_code.co_varnames[:clearcal.func_code.co_argcount]
        param_vals = [eval(p) for p in param_names]
        casalog.post('Updating the history in the output', 'DEBUG1')
        write_history(mslocal, vis, 'clearcal', param_names,
                      param_vals, casalog)
        
    except Exception, instance:

        print '*** Error ***', instance
Example 21
def xyamb(xytab,qu,xyout=''):

    mytb=taskinit.tbtool()

    if not isinstance(qu,tuple):
        raise Exception,'qu must be a tuple: (Q,U)'

    if xyout=='':
        xyout=xytab
    if xyout!=xytab:
        os.system('cp -r '+xytab+' '+xyout)

    QUexp=complex(qu[0],qu[1])
    print 'Expected QU = ',qu   # , '  (',pl.angle(QUexp)*180/pi,')'

    mytb.open(xyout,nomodify=False)

    QU=mytb.getkeyword('QU')['QU']
    P=pl.sqrt(QU[0,:]**2+QU[1,:]**2)

    nspw=P.shape[0]
    for ispw in range(nspw):
        st=mytb.query('SPECTRAL_WINDOW_ID=='+str(ispw))
        if (st.nrows()>0):
            q=QU[0,ispw]
            u=QU[1,ispw]
            qufound=complex(q,u)
            c=st.getcol('CPARAM')
            fl=st.getcol('FLAG')
            xyph0=pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]),True)
            print 'Spw = '+str(ispw)+': Found QU = '+str(QU[:,ispw])  # +'   ('+str(pl.angle(qufound)*180/pi)+')'
            #if ( (abs(q)>0.0 and abs(qu[0])>0.0 and (q/qu[0])<0.0) or
            #     (abs(u)>0.0 and abs(qu[1])>0.0 and (u/qu[1])<0.0) ):
            if ( pl.absolute(pl.angle(qufound/QUexp)*180/pi)>90.0 ):
                c[0,:,:]*=-1.0
                xyph1=pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]),True)
                st.putcol('CPARAM',c)
                QU[:,ispw]*=-1
                print '   ...CONVERTING X-Y phase from '+str(xyph0)+' to '+str(xyph1)+' deg'
            else:
                print '      ...KEEPING X-Y phase '+str(xyph0)+' deg'
            st.close()
    QUr={}
    QUr['QU']=QU
    mytb.putkeyword('QU',QUr)
    mytb.close()
    QUm=pl.mean(QU[:,P>0],1)
    QUe=pl.std(QU[:,P>0],1)
    Pm=pl.sqrt(QUm[0]**2+QUm[1]**2)
    Xm=0.5*atan2(QUm[1],QUm[0])*180/pi

    print 'Ambiguity resolved (spw mean): Q=',QUm[0],'U=',QUm[1],'(rms=',QUe[0],QUe[1],')','P=',Pm,'X=',Xm

    stokes=[1.0,QUm[0],QUm[1],0.0]
    print 'Returning the following Stokes vector: '+str(stokes)
    
    return stokes
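
A hedged usage sketch (hypothetical caltable name and Q,U estimates, not from the source), assuming taskinit, pylab as pl, and the math names pi and atan2 are in scope as the function requires:

stokes = xyamb('G0.XY', qu=(0.021, -0.015), xyout='G0.XYf')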
Example 22
def copy_model_RRtoLL(vis):
    # copy RR column of model_data column to LL
    tb = tbtool()
    tb.open(vis,nomodify=False)
    model_vis = tb.getcol('MODEL_DATA')
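    # assumes four correlations stored in the order RR, RL, LR, LL, so index 0 is RR and index 3 is LL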
    model_vis[3,:,:] = model_vis[0,:,:] # copy RR model column to LL model column
    tb.putcol('MODEL_DATA',model_vis)
    tb.unlock()
    tb.close()
Example 23
def get_mosaic_centre(ms_name, return_string=True,
                      field_name='M33'):
    '''
    Assuming a fully sampled mosaic, take the median
    as the phase centre.
    '''

    try:
        # CASA 6
        import casatools
        # iatool = casatools.image()
        tb = casatools.table()
    except ImportError:
        try:
            from taskinit import tbtool
            # iatool = iatool()
            tb = tbtool()
        except ImportError:
            raise ImportError("Could not import CASA (casac).")

    tb.open(ms_name + "/FIELD")
    ptgs = tb.getcol("PHASE_DIR").squeeze()

    ras, decs = (ptgs * u.rad).to(u.deg)

    if field_name is not None:

        field_names = tb.getcol('NAME')

        valids = np.array([True if field_name in name else False
                           for name in field_names])

        ras = ras[valids]
        decs = decs[valids]

        if ras.size == 0:
            raise ValueError("No fields with given sourceid.")

    tb.close()

    med_ra = np.median(ras)
    med_dec = np.median(decs)

    if return_string:

        med_ptg = SkyCoord(med_ra, med_dec, frame='icrs')

        ptg_str = "ICRS "
        ptg_str += med_ptg.to_string('hmsdms')

        # tclean was rejecting this b/c of a string type
        # change? Anyways this seems to fix it.
        ptg_str = str(ptg_str)

        return ptg_str

    return med_ra, med_dec
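
A minimal usage sketch (hypothetical MS name), assuming numpy as np, astropy.units as u and astropy.coordinates.SkyCoord are imported as the function body requires:

phasecenter = get_mosaic_centre('M33_track1.ms', return_string=True, field_name='M33')
# the returned string can then be passed on, e.g. to tclean(phasecenter=phasecenter, ...)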
Example 24
    def __init__(self, filename, ia_kwargs={}):

        try:
            import casatools
            self.iatool = casatools.image
            tb = casatools.table()
        except ImportError:
            try:
                from taskinit import iatool, tbtool
                self.iatool = iatool
                tb = tbtool()
            except ImportError:
                raise ImportError(
                    "Could not import CASA (casac) and therefore cannot read CASA .image files"
                )

        self.ia_kwargs = ia_kwargs

        self.filename = filename

        self._cache = {}

        log.debug("Creating ArrayLikeCasa object")

        # try to trick CASA into destroying the ia object
        def getshape():
            ia = self.iatool()
            # use the ia tool to get the file contents
            try:
                ia.open(self.filename, cache=False)
            except AssertionError as ex:
                if 'must be of cReqPath type' in str(ex):
                    raise IOError("File {0} not found.  Error was: {1}".format(
                        self.filename, str(ex)))
                else:
                    raise ex

            self.shape = tuple(ia.shape()[::-1])
            self.dtype = np.dtype(ia.pixeltype())

            ia.done()
            ia.close()

        getshape()

        self.ndim = len(self.shape)

        tb.open(self.filename)
        dminfo = tb.getdminfo()
        tb.done()

        # unclear if this is always the correct callspec!!!
        # (transpose requires this be backwards)
        self.chunksize = dminfo['*1']['SPEC']['DEFAULTTILESHAPE'][::-1]

        log.debug("Finished with initialization of ArrayLikeCasa object")
Example 25
def getavweight(vis="", field=[], spw=""):

    """
    Usage: getavweight(vis, field, spw)
           Get the average weight for the given field and spw.
           The field parameter takes a list of fields.
    """

    if vis == "" or spw == "" or field == [] or not type(field) == list:
        print "Usage: getavweight(vis, field, spw)"
        print "       The field parameter takes a list of fields."
        return False

    myvis = vis
    myspw = spw
    myfields = field
    mytb = taskinit.tbtool()

    mytb.open(myvis)
    w = mytb.getcol("WEIGHT")
    dd = mytb.getcol("DATA_DESC_ID")
    ff = mytb.getcol("FIELD_ID")
    mytb.close()

    mytb.open(myvis + "/DATA_DESCRIPTION")
    mydds = []

    for i in range(0, mytb.nrows()):
        if mytb.getcell("SPECTRAL_WINDOW_ID", i) != myspw:
            continue
        else:
            mydds.append(i)

    mytb.close()

    mynrows = 0
    mysumw = 0

    npol = len(w)

    if len(mydds) > 0:
        for row in range(0, len(dd)):
            if (dd[row] in mydds) and (ff[row] in myfields):
                mynrows += 1
                for i in range(0, npol):
                    mysumw += w[i][row]

    rval = 0.0

    if mynrows > 0:
        rval = mysumw / float(npol) / float(mynrows)
        print "Average weight is ", rval
    else:
        print "No rows selected."

    return rval
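
A minimal usage sketch (hypothetical MS and selection), assuming taskinit is importable; note that field takes a list of field IDs:

avw = getavweight("my_data.ms", field=[2, 3], spw=5)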
Example 26
def copy_model_RRtoLL(vis):
    # copy RR column of model_data column to LL
    tb = tbtool()
    tb.open(vis, nomodify=False)
    model_vis = tb.getcol('MODEL_DATA')
    model_vis[3, :, :] = model_vis[
        0, :, :]  # copy RR model column to LL model column
    tb.putcol('MODEL_DATA', model_vis)
    tb.unlock()
    tb.close()
Example 27
    def test_model_keys(self):
        '''partition: CAS-4398, handle the MODEL keywords correctly'''

        print '*** Check that MODEL_DATA is not present in MS first'
        mytb = tbtool()
        try:
            mytb.open(self.msfile + '/MODEL_DATA')
        except Exception, instance:
            print '*** Expected exception. \"%s\"' % instance
            mytb.close()
Example 28
 def test_model_keys(self):
     '''partition: CAS-4398, handle the MODEL keywords correctly'''
     
     print '*** Check that MODEL_DATA is not present in MS first'
     mytb = tbtool()
     try:
         mytb.open(self.msfile+'/MODEL_DATA')
     except Exception, instance:
         print '*** Expected exception. \"%s\"'%instance
         mytb.close()
Example 29
def copy_data_RRtoLL(vis):
    # copy RR column of data column to LL
    # doesn't account for different flags on RR and LL
    tb = tbtool()
    tb.open(vis,nomodify=False)
    data_vis = tb.getcol('DATA')
    data_vis[3,:,:] = data_vis[0,:,:] # copy RR data column to LL data column
    tb.putcol('DATA',data_vis)
    tb.unlock()
    tb.close()
Example 30
def getavweight(vis="", field=[], spw=""):
    """
    Usage: getavweight(vis, field, spw)
           Get the average weight for the given field and spw.
           The field parameter takes a list of fields.
    """

    if (vis == "" or spw == "" or field == [] or not type(field) == list):
        print "Usage: getavweight(vis, field, spw)"
        print "       The field parameter takes a list of fields."
        return False

    myvis = vis
    myspw = spw
    myfields = field
    mytb = taskinit.tbtool()

    mytb.open(myvis)
    w = mytb.getcol("WEIGHT")
    dd = mytb.getcol("DATA_DESC_ID")
    ff = mytb.getcol("FIELD_ID")
    mytb.close()

    mytb.open(myvis + "/DATA_DESCRIPTION")
    mydds = []

    for i in range(0, mytb.nrows()):
        if (mytb.getcell("SPECTRAL_WINDOW_ID", i) != myspw):
            continue
        else:
            mydds.append(i)

    mytb.close()

    mynrows = 0
    mysumw = 0

    npol = len(w)

    if len(mydds) > 0:
        for row in range(0, len(dd)):
            if (dd[row] in mydds) and (ff[row] in myfields):
                mynrows += 1
                for i in range(0, npol):
                    mysumw += w[i][row]

    rval = 0.

    if mynrows > 0:
        rval = mysumw / float(npol) / float(mynrows)
        print "Average weight is ", rval
    else:
        print "No rows selected."

    return rval
Example 31
 def _make_consistent(self):
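     # set SIGMA = 1 / sqrt(WEIGHT) for every row so the two columns are consistent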
     tb = tbtool()
     tb.open(self.inputms, nomodify=False)
     try:
         for irow in xrange(tb.nrows()):
             tb.putcell("SIGMA", irow,
                        1. / numpy.sqrt(tb.getcell("WEIGHT", irow)))
     except:
         raise RuntimeError, "Failed to manually make SIGMA and WEIGHT consistent."
     finally:
         tb.close()
Example 32
def adjustweights2(vis="", field="", spws=[]):
    """
       Usage: adjustweights2(vis, field, spws)
              Scale the weights in specified spws by a factor
              2*df*dt/nchan, where df is the channel bandwidth,
              dt is the integration time, and nchan is the number
              of channels in the spw.  This enables imaging of
              mixed mode spws in CASA v4.2 and earlier.  (Note
              that there will be no net effect if the spws
              share the same df, dt, and nchan.)

              spws are of type list,
              field should be given as field id.
    """

    if (vis == "" or field == "" or spws == [] or not type(spws) == list):
        print "Usage: adjustweights2(vis, field, spws)"
        print "       spws are of type list,"
        print "       field should be given as field id"
        return False

    myvis = vis
    myfield = int(field)
    mytb = taskinit.tbtool()

    # get avweight and chanwidth from spws
    chw = []
    nch = []
    for spw in spws:
        cw = abschanwidth(myvis, spw)
        if cw == 0:
            print "Error reading channel width of spw ", spw
            return False
        chw.append(cw)

        nc = getnch(myvis, spw)
        if (nc == 0):
            print "Error: nch of spw ", spw, " is zero (could also mean no data)."
            return False
        nch.append(nc)

        print "Spw ", spw, ", channelwidth", cw, ", nchan ", nc

    # calculate scale factor and apply scaling to the spws

    for i in range(0, len(spws)):
        myscale = chw[i] / nch[i]
        print "Scale factor for weights in spw ", spws[i], " is ", myscale
        scaleweights(myvis, [myfield], spws[i], myscale,
                     True)  # include integ time

    print "Done."

    return True
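
A minimal usage sketch (hypothetical MS, field id and spw list), assuming the companion helpers scaleweights, abschanwidth and getnch are on the path as the function requires:

adjustweights2("my_data.ms", field="2", spws=[17, 19, 21])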
Example 33
def adjustweights2(vis="", field="", spws=[]):

    """
       Usage: adjustweights2(vis, field, spws)
              Scale the weights in specified spws by a factor
              2*df*dt/nchan, where df is the channel bandwidth,
              dt is the integration time, and nchan is the number
              of channels in the spw.  This enables imaging of
              mixed mode spws in CASA v4.2 and earlier.  (Note
              that there will be no net effect if the spws
              share the same df, dt, and nchan.)

              spws are of type list,
              field should be given as field id.
    """

    if vis == "" or field == "" or spws == [] or not type(spws) == list:
        print "Usage: adjustweights2(vis, field, spws)"
        print "       spws are of type list,"
        print "       field should be given as field id"
        return False

    myvis = vis
    myfield = int(field)
    mytb = taskinit.tbtool()

    # get avweight and chanwidth from spws
    chw = []
    nch = []
    for spw in spws:
        cw = abschanwidth(myvis, spw)
        if cw == 0:
            print "Error reading channel width of spw ", spw
            return False
        chw.append(cw)

        nc = getnch(myvis, spw)
        if nc == 0:
            print "Error: nch of spw ", spw, " is zero (could also mean no data)."
            return False
        nch.append(nc)

        print "Spw ", spw, ", channelwidth", cw, ", nchan ", nc

    # calculate scale factor and apply scaling to the spws

    for i in range(0, len(spws)):
        myscale = chw[i] / nch[i]
        print "Scale factor for weights in spw ", spws[i], " is ", myscale
        scaleweights(myvis, [myfield], spws[i], myscale, True)  # include integ time

    print "Done."

    return True
Example 34
def copy_data_RRtoLL(vis):
    # copy RR column of data column to LL
    # doesn't account for different flags on RR and LL
    tb = tbtool()
    tb.open(vis, nomodify=False)
    data_vis = tb.getcol('DATA')
    data_vis[3, :, :] = data_vis[
        0, :, :]  # copy RR data column to LL data column
    tb.putcol('DATA', data_vis)
    tb.unlock()
    tb.close()
Example 35
def compareSubTables(input, reference, order=None, excluded_cols=[]):

    tbinput = tbtool()
    tbinput.open(input)
    if order is not None:
        tbinput_sorted = tbinput.taql("SELECT * from " + input + " order by " +
                                      order)
    else:
        tbinput_sorted = tbinput

    tbreference = tbtool()
    tbreference.open(reference)
    if order is not None:
        tbreference_sorted = tbreference.taql("SELECT * from " + reference +
                                              " order by " + order)
    else:
        tbreference_sorted = tbreference

    columns = tbinput.colnames()
    for col in columns:
        if not col in excluded_cols:
            col_input = tbinput_sorted.getcol(col)
            col_reference = tbreference_sorted.getcol(col)
            if not (col_input == col_reference).all():
                tbinput.close()
                tbreference.close()
                del tbinput
                del tbreference
                return (False, col)

    tbinput.close()
    tbreference.close()
    del tbinput
    del tbreference

    return (True, "OK")
Example 36
def scan_reindex(vis,gap=50.):
   # replace scan numbers to count up from 1, changing every time there is a gap in time
   #   greater than gap (in seconds) between entries
   # assumes ms is already chronological; does not account for multiple fields
   # (but should be fine w/ multiple fields if gap < slew time)
   t = tbtool()
   t.open(vis, nomodify=False)
   scanlist0 = t.getcol('SCAN_NUMBER')
   times = t.getcol('TIME')
   scanlist = numpy.ones(scanlist0.shape)
   dt = times[1:]-times[:-1]   # should be in seconds
   scan_inc = numpy.cumsum(dt>gap)
   scanlist[1:] += scan_inc
   scanlist = scanlist.astype(int)
   t.putcol('SCAN_NUMBER',scanlist)
   t.unlock()
   t.close()
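
A minimal usage sketch (hypothetical MS name and gap), assuming numpy and tbtool are available as above:

scan_reindex('my_track.ms', gap=120.)   # start a new scan wherever consecutive rows are more than 120 s apart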
Example 37
    def test_shape3(self):
        '''mstransform: DATA and FLAG tileshapes should be the same'''
        self.outputms = "shape3.ms"
        inptsh = [4,10,1024]
        mstransform(vis=self.vis, outputvis=self.outputms, createmms=True, tileshape=inptsh)

        self.assertTrue(os.path.exists(self.outputms))

        # Get the tile shape for the DATA output
        tblocal = tbtool()
        tblocal.open(self.outputms)
        outdm = tblocal.getdminfo()
        tblocal.close()
        outtsh = th.getTileShape(outdm)
        # And for the FLAG column
        flagtsh = th.getTileShape(outdm, 'FLAG')

        self.assertTrue((outtsh==flagtsh).all(), 'Tile shapes are different')
Example 38
 def test_alma_wvr_correlation_products_mms1(self):
     
     self.outputms = 'test_alma_wvr_correlation_products_1.mms'
     # Only spw=2 exists in the MS
     mstransform(vis=self.vis,outputvis=self.outputms,spw='0,1,2',datacolumn='DATA',createmms=True)
     
     # Check that POLARIZATION sub-table is properly sorted
     mytb = tbtool()
     mytb.open(self.outputms + '/POLARIZATION')
     numCorr = mytb.getcol('NUM_CORR')
     mytb.close()    
     
     self.assertEqual(numCorr[0],2,'POLARIZATION table mis-sorted')
     self.assertEqual(numCorr[1],1, 'POLARIZATION table mis-sorted')
     
     # Check that flagdata can run properly with output MS
     summary = flagdata(vis=self.outputms,mode='summary')
     self.assertTrue(summary.has_key('correlation'), 'Flagdata failure due to misformatted MS')
Example 39
 def test_CAS6206(self):
     '''mstransform: verify that all columns are re-indexed in SPW sub-table'''
     self.outputmms='test.mms'
     self.outputms='assoc.ms'
     self.setUp_CAS_5013()
     mstransform(vis=self.vis, outputvis=self.outputmms,createmms=True, datacolumn='corrected')
     
     # Check that optional ASSOC_SPW_ID is the same in input and output
     tblocal = tbtool()
     tblocal.open(self.vis+'/SPECTRAL_WINDOW',nomodify=True)
     in_assoc = tblocal.iscelldefined('ASSOC_SPW_ID',0)
     tblocal.close()
     tblocal.open(self.outputmms+'/SPECTRAL_WINDOW',nomodify=True)
     out_assoc = tblocal.iscelldefined('ASSOC_SPW_ID',0)
     tblocal.close()
     self.assertEqual(in_assoc, out_assoc, 'Error in SPW sub-table creation; ASSOC_SPW_ID is different')
     
     # if SPW sub-table is not correct, the next step might fail
     self.assertTrue(mstransform(vis=self.outputmms, outputvis=self.outputms, hanning=True, datacolumn='data'))
Example 40
def Dgen(dtab,dout):

    mytb=taskinit.tbtool()

    os.system('cp -r '+dtab+' '+dout)

    mytb.open(dout,nomodify=False)

    irec=mytb.info()
    st=irec['subType']
    if st.count('Df')>0:
        irec['subType']='Dfgen Jones'
    elif st.count('D')>0:
        irec['subType']='Dgen Jones'
    else:
        mytb.close()
        raise Exception, 'Not a D?'

    mytb.putinfo(irec)
    mytb.putkeyword('VisCal',irec['subType'])
    mytb.close()
Example 41
def opacal(vis, calname, asap=True, interpolate=0, height=200):
    """Create an opacity calibration table for the MeasurementSet given.
  If calname is empty, one is created based on vis and returned.
  Use asap=True to use the ASAP/Miriad atmosphere model and
  asap=False to use the CASA ATM model.
  Use interpolate>1 to use piecewise linear interpolation across each spectrum
  or leave at 0 to use a constant value per spectral window.
  The height parameter should specify the observatory height above mean
  sea level (not height above WGS84 ellipsoid)"""

    mytb = tbtool()
    # get data we need from weather table
    try:
        mytb.open(vis + '/WEATHER')
        time = mytb.getcol('TIME')
        press = mytb.getcol('PRESSURE')  #Pa
        temp = mytb.getcol('TEMPERATURE')  # C (sometimes returns Kelvin)
        hum = mytb.getcol('REL_HUMIDITY')  # %
    except Exception, e:
        print "Error: Cannot read required data from WEATHER table"
        return
Example 42
def opacal(vis,calname,asap=True,interpolate=0,height=200):
  """Create an opacity calibration table for the MeasurementSet given.
  If calname is empty, one is created based on vis and returned.
  Use asap=True to use the ASAP/Miriad atmosphere model and
  asap=False to use the CASA ATM model.
  Use interpolate>1 to use piecewise linear interpolation across each spectrum
  or leave at 0 to use a constant value per spectral window.
  The height parameter should specify the observatory height above mean
  sea level (not height above WGS84 ellipsoid)"""

  mytb = tbtool()
  # get data we need from weather table
  try:
    mytb.open(vis+'/WEATHER')
    time = mytb.getcol('TIME')
    press = mytb.getcol('PRESSURE') #Pa
    temp = mytb.getcol('TEMPERATURE') # C (sometimes returns Kelvin)
    hum = mytb.getcol('REL_HUMIDITY') # %
  except Exception, e:
    print "Error: Cannot read required data from WEATHER table"
    return
Example 43
def Dgen(dtab, dout):

    mytb = taskinit.tbtool()

    os.system('cp -r ' + dtab + ' ' + dout)

    mytb.open(dout, nomodify=False)

    irec = mytb.info()
    st = irec['subType']
    if st.count('Df') > 0:
        irec['subType'] = 'Dfgen Jones'
    elif st.count('D') > 0:
        irec['subType'] = 'Dgen Jones'
    else:
        mytb.close()
        raise Exception, 'Not a D?'

    mytb.putinfo(irec)
    mytb.putkeyword('VisCal', irec['subType'])
    mytb.close()
Example 44
def putdata_raw(imgname, data, clone=None):
    """Store (overwrite) data in an existing CASA image.
       See getdata_raw(imgname) for the reverse operation.

       Parameters
       ----------
       imgname : str
           The (absolute) CASA image filename.  It should exist
           already, unless **clone** was given.

       data : 2D numpy array or a list of 2D numpy arrays
           The data...

       clone : str, optional
           An optional filename from which to clone the image
           for output. It needs to be an absolute filename.
  
    """
    ia = taskinit.iatool()    
    if clone != None:
        ia.fromimage(infile=clone,outfile=imgname,overwrite=True) 
        ia.close()
    # @todo it seems roundabout to have to borrow the odd (nx,ny,1,1,1) shape that was seen
    if type(data) == type([]):
        # @todo since this needs to extend the axes, the single plane clone and replace data doesn't work here
        raise Exception,"Not Implemented Yet"
        bigim = ia.imageconcat(outfile=imgname, infiles=infiles, axis=2, relax=T, tempclose=F, overwrite=T)
        bigim.close()
    else:
        tb = taskinit.tbtool()
        tb.open(imgname,nomodify=False)
        d = tb.getcol('map')
        pdata = ma.getdata(data).reshape(d.shape)
        tb.putcol('map',pdata)
        tb.flush()
        tb.close()
    return
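
A minimal usage sketch (hypothetical file names), assuming taskinit and numpy.ma (as ma) are importable, and pairing this with getdata_raw from the earlier example:

d = getdata_raw('/data/template.im')
putdata_raw('/data/template_copy.im', d * 2.0, clone='/data/template.im')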
Example 45
def getnch(vis="", spw=""):
    """
    Usage: getnch(vis, spw)
           Get the nchan for the given spw.
           Returns 0 upon error.
    """

    if (vis == "" or spw == ""):
        print "Usage: abschanwidth(vis, spw)"
        return 0

    myvis = vis
    myspw = spw
    mytb = taskinit.tbtool()

    mytb.open(myvis + "/SPECTRAL_WINDOW")
    if (spw >= mytb.nrows() or spw < 0):
        print "Error: spw out of range. Min is 0. Max is ", mytb.nrows() - 1
        mytb.close()
        return 0

    mynch = mytb.getcell("NUM_CHAN", spw)
    mytb.close()

    return numpy.abs(mynch)
Example 46
def abschanwidth(vis="", spw=""):
    """
    Usage: abschanwidth(vis, spw)
           Get the absolute channel width for the given spw.
           Returns 0 upon error.
    """

    if (vis == "" or spw == ""):
        print "Usage: abschanwidth(vis, spw)"
        return 0

    myvis = vis
    myspw = spw
    mytb = taskinit.tbtool()

    mytb.open(myvis + "/SPECTRAL_WINDOW")
    if (spw >= mytb.nrows() or spw < 0):
        print "Error: spw out of range. Min is 0. Max is ", mytb.nrows() - 1
        mytb.close()
        return 0

    mychw = mytb.getcell("CHAN_WIDTH", spw)[0]
    mytb.close()

    return numpy.fabs(mychw)
Example 47
            
        mtlocal.done()
                    
    except Exception, instance:
        mtlocal.done()
        casalog.post('%s'%instance,'ERROR')
        return False

    # Update the FLAG_CMD sub-table to reflect any spw/channels selection
    # If the spw selection is by name or FLAG_CMD contains spw with names, skip the updating    
    
    if ((spw != '') and (spw != '*')) or chanaverage == True:
        isopen = False

        try:
            mytb = tbtool()
            mytb.open(outputvis + '/FLAG_CMD', nomodify=False)
            isopen = True
            nflgcmds = mytb.nrows()
            
            if nflgcmds > 0:
                updateFlagCmd = False

                # If spw selection is by name in FLAG_CMD, do not update, CAS-7751
                mycmd = mytb.getcell('COMMAND', 0)
                cmdlist = mycmd.split()
                for cmd in cmdlist:
                    # Match only spw indices, not names
                    if cmd.__contains__('spw'):
                        cmd = cmd.strip("spw=")
                        spwstr = re.search('^[^a-zA-Z]+$', cmd)
Example 48
def dict_to_table(indict, tablepath, kwkeys=[], colkeys=[], info=None, keepcolorder=False):
    """
    Converts a dictionary to a CASA table, and attempts to
    save it to tablepath.  Returns whether or not it was successful.

    kwkeys is a list of keys in dict that should be treated as table keywords,
    and colkeys is a list of keys to be treated as table columns.  If a key in
    indict is not in either kwkeys or colkeys, it will be appended to colkeys
    if it refers to a list, array, or specially formed dict with the right
    number of rows, or kwkeys otherwise.

    "Specially formed dict" means a python dictionary with the right keys to
    provide a comment and/or keywords to specify a (measure) frame or
    (quantity) unit for the column.

    The number of rows is set by the first column.  The order of the columns is
    the order of colkeys, followed by the remaining columns in alphabetical
    order.

    Example:
    mydict = {'delta': [1.2866, 1.2957, 1.3047],
              'obs_code': ['*', 'U', 't'],
              'date': {'m0': {'unit': 'd',
                              'value': [55317.0, 55318.0, 55319.0]},
                       'refer': 'UT1',
                       'type': 'epoch'},
              'phang': {'comment': 'phase angle',
                        'data': {'unit': 'deg',
                                 'value': array([37.30, 37.33, 37.36])}}}
                                 
    # Produces a table with, in order, a measure column (date), two bare
    # columns (delta and obs_code), and a commented quantity column (phang).
    # The comment goes in the 'comment' field of the column description.
    # Measure and straight array columns can also be described by using a
    # {'comment': (description), 'data': (measure, quantity, numpy.array or
    # list)} dict.
    dict_to_table(mydict, 'd_vs_phang.tab')

    TODO: detect non-float data types, including array cells.
    """
    nrows = 0
    dkeys = indict.keys()
    keywords = []
    cols = []

    def get_bare_col(col):
        """
        Given a col that could be a bare column (list or array), or measure or
        quantity containing a bare column, return the bare column.
        """
        barecol = col
        if hasattr(barecol, 'has_key'):
            if barecol.has_key('comment'):
                barecol = barecol.get('data')
            if type(barecol)==dict and me.ismeasure(barecol):
                barecol = barecol['m0']
            # if qa.isquantity(data) can't be trusted.
            if hasattr(barecol, 'has_key') and barecol.has_key('unit') and barecol.has_key('value'):
                barecol = barecol['value']
        return barecol
        
    # Divvy up the known keywords and columns, if present, preserving the
    # requested order.
    for kw in kwkeys:
        if kw in dkeys:
            # Take kw out of dkeys and put it in keywords.
            keywords.append(dkeys.pop(dkeys.index(kw)))
    for c in colkeys:
        if c in dkeys:
            cols.append(dkeys.pop(dkeys.index(c)))
            if nrows == 0:
                nrows = len(get_bare_col(indict[c]))
                print "Got nrows =", nrows, "from", c

    # Go through what's left of dkeys and assign them to either keywords or
    # cols.
    dkeys.sort()
    for d in dkeys:
        used_as_col = False
        colcand = get_bare_col(indict[d])
        # Treat it as a column if it has the right number of rows.
        if type(colcand) in (list, numpy.ndarray):
            if nrows == 0:
                nrows = len(colcand)
            if len(colcand) == nrows:
                cols.append(d)
                used_as_col = True
        if not used_as_col:
            keywords.append(d)

    # Make the table's description.
    tabdesc = {}
    # Initialize the column descriptor with defaults (these come from
    # data/ephemerides/DE200, but I replaced IncrementalStMan with StandardStMan).
    coldesc = {'comment': '',
               'dataManagerGroup': '',
               'dataManagerType': 'StandardStMan',
               'maxlen': 0,
               'option': 0,
               'valueType': 'double'} # Use double (not float!) for columns
                                      # that will be read by MeasIERS.
    for c in cols:
        #print "Setting coldesc for", c
        data = indict[c]  # Place to find the valueType.
        
        if hasattr(data, 'has_key'):
            #print "comment =", data.get('comment', '')
            coldesc['comment'] = data.get('comment', '')
            
        data = get_bare_col(data)
        valtype = str(type(data[0]))[7:-2]
        if valtype == 'str':
            valtype = 'string'
        valtype = valtype.replace('64', '')      # Table uses 'float', not 'float64'.
        valtype = valtype.replace('numpy.', '')  # or 'numpy.float'.

        # Use double (not float!) for columns that will be read by MeasIERS.
        if valtype == 'float':
            valtype = 'double'
            
        coldesc['valueType'] = valtype

        tabdesc[c] = coldesc.copy()

    # Since tables are directories, it saves a lot of grief if we first check
    # whether the table exists and is under svn control.
    svndir = None
    if os.path.isdir(tablepath):
        if os.path.isdir(tablepath + '/.svn'):
            # tempfile is liable to use /tmp, which can be too small and/or slow.
            # Use the directory that tablepath is in, since we know the user
            # approves of writing to it.
            workingdir = os.path.abspath(os.path.dirname(tablepath.rstrip('/')))

            svndir = tempfile.mkdtemp(dir=workingdir)
            shutil.move(tablepath + '/.svn', svndir)
        print "Removing %s directory" % tablepath
        shutil.rmtree(tablepath)

    # Create and fill the table.
    retval = True
    try:
        mytb = tbtool()
        tmpfname='_tmp_fake.dat'
        if keepcolorder:
            # try to keep order of cols 
            # Ugly, but since tb.create() cannot accept an ordered dictionary
            # for tabledesc, I cannot find any other way to keep column order.
            # * comment for each column will not be filled
            f = open(tmpfname,'w')
            zarr=numpy.zeros(len(cols))
            szarr=str(zarr.tolist())
            szarr=szarr.replace('[','')
            szarr=szarr.replace(']','')
            szarr=szarr.replace(',','')
            scollist=''
            sdtypes='' 
            for c in cols:
                scollist+=c+' '   
                vt=tabdesc[c]['valueType']
                if vt=='string':
                   sdtypes+='A '    
                elif vt=='integer':
                   sdtypes+='I '
                elif vt=='double':
                   sdtypes+='D '
                elif vt=='float':
                   sdtypes+='R '
            f.write(scollist+'\n')
            f.write(sdtypes+'\n')
            f.write(szarr)
            f.close()
            mytb.fromascii(tablepath,tmpfname,sep=' ')     
            # close and re-open since tb.fromascii(nomodify=False) has not
            # implemented yet
            mytb.close() 
            os.remove(tmpfname) 
            mytb.open(tablepath, nomodify=False)
            mytb.removerows(0)
        else: 
            mytb.create(tablepath, tabdesc)
        if type(info) == dict:
            mytb.putinfo(info)
        mytb.addrows(nrows)     # Must be done before putting the columns.
    except Exception, e:
        print "Error", e, "trying to create", tablepath
        retval = False
Example 49
def oldsplit(vis, outputvis, datacolumn, field, spw, width, antenna,
          timebin, timerange, scan, intent, array, uvrange,
          correlation, observation, combine, keepflags, keepmms):
    """Create a visibility subset from an existing visibility set:

    Keyword arguments:
    vis -- Name of input visibility file (MS)
            default: none; example: vis='ngc5921.ms'
    outputvis -- Name of output visibility file (MS)
                  default: none; example: outputvis='ngc5921_src.ms'
    datacolumn -- Which data column to split out
                  default='corrected'; example: datacolumn='data'
                  Options: 'data', 'corrected', 'model', 'all',
                  'float_data', 'lag_data', 'float_data,data', and
                  'lag_data,data'.
                  note: 'all' = whichever of the above that are present.
    field -- Field name
              default: field = '' means  use all sources
              field = 1 # will get field_id=1 (if you give it an
                          integer, it will retrieve the source with that index)
              field = '1328+307' specifies source '1328+307'.
                 Minimum match can be used, egs  field = '13*' will
                 retrieve '1328+307' if it is unique or exists.
                 Source names with imbedded blanks cannot be included.
    spw -- Spectral window index identifier
            default=-1 (all); example: spw=1
    antenna -- antenna names
               default '' (all),
               antenna = '3 & 7' gives one baseline with antennaid = 3,7.
    timebin -- Interval width for time averaging.
               default: '0s' or '-1s' (no averaging)
               example: timebin='30s'
    timerange -- Time range
                 default='' means all times.  examples:
                 timerange = 'YYYY/MM/DD/hh:mm:ss~YYYY/MM/DD/hh:mm:ss'
                 timerange='< YYYY/MM/DD/HH:MM:SS.sss'
                 timerange='> YYYY/MM/DD/HH:MM:SS.sss'
                 timerange='< ddd/HH:MM:SS.sss'
                 timerange='> ddd/HH:MM:SS.sss'
    scan -- Scan numbers to select.
            default '' (all).
    intent -- Scan intents to select.
            default '' (all).
    array -- (Sub)array IDs to select.     
             default '' (all).
    uvrange -- uv distance range to select.
               default '' (all).
    correlation -- Select correlations, e.g. 'rr, ll' or ['XY', 'YX'].
                   default '' (all).
    observation -- Select by observation ID(s).
                   default '' (all).
    combine -- Data descriptors that time averaging can ignore:
                  scan, and/or state
                  Default '' (none)
    keepflags -- Keep flagged data, if possible
                 Default True

    keepmms -- If the input is a multi-MS, make the output one, too. (experimental)
               Default: False
                 
    """

    casalog.origin('oldsplit')
    mylocals = locals()
    rval = True
    try:

        if (keepmms and ParallelTaskHelper.isParallelMS(vis)): 
            if (timebin!='0s' and timebin!='-1s'): 
                casalog.post('Averaging over time with keepmms=True may lead to results different\n'
                             +'  from those obtained with keepmms=False due to different binning.', 'WARN')
                            
            myms = mstool()
            myms.open(vis)
            mses = myms.getreferencedtables()
            myms.close() 
            mses.sort()

            nfail = 0
            if os.path.exists(outputvis):
                raise ValueError, "Output MS %s already exists - will not overwrite." % outputvis
            tempout = outputvis+str(time.time())
            os.mkdir(tempout)
            successfulmses = []
            mastersubms = ''
            masterptab = ''
            emptyptab = tempout+'/EMPTY_POINTING'
            nochangeinpointing = (str(antenna)+str(timerange)=='')

            if nochangeinpointing:    
                # resulting pointing table is the same for all
                #  -> replace by empty table if it is a link and won't be modified anyway
                #     and put back original into the master after split

                # find the master
                for m in mses:
                    theptab = m+'/POINTING'
                    if not os.path.islink(theptab):
                        #print "is master ", theptab
                        mastersubms = m
                        masterptab = m+'/POINTING'
                        # save time by not copying the POINTING table len(mses) times
                        myttb = tbtool()
                        myttb.open(masterptab)
                        tmpp = myttb.copy(newtablename=emptyptab, norows=True)
                        myttb.close()
                        del myttb
                        tmpp.close()
                        del tmpp
                        break

            mytb = tbtool()

            # prepare the input MMS for processing
            replaced = []
            outputviss = []
            theptabs = []
            
            for m in mses:

                # make sure the SORTED_TABLE keywords are disabled
                mytb.open(m, nomodify=False)
                if 'SORTED_TABLE' in mytb.keywordnames():
                    tobedel = mytb.getkeyword('SORTED_TABLE').split(' ')[1]
                    mytb.removekeyword('SORTED_TABLE')
                    os.system('rm -rf '+tobedel)
                mytb.close()

                # deal with the POINTING table
                theptab = m+'/POINTING'
                theptabs.append(theptab)

                if nochangeinpointing and os.path.islink(theptab):
                    #print "is link ", theptab
                    os.remove(theptab)
                    shutil.copytree(emptyptab, theptab)
                    replaced.append(True)
                else:
                    replaced.append(False)

                # run oldsplit
                outputviss.append(os.path.abspath(tempout+'/'+os.path.basename(m)))
            # end for

            # send off the jobs
            print 'Running split_core ... '
            helper = ParallelTaskHelper('oldsplit', mylocals)
            helper.override_arg('outputvis',outputviss)
            helper._consolidateOutput = False
            goretval = helper.go()

            for i in xrange(len(mses)):
                m = mses[i]

                # deal with the POINTING table
                if replaced[i]:
                    # restore link
                    shutil.rmtree(theptabs[i], ignore_errors=True)
                    os.symlink('../'+os.path.basename(mastersubms)+'/POINTING', theptabs[i])
                    # (link in target will be created by makeMMS)

                # accumulate list of successful splits
                if not goretval[m]:
                    nfail+=1
                else:
                    successfulmses.append(outputviss[i])

            if nfail>0: # there were unsuccessful splits
                if len(successfulmses)==0:
                    casalog.post('Split failed in all subMSs.', 'WARN')
                    rval=False
                else:
                    casalog.post('*** Summary: there were failures in '+str(nfail)+' SUBMSs:', 'WARN')
                    casalog.post('*** (these are harmless if they are caused by selection):', 'WARN')
                    for m in mses:
                        if not goretval[m]:
                            casalog.post(os.path.basename(m)+': '+str(goretval[m]), 'WARN')
                        else:
                            casalog.post(os.path.basename(m)+': '+str(goretval[m]), 'NORMAL') 

                    casalog.post('Will construct MMS from subMSs with successful selection ...', 'NORMAL')

                    if nochangeinpointing: # need to take care of POINTING table
                        # in case the master subms did not make it
                        if not (os.path.abspath(tempout+'/'+os.path.basename(mastersubms)) in successfulmses):
                            # old master subms was not selected.
                            # copy the original masterptab into the new master
                            shutil.rmtree(successfulmses[0]+'/POINTING')
                            shutil.copytree(masterptab, successfulmses[0]+'/POINTING')
                    
            if rval: # construct new MMS from the output
                if(width==1 and str(field)+str(spw)+str(antenna)+str(timerange)+str(scan)+str(intent)\
                   +str(array)+str(uvrange)+str(correlation)+str(observation)==''):
                    ph.makeMMS(outputvis, successfulmses)
                else:
                    myms.open(successfulmses[0], nomodify=False)
                    auxfile = "split_aux_"+str(time.time())
                    for i in xrange(1,len(successfulmses)):
                        myms.virtconcatenate(successfulmses[i], auxfile, '1Hz', '10mas', True)
                    myms.close()
                    os.remove(auxfile)
                    ph.makeMMS(outputvis, successfulmses, True, ['POINTING']) 


            shutil.rmtree(tempout, ignore_errors=True)



        else: # do not output an MMS

            rval = split_core(vis, outputvis, datacolumn, field, spw, width, antenna,
                              timebin, timerange, scan, intent, array, uvrange,
                              correlation, observation, combine, keepflags)

    except Exception, instance:
            casalog.post("*** Error: %s" % (instance), 'SEVERE')
            rval = False
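A minimal usage sketch for oldsplit as defined above (dataset names are hypothetical; every parameter is passed explicitly because the function declares no defaults):

oldsplit(vis='ngc5921.ms', outputvis='ngc5921_src.ms', datacolumn='corrected',
         field='1328+307', spw='', width=1, antenna='', timebin='30s',
         timerange='', scan='', intent='', array='', uvrange='',
         correlation='', observation='', combine='', keepflags=True,
         keepmms=False)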
Example 50
    # Write history to output MS, not the input ms.
    try:
        param_names = split_core.func_code.co_varnames[:split_core.func_code.co_argcount]
        param_vals = [eval(p) for p in param_names]   
        retval &= write_history(myms, outputvis, 'oldsplit', param_names, param_vals,
                                casalog)
    except Exception, instance:
        casalog.post("*** Error \'%s\' updating HISTORY" % (instance),
                     'WARN')

    # Update FLAG_CMD if necessary.
    # If the spw selection is by name or FLAG_CMD contains spw with names, skip the updating    

    if ((spw != '') and (spw != '*')) or do_chan_mod:
        isopen = False
        mytb = tbtool()
        try:
            mytb.open(outputvis + '/FLAG_CMD', nomodify=False)
            isopen = True
            nflgcmds = mytb.nrows()
            
            if nflgcmds > 0:
                updateFlagCmd = False
                # If spw selection is by name in FLAG_CMD, do not update, CAS-7751
                mycmd = mytb.getcell('COMMAND', 0)
                cmdlist = mycmd.split()
                for cmd in cmdlist:
                    # Match only spw indices, not names
                    if cmd.__contains__('spw'):
                        cmd = cmd.strip("spw=")
                        spwstr = re.search('^[^a-zA-Z]+$', cmd)
Example 51
# default ASDM dataset name
myasdm_dataset_name = 'uid___X5f_X18951_X1'
myms_dataset_name = 'M51.ms'

# name of the resulting MS
msname = myasdm_dataset_name+'.ms'

# name of the exported ASDM
asdmname = myms_dataset_name+'.asdm'

# name of the reimported MS
reimp_msname = 'reimported-'+myms_dataset_name

# make local copies of the tools
tblocal = tbtool()
mslocal = mstool()

def checktable(thename, theexpectation):
    global msname, myname
    tblocal.open(msname+"/"+thename)
    if thename == "":
        thename = "MAIN"
    for mycell in theexpectation:
        print myname, ": comparing ", mycell
        value = tblocal.getcell(mycell[0], mycell[1])
        # see if value is array
        try:
            isarray = value.__len__
        except:
            # it's not an array
Example 52
def scaleweights(vis="", field=[], spw="", scale=1.0, dotime=False):

    """
    Usage: scaleweights(vis, field, spw, scale)
           Scale the weight column for spw and field by scale.
           The field parameter takes a list of fields.
    """

    if vis == "" or spw == "" or field == [] or scale == 1.0 or not type(field) == list:
        print "Usage: scaleweights(vis, field, spw, scale)"
        print "       The field parameter takes a list of fields."
        return False

    myvis = vis
    myspw = spw
    myscale = scale
    myfields = field
    mytb = taskinit.tbtool()

    mytb.open(myvis)
    w = mytb.getcol("WEIGHT")
    dd = mytb.getcol("DATA_DESC_ID")
    ff = mytb.getcol("FIELD_ID")
    tt = []
    if dotime:
        tt = mytb.getcol("EXPOSURE")
    mytb.close()

    mytb.open(myvis + "/DATA_DESCRIPTION")
    mydds = []

    for i in range(0, mytb.nrows()):
        if mytb.getcell("SPECTRAL_WINDOW_ID", i) != myspw:
            continue
        else:
            mydds.append(i)

    mytb.close()

    print "Will change weights for data description ids ", mydds

    changes = 0

    if len(mydds) > 0:
        for row in range(0, len(dd)):
            if (dd[row] in mydds) and (ff[row] in myfields):
                changes += 1
                for i in range(0, len(w)):
                    w[i][row] *= myscale
                    if dotime:
                        w[i][row] *= 2 * tt[row]  # include 2 factor, too

        mytb.open(myvis, nomodify=False)
        mytb.putcol("WEIGHT", w)
        mytb.close()

    if changes > 0:
        print "Changes applied in ", changes, " rows."
    else:
        print "No changes applied."

    return True
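A minimal usage sketch for scaleweights as defined above (MS name, field id, spw id and scale factor are hypothetical; note that field must be a list and spw an integer spw id):

scaleweights(vis='my.ms', field=[2], spw=3, scale=0.5)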
Example 53
def adjustweights(vis="", field="", refspws=[], spws=[]):

    """
       Usage: adjustweights(vis, field, refspws, spws)
              Scale the weights given by spws by a factor
              derived from the average weight Wref of the refspws
              and the original average weight Worig of the spws
              using

                   Wnew = Wold * (Wref/Worig)*(ChanWidthorig/ChanWidthref)

              where ChanWidthorig is the channel width of the spws,
              and  ChanWidthref is the channel width of the refspws.

              refspws and spws are of type list,
              field should be given as field id.
    """

    myvis = vis
    mytb = taskinit.tbtool()

    # validate the arguments before converting field to an integer id
    if vis == "" or field == "" or refspws == [] or spws == [] or not type(refspws) == list or not type(spws) == list:
        print "Usage: adjustweights(vis, field, refspws, spws)"
        print "       refspws and spws are of type list,"
        print "       field should be given as field id"
        return False

    myfield = int(field)

    # check that all ref spws have the same chan width
    refcw = 0.0
    for spw in refspws:
        cw = abschanwidth(myvis, spw)
        if cw == 0:
            print "Error reading channel width of spw ", spw
            return False
        if refcw == 0:
            refcw = cw
        else:
            if not refcw == cw:
                print "Error: the spws given in the reference list do not all have the same channel width."
                return False

    # get avweight and chanwidth from spws
    cws = []
    avweights = []
    for spw in spws:
        cw = abschanwidth(myvis, spw)
        if cw == 0:
            print "Error reading channel width of spw ", spw
            return False
        cws.append(cw)
        avw = getavweight(myvis, [myfield], spw)
        if avw == 0.0:
            print "Error: average weight of spw ", spw, " is zero (could also mean no data)."
            return False
        print "Spw ", spw, ", channelwidth ", cw, ", av. weight ", avw
        avweights.append(avw)

    # get avweight and chanwidth from ref spws
    ravweight = 0.0
    for spw in refspws:
        avw = getavweight(myvis, [myfield], spw)
        print "Reference Spw ", spw, ", channelwidth ", refcw, ", av. weight ", avw
        ravweight += avw

    if len(refspws) > 0:
        ravweight /= len(refspws)
    else:
        print "Error: no reference spws"
        return False

    print "Average weight of reference spws: ", ravweight

    # calculate scale factor and apply scaling to the spws

    for i in range(0, len(spws)):
        myscale = ravweight / avweights[i] * cws[i] / refcw
        print "Scale factor for weights in spw ", spws[i], " is ", myscale
        scaleweights(myvis, [myfield], spws[i], myscale)

    print "Done."

    return True
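A minimal usage sketch for adjustweights as defined above (MS name and spw ids are hypothetical; field is given as a field id string, refspws and spws as lists of integer spw ids):

adjustweights(vis='my.ms', field='2', refspws=[0, 1], spws=[2, 3])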
Example 54
    def __init__(self, msfile, spwId):
        self.tb = taskinit.tbtool()
        self.setTableAndSpwId(msfile, spwId)
Example 55
def tsysspwmap(vis,tsystable,trim=True,relax=False, tsysChanTol=0):
    """
    Generate default spwmap for ALMA Tsys, including TDM->FDM associations
    Input:
     vis        the target MeasurementSet 
     tsystable  the input Tsys caltable (w/ TDM Tsys measurements)
     trim       if True (the default), return minimum-length spwmap;
                    otherwise the spwmap will be exhaustive and include
                    the high-numbered (and usually irrelevant) wvr
                    spws
     relax      (not yet implemented)
    Output:
     spw list to use in applycal spwmap parameter for the Tsys caltable

     This function takes the Tsys Caltable you wish to apply to a
     MeasurementSet and generates a "applycal-ready" spwmap that
     provides the appropriate information regarding the transfer
     Tsys calibration from TDM spectral windows to FDM spectral
     windows.  To execute the function:

     tsysmap=tsysspwmap(vis='my.ms',tsystable='mytsys.cal')

     tsysmap can then be supplied to the applycal spwmap parameter
     to ensure proper Tsys calibration application.

    """

    localTb = taskinit.tbtool()
    spwMaps = []
    # Get the spectral windows with entries in the solution table
    localTb.open(tsystable)
    measuredTsysSpw = numpy.unique(localTb.getcol("SPECTRAL_WINDOW_ID"))
    localTb.close()
    # Get the frequency ranges of the spws with Tsys measurements
    localTb.open("%s/SPECTRAL_WINDOW" % tsystable)
    for i in measuredTsysSpw:
        spwMap = SpwMap(i)
        chanFreqs = localTb.getcell("CHAN_FREQ",i)
        chanWidth = abs(chanFreqs[1]-chanFreqs[0])
        spwMap.chanWidth = chanWidth
        spwMap.validFreqRange = [chanFreqs.min()-0.5*chanWidth,\
                                 chanFreqs.max()+0.5*chanWidth]
        spwMaps.append(spwMap)
    localTb.close()
    # Now loop through the main table's spectral window table
    # to map the spectral windows as desired.
    localTb.open("%s/SPECTRAL_WINDOW" % vis)
    it = localTb.nrows()
    localTb.close()
    for j in spwMaps :
        localTb.open("%s/SPECTRAL_WINDOW" % vis)
        j.bbNo = localTb.getcell("BBC_NO",j.calSpwId)
        localTb.close()
        for i in range(it) :
            localTb.open("%s/SPECTRAL_WINDOW" % vis)
            chanFreqs = localTb.getcell("CHAN_FREQ",i)
            if len(chanFreqs) > 1 :
                chanWidth = localTb.getcell("CHAN_WIDTH",i)[0]
                freqMin = chanFreqs.min()-0.5*chanWidth
                freqMax = chanFreqs.max()+0.5*chanWidth
            else :
                chanWidth = localTb.getcell("CHAN_WIDTH",i)
                freqMin = chanFreqs-0.5*chanWidth
                freqMax = chanFreqs+0.5*chanWidth
            msSpw  = SpwInfo(vis,i)
            if j.bbNo == msSpw.values['BBC_NO']:
                if freqMin >= j.validFreqRange[0]-tsysChanTol*j.chanWidth and \
                   freqMax <= j.validFreqRange[1]+tsysChanTol*j.chanWidth :
                    j.mapsToSpw.append(i)
            localTb.close()
    applyCalSpwMap = []
    spwWithoutMatch = []
    localTb.open("%s/SPECTRAL_WINDOW" % vis)
    for i in range(it) :
        useSpw = None
        for j in spwMaps :
            if i in j.mapsToSpw :
                if useSpw is not None :
                    if localTb.getcell("BBC_NO", i) == j.bbNo :
                        useSpw = j.calSpwId
                else :
                    useSpw = j.calSpwId
        if useSpw == None :
            useSpw = i
            spwWithoutMatch.append(i)
        applyCalSpwMap.append(int(useSpw))        
    if len(spwWithoutMatch) != 0:
        taskinit.casalog.post('Found no match for following spw ids: '+str(spwWithoutMatch))
    if trim :
        return trimSpwmap(applyCalSpwMap)
    else :
        return applyCalSpwMap
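A short follow-up sketch of how the returned map is typically used, assuming the standard CASA applycal task is available in the same session (file names are hypothetical):

tsysmap = tsysspwmap(vis='my.ms', tsystable='mytsys.cal')
applycal(vis='my.ms', gaintable=['mytsys.cal'], spwmap=[tsysmap])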
Example 56
def xyamb(xytab, qu, xyout=''):

    mytb = taskinit.tbtool()

    if not isinstance(qu, tuple):
        raise Exception, 'qu must be a tuple: (Q,U)'

    if xyout == '':
        xyout = xytab
    if xyout != xytab:
        os.system('cp -r ' + xytab + ' ' + xyout)

    QUexp = complex(qu[0], qu[1])
    print 'Expected QU = ', qu  # , '  (',pl.angle(QUexp)*180/pi,')'

    mytb.open(xyout, nomodify=False)

    QU = mytb.getkeyword('QU')['QU']
    P = pl.sqrt(QU[0, :]**2 + QU[1, :]**2)

    nspw = P.shape[0]
    for ispw in range(nspw):
        st = mytb.query('SPECTRAL_WINDOW_ID==' + str(ispw))
        if (st.nrows() > 0):
            q = QU[0, ispw]
            u = QU[1, ispw]
            qufound = complex(q, u)
            c = st.getcol('CPARAM')
            fl = st.getcol('FLAG')
            xyph0 = pl.angle(pl.mean(c[0, :, :][pl.logical_not(fl[0, :, :])]),
                             True)
            print 'Spw = ' + str(ispw) + ': Found QU = ' + str(
                QU[:, ispw])  # +'   ('+str(pl.angle(qufound)*180/pi)+')'
            #if ( (abs(q)>0.0 and abs(qu[0])>0.0 and (q/qu[0])<0.0) or
            #     (abs(u)>0.0 and abs(qu[1])>0.0 and (u/qu[1])<0.0) ):
            if (pl.absolute(pl.angle(qufound / QUexp) * 180 / pi) > 90.0):
                c[0, :, :] *= -1.0
                xyph1 = pl.angle(
                    pl.mean(c[0, :, :][pl.logical_not(fl[0, :, :])]), True)
                st.putcol('CPARAM', c)
                QU[:, ispw] *= -1
                print '   ...CONVERTING X-Y phase from ' + str(
                    xyph0) + ' to ' + str(xyph1) + ' deg'
            else:
                print '      ...KEEPING X-Y phase ' + str(xyph0) + ' deg'
            st.close()
    QUr = {}
    QUr['QU'] = QU
    mytb.putkeyword('QU', QUr)
    mytb.close()
    QUm = pl.mean(QU[:, P > 0], 1)
    QUe = pl.std(QU[:, P > 0], 1)
    Pm = pl.sqrt(QUm[0]**2 + QUm[1]**2)
    Xm = 0.5 * atan2(QUm[1], QUm[0]) * 180 / pi

    print 'Ambiguity resolved (spw mean): Q=', QUm[0], 'U=', QUm[
        1], '(rms=', QUe[0], QUe[1], ')', 'P=', Pm, 'X=', Xm

    stokes = [1.0, QUm[0], QUm[1], 0.0]
    print 'Returning the following Stokes vector: ' + str(stokes)

    return stokes
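A minimal usage sketch for xyamb as defined above (table names and the (Q,U) estimate are hypothetical; in practice the tuple usually comes from qufromgain, shown in the next example):

S = xyamb(xytab='XY0.cal', qu=(0.02, -0.01), xyout='XY0.ambcorr.cal')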
Example 57
def qufromgain(caltable, badspw=[], paoffset=0.0):

    mytb = taskinit.tbtool()
    myme = taskinit.metool()

    mytb.open(caltable + '/ANTENNA')
    pos = mytb.getcol('POSITION')
    meanpos = pl.mean(pos, 1)
    frame = mytb.getcolkeyword('POSITION', 'MEASINFO')['Ref']
    units = mytb.getcolkeyword('POSITION', 'QuantumUnits')
    mpos = myme.position(frame,
                         str(meanpos[0]) + units[0],
                         str(meanpos[1]) + units[1],
                         str(meanpos[2]) + units[2])
    myme.doframe(mpos)

    # _geodetic_ latitude
    latr = myme.measure(mpos, 'WGS84')['m1']['value']

    print 'Latitude = ', latr * 180 / pi

    mytb.open(caltable + '/FIELD')
    nfld = mytb.nrows()
    dirs = mytb.getcol('DELAY_DIR')[:, 0, :]
    mytb.close()
    print 'Found as many as ' + str(nfld) + ' fields.'

    mytb.open(caltable + '/SPECTRAL_WINDOW')
    nspw = mytb.nrows()
    bandnames = [x.split('#')[0].split('_')[-1] for x in mytb.getcol('NAME')]
    mytb.close()
    print 'Found as many as ' + str(nspw) + ' spws.'

    R = pl.zeros((nspw, nfld))
    Q = pl.zeros((nspw, nfld))
    U = pl.zeros((nspw, nfld))
    mask = pl.ones((nspw, nfld), dtype=bool)

    if (len(badspw) > 0):
        mask[badspw, :] = False

    QU = {}
    mytb.open(caltable)
    for ifld in range(nfld):
        for ispw in range(nspw):
            st = mytb.query('FIELD_ID==' + str(ifld) +
                            ' && SPECTRAL_WINDOW_ID==' + str(ispw))
            nrows = st.nrows()
            if nrows > 0:

                rah = dirs[0, ifld] * 12.0 / pi
                decr = dirs[1, ifld]
                times = st.getcol('TIME')
                gains = st.getcol('CPARAM')
                ants = st.getcol('ANTENNA1')

                nants = ants.max() + 1

                # times
                time0 = 86400.0 * floor(times[0] / 86400.0)
                rtimes = times - time0

                # amplitude ratio
                amps = pl.absolute(gains)
                amps[amps == 0.0] = 1.0
                ratio = amps[0, 0, :] / amps[1, 0, :]

                ratio.resize(nrows / nants, nants)

                # parang
                parang = pl.zeros(len(times))

                for itim in range(len(times)):
                    tm = myme.epoch('UTC', str(times[itim]) + 's')
                    last = myme.measure(tm, 'LAST')['m0']['value']
                    last -= floor(last)  # days
                    last *= 24.0  # hours
                    ha = last - rah  # hours
                    har = ha * 2.0 * pi / 24.0

                    parang[itim] = atan2((cos(latr) * sin(har)),
                                         (sin(latr) * cos(decr) -
                                          cos(latr) * sin(decr) * cos(har)))

                parang.resize(nrows / nants, nants)
                parang += bandpa(bandnames[ispw])  # feed pos ang offset
                parang += (paoffset * pi / 180.)  # manual feed pa offset
                parangd = parang * (180.0 / pi)

                A = pl.ones((nrows / nants, 3))
                A[:, 1] = pl.cos(2 * parang[:, 0])
                A[:, 2] = pl.sin(2 * parang[:, 0])

                fit = pl.lstsq(A, pl.square(ratio))

                ants0 = range(nants)
                rsum = pl.sum(ratio[:, ants0], 1)
                rsum /= len(ants0)

                fit = pl.lstsq(A, pl.square(rsum))
                R[ispw, ifld] = fit[0][0]
                Q[ispw, ifld] = fit[0][1] / R[ispw, ifld] / 2.0
                U[ispw, ifld] = fit[0][2] / R[ispw, ifld] / 2.0
                P = sqrt(Q[ispw, ifld]**2 + U[ispw, ifld]**2)
                X = 0.5 * atan2(U[ispw, ifld], Q[ispw, ifld]) * 180 / pi

                print 'Fld=', ifld, 'Spw=', ispw, '(B=' + str(
                    bandnames[ispw]) + ', PA offset=' + str(
                        bandpa(bandnames[ispw]) * 180. /
                        pi) + 'deg)', 'Gx/Gy=', R[ispw, ifld], 'Q=', Q[
                            ispw, ifld], 'U=', U[ispw, ifld], 'P=', P, 'X=', X

            else:
                mask[ispw, ifld] = False

            st.close()

        if sum(mask[:, ifld]) > 0:
            print 'For field id = ', ifld, ' there are ', sum(
                mask[:, ifld]), 'good spws.'

            Qm = pl.mean(Q[mask[:, ifld], ifld])
            Um = pl.mean(U[mask[:, ifld], ifld])
            QU[ifld] = (Qm, Um)
            Qe = pl.std(Q[mask[:, ifld], ifld])
            Ue = pl.std(U[mask[:, ifld], ifld])
            Pm = sqrt(Qm**2 + Um**2)
            Xm = 0.5 * atan2(Um, Qm) * 180 / pi
            print 'Spw mean: Fld=', ifld, 'Q=', Qm, 'U=', Um, '(rms=', Qe, Ue, ')', 'P=', Pm, 'X=', Xm

    mytb.close()

    return QU
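A minimal end-to-end sketch combining qufromgain with xyamb from the previous example (the gain and cross-hand tables are hypothetical; field id 0 is assumed to be the polarization calibrator):

qu = qufromgain('polcal.G1', badspw=[], paoffset=0.0)   # returns {field_id: (Q, U)}
S = xyamb(xytab='polcal.XY0', qu=qu[0], xyout='polcal.XY0.ambcorr')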