Example #1
def copy_column_from_bands(mslist, ms_to, inputcol, outputcol):
    """
    Copies one column from multiple MS files (bands) to a single MS file

    Note: the bands are assumed to be ordered by frequency, with a nonexisting
    file (e.g., 'dummy.ms') denoting missing bands

    Parameters
    ----------
    mslist : list
        MS files to copy from
    ms_to : str
        MS file receiving copy
    inputcol : str
        Column name to copy from
    outputcol : str
        Column name to copy to

    """
    dataout = pt.table(ms_to, readonly=False)
    data = dataout.getcol(outputcol, nrow=1)
    numberofchans = int(numpy.shape(data)[1])
    chanperms = numberofchans // len(mslist)

    for ms_id, ms in enumerate(mslist):
        if os.path.isdir(ms):
            datain = pt.table(ms, readonly=True)
            data = datain.getcol(inputcol)
            dataout.putcolslice(outputcol, data, [chanperms*ms_id,0], [(chanperms*(ms_id+1))-1,3])
            datain.close()
    dataout.flush()
    dataout.close()
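A minimal usage sketch, assuming the imports below and band files ordered by frequency; the file names are hypothetical, and 'dummy.ms' marks a missing band that the os.path.isdir check skips:

import os
import numpy
import casacore.tables as pt

bands = ['band0.ms', 'band1.ms', 'dummy.ms', 'band3.ms']
copy_column_from_bands(bands, 'combined.ms', 'DATA', 'DATA')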
Example #2
def copy_column_to_bands(mslist, ms_from, inputcol, outputcol):
    """
    Copies one column from an MS file to multiple MS files (bands)

    Parameters
    ----------
    mslist : list
        MS files receiving copy
    ms_from : str
        MS file to copy from
    inputcol : str
        Column name to copy from
    outputcol : str
        Column name to copy to

    """
    datain = pt.table(ms_from)
    data = datain.getcol(inputcol, nrow=1)
    numberofchans = int(numpy.shape(data)[1])
    chanperms = numberofchans // len(mslist)

    for ms_id, ms in enumerate(mslist):
        if os.path.isdir(ms):
            data = datain.getcolslice(inputcol, [chanperms*ms_id,0], [(chanperms*(ms_id+1))-1,3])
            dataout = pt.table(ms, readonly=False)
            dataout.putcol(outputcol, data)
            dataout.flush()
            dataout.close()
Example #3
def addextraweights(msfiles):
    '''
    Adds the column WEIGHT_SPECTRUM_FROM_IMAGING_WEIGHT, filled from the
    IMAGING_WEIGHT column (DR2)
    Input: msfiles (list of MS files)
    '''
    for ms in msfiles:
        ts = pt.table(ms, readonly=False)
        colnames = ts.colnames()
        if 'WEIGHT_SPECTRUM_FROM_IMAGING_WEIGHT' not in colnames:
            desc = ts.getcoldesc('WEIGHT_SPECTRUM')
            desc['name'] = 'WEIGHT_SPECTRUM_FROM_IMAGING_WEIGHT'
            ts.addcols(desc)
            ts.close()  # close to write the new column
        else:
            print('WEIGHT_SPECTRUM_FROM_IMAGING_WEIGHT already exists')
            ts.close()

        ts = pt.table(ms, readonly=False)
        if 'IMAGING_WEIGHT' in colnames:
            iw = ts.getcol('IMAGING_WEIGHT')
            ws_tmp = ts.getcol('WEIGHT_SPECTRUM_FROM_IMAGING_WEIGHT')
            n, nfreq, npol = np.shape(ws_tmp)
            for i in range(npol):
                print('Copying over correlation', i, ms)
                ws_tmp[:, :, i] = iw
            # write once, after all correlations have been filled
            ts.putcol('WEIGHT_SPECTRUM_FROM_IMAGING_WEIGHT', ws_tmp)
        else:
            print('IMAGING_WEIGHT column is not present in:', ms)
        ts.close()

    return
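A one-line usage sketch with placeholder MS names; each file is expected to carry IMAGING_WEIGHT and WEIGHT_SPECTRUM columns:

addextraweights(['obs_band0.ms', 'obs_band1.ms'])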
Example #4
def get_baseline_lengths(ms):
    """
    Returns dict of baseline lengths in km for all baselines in input dataset
    """
    anttab = pt.table(ms+'::ANTENNA', ack=False)
    antnames = anttab.getcol('NAME')
    anttab.close()

    t = pt.table(ms, ack=False)
    ant1 = t.getcol('ANTENNA1')
    ant2 = t.getcol('ANTENNA2')
    all_uvw = t.getcol('UVW')
    t.close()

    baseline_dict = {}
    for ant in itertools.product(set(ant1), set(ant2)):
        if ant[0] >= ant[1]:
            continue
        sel1 = np.where(ant1 == ant[0])[0]
        sel2 = np.where(ant2 == ant[1])[0]
        sel = sorted(list(frozenset(sel1).intersection(sel2)))
        uvw = all_uvw[sel, :]
        uvw_dist = np.sqrt(uvw[:, 0]**2 + uvw[:, 1]**2 + uvw[:, 2]**2)
        baseline_dict['{0}'.format(ant[0])] = antnames[ant[0]]
        baseline_dict['{0}'.format(ant[1])] = antnames[ant[1]]
        baseline_dict['{0}-{1}'.format(ant[0], ant[1])] = np.mean(uvw_dist) / 1.e3

    return baseline_dict
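A short usage sketch (the MS name is a placeholder). Keys of the form 'i-j' map to mean baseline lengths in km, while bare antenna indices map to antenna names:

lengths = get_baseline_lengths('observation.ms')
for key, value in lengths.items():
    if '-' in key:
        print('baseline {0}: {1:.2f} km'.format(key, value))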
Example #5
def copy_column_to_ms(ms, inputcol, outputcol, ms_from=None):
    """
    Copies one column to another, within an MS file or between two MS files

    Parameters
    ----------
    ms : str
        MS file receiving copy
    inputcol : str
        Column name to copy from
    outputcol : str
        Column name to copy to
    ms_from : str, optional
        MS file to copy from. If None, the column is copied internally

    """
    t = pt.table(ms, readonly=False, ack=False)
    if ms_from is not None:
        tf = pt.table(ms_from, readonly=False, ack=False)
        data = tf.getcol(inputcol)
        desc = tf.getcoldesc(inputcol)
    else:
        data = t.getcol(inputcol)
        desc = t.getcoldesc(inputcol)

    # Add the output column if needed
    if outputcol not in t.colnames():
        desc['name'] = outputcol
        t.addcols(desc)

    t.putcol(outputcol, data)
    t.flush()
    t.close()
Example #6
def copy_column_to_ms(ms, inputcol, outputcol, ms_from=None, use_compression=False):
    """
    Copies one column to another, within an MS file or between two MS files

    Parameters
    ----------
    ms : str
        MS file receiving copy
    inputcol : str
        Column name to copy from
    outputcol : str
        Column name to copy to
    ms_from : str, optional
        MS file to copy from. If None, the column is copied internally

    """
    t = pt.table(ms, readonly=False, ack=False)
    if ms_from is not None:
        tf = pt.table(ms_from, readonly=False, ack=False)
        data = tf.getcol(inputcol)
        desc = tf.getcoldesc(inputcol)
    else:
        data = t.getcol(inputcol)
        desc = t.getcoldesc(inputcol)

    # Add the output column if needed
    if outputcol not in t.colnames():
        if use_compression:
            # Set DyscoStMan to be storage manager for DATA and WEIGHT_SPECTRUM
            # We use a visibility bit rate of 16 and truncation of 1.5 sigma to keep the
            # compression noise below ~ 0.01 mJy, as estimated from Fig 4 of
            # Offringa (2016). For the weights, we use a bit rate of 12, as
            # recommended in Sec 4.4 of Offringa (2016)
            desc['name'] = outputcol
            dmi = {
                'SPEC': {
                    'dataBitCount': numpy.uint32(16),
                    'distribution': 'TruncatedGaussian',
                    'distributionTruncation': 1.5,
                    'normalization': 'RF',
                    'weightBitCount': numpy.uint32(12)},
                'NAME': '{}_dm'.format(outputcol),
                'SEQNR': 1,
                'TYPE': 'DyscoStMan'}
            desc['option'] = 1 # make a Direct column
            t.addcols(desc, dmi)
        else:
            desc['name'] = outputcol
            t.addcols(desc)

    if use_compression:
        # Replace flagged values with NaNs before compression
        flags = t.getcol('FLAG')
        flagged = numpy.where(flags)
        data[flagged] = numpy.nan

    t.putcol(outputcol, data)
    t.flush()
    t.close()
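A sketch of invoking the compressed variant above; the file and column names are placeholders, and writing a DyscoStMan column requires python-casacore built with Dysco support:

copy_column_to_ms('target.ms', 'DATA', 'COMPRESSED_DATA',
                  ms_from='source.ms', use_compression=True)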
Example #7
def readMS(fn, sbs, column='DATA'):
    """Return the visibilites and UVW coordinates from a SE607 LOFAR XST format file
    fn: XST filename
    column: string, data column
    sbs: 1-D array of subband IDs

    returns:
        vis: visibilities [4, Nsamples, Nsubbands]
        uvw: UVW coordinates [Nsamples, 3, Nsubbands]
        freqs: frequencies [Nsubbands]
        obsdata: [latitude, longitude, LST]
    """
    try:
        import casacore.tables as tbls
    except ImportError:
        print('ERROR: could not import casacore.tables, cannot read measurement sets')
        exit(1)

    MS = tbls.table(fn, readonly=True)
    data_column = column.upper()
    uvw = MS.col('UVW').getcol() # [vis id, (u,v,w)]
    vis = MS.col(data_column).getcol() #[vis id, freq id, stokes id]
    vis = vis[:,sbs,:] #select subbands
    MS.close()

    # lat/long/lst information
    ANTS = tbls.table(fn + '/ANTENNA')
    positions = ANTS.col('POSITION').getcol()
    ant0Lat, ant0Long, ant0hgt = ecef.ecef2geodetic(positions[0,0], positions[0,1], positions[0,2], degrees=False) # use the first antenna in the table to get the array lat/long
    ANTS.close()
    SRC = tbls.table(fn + '/SOURCE')
    direction = SRC.col('DIRECTION').getcol()
    obsLat = direction[0,1]
    obsLong = ant0Long
    LSTangle = direction[0,0]
    SRC.close()

    # freq information, convert uvw coordinates
    SW = tbls.table(fn + '/SPECTRAL_WINDOW')
    freqs = SW.col('CHAN_FREQ').getcol()[0, sbs] # [nchan]
    print('SUBBANDS:', sbs, '(', freqs/1e6, 'MHz)')
    SW.close()

    # TODO: check rotation reference is the same as with LOFAR data, north pole is dec=+90, ra=0
    # in order to accommodate multiple observations at different times/sidereal times all the positions need to be rotated relative to sidereal time 0
    print('LST:', LSTangle)
    rotAngle = float(LSTangle) - obsLong # adjust LST to the longitude of the observatory to get the LST at Greenwich
    # the next two lines adjust the LST so that the images come out right; the underlying coordinate transform has not been worked out, so for now these work without justification
    rotAngle += np.pi
    rotAngle *= -1
    # Rotation matrix for antenna positions
    rotMatrix = np.array([[np.cos(rotAngle), -1.*np.sin(rotAngle), 0.],
                          [np.sin(rotAngle), np.cos(rotAngle),     0.],
                          [0.,               0.,                   1.]]) #rotate about the z-axis
    uvwRot = np.dot(uvw, rotMatrix).reshape(uvw.shape[0], uvw.shape[1], 1)
    uvwRotRepeat = np.repeat(uvwRot, len(sbs), axis=2)

    return np.transpose(vis, (2,0,1)), uvwRotRepeat, freqs, [obsLat, obsLong, LSTangle]
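A short usage sketch for readMS; the XST file name and subband IDs are placeholders:

import numpy as np
vis, uvw, freqs, (lat, lon, lst) = readMS('SE607_xst.MS', sbs=np.array([100, 101, 102]))
print(vis.shape, uvw.shape, freqs.shape)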
Example #8
    def test_numpy_unicode(self):
        table_path = join(self.workdir, 'blah.ms')
        col1 = makescacoldesc('mycol1', 'test', valuetype='string')
        col2 = makescacoldesc('mycol2', 'test', valuetype='string')
        t = table(table_path, maketabdesc([col1, col2]), ack=False)
        t.addrows(2)
        t.putcol('mycol1', np.array([unicode_string, unicode_string]))
        t.putcol('mycol2', [unicode_string, unicode_string])
        t.close()

        t = table(table_path)
        self.assertEqual(t.getcol('mycol1'), t.getcol('mycol2'))
Example #9
 def RotateMS(self,radec):
     import ModRotate
     ModRotate.Rotate(self,radec)
     ta=table(self.MSName+'/FIELD/',ack=False,readonly=False)
     ra,dec=radec
     radec=np.array([[[ra,dec]]])
     ta.putcol("DELAY_DIR",radec)
     ta.putcol("PHASE_DIR",radec)
     ta.putcol("REFERENCE_DIR",radec)
     ta.close()
     t=table(self.MSName,ack=False,readonly=False)
     t.putcol(self.ColName,self.data)
     t.putcol("UVW",self.uvw)
     t.close()
Example #10
 def test_subtables(self):
     """Testing subtables."""
     c1 = makescacoldesc("coli", 0)
     c2 = makescacoldesc("cold", 0.)
     c3 = makescacoldesc("cols", "")
     c4 = makescacoldesc("colb", True)
     c5 = makescacoldesc("colc", 0. + 0j)
     c6 = makearrcoldesc("colarr", 0.)
     t = table("ttable.py_tmp.tab1", maketabdesc((c1, c2, c3, c4, c5,
                                                 c6)), ack=False)
     sub = table("sub", maketabdesc((c1, c2, c3)))
     t.putkeyword("subtablename", sub, makesubrecord=True)
     print(t.getsubtables())
     t.close()
     tabledelete("ttable.py_tmp.tab1")
Example #11
    def test_tableinfo(self):
        """Test table info."""
        c1 = makescacoldesc("coli", 0)
        c2 = makescacoldesc("cold", 0.)
        c3 = makescacoldesc("cols", "")
        c4 = makescacoldesc("colb", True)
        c5 = makescacoldesc("colc", 0. + 0j)
        c6 = makearrcoldesc("colarr", 0.)
        t = table("ttable.py_tmp.tab1", maketabdesc((c1, c2, c3, c4, c5,
                                                     c6)), ack=False)
        self.assertTrue(tableexists("ttable.py_tmp.tab1"))
        self.assertTrue(tableiswritable("ttable.py_tmp.tab1"))
        self.assertEqual(t.nrows(), 0)
        self.assertEqual(t.ncols(), 6)
        self.assertTrue(compare(t.colnames(), ['cols', 'colc', 'coli',
                                               'cold', 'colb', 'colarr']))
        self.assertEqual(tableinfo("ttable.py_tmp.tab1"),
                         {'readme': '', 'subType': '', 'type': ''})
        t.addreadmeline("test table run")
        t.putinfo({'type': 'test', 'subType': 'test1'})

        self.assertEqual(t.info()['readme'], 'test table run\n')
        self.assertEqual(t.info()['subType'], 'test1')
        self.assertEqual(t.info()['type'], 'test')
        self.assertEqual(len(t), 0)
        print(str(t))
        self.assertEqual(t.endianformat(), 'little')
        t.close()
        tabledelete("ttable.py_tmp.tab1")
Example #12
 def PutBackupCol(self,back="CORRECTED_DATA"):
     backname="%s_BACKUP"%back
     backnameFlag="FLAG_BACKUP"
     self.PutCasaCols()
     t=table(self.MSName,readonly=False,ack=False)
     JustAdded=False
     if backname not in t.colnames():
         print("  Putting column ", backname, " in MS")
         desc=t.getcoldesc("CORRECTED_DATA")
         desc["name"]=backname
         desc['comment']=desc['comment'].replace(" ","_")
         t.addcols(desc)
         print "  Copying CORRECTED_DATA in CORRECTED_DATA_BACKUP"
         self.CopyCol("CORRECTED_DATA",backname)
         #t.putcol(backname,t.getcol("CORRECTED_DATA"))
     if backnameFlag not in t.colnames():
         desc=t.getcoldesc("FLAG")
         desc["name"]=backnameFlag
         desc['comment']=desc['comment'].replace(" ","_")
         t.addcols(desc)
         self.CopyCol("FLAG",backnameFlag)
         #t.putcol(backnameFlag,t.getcol("FLAG"))
         JustAdded=True
     #else:
         #print "  Column %s already there..."%backname
     t.close()
     return JustAdded
Example #13
def main(ms_file, column1, column2):
    """
    Switch the names of two columns

    Parameters
    ----------
    ms_file : str
        Name of MS file
    column1 : str
        Name of column 1
    column2 : str
        Name of column 2

    """
    t = pt.table(ms_file, readonly=False, ack=False)

    if column1 == column2:
        return

    if column1 not in t.colnames() or column2 not in t.colnames():
        print('Both columns must be present in MS')
        sys.exit(1)

    # Rename column1 to temp col
    t.renamecol(column1, column1+'_TEMP')

    # Rename column2 to column1
    t.renamecol(column2, column1)

    # Rename temp col to column2
    t.renamecol(column1+'_TEMP', column2)
    t.flush()
    t.close()
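The three renamecol calls implement a swap through a temporary column name, so no rename collides with an existing column. A one-line sketch with placeholder arguments:

main('observation.ms', 'DATA', 'CORRECTED_DATA')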
Example #14
 def __init__(self, ms_file):
     """
     ms_file: a MeasurementSet file
     """
     logging.info('Reading: %s' % ms_file)
     self.ms_file = ms_file
      self.ms = table(ms_file, readonly=False, ack=False)
Example #15
def main(ms_file):
    """
    Switch the names of normal and baseline-averaged columns

    Parameters
    ----------
    ms_file : str
        Name of MS file

    """
    t = pt.table(ms_file, readonly=False, ack=False)

    for (column1, column2) in zip(['DATA', 'WEIGHT_SPECTRUM'], ['BLAVG_DATA', 'BLAVG_WEIGHT_SPECTRUM']):
        if column1 not in t.colnames() or column2 not in t.colnames():
            print('Both columns must be present in MS')
            sys.exit(1)

        # Rename column1 to temp col
        t.renamecol(column1, column1+'_TEMP')

        # Rename column2 to column1
        t.renamecol(column2, column1)

        # Rename temp col to column2
        t.renamecol(column1+'_TEMP', column2)
    t.flush()
    t.close()
Example #16
def main(fitsimage, outfilename, force_stokes_i=False):
    """
    Convert a fits image to a CASA image

    Parameters
    ----------
    fitsimage : str
        Name of FITS image
    outfilename : str
        Name of output CASA image
    force_stokes_i : bool, optional
        If True, force Stokes axis to be 'I'

    """
    casaimage = pim.image(fitsimage)
    casaimage.saveas(outfilename, overwrite=True)

    if type(force_stokes_i) is str:
        if force_stokes_i.lower() == 'true':
            force_stokes_i = True
        else:
            force_stokes_i = False

    if force_stokes_i:
        coords = casaimage.coordinates().dict()
        coords['stokes1']['stokes'] = ['I']
        freq = coords['spectral2']['wcs']['crval']
        coords['spectral2']['restfreqs'] = np.array([freq])
        outtable = pt.table(outfilename, readonly=False, ack=False)
        outtable.putkeywords({'coords': coords})
        outtable.done()
Example #17
def removecolumn(msfile, colname):
    t = pt.table(msfile, readonly=False)
    colnames = t.colnames()
    if colname in colnames:
        print('Removing', colname, 'from', msfile)
        t.removecols(colname)
    t.close()
    return
Example #18
def columnchecker(mslist, colname):

    for ms in mslist:
        t = pt.table(ms, ack=False)
        if colname not in t.colnames():
            print(colname, 'not present in', ms)
            sys.exit()
        t.close()
Example #19
 def PutNewCol(self,Name,LikeCol="CORRECTED_DATA"):
      if Name not in self.ColNames:
          print("  Putting column %s in MS, with format of %s" % (Name, LikeCol))
         t=table(self.MSName,readonly=False,ack=False)
         desc=t.getcoldesc(LikeCol)
         desc["name"]=Name
         t.addcols(desc) 
         t.close()
Example #20
 def ZeroFlagSave(self,spw=0):
     self.flag_all.fill(0)
     if self.swapped:
         flagout=np.swapaxes(self.flag_all[spw*self.Nchan:(spw+1)*self.Nchan],0,1)
     else:
         flagout=self.flag_all
     t=table(self.MSName,readonly=False,ack=False)
     t.putcol("FLAG",flagout)
     
     t.close()
Example #21
 def SaveVis(self,vis=None,Col="CORRECTED_DATA",spw=0,DoPrint=True):
      if vis is None:
          vis=self.data
      if DoPrint: print("  Writing data in column %s" % ModColor.Str(Col, col="green"))
     table_all=table(self.MSName,ack=False,readonly=False)
     if self.swapped:
         visout=np.swapaxes(vis[spw*self.Nchan:(spw+1)*self.Nchan],0,1)
     else:
         visout=vis
     table_all.putcol(Col,visout,self.ROW0,self.nRowRead)
     table_all.close()
Example #22
    def SaveAllDataStruct(self):
        t=table(self.MSName,ack=False,readonly=False)

        t.putcol('ANTENNA1',self.A0)
        t.putcol('ANTENNA2',self.A1)
        t.putcol("TIME",self.times_all)
        t.putcol("TIME_CENTROID",self.times_all)
        t.putcol("UVW",self.uvw)
        t.putcol("FLAG",self.flag_all)
        for icol in range(len(self.ColName)):
            t.putcol(self.ColName[icol],self.data[icol])
        t.close()
Example #23
 def LoadSR(self):
     if self.SR is not None: return
     import lofar.stationresponse as lsr
     f=self.ChanFreq.flatten()
     if f.shape[0]>1:
         t=table(self.MSName+"/SPECTRAL_WINDOW/",readonly=False)
         c=t.getcol("CHAN_WIDTH")
         c.fill(np.abs((f[0:-1]-f[1::])[0]))
         t.putcol("CHAN_WIDTH",c)
         t.close()
     self.SR = lsr.stationresponse(self.MSName)
     self.SR.setDirection(self.rarad,self.decrad)
Example #24
def get_uvw(vis_ms_path):
    """
    Extract uvw data as squeezed numpy array. Units of metres.

    Args:
        vis_ms_path (str): Path to visibility MeasurementSet
    Returns (numpy.array): uvw data.
        Array of floats, shape: (n_baseline_samples, 3)
    """
    with closing(casatables.table(vis_ms_path)) as tbl:
        uvw_metres = tbl.getcol('UVW')
    return uvw_metres
Example #25
def get_stokes_i_vis(vis_ms_path):
    """
    Export 'CORRECTED_DATA' Stokes I complex visibilities to numpy-array

    Args:
        vis_ms_path (str): Path to visibility MeasurementSet
    Returns (numpy.array): visibility data.
        Array of complex, shape: (n_baseline_samples,)
    """
    with closing(casatables.table(vis_ms_path)) as tbl:
        stokes_i = tbl.getcol('CORRECTED_DATA').squeeze()[:, 0]
    return stokes_i
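A brief sketch combining the two helpers above (the path is a placeholder); note that get_stokes_i_vis simply takes the first correlation of CORRECTED_DATA as Stokes I:

uvw = get_uvw('vis.ms')
stokes_i = get_stokes_i_vis('vis.ms')
assert uvw.shape[0] == stokes_i.shape[0]  # one visibility per baseline sample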
Example #26
 def Restore(self):
     backname="CORRECTED_DATA_BACKUP"
     backnameFlag="FLAG_BACKUP"
     t=table(self.MSName,readonly=False,ack=False)
     if backname in t.colnames():
         print "  Copying ",backname," to CORRECTED_DATA"
         #t.putcol("CORRECTED_DATA",t.getcol(backname))
         self.CopyCol(backname,"CORRECTED_DATA")
         print "  Copying ",backnameFlag," to FLAG"
         self.CopyCol(backnameFlag,"FLAG")
         #t.putcol(,t.getcol(backnameFlag))
     t.close()
Example #27
    def test_tableascii(self):
        """Testing ASCII table."""
        c1 = makescacoldesc("coli", 0)
        c2 = makescacoldesc("cold", 0.)
        c3 = makescacoldesc("cols", "")
        c4 = makescacoldesc("colb", True)
        c5 = makescacoldesc("colc", 0. + 0j)

        t = table("ttable.py_tmp.tab1", maketabdesc((c1, c2, c3, c4, c5)),
                  ack=False)
        tcol = t.colnames()
        t.addrows(5)
        t.toascii('asciitemp1', columnnames=tcol)
        tablefromascii(tablename='tablefromascii', asciifile='asciitemp1')
        ta = table("tablefromascii", readonly=False)
        tacol = ta.colnames()
        self.assertEqual(tcol, tacol)
        ta.close()
        t.close()
        tabledelete('tablefromascii')
        tabledelete("ttable.py_tmp.tab1")
Example #28
 def test_hypercolumn(self):
     """Test hypercolumns."""
     scd1 = makescacoldesc("col2", "aa")
     scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
     scd3 = makescacoldesc("colrec1", {})
     acd1 = makearrcoldesc("arr1", 1, 0, [2, 3, 4])
     acd2 = makearrcoldesc("arr2", 0. + 0j)
     td = maketabdesc([scd1, scd2, scd3, acd1, acd2])
     tabledefinehypercolumn(td, "TiledArray", 4, ["arr1"])
     tab = table("mytable", tabledesc=td, nrow=100)
     tab.done()
     tabledelete("mytable")
Example #29
def main(options):
    ms = options.ms
    if ms == '':
        logging.error('You have to specify an input MS, use -h for help')
        return
    cols = options.cols
    incol = options.incol
    
    t = pt.table(ms, readonly=False, ack=False)

    for col in cols.split(','):
        if col not in t.colnames():
            logging.info('Adding the output column '+col+' to '+ms+'.')
            if incol == '':
                # prepare col metadata
                cd = t.getcoldesc('DATA')
                coldmi = t.getdminfo('DATA')
                if options.dysco:
                    cd['dataManagerType'] = 'DyscoStMan'
                    cd['dataManagerGroup'] = 'DyscoData'
                    coldmi = {'NAME': col, 'SEQNR': 3,
                              'SPEC': {'dataBitCount': 10, 'distribution': 'TruncatedGaussian',
                                       'distributionTruncation': 2.5, 'normalization': 'AF',
                                       'studentTNu': 0.0, 'weightBitCount': 12},
                              'TYPE': 'DyscoStMan'}
                # not as performing as standard DATA
                else:
                    coldmi["NAME"] = col
                #    cd['dataManagerType'] = 'StandardStMan'
                #    cd['dataManagerGroup'] = 'SSMVar'
                #    coldmi = {'NAME': col,'SEQNR': 0,'SPEC': {'ActualCacheSize': 2,'BUCKETSIZE': 32768,'IndexLength': 799832,'PERSCACHESIZE': 2},'TYPE': 'StandardStMan'}

                cd['comment'] = 'Added by addcol2ms'
                t.addcols(pt.makecoldesc(col, cd), coldmi)

                # if non dysco is done by default
                if options.dysco:
                    logging.warning('Setting '+col+' = 0')
                    pt.taql("update $t set "+col+"=0")

            else:
                # prepare col metadata
                coldmi = t.getdminfo(incol)
                coldmi['NAME'] = col
                cd = t.getcoldesc(incol)

                cd['comment'] = 'Added by addcol2ms'
                t.addcols(pt.makecoldesc(col, cd), coldmi)

                logging.warning('Setting '+col+' = '+incol)
                pt.taql("update $t set "+col+"="+incol)

        else:
            logging.warning('Column '+col+' already exists.')

    t.close()
Example #30
def mscolexist(ms, colname):
    """ Check if a colname exists in the measurement set ms, returns either True or False """
    if os.path.isdir(ms):
        t = pt.table(ms, readonly=True)
        colnames = t.colnames()
        if colname in colnames:  # check if the column is in the list
            exist = True
        else:
            exist = False
        t.close()
    else:
        exist = False  # ms does not exist
    return exist
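mscolexist can guard the column-copy helpers above before they run; a sketch with placeholder arguments:

if mscolexist('obs.ms', 'CORRECTED_DATA'):
    copy_column_to_ms('obs.ms', 'CORRECTED_DATA', 'DATA_BACKUP')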
Example #31
def measureflux_ms(
    src_dir, imagems, fitms, catname, ATCA_band, sourcepar, n_spw, timerange="", field="",
):
    try:
        split(
            vis=imagems, datacolumn="data", outputvis=fitms)
        listobs(vis=fitms, filename=f"listobs_{fitms}.dat", overwrite=True)
    except Exception:
        print("Not splitting")
    int_flux_c = []
    uvrange = ""

    for i in range(n_spw):
        spw = str(i)
        # If things look like they're not working, check the source position:
        # chances are it can't find the source too far from the phase centre
        outfile = f"{src_dir}/casa_files/{catname}_{spw}.cl"
        os.system(f"rm -r {outfile}")
        uvmodelfit(
            vis=fitms,
            niter=15,
            comptype="P",
            spw=spw,
            sourcepar=sourcepar,
            outfile=outfile,
            uvrange=uvrange,
            field=field,
            selectdata=True,
            timerange=timerange,
        )
        tbl = table(outfile)
        flux = tbl.getcell("Flux", 0)[0].astype("float64")
        int_flux_c.append(flux)
        print(flux)


    if ATCA_band == "C":
        np.savetxt(
            f"{src_dir}/{catname}.csv",
            int_flux_c,
            delimiter=",",
            header="S_Cband",
        )
        print(int_flux_c)
    elif ATCA_band == "X":
        np.savetxt(
            f"{src_dir}/{catname}.csv",
            int_flux_c,
            delimiter=",",
            header="S_Xband",
        )
        print(int_flux_c)
    elif ATCA_band == "L":
        # int_flux_l = np.array(int_flux_c[::-1])
        int_flux_l = int_flux_c
        np.savetxt(
            f"{src_dir}/{catname}.csv",
            int_flux_l,
            header="S_Lband",
            delimiter=",",
        )
        print(int_flux_l)
    return
Example #32
    def set_beam_calibration(self, caltable, verbose=True):
        """
        Given a CASA measurement set containing a bandpass calibration, load
        the bandpass calibration into the appropriate pipeline(s).
        """

        # Validate
        assert (os.path.exists(caltable))
        assert (os.path.isdir(caltable))
        assert (os.path.exists(os.path.join(caltable, 'SPECTRAL_WINDOW')))
        assert (os.path.isdir(os.path.join(caltable, 'SPECTRAL_WINDOW')))

        # Load in the calibration data and normalize it
        tab = tables.table(caltable, ack=False)
        caldata = tab.getcol('CPARAM')[...]
        caldata /= numpy.abs(caldata)

        # Load in the flagging data for the calibration
        flgdata = tab.getcol('FLAG')[...]
        tab.close()

        # Load in the frequency information for the calibration
        tab = tables.table(os.path.join(caltable, 'SPECTRAL_WINDOW'),
                           ack=False)
        calfreq = tab.getcol('CHAN_FREQ')[...]
        calfreq = calfreq.ravel()
        tab.close()

        if verbose:
            print(
                f"Loaded {caldata.shape[0]} by {caldata.shape[1]} by {caldata.shape[2]} complex gains covering {calfreq[0]/1e6:.3f} to {calfreq[-1]/1e6:.3f} MHz"
            )

        # Validate the calibration data structure
        assert (caldata.shape[0] == NSTAND)
        assert (caldata.shape[1] == NCHAN_PIPELINE * NPIPELINE_SUBBAND)
        assert (caldata.shape[2] == NPOL)

        # Find the pipelines that should correspond to the specified subband
        # TODO: Use the frequency information to figure this out for the user
        subband_pipelines = []
        for i in range(NPIPELINE_SUBBAND):
            ## Get the frequency range for the pipeline in the subband and pull
            ## out the middle
            center_freq = calfreq[i * NCHAN_PIPELINE:(i + 1) * NCHAN_PIPELINE]
            center_freq = center_freq[center_freq.size // 2]

            ## Try to map that frequency to a pipeline.  If it works, save the
            ## pipeline to subband_pipelines.
            try:
                j = self._freq_to_pipeline(center_freq)
                subband_pipelines.append(self.pipelines[j])
                if verbose:
                    print(
                        f"Found pipeline {j} covering {self.freqs[j][0]/1e6:.3f} to {self.freqs[j][-1]/1e6:.3f} MHz"
                    )
            except ValueError:
                pass

        # Issue a warning if we don't seem to have the right number of pipelines
        # for the subband
        if len(subband_pipelines) != NPIPELINE_SUBBAND:
            warnings.warn(
                f"Found {len(subband_pipelines)} pipelines associated with these data instead of the expected {NPIPELINE_SUBBAND}"
            )

        # Set the coefficients - this is slow
        pb = progressbar.ProgressBar(redirect_stdout=True)
        pb.start(max_value=len(subband_pipelines) * NSTAND)
        for i, p in enumerate(subband_pipelines):
            for j in range(NSTAND):
                for pol in range(NPOL):
                    cal = 1. / caldata[j, i * NCHAN_PIPELINE:(i + 1) *
                                       NCHAN_PIPELINE, pol].ravel()
                    cal = numpy.where(numpy.isfinite(cal), cal, 0)
                    flg = flgdata[j,
                                  i * NCHAN_PIPELINE:(i + 1) * NCHAN_PIPELINE,
                                  pol].ravel()
                    cal *= (1 - flg)

                    with AllowedPipelineFailure(p):
                        p.beamform.update_calibration_gains(
                            2 * (self.beam - 1) + pol, NPOL * j + pol, cal)
                        time.sleep(0.005)
                pb += 1
            self._cal_set[i] = True
        pb.finish()
Example #33
def create_visibility_from_ms(msname, channum=0):
    """ Minimal MS to Visibility converter

    The MS format is much more general than the ARL Visibility, so we cut many corners. This requires casacore to be
    installed; if it is not, a ModuleNotFoundError exception is raised.

    Creates a list of Visibilities, one per phasecentre
    """
    try:
        from casacore.tables import table  # pylint: disable=import-error
    except ModuleNotFoundError:
        raise ModuleNotFoundError("casacore is not installed")

    tab = table(msname)
    print(tab.info())
    fields = numpy.unique(tab.getcol('FIELD_ID'))
    print("Found unique field ids %s" % fields)
    vis_list = list()
    for field in fields:
        # First get the main table information
        ms = tab.query("FIELD_ID==%d" % field)
        print("Found %d rows for field %d" % (ms.nrows(), field))
        time = ms.getcol('TIME')
        channels = len(numpy.transpose(ms.getcol('DATA'))[0])
        print("Found %d channels" % (channels))
        try:
            vis = ms.getcol('DATA')[:, channum, :]
        except IndexError:
            raise IndexError("channel number exceeds max. within ms")
        weight = ms.getcol('WEIGHT')
        uvw = -1 * ms.getcol('UVW')
        antenna1 = ms.getcol('ANTENNA1')
        antenna2 = ms.getcol('ANTENNA2')
        integration_time = ms.getcol('INTERVAL')
        ddid = ms.getcol('DATA_DESC_ID')

        # Now get info from the subtables
        spwtab = table('%s/SPECTRAL_WINDOW' % msname, ack=False)
        cfrequency = spwtab.getcol('CHAN_FREQ')
        frequency = numpy.array([cfrequency[dd] for dd in ddid])[:, channum]
        cchannel_bandwidth = spwtab.getcol('CHAN_WIDTH')
        channel_bandwidth = numpy.array(
            [cchannel_bandwidth[dd] for dd in ddid])[:, 0]

        uvw *= frequency[:, numpy.newaxis] / constants.c.to('m/s').value

        # Get polarisation info
        # poltab = table('%s/POLARIZATION' % msname, ack=False)
        # corr_type = poltab.getcol('CORR_TYPE')
        # TODO: Do interpretation correctly
        polarisation_frame = PolarisationFrame('stokesIQUV')

        # Get configuration
        anttab = table('%s/ANTENNA' % msname, ack=False)
        mount = anttab.getcol('MOUNT')
        names = anttab.getcol('NAME')
        diameter = anttab.getcol('DISH_DIAMETER')
        xyz = anttab.getcol('POSITION')
        configuration = Configuration(name='',
                                      data=None,
                                      location=None,
                                      names=names,
                                      xyz=xyz,
                                      mount=mount,
                                      frame=None,
                                      receptor_frame=ReceptorFrame("linear"),
                                      diameter=diameter)

        # Get phasecentres
        fieldtab = table('%s/FIELD' % msname, ack=False)
        pc = fieldtab.getcol('PHASE_DIR')[field, 0, :]
        phasecentre = SkyCoord(ra=[pc[0]] * u.rad,
                               dec=pc[1] * u.rad,
                               frame='icrs',
                               equinox='J2000')

        vis_list.append(
            Visibility(uvw=uvw,
                       time=time,
                       antenna1=antenna1,
                       antenna2=antenna2,
                       frequency=frequency,
                       vis=vis,
                       weight=weight,
                       imaging_weight=weight,
                       integration_time=integration_time,
                       channel_bandwidth=channel_bandwidth,
                       configuration=configuration,
                       phasecentre=phasecentre,
                       polarisation_frame=polarisation_frame))
    return vis_list
Example #34
def makeMAIN2(tablename,specdata,time,state_id,texp=0.2,tBW=2.5e9):

	# modules
	import casacore.tables as tb

	# params
	nrow  = specdata.shape[0]
	nspw  = specdata.shape[1]
	npol = specdata.shape[2]
	nchan  = specdata.shape[3]

	weight = tBW/float(nchan) * texp
	sigma = (tBW/float(nchan) * texp)**-0.5

	ind_spw = (np.linspace(0,2*nrow-1,2*nrow,dtype='int32') % 2)

	# tables
	colnames = ['UVW',
				'FLAG',
				'FLAG_CATEGORY',
				'WEIGHT',
				'SIGMA',
				'ANTENNA1',
				'ANTENNA2',
				'ARRAY_ID',
				'DATA_DESC_ID',
				'EXPOSURE',
				'FEED1',
				'FEED2',
				'FIELD_ID',
				'FLAG_ROW',
				'INTERVAL',
				'OBSERVATION_ID',
				'PROCESSOR_ID',
				'SCAN_NUMBER',
				'STATE_ID',
				'TIME',
				'TIME_CENTROID',
				'FLOAT_DATA'
				]

	colkeywords = [
		{'MEASINFO': {'Ref': 'ITRF', 'type': 'uvw'},'QuantumUnits': np.array(['m', 'm', 'm'],dtype='|S2')},
		{},
		{'CATEGORY': np.array([],dtype='|S1')},
		{},{},{},{},{},{},
		{'QuantumUnits': np.array(['s'],dtype='|S2')},
		{},{},{},{},
		{'QuantumUnits': np.array(['s'],dtype='|S2')},
		{},{},{},{},
		{'MEASINFO': {'Ref': 'UTC', 'type': 'epoch'}, 'QuantumUnits': np.array(['s'],dtype='|S2')},
		{'MEASINFO': {'Ref': 'UTC', 'type': 'epoch'}, 'QuantumUnits': np.array(['s'],dtype='|S2')},
		{'UNIT': 'K'}
				  ]

	ndims = [1,2,3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2]
	isarrays = [True,True,True,True,True,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,True]
	valuetypes = ['double','bool','bool','float','float','int','int','int','int','double','int','int','int','bool','double','int','int','int','int','double','double','float']

	descs = []
	for colname,colkeyword,ndim,valuetype,isarray in zip(colnames,colkeywords,ndims,valuetypes,isarrays):
		if colname=='UVW':
			descs.append(tb.makearrcoldesc(colname,0.0,datamanagertype='StandardStMan',datamanagergroup='StandardStMan',ndim=ndim,keywords=colkeyword,valuetype=valuetype,options=5,shape=np.array([3], dtype='int32')))
		elif isarray:
			descs.append(tb.makearrcoldesc(colname,0.0,datamanagertype='StandardStMan',datamanagergroup='StandardStMan',ndim=ndim,keywords=colkeyword,valuetype=valuetype))
		else:
			descs.append(tb.makescacoldesc(colname,0.0,datamanagertype='StandardStMan',datamanagergroup='StandardStMan',keywords=colkeyword,valuetype=valuetype))

	td = tb.maketabdesc(descs=descs)

	returned_table = tb.table(tablename,tabledesc=td,nrow=2*nrow,readonly=False)

	# put values
	value = np.zeros([2*nrow,3],dtype='float64')
	returned_table.putcol('UVW',value)

	value = np.full([2*nrow,2],sigma)
	returned_table.putcol('SIGMA',value)

	value = np.full([2*nrow,2],weight)
	returned_table.putcol('WEIGHT',value)

	value = np.zeros([2*nrow],dtype='int32')
	returned_table.putcol('ANTENNA1',value)
	returned_table.putcol('ANTENNA2',value)
	returned_table.putcol('ARRAY_ID',value)
	returned_table.putcol('FEED1',value)
	returned_table.putcol('FEED2',value)
	returned_table.putcol('FIELD_ID',value)
	returned_table.putcol('OBSERVATION_ID',value)
	returned_table.putcol('PROCESSOR_ID',value)

	value = np.zeros([2*nrow],dtype='bool')
	returned_table.putcol('FLAG_ROW',value)

	value = np.full([2*nrow],texp,dtype='float64')
	returned_table.putcol('EXPOSURE',value)
	returned_table.putcol('INTERVAL',value)

	value = np.zeros_like(np.concatenate([specdata[:,0],specdata[:,1]],axis=0),dtype='bool')
	value = value.transpose(0,2,1)
	returned_table.putcol('FLAG',value)

	value  = np.zeros_like(np.concatenate([specdata[:,0],specdata[:,1]],axis=0),dtype='float64')

	for i in range(2):
		value[ind_spw==i] = specdata[:,i].copy()

	value = value.transpose(0,2,1)
	returned_table.putcol('FLOAT_DATA',value)

	value = np.zeros(2*nrow,dtype='int32')
	for i in range(2):
		value[ind_spw==i] = i

	returned_table.putcol('DATA_DESC_ID',value)

	value = np.zeros(2*nrow,dtype='int32')
	for i in range(2):
		value[ind_spw==i] = state_id
	returned_table.putcol('STATE_ID',value)

	value = np.zeros(2*nrow,dtype='int32')
	for i in range(2):
		value[ind_spw==i] = np.linspace(0,nrow-1,nrow,dtype='int32')

	returned_table.putcol('SCAN_NUMBER',value)

	value = np.zeros(2*nrow,dtype='float64')
	for i in range(2):
		value[ind_spw==i] = time.copy()
	returned_table.putcol('TIME',value)
	returned_table.putcol('TIME_CENTROID',value)

	returned_table.flush()
	returned_table.close()
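A sketch of calling makeMAIN2 with synthetic data; the shapes follow the nrow/nspw/npol/nchan unpacking at the top of the function, and the row-interleaving logic assumes exactly two spectral windows:

import numpy as np
specdata = np.zeros((10, 2, 2, 128))          # [nrow, nspw, npol, nchan], placeholder data
time = np.linspace(4.9e9, 4.9e9 + 2.0, 10)    # MJD seconds, placeholder values
makeMAIN2('synthetic.MS', specdata, time, state_id=0)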
Example #35
def main(ms_files, outfile, clobber=True):
    """
    Performs a virtual concatenation with possible frequency gaps

    Parameters
    ----------
    ms_files : list
        List of files to merge, ordered by frequency. Files that do not exist
        are identified as gaps and are filled with flagged dummy data
    outfile : str
        Output file
    clobber : bool, optional
        If True, existing files are overwritten

    """
    if type(ms_files) is str:
        ms_files = [f.strip(' \'\"') for f in ms_files.strip('[]').split(',')]
    if type(clobber) is str:
        if clobber.lower() == 'true':
            clobber = True
        else:
            clobber = False
    if os.path.exists(outfile):
        if clobber:
            pt.tabledelete(outfile)
        else:
            return

    # Find at least one existing ms
    ms_exists = None
    for ms in ms_files:
        if os.path.exists(ms):
            ms_exists = ms
            sw = pt.table('{}::SPECTRAL_WINDOW'.format(ms))
            ms_exists_ref_freq = sw.getcol('REF_FREQUENCY')[0]
            sw.close()
            break
    if ms_exists is None:
        print('ERROR: no files exist')
        sys.exit(1)

    # Identify gaps
    ms_files_to_concat = []
    for i, ms in enumerate(ms_files):
        if not os.path.exists(ms):
            # Missing file means gap, so create an appropriate dummy dataset with
            # a random name
            ms_new = '{0}_{1}.ms'.format(
                os.path.splitext(ms)[0],
                uuid.uuid4().urn.split('-')[-1])
            pt.tableutil.tablecopy(ms_exists, ms_new)

            # Alter SPECTRAL_WINDOW subtable as appropriate to fill gap
            sw = pt.table('{}::SPECTRAL_WINDOW'.format(ms_new), readonly=False)
            tot_bandwidth = sw.getcol('TOTAL_BANDWIDTH')[0]
            if i > 0:
                sw_low = pt.table('{}::SPECTRAL_WINDOW'.format(ms_files[i -
                                                                        1]))
                ref_freq = sw_low.getcol('REF_FREQUENCY') + tot_bandwidth
                sw_low.close()
            else:
                for j in range(1, len(ms_files) - 1):
                    if os.path.exists(ms_files[j]):
                        sw_high = pt.table('{}::SPECTRAL_WINDOW'.format(
                            ms_files[j]))
                        ref_freq = sw_high.getcol(
                            'REF_FREQUENCY') - tot_bandwidth * j
                        sw_high.close()
                        break
            chan_freq = sw.getcol('CHAN_FREQ') - ms_exists_ref_freq + ref_freq
            sw.putcol('REF_FREQUENCY', ref_freq)
            sw.putcol('CHAN_FREQ', chan_freq)
            sw.close()

            # Flag all data
            t = pt.table(ms_new, readonly=False)
            t.putcol('FLAG_ROW', np.ones(len(t), dtype=bool))
            t.close()

            ms_files_to_concat.append(ms_new)
        else:
            ms_files_to_concat.append(ms)

    # Concat
    pt.msutil.msconcat(ms_files_to_concat, outfile)
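A sketch of the virtual concatenation, with hypothetical band names; the nonexistent path marks a gap that is filled with flagged dummy data:

main(['band0.ms', 'band1.ms', 'missing_band2.ms'], 'full_bandwidth.ms', clobber=True)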
Example #36
def plugin_main(args, **kwargs):
    """
    Makes a mapfile with the MSs spread across the full bandwidth

    Parameters
    ----------
    mapfile_in : str
        Filename of datamap containing MS files
    mapfile_dir : str
        Directory for output mapfile
    filename: str
        Name of output mapfile
    num: int, optional
        Number of frequencies in output mapfile

    Returns
    -------
    result : dict
        New parmdb datamap filename

    """
    mapfile_in = kwargs['mapfile_in']
    mapfile_dir = kwargs['mapfile_dir']
    filename = kwargs['filename']
    if 'num' in kwargs:
        num = int(kwargs['num'])
    else:
        num = 6
    fileid = os.path.join(mapfile_dir, filename)

    map_in = DataMap.load(mapfile_in)
    map_in.iterator = DataMap.SkipIterator
    map_out = DataMap()
    map_out.data = []
    map_out._data = []

    # do not re-run if we already ran, and input files are deleted.
    if os.path.exists(fileid) and not os.path.exists(map_in[0].file):
        print('PipelineStep_selectDistFreqs: Not re-running because output file exists, but input files don\'t!')
        return {'mapfile': fileid}

    #sort into frequency groups
    freq_groups = {}
    hosts = []
    for item in map_in:
        # Get the frequency info from the MS file
        sw = pt.table(item.file + '::SPECTRAL_WINDOW', ack=False)
        freq = int(sw.col('REF_FREQUENCY')[0])
        sw.close()
        if freq in freq_groups:
            freq_groups[freq].append(item.file)
        else:
            freq_groups[freq] = [item.file]
        if item.host not in hosts:
            hosts.append(item.host)

    # select frequencies
    freqs = sorted(freq_groups.keys())
    num_freqs = len(freqs)
    if num > num_freqs:
        print('PipelineStep_selectDistFreqs: fewer than %d frequency groups found, continuing with %d groups.' % (
            num, num_freqs))
        num = num_freqs
    dist_ind = get_distributed_indices(0, num_freqs - 1, num)
    selfreqs = [freqs[ind] for ind in dist_ind]
    if len(selfreqs) < 1:
        print "PipelineStep_selectDistFreqs: Selected fewer than one frequency band."
        raise ValueError("Selected fewer than one frequency band.")

    all_files = []
    for selfreq in selfreqs:
        all_files.extend(freq_groups[selfreq])

    # extend the hosts-list
    for i in range(len(all_files) - len(hosts)):
        hosts.append(hosts[i])

    # fill the output-map
    for (host, fname) in zip(hosts, all_files):
        map_out.append(DataProduct(host, fname, False))

    map_out.save(fileid)
    del (map_in)
    del (map_out)
    result = {'mapfile': fileid}

    return result
Example #37
def main(options):
    ms = options.ms
    if ms == '':
        logging.error('You have to specify an input MS, use -h for help')
        return
    cols = options.cols
    incol = options.incol

    t = pt.table(ms, readonly=False, ack=False)

    for col in cols.split(','):
        if col not in t.colnames():
            logging.info('Adding the output column ' + col + ' to ' + ms + '.')
            if incol == '':
                # prepare col metadata
                cd = t.getcoldesc('DATA')
                coldmi = t.getdminfo('DATA')
                if options.dysco:
                    cd['dataManagerType'] = 'DyscoStMan'
                    cd['dataManagerGroup'] = 'DyscoData'
                    coldmi = {
                        'NAME': col,
                        'SEQNR': 3,
                        'SPEC': {
                            'dataBitCount': 10,
                            'distribution': 'TruncatedGaussian',
                            'distributionTruncation': 2.5,
                            'normalization': 'AF',
                            'studentTNu': 0.0,
                            'weightBitCount': 12
                        },
                        'TYPE': 'DyscoStMan'
                    }
                # not as performing as standard DATA
                else:
                    coldmi["NAME"] = col
                #    cd['dataManagerType'] = 'StandardStMan'
                #    cd['dataManagerGroup'] = 'SSMVar'
                #    coldmi = {'NAME': col,'SEQNR': 0,'SPEC': {'ActualCacheSize': 2,'BUCKETSIZE': 32768,'IndexLength': 799832,'PERSCACHESIZE': 2},'TYPE': 'StandardStMan'}

                cd['comment'] = 'Added by addcol2ms'
                t.addcols(pt.makecoldesc(col, cd), coldmi)

                # if non dysco is done by default
                if options.dysco:
                    logging.warning('Setting ' + col + ' = 0')
                    pt.taql("update $t set " + col + "=0")

            else:
                # prepare col metadata
                coldmi = t.getdminfo(incol)
                coldmi['NAME'] = col
                cd = t.getcoldesc(incol)

                cd['comment'] = 'Added by addcol2ms'
                t.addcols(pt.makecoldesc(col, cd), coldmi)

                logging.warning('Setting ' + col + ' = ' + incol)
                pt.taql("update $t set " + col + "=" + incol)

        else:
            logging.warning('Column ' + col + ' already exists.')

    t.close()
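A sketch of driving main() above with a minimal stand-in for the options object; the attribute names (ms, cols, incol, dysco) are the ones the function reads, and the values are placeholders:

from types import SimpleNamespace

opts = SimpleNamespace(ms='obs.ms', cols='CORRECTED_DATA,MODEL_DATA',
                       incol='', dysco=False)
main(opts)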
Example #38
    def read_ms(self,
                filepath,
                run_check=True,
                check_extra=True,
                run_check_acceptability=True,
                data_column='DATA',
                pol_order='AIPS'):
        '''
        read in a casa measurement set

        Args:
            filepath: name of the measurement set folder
            run_check: Option to check for the existence and proper shapes of
                parameters after reading in the file. Default is True.
            check_extra: Option to check optional parameters as well as required
                ones. Default is True.
            run_check_acceptability: Option to check the values of parameters
                after reading in the file. Default is True.
            data_column: specify which CASA measurement set data column to read from (can be 'DATA', 'CORRECTED_DATA', or 'MODEL')
            pol_order: use 'AIPS' or 'CASA' ordering of polarizations?
        '''
        # make sure user requests a valid data_column
        if data_column != 'DATA' and data_column != 'CORRECTED_DATA' and data_column != 'MODEL':
            raise ValueError(
                'Invalid data_column value supplied. Use \'DATA\', \'MODEL\' or \'CORRECTED_DATA\''
            )
        if not os.path.exists(filepath):
            raise IOError(filepath + ' not found')
        # set visibility units
        if (data_column == 'DATA'):
            self.vis_units = "UNCALIB"
        elif (data_column == 'CORRECTED_DATA'):
            self.vis_units = "JY"
        elif (data_column == 'MODEL'):
            self.vis_units = "JY"
        # limit length of extra_keywords keys to 8 characters to match uvfits & miriad
        self.extra_keywords['DATA_COL'] = data_column
        # get frequency information from spectral window table
        tb_spws = tables.table(filepath + '/SPECTRAL_WINDOW')
        freqs = tb_spws.getcol('CHAN_FREQ')
        self.freq_array = freqs
        self.Nfreqs = int(freqs.shape[1])
        self.channel_width = float(tb_spws.getcol('CHAN_WIDTH')[0, 0])
        self.Nspws = int(freqs.shape[0])
        if self.Nspws > 1:
            raise ValueError('Sorry.  Files with more than one spectral ' +
                             'window (spw) are not yet supported. A ' +
                             'great project for the interested student!')

        self.spw_array = np.arange(self.Nspws)
        tb_spws.close()
        # now get the data
        tb = tables.table(filepath)
        # check for multiple subarrays. importuvfits does not appear to preserve subarray information!
        subarray = np.unique(np.int32(tb.getcol('ARRAY_ID')) - 1)
        if len(set(subarray)) > 1:
            raise ValueError('This file appears to have multiple subarray '
                             'values; only files with one subarray are '
                             'supported.')
        times_unique = time.Time(np.unique(tb.getcol('TIME') / (3600. * 24.)),
                                 format='mjd').jd
        self.Ntimes = int(len(times_unique))
        data_array = tb.getcol(data_column)
        self.Nblts = int(data_array.shape[0])
        flag_array = tb.getcol('FLAG')
        # CASA stores data in complex array with dimension NbltsxNfreqsxNpols
        if (len(data_array.shape) == 3):
            data_array = np.expand_dims(data_array, axis=1)
            flag_array = np.expand_dims(flag_array, axis=1)
        self.data_array = data_array
        self.flag_array = flag_array
        self.Npols = int(data_array.shape[-1])
        self.uvw_array = tb.getcol('UVW')
        self.ant_1_array = tb.getcol('ANTENNA1').astype(np.int32)
        self.ant_2_array = tb.getcol('ANTENNA2').astype(np.int32)
        self.Nants_data = len(
            np.unique(
                np.concatenate((np.unique(self.ant_1_array),
                                np.unique(self.ant_2_array)))))
        self.baseline_array = self.antnums_to_baseline(self.ant_1_array,
                                                       self.ant_2_array)
        self.Nbls = len(np.unique(self.baseline_array))
        # Get times. MS from cotter are modified Julian dates in seconds (thanks to Danny Jacobs for figuring out the proper conversion)
        self.time_array = time.Time(tb.getcol('TIME') / (3600. * 24.),
                                    format='mjd').jd
        # Polarization array
        tbPol = tables.table(filepath + '/POLARIZATION')
        # list of lists, probably with each list corresponding to SPW.
        polList = tbPol.getcol('CORR_TYPE')[0]
        self.polarization_array = np.zeros(len(polList), dtype=np.int32)
        for polnum in range(len(polList)):
            self.polarization_array[polnum] = int(polDict[polList[polnum]])
        tbPol.close()
        # Integration time
        # use first interval and assume rest are constant (though measurement set has all integration times for each Nblt )
        # self.integration_time=tb.getcol('INTERVAL')[0]
        # for some reason, interval ends up larger than the difference between times...
        self.integration_time = float(times_unique[1] -
                                      times_unique[0]) * 3600. * 24.
        # open table with antenna location information
        tbAnt = tables.table(filepath + '/ANTENNA')
        tbObs = tables.table(filepath + '/OBSERVATION')
        self.telescope_name = tbObs.getcol('TELESCOPE_NAME')[0]
        self.instrument = tbObs.getcol('TELESCOPE_NAME')[0]
        tbObs.close()
        # Use Telescopes.py dictionary to set array position
        full_antenna_positions = tbAnt.getcol('POSITION')
        xyz_telescope_frame = tbAnt.getcolkeyword('POSITION',
                                                  'MEASINFO')['Ref']
        antFlags = np.empty(len(full_antenna_positions), dtype=bool)
        antFlags[:] = False
        for antnum in range(len(antFlags)):
            antFlags[antnum] = np.all(full_antenna_positions[antnum, :] == 0)
        if (xyz_telescope_frame == 'ITRF'):
            self.telescope_location = np.array(
                np.mean(full_antenna_positions[np.invert(antFlags), :],
                        axis=0))
        if self.telescope_location is None:
            try:
                self.set_telescope_params()
            except ValueError:
                warnings.warn(
                    'Telescope frame is not ITRF and telescope is not '
                    'in known_telescopes, so telescope_location is not set.')

        # antenna names
        ant_names = tbAnt.getcol('STATION')
        ant_diams = tbAnt.getcol('DISH_DIAMETER')

        self.antenna_diameters = ant_diams[ant_diams > 0]

        self.Nants_telescope = len(antFlags[np.invert(antFlags)])
        test_name = ant_names[0]
        names_same = True
        for antnum in range(len(ant_names)):
            if (not (ant_names[antnum] == test_name)):
                names_same = False
        if (not (names_same)):
            # cotter measurement sets store antenna names in the NAMES column.
            self.antenna_names = ant_names
        else:
            # importuvfits measurement sets store antenna names in the STATION column.
            self.antenna_names = tbAnt.getcol('NAME')
        self.antenna_numbers = np.arange(len(self.antenna_names)).astype(int)
        nAntOrig = len(self.antenna_names)
        ant_names = []
        for antNum in range(len(self.antenna_names)):
            if not (antFlags[antNum]):
                ant_names.append(self.antenna_names[antNum])
        self.antenna_names = ant_names
        self.antenna_numbers = self.antenna_numbers[np.invert(antFlags)]

        relative_positions = np.zeros_like(full_antenna_positions)
        relative_positions = full_antenna_positions - self.telescope_location.reshape(
            1, 3)
        self.antenna_positions = relative_positions[np.invert(antFlags), :]

        tbAnt.close()
        tbField = tables.table(filepath + '/FIELD')
        if (tbField.getcol('PHASE_DIR').shape[1] == 2):
            self.phase_type = 'drift'
            self.set_drift()
        elif (tbField.getcol('PHASE_DIR').shape[1] == 1):
            self.phase_type = 'phased'
            # MSv2.0 appears to assume J2000. Not sure how to specify otherwise
            self.phase_center_epoch = float(
                tb.getcolkeyword('UVW', 'MEASINFO')['Ref'][1:])
            self.phase_center_ra = float(tbField.getcol('PHASE_DIR')[0][0][0])
            self.phase_center_dec = float(tbField.getcol('PHASE_DIR')[0][0][1])
            self.set_phased()
        # set LST array from times and itrf
        self.set_lsts_from_time_array()
        # set the history parameter
        _, self.history = self._ms_hist_to_string(
            tables.table(filepath + '/HISTORY'))
        # CASA weights column keeps track of number of data points averaged.

        if not uvutils.check_history_version(self.history,
                                             self.pyuvdata_version_str):
            self.history += self.pyuvdata_version_str
        self.nsample_array = tb.getcol('WEIGHT_SPECTRUM')
        if (len(self.nsample_array.shape) == 3):
            self.nsample_array = np.expand_dims(self.nsample_array, axis=1)
        self.object_name = tbField.getcol('NAME')[0]
        tbField.close()
        tb.close()
        # order polarizations
        self.order_pols(pol_order)
        if run_check:
            self.check(check_extra=check_extra,
                       run_check_acceptability=run_check_acceptability)
Example #39
def create_blockvisibility_from_ms(msname, channum=None, ack=False):
    """ Minimal MS to BlockVisibility converter

    The MS format is much more general than the ARL BlockVisibility, so we cut many corners. This requires casacore to be
    installed; if it is not, a ModuleNotFoundError exception is raised.

    Creates a list of BlockVisibilities, split by field and spectral window
    
    :param msname: File name of MS
    :param channum: range of channels e.g. range(17,32), default is None meaning all
    :return:
    """
    try:
        from casacore.tables import table  # pylint: disable=import-error
    except ModuleNotFoundError:
        raise ModuleNotFoundError("casacore is not installed")

    tab = table(msname, ack=ack)
    log.debug("create_blockvisibility_from_ms: %s" % str(tab.info()))

    fields = numpy.unique(tab.getcol('FIELD_ID'))
    dds = numpy.unique(tab.getcol('DATA_DESC_ID'))
    log.debug(
        "create_blockvisibility_from_ms: Found unique fields %s, unique data descriptions %s"
        % (str(fields), str(dds)))
    vis_list = list()
    for dd in dds:
        dtab = table(msname, ack=ack).query('DATA_DESC_ID==%d' % dd, style='')
        for field in fields:
            ms = dtab.query('FIELD_ID==%d' % field, style='')
            assert ms.nrows(
            ) > 0, "Empty selection for FIELD_ID=%d and DATA_DESC_ID=%d" % (
                field, dd)
            log.debug("create_blockvisibility_from_ms: Found %d rows" %
                      (ms.nrows()))
            time = ms.getcol('TIME')
            channels = ms.getcol('DATA').shape[-2]
            log.debug("create_visibility_from_ms: Found %d channels" %
                      (channels))
            if channum is None:
                channum = range(channels)
            try:
                ms_vis = ms.getcol('DATA')[:, channum, :]
                ms_weight = ms.getcol('WEIGHT')[:, :]
            except IndexError:
                raise IndexError("channel number exceeds max. within ms")
            uvw = -1 * ms.getcol('UVW')
            antenna1 = ms.getcol('ANTENNA1')
            antenna2 = ms.getcol('ANTENNA2')
            integration_time = ms.getcol('INTERVAL')

            # Now get info from the subtables
            spwtab = table('%s/SPECTRAL_WINDOW' % msname, ack=False)
            cfrequency = spwtab.getcol('CHAN_FREQ')[dd][channum]
            cchannel_bandwidth = spwtab.getcol('CHAN_WIDTH')[dd][channum]
            nchan = cfrequency.shape[0]

            # Get polarisation info
            poltab = table('%s/POLARIZATION' % msname, ack=False)
            corr_type = poltab.getcol('CORR_TYPE')
            # These correspond to the CASA Stokes enumerations
            if numpy.array_equal(corr_type[0], [1, 2, 3, 4]):
                polarisation_frame = PolarisationFrame('stokesIQUV')
            elif numpy.array_equal(corr_type[0], [5, 6, 7, 8]):
                polarisation_frame = PolarisationFrame('circular')
            elif numpy.array_equal(corr_type[0], [9, 10, 11, 12]):
                polarisation_frame = PolarisationFrame('linear')
            else:
                raise KeyError("Polarisation not understood: %s" %
                               str(corr_type))

            npol = 4

            # Get configuration
            anttab = table('%s/ANTENNA' % msname, ack=False)
            nants = anttab.nrows()
            mount = anttab.getcol('MOUNT')
            names = anttab.getcol('NAME')
            diameter = anttab.getcol('DISH_DIAMETER')
            xyz = anttab.getcol('POSITION')
            configuration = Configuration(
                name='',
                data=None,
                location=None,
                names=names,
                xyz=xyz,
                mount=mount,
                frame=None,
                receptor_frame=ReceptorFrame("linear"),
                diameter=diameter)

            # Get phasecentres
            fieldtab = table('%s/FIELD' % msname, ack=False)
            pc = fieldtab.getcol('PHASE_DIR')[field, 0, :]
            phasecentre = SkyCoord(ra=[pc[0]] * u.rad,
                                   dec=pc[1] * u.rad,
                                   frame='icrs',
                                   equinox='J2000')

            bv_times = numpy.unique(time)
            ntimes = len(bv_times)

            bv_vis = numpy.zeros([ntimes, nants, nants, nchan,
                                  npol]).astype('complex')
            bv_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])
            bv_uvw = numpy.zeros([ntimes, nants, nants, 3])

            time_last = time[0]
            time_index = 0
            for row, _ in enumerate(ms_vis):
                # MS has shape [row, npol, nchan]
                # BV has shape [ntimes, nants, nants, nchan, npol]
                if time[row] != time_last:
                    assert time[
                        row] > time_last, "MS is not time-sorted - cannot convert"
                    time_index += 1
                    time_last = time[row]
                bv_vis[time_index, antenna2[row], antenna1[row],
                       ...] = ms_vis[row, ...]
                bv_weight[time_index, antenna2[row], antenna1[row], :,
                          ...] = ms_weight[row, numpy.newaxis, ...]
                bv_uvw[time_index, antenna2[row],
                       antenna1[row], :] = uvw[row, :]

            vis_list.append(
                BlockVisibility(uvw=bv_uvw,
                                time=bv_times,
                                frequency=cfrequency,
                                channel_bandwidth=cchannel_bandwidth,
                                vis=bv_vis,
                                weight=bv_weight,
                                configuration=configuration,
                                phasecentre=phasecentre,
                                polarisation_frame=polarisation_frame))
        dtab.close()
    tab.close()
    return vis_list
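A minimal usage sketch for the converter above, with a hypothetical MS name; it assumes the surrounding ARL environment (BlockVisibility, logging) is already set up:

# Read channels 17-31 of each field/data description into BlockVisibility's.
vis_list = create_blockvisibility_from_ms('example.ms', channum=range(17, 32))
for bv in vis_list:
    print(bv.vis.shape, bv.frequency[0], bv.frequency[-1])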
Example #40
    if opts.save and opts.rewrite:
        raise Exception('Only one mode at a time can be used: choose -s OR -r')
    if not opts.fname and opts.rewrite:
        raise Exception('-f/--fname must be specified in rewriting mode')

    fversion = 0
    # define the pattern unconditionally; the versioning loop below needs it
    fname_pattern = ms_name + '.flag_v'
    if opts.fname is None:
        opts.fname = fname_pattern + '%02d' % fversion

    fname_check = os.path.exists(opts.fname + '.npy')
    while fname_check and opts.save:
        fversion += 1
        opts.fname = fname_pattern + '%02d' % fversion
        fname_check = os.path.exists(opts.fname + '.npy')

    t = tb.table(filename, readonly=opts.save)
    if opts.save:
        print('Saving FLAG column in ' + opts.fname + '.npy')
        flag = t.getcol('FLAG')
        flagfile = open(opts.fname + '.npy', 'wb')
        np.save(flagfile, flag)
    if opts.rewrite:
        print('Overwriting FLAG column with ' + opts.fname + '.npy')
        flagfile = open(opts.fname + '.npy', 'rb')
        flag = np.load(flagfile)
        t.putcol('FLAG', flag)

    flagfile.close()
    t.close()
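Stripped of option handling, the two modes above reduce to dumping and restoring the FLAG column with numpy; a self-contained, hedged sketch of that core:

import numpy as np
import casacore.tables as tb

def save_flags(ms, npy_path):
    # snapshot the FLAG column to a .npy file
    t = tb.table(ms, readonly=True)
    np.save(npy_path, t.getcol('FLAG'))
    t.close()

def restore_flags(ms, npy_path):
    # overwrite the FLAG column from an earlier snapshot
    t = tb.table(ms, readonly=False)
    t.putcol('FLAG', np.load(npy_path))
    t.close()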
Example #41
def add_dummyms(msfiles):
    '''
    Add dummy ms to create a regular frequency grid when doing a concat with DPPP
    '''
    if len(msfiles) == 1:  # there is nothing to do
        return msfiles

    keyname = 'REF_FREQUENCY'
    freqaxis = []
    newmslist = []

    # Check for wrong REF_FREQUENCY which happens after a DPPP split in frequency
    for ms in msfiles:
        t = pt.table(ms + '/SPECTRAL_WINDOW', readonly=True)
        freq = t.getcol('REF_FREQUENCY')[0]
        t.close()
        freqaxis.append(freq)
    freqaxis = np.sort(np.array(freqaxis))
    minfreqspacing = np.min(np.diff(freqaxis))
    if minfreqspacing == 0.0:
        keyname = 'CHAN_FREQ'

    freqaxis = []
    for ms in msfiles:
        t = pt.table(ms + '/SPECTRAL_WINDOW', readonly=True)
        if keyname == 'CHAN_FREQ':
            freq = t.getcol(keyname)[0][0]
        else:
            freq = t.getcol(keyname)[0]
        t.close()
        freqaxis.append(freq)

    # put everything in order of increasing frequency
    freqaxis = np.array(freqaxis)
    idx = np.argsort(freqaxis)

    freqaxis = freqaxis[idx]
    sortedmslist = list(msfiles[i] for i in idx)
    freqspacing = np.diff(freqaxis)
    minfreqspacing = np.min(np.diff(freqaxis))

    # insert dummies in the ms list if needed
    count = 0
    newmslist.append(
        sortedmslist[0])  # always start with the first ms in the list
    for msnumber, ms in enumerate(sortedmslist[1::]):
        if int(round(old_div(freqspacing[msnumber], minfreqspacing))) > 1:
            ndummy = int(round(old_div(freqspacing[msnumber],
                                       minfreqspacing))) - 1

            for dummy in range(ndummy):
                newmslist.append('dummy' + str(count) + '.ms')
                print('Added dummy:', 'dummy' + str(count) + '.ms')
                count = count + 1
        newmslist.append(ms)

    print(
        'Updated ms list with dummies inserted to create a regular frequency grid'
    )
    print(newmslist)
    return newmslist
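For instance, three subbands on a 4 MHz grid with the 128 MHz slot missing would gain one placeholder; a hedged illustration with hypothetical file names (the function reads REF_FREQUENCY from the actual SPECTRAL_WINDOW tables, so real MS files are required to run it):

# Subbands at 120, 124 and 132 MHz: the 128 MHz slot is missing.
mslist = add_dummyms(['SB120.ms', 'SB132.ms', 'SB124.ms'])
# Expected result: ['SB120.ms', 'SB124.ms', 'dummy0.ms', 'SB132.ms']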
Example #42
    uvsel = "[0.100000,1000.000000]"

#print doflagafter, takeoutbeam, aoflagger, dysco, split
filechecker(clustercat, fullmask, indico, args['h5sols'])
if args['h5sols'] is None:
    fixsymlinks()
    solsfile = glob.glob('DDS3_full*smoothed.npz')
    if len(solsfile) < 1:
        print('Cannot find the correct solution file')
        sys.exit()

msfiles = ascii.read(args['mslist'], data_start=0)
msfiles = list(
    msfiles[:][msfiles.colnames[0]])  # convert to normal list of strings

t = pt.table(msfiles[0] + '/OBSERVATION')
fieldname = t.getcol('LOFAR_TARGET')['array'][0]
t.close()

msoutconcat = fieldname + '_' + obsid + '.dysco.sub.shift.avg.weights.ms.archive'

if boxfile != 'fullfield':
    composite = False
    r = pyregion.open(boxfile)
    if len(r[:]) > 1:
        composite = True
    else:
        print(boxfile)
        phasecenter = '[' + getregionboxcenter(boxfile) + ']'
        print(phasecenter)
else:
Example #43
 def test_table_unicode(self):
     t = table(join(self.workdir, unicode_string), maketabdesc(), ack=False)
Example #44
    def test_msutil(self):
        """Testing msutil."""
        datacoldesc = makearrcoldesc("DATA", 0., ndim=2, shape=[20, 4])
        ms = default_ms("tabtemp", maketabdesc((datacoldesc)))
        ms.close()

        spw = table("tabtemp/SPECTRAL_WINDOW", readonly=False)
        spw.addrows()
        spw.putcell('NUM_CHAN', 0, 20)
        t = table("tabtemp", readonly=False)
        print(t.colnames())
        addImagingColumns("tabtemp")

        self.assertIn('MODEL_DATA', t.colnames())
        self.assertIn('CORRECTED_DATA', t.colnames())
        self.assertIn('IMAGING_WEIGHT', t.colnames())

        removeImagingColumns("tabtemp")

        self.assertNotIn('MODEL_DATA', t.colnames())
        self.assertNotIn('CORRECTED_DATA', t.colnames())
        self.assertNotIn('IMAGING_WEIGHT', t.colnames())

        addDerivedMSCal("tabtemp")

        self.assertIn('PA1', t.colnames())
        self.assertIn('PA2', t.colnames())
        self.assertIn('LAST', t.colnames())
        self.assertIn('AZEL2', t.colnames())
        self.assertIn('AZEL1', t.colnames())
        self.assertIn('UVW_J2000', t.colnames())
        self.assertIn('LAST1', t.colnames())
        self.assertIn('LAST2', t.colnames())
        self.assertIn('HA1', t.colnames())
        self.assertIn('HA2', t.colnames())
        self.assertIn('HA', t.colnames())

        removeDerivedMSCal("tabtemp")

        self.assertNotIn('PA1', t.colnames())
        self.assertNotIn('PA2', t.colnames())
        self.assertNotIn('LAST', t.colnames())
        self.assertNotIn('AZEL2', t.colnames())
        self.assertNotIn('AZEL1', t.colnames())
        self.assertNotIn('UVW_J2000', t.colnames())
        self.assertNotIn('LAST1', t.colnames())
        self.assertNotIn('LAST2', t.colnames())
        self.assertNotIn('HA1', t.colnames())
        self.assertNotIn('HA2', t.colnames())
        self.assertNotIn('HA', t.colnames())

        taql("SELECT FROM tabtemp where TIME in (SELECT DISTINCT TIME" +
             " FROM tabtemp LIMIT 10) GIVING first10.MS AS PLAIN")
        taql("SELECT FROM tabtemp where TIME in (SELECT DISTINCT TIME" +
             " FROM tabtemp LIMIT 10 OFFSET 10) GIVING second10.MS AS PLAIN")
        msconcat(["first10.MS", "second10.MS"], "combined.MS", concatTime=True)
        spw.close()
        t.close()
        tabledelete("tabtemp")
Example #45
    def FindWeights(self, colname=""):
        ms = table(self.MSName, ack=False)
        ants = table(ms.getkeyword("ANTENNA"), ack=False)
        antnames = ants.getcol("NAME")
        ants.close()
        nAnt = len(antnames)

        u, v, _ = ms.getcol("UVW").T
        A0 = ms.getcol("ANTENNA1")
        A1 = ms.getcol("ANTENNA2")
        tarray = ms.getcol("TIME")
        nbl = np.where(tarray == tarray[0])[0].size
        warnings.filterwarnings("ignore")
        warnings.filterwarnings("default")
        if "RESIDUAL_DATA" not in ms.colnames():
            print('reweight: RESIDUAL_DATA not found. Exiting...')
            sys.exit(1)
        residualdata = ms.getcol("RESIDUAL_DATA")
        flags = ms.getcol("FLAG")
        ms.close()

        # apply uvcut
        c_m_s = 2.99792458e8
        uvlen = np.sqrt(u**2 + v**2) / c_m_s * self.referencefreq
        flags[uvlen > self.uvcut[1], :, :] = True
        flags[uvlen < self.uvcut[0], :, :] = True
        residualdata[flags] = np.nan
        residualdata[residualdata == 0] = np.nan

        # initialise
        nChan = residualdata.shape[1]
        nPola = residualdata.shape[2]
        nt = residualdata.shape[0] // nbl
        residualdata = residualdata.reshape((nt, nbl, nChan, nPola))
        A0 = A0.reshape((nt, nbl))[0, :]
        A1 = A1.reshape((nt, nbl))[0, :]

        # make rms and residuals arrays
        rmsarray = np.zeros((nt, nbl, nChan, 2), dtype=np.complex64)
        residuals = np.zeros_like(rmsarray, dtype=np.complex64)
        rmsarray[:, :, :, 0] = residualdata[:, :, :, 1]
        rmsarray[:, :, :, 1] = residualdata[:, :, :, 2]
        residuals[:, :, :, 0] = residualdata[:, :, :, 0]
        residuals[:, :, :, 1] = residualdata[:, :, :, 3]

        # start calculating the weights
        CoeffArray = np.zeros((nt, nChan, nAnt))
        ant1 = np.arange(nAnt)
        num_sols_time = int(np.ceil(float(nt) / self.ntSol))
        num_sols_freq = int(np.ceil(float(nChan) / self.nchanSol))
        tcellsize = self.ntSol
        for t_i in range(0, nt, self.ntSol):
            if (t_i == nt - self.ntSol) and (nt % self.ntSol > 0):
                tcellsize = nt % self.ntSol
            t_e = t_i + tcellsize
            fcellsize = self.nchanSol
            for f_i in range(0, nChan, self.nchanSol):
                if (f_i == nChan -
                        self.nchanSol) and (nChan % self.nchanSol > 0):
                    fcellsize = nChan % self.nchanSol
                f_e = f_i + fcellsize

                # build weights for each antenna in the current time-frequency block
                for ant in ant1:
                    # set of vis for baselines ant-ant_i
                    set1 = np.where(A0 == ant)[0]
                    # set of vis for baselines ant_i-ant
                    set2 = np.where(A1 == ant)[0]
                    CoeffArray[t_i:t_e, f_i:f_e, ant] = np.sqrt(
                        np.nanmean(
                            np.append(residuals[t_i:t_e, set1, f_i:f_e, :],
                                      residuals[t_i:t_e, set2, f_i:f_e, :]) *
                            np.append(residuals[t_i:t_e, set1, f_i:f_e, :],
                                      residuals[t_i:t_e, set2,
                                                f_i:f_e, :]).conj()) -
                        np.nanstd(
                            np.append(rmsarray[t_i:t_e, set1, f_i:f_e, :],
                                      rmsarray[t_i:t_e, set2, f_i:f_e, :])))
            if not self.quiet:
                PrintProgress(t_i, nt)

        # plot


#         Nr = 8
#         Nc = 8
#         xvals = range(nt)
#         yvals = range(nChan)
#         figSize = [10+3*Nc, 8+2*Nr]
#         figgrid, axa = plt.subplots(Nr, Nc, sharex=True, sharey=True, figsize=figSize)
#         for i in range(nAnt):
#             ax = axa.flatten()[i]
#             bbox = ax.get_window_extent().transformed(figgrid.dpi_scale_trans.inverted())
#             aspect = ((xvals[-1]-xvals[0])*bbox.height)/((yvals[-1]-yvals[0])*bbox.width)
#             im = ax.imshow(CoeffArray[:, :, i].transpose(1,0), origin='lower', interpolation="none", cmap=plt.cm.rainbow, norm=None,
#                             extent=[xvals[0],xvals[-1],yvals[0],yvals[-1]], aspect=str(aspect))
#         figgrid.colorbar(im, ax=axa.ravel().tolist(), use_gridspec=True, fraction=0.02, pad=0.005, aspect=35)
#         figgrid.savefig(self.MSName+'_coef1.png', bbox_inches='tight')
#         plt.close()

        # get rid of NaNs and low values
        CoeffArray[np.isnan(CoeffArray)] = np.inf
        for i in range(nAnt):
            tempars = CoeffArray[:, :, i]
            thres = 0.25 * np.median(tempars[np.where(np.isfinite(tempars))])
            CoeffArray[:, :, i][tempars < thres] = thres
        return CoeffArray
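CoeffArray holds a per-antenna noise estimate for every (time, channel) cell. The excerpt does not show how these become visibility weights; a hedged sketch of one common convention, w = 1/(sigma_i * sigma_j) per baseline, purely as an illustration:

import numpy as np

def coeffs_to_weights(CoeffArray, A0, A1):
    # CoeffArray: (nt, nChan, nAnt) per-antenna noise coefficients
    # A0, A1:     (nbl,) antenna indices of each baseline
    sigma_i = CoeffArray[:, :, A0]      # (nt, nChan, nbl)
    sigma_j = CoeffArray[:, :, A1]
    w = 1.0 / (sigma_i * sigma_j)
    return np.transpose(w, (0, 2, 1))   # (nt, nbl, nChan)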
Example #46
# Flagging on concatenated dataset - also flag low-elevation
logger.info('Flagging...')
MSs = lib_ms.AllMSs(glob.glob('mss_t*/*MS'), s)
MSs.run('DPPP '+parset_dir+'/DPPP-flag.parset msin=$pathMS', \
                log='$nameMS_DPPP_flag.log', commandType='DPPP')

#sys.exit() # for DDFacet

# Create time-chunks
logger.info('Splitting in time...')
for groupname in groupnames:
    tc = initc
    ms = groupname + '/' + groupname + '.MS'
    if not os.path.exists(ms): continue
    t = pt.table(ms, ack=False)
    starttime = t[0]['TIME']
    endtime = t[t.nrows() - 1]['TIME']
    hours = (endtime - starttime) / 3600.
    logger.debug(ms + ' has length of ' + str(hours) + ' h.')

    for timerange in np.array_split(t.getcol('TIME'), round(hours)):
        logger.info('%02i - Splitting timerange %f %f' %
                    (tc, timerange[0], timerange[-1]))
        t1 = t.query('TIME >= ' + str(timerange[0]) + ' && TIME <= ' +
                     str(timerange[-1]),
                     sortlist='TIME,ANTENNA1,ANTENNA2')
        splitms = groupname + '/TC%02i.MS' % tc
        lib_util.check_rm(splitms)
        t1.copy(splitms, True)
        t1.close()
Example #47
def createMS2(MS2name,specdata,time,ra,dec,sysvel,sourcename,project,observer,direction,freq,Tsys,Tsys_time,state_id,path_temp='./temp/',removetemp=True):

	# modules
	import casacore.tables as tb

	# params
	os.system('rm -rf '+path_temp)
	os.system('rm -rf '+MS2name)
	os.system('mkdir -p '+path_temp)

	# make tables
	#makeMAIN(MS2name,path_temp+'MAIN',specdata,time,state_id)
	makeMAIN2(MS2name,specdata,time,state_id)
	makeANTENNA(MS2name+'/ANTENNA',path_temp+'ANTENNA')
	makeDATA_DESCRIPTION(MS2name+'/DATA_DESCRIPTION',path_temp+'DATA_DESCRIPTION')
	makeDOPPLER(MS2name+'/DOPPLER',path_temp+'DOPPLER')
	makeFEED(MS2name+'/FEED',path_temp+'FEED')
	makeFIELD(MS2name+'/FIELD',path_temp+'FIELD',ra,dec,sourcename,time)
	makeFLAG_CMD(MS2name+'/FLAG_CMD',path_temp+'FLAG_CMD')
	makeFREQ_OFFSET(MS2name+'/FREQ_OFFSET',path_temp+'FREQ_OFFSET')
	makeHISTORY(MS2name+'/HISTORY',path_temp+'HISTORY')
	makeOBSERVATION(MS2name+'/OBSERVATION',path_temp+'OBSERVATION',time,project,observer)
	#makePOINTING(MS2name+'/POINTING',path_temp+'POINTING',direction,time)
	makePOINTING2(MS2name+'/POINTING',direction,time)
	makePOLARIZAION(MS2name+'/POLARIZATION',path_temp+'POLARIZATION')
	makePROCESSOR(MS2name+'/PROCESSOR',path_temp+'PROCESSOR')
	makeSOURCE(MS2name+'/SOURCE',path_temp+'SOURCE',ra,dec,sysvel,sourcename,time,freq)
	makeSPECTRAL_WINDOW(MS2name+'/SPECTRAL_WINDOW',path_temp+'SPECTRAL_WINDOW',freq)
	makeSTATE(MS2name+'/STATE',path_temp+'STATE')
	makeSYSCAL(MS2name+'/SYSCAL',path_temp+'SYSCAL',Tsys,Tsys_time,time)
	makeWEATHER(MS2name+'/WEATHER',path_temp+'WEATHER')
	makeLMT_ARRAY(MS2name+'/LMT_ARRAY',path_temp+'LMT_ARRAY')

	# put keywords
	abs_path = os.path.abspath(MS2name)

	keywords = {'MS_VERSION': 2.0,
				'ANTENNA': 'Table: '+abs_path+'/ANTENNA',
				'DATA_DESCRIPTION': 'Table: '+abs_path+'/DATA_DESCRIPTION',
				'DOPPLER': 'Table: '+abs_path+'/DOPPLER',
				'FEED': 'Table: '+abs_path+'/FEED',
				'FIELD': 'Table: '+abs_path+'/FIELD',
				'FLAG_CMD': 'Table: '+abs_path+'/FLAG_CMD',
				'FREQ_OFFSET': 'Table: '+abs_path+'/FREQ_OFFSET',
				'HISTORY': 'Table: '+abs_path+'/HISTORY',
				'OBSERVATION': 'Table: '+abs_path+'/OBSERVATION',
				'POINTING': 'Table: '+abs_path+'/POINTING',
				'POLARIZATION': 'Table: '+abs_path+'/POLARIZATION',
				'PROCESSOR': 'Table: '+abs_path+'/PROCESSOR',
				'SOURCE': 'Table: '+abs_path+'/SOURCE',
				'SPECTRAL_WINDOW': 'Table: '+abs_path+'/SPECTRAL_WINDOW',
				'STATE': 'Table: '+abs_path+'/STATE',
				'SYSCAL': 'Table: '+abs_path+'/SYSCAL',
				'WEATHER': 'Table: '+abs_path+'/WEATHER',
				'LMT_ARRAY': 'Table: '+abs_path+'/LMT_ARRAY',
				}
	returnedTable = tb.table(MS2name,readonly=False)
	returnedTable.putkeywords(keywords)

	returnedTable.flush(recursive=True)
	returnedTable.close()

	if removetemp:
	    os.system('rm -rf '+path_temp)
Example #48
def selfcal(vis,minuvw,robust,model='MODEL',outcal_root='',max_sol=600.0,init_sol=30.0,\
            incol='DATA',outcol='DATA',caltype='P',nchan=0):
    if not model:
        imaging(vis, 1000, 10, minuvw, robust)
    # need a predict step to deal with sourcedb here if necessary
    ptant = casatb.table(vis + '/ANTENNA')
    antenna_list = np.array([], dtype='S')
    for i in ptant.select('NAME'):
        antenna_list = np.append(antenna_list, i.values())
    nant = len(antenna_list)
    if caltype == 'P':
        sol_int_range = np.arange(
            np.ceil(np.log(max_sol / init_sol) / np.log(3.)))
        sol_int_range = np.ceil(init_sol * 3.**sol_int_range)
        nsol = len(sol_int_range)
        coh = CCRIT * np.ones((nsol, nant))
        for i in range(nsol):
            solint = sol_int_range[i]
            outcal_root = outcal_root if len(outcal_root) else vis
            outcal = outcal_root + '_c%d.h5' % i
            loop3log(
                vis,
                '\n--- Beginning pass with solint %.1f sec ---' % (solint))
            calib (vis, solint=solint, outcal=outcal, incol=incol, \
                                 outcol=outcol,solmode='P',tsamp=TSAMP,nchan=nchan)
            snplt(vis, htab=outcal, outpng=outcal)
            coh[i] = coherence_metric(outcal, antenna_list)
            loop3log(vis, '\nCoherences by antenna:')
            for j in range(nant):
                loop3log(vis, '%.2f ' % (coh[i, j]), cret=not ((j + 1) % 10))
            loop3log(vis, ' ')
            if len(coh[i][coh[i] >= CCRIT]) == 0:  # all coherent
                break

    # For each antenna in the antenna list, find the selfcal table with
    # the shortest solution interval that contains coherent solutions. If
    # there is no such table, report -1 in order to signal that they should
    # all be set to zero.
        ncoh = np.ones(nant, dtype=int) * -1
        allcoh = np.ones(nant, dtype=float) * CCRIT
        for i in range(nant):
            try:
                ncoh[i] = np.min(np.ravel(np.argwhere(coh[:, i] < CCRIT)))
                allcoh[i] = coh[:, i][ncoh[i]]
            except ValueError:  # no coherent solution for this antenna
                pass
        loop3log(vis, '\nCombined coherences: ')
        for i in range(nant):
            loop3log(vis, '%.2f ' % (allcoh[i]), cret=not ((i + 1) % 10))
        loop3log(vis, '\nSolution number with first coherence (-1=no coh):')
        for i in range(nant):
            loop3log(vis, '%d ' % (ncoh[i]), cret=not ((i + 1) % 10))
        loop3log(vis, ' ')
        loop3log(vis, ' ----- Starting edit of this solution ------')
        # For each selfcal table containing the shortest solution interval with
        # coherence on some antennas, replace the entries in the first selfcal
        # table with the interpolated values from that antenna
        for i in range(1, coh.shape[0]):
            iant = antenna_list[ncoh == i]
            loop3log(
                vis,
                'Editing %d antennas to h5parm number %d' % (len(iant), i))
            if len(iant):
                clcal (vis,outcal_root+'_c0.h5',outcal_root+'_c%d.h5'%i,\
                                     ant_interp=iant)
    # For each antenna without any coherence at all, zero the phase
    # solutions for that antenna
        iant = antenna_list[ncoh == -1]
        if len(iant):
            loop3log(vis, 'Trying to zero antennas: ', cret=False)
            for i in range(len(iant)):
                loop3log(vis, '%s ' % iant[i], cret=False)
            loop3log(vis, '\n')
            zerosol(vis, outcal_root + '_c0.h5', iant)
    else:  # amplitude selfcal: only one interval
        outcal_root = outcal_root if len(outcal_root) else vis
        outcal = outcal_root + '_c0.h5'
        calib (vis, solint=init_sol, outcal=outcal, incol=incol, \
                                 outcol=outcol,solmode='A', nchan=nchan)
        snplt(vis, htab=outcal, outpng=outcal, soltab='amplitude000')
        allcoh = coherence_metric(outcal, antenna_list)
        loop3log(vis, 'Coherences: \n')
        for i in range(nant):
            loop3log(vis, '%.2f ' % (allcoh[i]), cret=not ((i + 1) % 10))
        loop3log(vis, ' ')
        # For each antenna without any coherence at all, zero the amp/phase
        # solutions for that antenna
        # corrected bug here (Neal) - used to be < so everything was zeroed
        iant = antenna_list[allcoh >= CCRIT]
        print('******', allcoh)
        print('******>>> ', iant)
        if len(iant):
            zerosol(vis, outcal_root + '_c0.h5', ant=iant)
    # find the maximum baseline length with coherent cal signal
    cohlength = getcoh_baseline(antenna_list, allcoh, CCRIT)
    return allcoh, cohlength
Example #49
def makePOINTING2(tablename,direction,time,nbeam=1,texp=0.2):

	# modules
	import casacore.tables as tb

	# params
	direction_rad = direction/180.*np.pi
	n_direction = direction.shape[0]
	time_end   = time.max()

	# make table
	colnames = ['DIRECTION',
				'ANTENNA_ID',
				'INTERVAL',
				'NAME',
				'NUM_POLY',
				'TARGET',
				'TIME',
				'TIME_ORIGIN',
				'TRACKING']
	colkeywords = [{'MEASINFO': {'Ref': 'J2000', 'type': 'direction'},'QuantumUnits': np.array(['rad', 'rad'],dtype='|S4')},
				   {},
				   {'QuantumUnits': np.array(['s'],dtype='|S2')},
				   {},{},
				   {'MEASINFO': {'Ref': 'J2000', 'type': 'direction'},'QuantumUnits': np.array(['rad', 'rad'],dtype='|S4')},
				   {'MEASINFO': {'Ref': 'UTC', 'type': 'epoch'}, 'QuantumUnits': np.array(['s'],dtype='|S2')},
				   {'MEASINFO': {'Ref': 'UTC', 'type': 'epoch'}, 'QuantumUnits': np.array(['s'],dtype='|S2')},
				   {}
				  ]

	#ndims = [2,1,1,1,1,2,1,1,1]
	#isarrays = [True,False,False,False,False,True,False,False,False]
	valuetypes = ['double','int','double','string','int','double','double','double','bool']
	ndims = [2,1,1,1,1,-1,1,1,1]
	descs = []

	for colname,colkeyword,valuetype,ndim in zip(colnames,colkeywords,valuetypes,ndims):
		if (colname=='DIRECTION' or colname=='TARGET'):
			descs.append(tb.makearrcoldesc(colname,0.0,datamanagertype='StandardStMan',datamanagergroup='StandardStMan',ndim=ndim,keywords=colkeyword,valuetype=valuetype,options=0))
		else:
			descs.append(tb.makescacoldesc(colname,0.0,datamanagertype='StandardStMan',datamanagergroup='StandardStMan',keywords=colkeyword,valuetype=valuetype))

	td = tb.maketabdesc(descs=descs)

	returned_table = tb.table(tablename,tabledesc=td,nrow=n_direction,readonly=False)

	value  = np.zeros([n_direction,1,2],dtype='float64')
	value[:,0] = direction_rad.copy()
	returned_table.putcol('DIRECTION',value)

	value = np.zeros([n_direction],dtype='int32')
	returned_table.putcol('ANTENNA_ID',value)
	returned_table.putcol('NUM_POLY',value)

	value = np.zeros([n_direction],dtype='float64')
	returned_table.putcol('TIME_ORIGIN',value)

	value = np.zeros([n_direction],dtype='bool')
	returned_table.putcol('TRACKING',value)

	value = np.full([n_direction],texp,dtype='float64')
	returned_table.putcol('INTERVAL',value)

	value = time
	returned_table.putcol('TIME',value)

	returned_table.flush()
	returned_table.close()
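A hedged usage sketch for the POINTING builder above, with purely synthetic directions and timestamps (the target table path is hypothetical):

import numpy as np

# Ten pointings along a short strip in degrees, with fake MJD-second stamps
# spaced by the 0.2 s default integration.
direction = np.column_stack([np.linspace(83.0, 83.1, 10), np.full(10, 22.0)])
time = 4.9e9 + 0.2 * np.arange(10)
makePOINTING2('example.ms/POINTING', direction, time)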
Example #50
def main (vis,strategy='P30,P30,P30,A500,A450,A400',startmod='',ith=5.0,\
          bandwidth='8MHz',goodness=2.,minuvw=50.0,robust=-1.0):
    ## format arguments
    strategy = str(strategy)
    startmod = str(startmod)
    ith = float(ith)
    bandwidth = str(bandwidth)
    minuvw = float(minuvw) * 1000.0  # convert km -> m
    robust = float(robust)
    ## process arguments
    vis = vis.rstrip('/')
    vis = vis.split('/')[-1]
    strategy = strategy.split(',')
    bw_val = ''
    bw_unit = ''
    for c in bandwidth:
        try:
            float(c)
            bw_val = bw_val + c
        except ValueError:
            bw_unit = bw_unit + c
    if bw_unit == 'MHz':
        bw_val = float(bw_val) * 1e6
    ## get bandwidth of vis
    spec_info = casatb.table(vis + '::SPECTRAL_WINDOW')
    total_bw = spec_info.getcol('TOTAL_BANDWIDTH')[0]
    num_chan = spec_info.getcol('NUM_CHAN')[0]
    spec_info.close()
    if total_bw < bw_val:
        wsclean_chans = 0
        mfs = ''
        nchan = 0
    else:
        wsclean_chans = int(np.ceil(total_bw / bw_val))
        mfs = '-MFS'
        nchan = num_chan // wsclean_chans


    ## make a working directory and go there
    # note: str.rstrip strips a character set, not a suffix, so use endswith instead
    tmp_dir = 'loop3_' + (vis[:-3] if vis.lower().endswith('.ms') else vis)
    os.system('mkdir %s' % tmp_dir)
    os.chdir(tmp_dir)
    os.system('mv ../%s .' % vis)
    import bdsf
    prevstat = 0.0
    cohlength = 2.0E6
    strategy_type = []
    for i in strategy:
        strategy_type.append(i[0])
    ploop, nloop, snver = strategy_type.count('P'), len(strategy), 0
    #
    # PHASE CALIBRATION - run through ploop iterations, exiting if we have convergence
    #
    for iloop in range(ploop):
        fitsmask = vis + '_%02d-mask.fits' % (iloop - 1) if iloop else ''
        if startmod == '' or iloop:
            pstr = '******* PHASE LOOP %d running wsclean ************' % iloop
            loop3log(vis, pstr + '\n')
            imagr(vis,minuvwm=minuvw,robust=robust,cellsize='0.05asec',domultiscale=True,\
                  outname=vis+'_%02d'%iloop,channelsout=wsclean_chans,\
                  fitsmask=fitsmask,dolocalrms=True,maxuvwm=cohlength)
        else:
            # Need something here to produce an image from startmod
            pass
        # check if there's a source
        thisstat = measure_statistic2(vis + '_%02d%s-image.fits' %
                                      (iloop, mfs))
        if thisstat < goodness:
            pstr = 'SNR is %f, breaking out of loop.' % thisstat
            loop3log(vis, pstr + '\n')
            montage_plot('*MFS-image.fits',
                         imscale=0.65,
                         nup='4x2',
                         plot_resid=False)
            return (0)
        pstr = '******* PHASE LOOP %d making mask %s_%02d%s-image.fits ********' % (
            iloop, vis, iloop, mfs)
        loop3log(vis, pstr + '\n')
        stdout = sys.stdout
        sys.stdout = open('bdsf_chunterings', 'a')
        img = bdsf.process_image('%s_%02d%s-image.fits' % (vis, iloop, mfs),
                                 atrous_do=True,
                                 thresh_isl=ith)
        sys.stdout.close()
        sys.stdout = stdout
        img.export_image(img_type='island_mask',
                         outfile='%s_%02d-mask.fits' % (vis, iloop))
        # exit loop if clean finishing
        pstr = '******* PHASE LOOP %d goodness stat %f ************' % (
            iloop, thisstat)
        loop3log(vis, pstr + '\n')
        if thisstat - prevstat < 0.01:
            pstr = '****** EXITING PHASE CAL with diff %f *********' % (
                thisstat - prevstat)
            loop3log(vis, pstr + '\n')
            break
        else:
            prevstat = thisstat
            imagr(vis,minuvwm=minuvw,robust=robust,dopredict=True,fitsmask=fitsmask,\
                  autothreshold=3,dolocalrms=True,\
                  outname=vis+'_%02d%s'%(iloop,mfs))
        pstr = '******* PHASE LOOP %d making new cal file %s ************' % (
            iloop, vis + '_%02d' % iloop)
        loop3log(vis, pstr + '\n')
        caltype, sol0 = strategy[iloop][0], float(strategy[iloop][1:])
        coh, cohlength = selfcal(vis,minuvw,robust,model='MODEL',incol='DATA',\
            outcol='CORRECTED_DATA',outcal_root=vis+'_%02d'%iloop,\
            caltype=caltype,init_sol=sol0,nchan=nchan)
        snver = iloop
        pstr='******** END PHASE LOOP %d - coherence on %.1f km **********' % \
              (iloop,cohlength/1000.)
        loop3log(vis, pstr + '\n')
    # Exit at this point if we are not doing amplitude cal
    if ploop == nloop:
        sys.exit()
    #
    # If we are doing amplitude calibration, we now need to apply the
    # calibration and write a new MS with a DATA column
    visA = vis + '_A'
    # delete all existing files beginning with vis+'_A'
    os.system('rm -fr %s*' % visA)
    pstr = '****** APPLYING CALIBRATION TABLE %d\n' % snver
    loop3log(vis, pstr + '\n')
    applycal_split(vis, visA, 'sol000', '%s_%02d_c0.h5' % (vis, snver))
    init_fitsmask = vis + '_%02d-mask.fits' % iloop
    init_img = vis + '_%02d%s-image.fits' % (iloop, mfs)
    pred_img = vis + '_%02d%s' % (iloop, mfs)
    for iloop in range(ploop, nloop):
        fitsmask = init_fitsmask if iloop == ploop else visA + '_%02d-mask.fits' % (
            iloop - 1)
        pstr = '******* AMPLITUDE LOOP %d running wsclean ************' % iloop
        loop3log(vis, pstr + '\n')
        imagr(visA,minuvwm=minuvw,robust=robust,cellsize='0.05asec',domultiscale=True,\
                  outname=visA+'_%02d'%iloop,channelsout=wsclean_chans,\
                  fitsmask=fitsmask,dolocalrms=True,maxuvwm=cohlength)
        ## check if there's a source
        thisstat = measure_statistic2(visA + '_%02d%s-image.fits' %
                                      (iloop, mfs))
        if thisstat < goodness:
            pstr = 'SNR is %f, breaking out of loop.' % thisstat
            loop3log(vis, pstr + '\n')
            montage_plot('*MFS-image.fits',
                         imscale=0.65,
                         nup='4x2',
                         plot_resid=True)
            return (0)
        image_bdsf = '%s_%02d%s-image.fits' % (visA, iloop, mfs)
        pstr = '******* AMPLITUDE LOOP %d making mask %s_%02d%s-image.fits ************' % (
            iloop, visA, iloop, mfs)
        loop3log(vis, pstr + '\n')
        img = bdsf.process_image(image_bdsf, atrous_do=True, thresh_isl=ith)
        img.export_image(img_type='island_mask',
                         outfile='%s_%02d-mask.fits' % (visA, iloop))
        pstr = '******* AMPLITUDE LOOP %d goodness stat %f ************' % (
            iloop, thisstat)
        loop3log(vis, pstr + '\n')
        if iloop != ploop and thisstat - prevstat < 0.01:
            pstr = '****** EXITING AMPLITUDE CAL with diff %f *********' % (
                thisstat - prevstat)
            loop3log(vis, pstr + '\n')
            break
        else:
            prevstat = thisstat
            imagr(visA,minuvwm=minuvw,dopredict=True,fitsmask=fitsmask,\
                  autothreshold=3,dolocalrms=True,robust=robust,\
                  outname=visA+'_%02d%s'%(iloop,mfs))
        pstr = '******* AMPLITUDE LOOP %d making new cal file %s ************' % (
            iloop, visA + '_%02d' % iloop)
        loop3log(vis, pstr + '\n')
        caltype, sol0 = strategy[iloop][0], float(strategy[iloop][1:])
        coh,cohlength = selfcal(visA,minuvw,robust,model='MODEL',incol='DATA',\
            outcol='CORRECTED_DATA',outcal_root=visA+'_%02d'%iloop,\
            caltype=caltype,init_sol=sol0,nchan=nchan)
        pstr='******** END AMPLITUDE LOOP %d - coherence on %.1f km **********' % \
                      (iloop,cohlength/1000.)
        loop3log(vis, pstr + '\n')

    fitsmask = init_fitsmask if iloop == ploop else visA + '_%02d-mask.fits' % (
        iloop - 1)
    imagr(visA,minuvwm=minuvw,cellsize='0.05asec',domultiscale=True,\
          outname=visA+'_final',channelsout=wsclean_chans,robust=robust,\
          fitsmask=fitsmask,dolocalrms=True)

    ## make a model from the final image
    final_im = glob.glob('*final*image.fits')
    if len(final_im) > 1:
        tmp = [a for a in final_im if 'MFS' in a]
        final_im = tmp
    img = bdsf.process_image(final_im[0], atrous_do=True, thresh_isl=ith)
    skyfile = final_im[0].replace('fits', 'skymodel')
    img.write_catalog(outfile=skyfile,
                      bbs_patches='single',
                      catalog_type='gaul',
                      format='bbs')
    ## convert it to a sourcedb
    ss = "makesourcedb in=%s out=%s format='<'" % (
        skyfile, skyfile.replace('skymodel', 'sky'))
    os.system(ss)

    ## plot things like solutions
    make_plots(vis)
    make_plots(visA)

    ## If we got to this point, self-cal has successfully completed
    # montage_plot( '*MFS-image.fits', imscale=0.65, nup='4x2', plot_resid=True)

    pngfile, h5files = cleanup(vis)

    for h5file in h5files:
        os.system('mv %s ../' % h5file)
    # os.system('mv *.pdf ../')
    # os.system('mv *.png ../')
    os.system('mv *skymodel ../')
    os.system('mv *sky ../')
    os.system('mv %s ../' % vis)

    print('Output calibration tables', h5files)
    #return pngfile,h5files
    return 0
Example #51
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--xm', required=True)
    parser.add_argument('--ms', required=True)
    parser.add_argument('--center', required=True)
    parser.add_argument('--radius',
                        type=float,
                        default=20,
                        help="TEC map radius (degrees)")
    parser.add_argument('--delta',
                        type=float,
                        default=0.25,
                        help="Pixel size (degrees)")
    args = parser.parse_args()

    # Parse image center value
    center = SkyCoord(args.center, unit=(units.hourangle, units.degree))

    # Open measurement set, and find central time and frequency
    ms = table(args.ms)
    times = sorted(set(ms.getcol('TIME_CENTROID')))
    midtime = (max(times) +
               min(times)) / 2  # We just start with the central time
    nearesttime = min(times, key=lambda x: abs(x - midtime))
    freqs = table(args.ms + '::SPECTRAL_WINDOW').getcell(
        'CHAN_FREQ', 0)  # Assume just one SPW entry
    midfreq = (max(freqs) + min(freqs)) / 2

    print("Calculating TEC values for frequency %f and time %f" %
          (midfreq, times[0]),
          file=sys.stderr)

    # prototype = fits.open(args.prototype)[0]
    # header = prototype.header
    # data = prototype.data
    # wcs = WCS(prototype)

    # # Calculate image dimension
    # # We want our TEC image to be at least as large as the original image
    # # Calculating this is yucky due to projection warping
    # width, height = data.shape[3], data.shape[2]  # [ stokes, freq, dec (y), ra (x) ]
    # center_x, center_y = width // 2, height // 2
    # center_ra, center_dec, _, _ = wcs.all_pix2world([[center_x, center_y, 0, 0]], 0)[0]

    # # Calculate maximum angular extent of image
    # corners = wcs.all_pix2world([
    #     [0, 0, 0, 0],
    #     [width, 0, 0, 0],
    #     [0, height, 0, 0],
    #     [width, height, 0, 0]
    # ], 0)
    # max_ra = max(abs(corners.T[0] - center_ra))
    # max_dec = max(abs(corners.T[1] - center_dec))

    # width, height = int((2 * max_ra) // args.delta) + 1, int((2 * max_dec) // args.delta) + 1
    # center_x, center_y = int(width // 2), int(height // 2)

    # Provision TEC fits file and set up headers
    width, height = int((2 * args.radius) // args.delta), int(
        (2 * args.radius) // args.delta)
    center_x, center_y = int(width // 2), int(height // 2)

    print("Creating TEC image of dimesions (%d, %d)" % (width, height),
          file=sys.stdout)

    data = np.zeros((1, 1, 128, height, width),
                    dtype=float)  # [time, frequency, antennas, dec, ra]
    Atec = fits.PrimaryHDU(data)

    Atec.header['CTYPE1'] = 'RA---SIN'
    Atec.header['CRPIX1'] = center_x
    Atec.header['CRVAL1'] = center.ra.deg
    Atec.header['CDELT1'] = args.delta
    Atec.header['CUNIT1'] = 'deg'

    Atec.header['CTYPE2'] = 'DEC--SIN'
    Atec.header['CRPIX2'] = center_y
    Atec.header['CRVAL2'] = center.dec.deg
    Atec.header['CDELT2'] = args.delta
    Atec.header['CUNIT2'] = 'deg'

    Atec.header['CTYPE3'] = 'ANTENNA'
    Atec.header['CRPIX3'] = 1
    Atec.header['CRVAL3'] = 0

    Atec.header['CTYPE4'] = 'FREQ'
    Atec.header['CRPIX4'] = 1
    Atec.header['CRVAL4'] = midfreq
    Atec.header['CDELT4'] = 1
    Atec.header['CUNIT4'] = 'Hz'

    Atec.header['CTYPE5'] = 'TIME'
    Atec.header['CRPIX5'] = 1
    Atec.header['CRVAL5'] = midtime  # FIXME
    Atec.header['CDELT5'] = 1

    wcs = WCS(Atec.header)

    # Process crossmatched sources
    xm = Table.read(args.xm)
    model_positions = np.radians(np.array([xm['model_ra'], xm['model_dec']]))
    source_positions = np.radians(np.array([xm['source_ra'],
                                            xm['source_dec']]))
    offsets = np.sin(model_positions - source_positions)

    # Filter out extreme offsets
    separations = angular_separation(model_positions[0], model_positions[1],
                                     source_positions[0], source_positions[1])
    exclusions = separations > 2 * np.median(separations)
    print("Excluding %d / %d extremal offsets" %
          (sum(exclusions), len(exclusions)))
    model_positions = model_positions[:, ~exclusions]
    source_positions = source_positions[:, ~exclusions]
    offsets = offsets[:, ~exclusions]

    model_positions_lm = radec_to_lm(model_positions[0], model_positions[1],
                                     center.ra.rad, center.dec.rad)

    # Get oversampled l,m values for TEC file
    xx, yy = np.meshgrid(range(0, 3 * width), range(0, 3 * height))
    pixels = np.array([xx.flatten(), yy.flatten()]).T

    ret = wcs.all_pix2world([[x / 3 - 1 / 3, y / 3 - 1 / 3, 0, 0, 0]
                             for x, y in pixels], 0)
    grid_lm = radec_to_lm(np.radians(ret.T[0]), np.radians(ret.T[1]),
                          center.ra.rad, center.dec.rad)

    # Compute interpolated position offsets
    delta_l = griddata(model_positions_lm.T,
                       offsets[0],
                       grid_lm.T,
                       fill_value=0)
    delta_m = griddata(model_positions_lm.T,
                       offsets[1],
                       grid_lm.T,
                       fill_value=0)

    delta_l = np.reshape(delta_l, (3 * height, 3 * width))  # [ dec, ra ]
    delta_m = np.reshape(delta_m, (3 * height, 3 * width))

    # Gaussian smooth
    delta_l = gaussian_filter(delta_l, 3, mode='constant', cval=0)
    delta_m = gaussian_filter(delta_m, 3, mode='constant', cval=0)

    # Downsample
    delta_l = delta_l[1::3, 1::3]
    delta_m = delta_m[1::3, 1::3]

    # Create downsampled grid
    xx, yy = np.meshgrid(range(0, width), range(0, height))
    pixels = np.array([xx.flatten(), yy.flatten()]).T
    ret = wcs.all_pix2world([[x, y, 0, 0, 0] for x, y in pixels], 0)
    grid_lm = radec_to_lm(np.radians(ret.T[0]), np.radians(ret.T[1]),
                          center.ra.rad, center.dec.rad)

    # Plot interpolation
    plt.figure()
    plt.quiver(model_positions_lm[0],
               model_positions_lm[1],
               offsets[0],
               offsets[1],
               angles='xy',
               scale=0.01,
               scale_units='xy')
    plt.quiver(grid_lm[0],
               grid_lm[1],
               delta_l.flatten(),
               delta_m.flatten(),
               angles='xy',
               scale=0.01,
               scale_units='xy',
               color='gray')
    plt.gca().invert_xaxis()
    plt.show()

    # Create debugging fits files
    for delta, name in [(delta_l, 'delta-l'), (delta_m, 'delta-m')]:
        hdu = fits.PrimaryHDU(delta)
        hdu.header = Atec.header.copy()
        hdu.writeto(name + '.fits', overwrite=True)

    for i, time in enumerate([midtime]):
        for ant in range(1, 128):
            sys.stderr.write("\rCalculating antenna %d..." % ant)
            sys.stderr.flush()

            tbl = taql(
                "select UVW from $ms where TIME_CENTROID = $nearesttime and ANTENNA1 = 0 and ANTENNA2 = $ant"
            )

            if len(tbl) == 0:
                continue
            elif len(tbl) > 1:
                print("Oopsie doodle!")
                exit(1)

            u, v = tbl.getcol('UVW')[0][[True, True, False
                                         ]] / (299792458 / midfreq)
            phase = 2 * np.pi * (u * delta_l + v * delta_m)
            data[i, 0, ant, :, :] = phase / -8.44797245E9 * midfreq

    print(" Done.", file=sys.stderr)

    Atec.data = data
    Atec.writeto('tec.fits', overwrite=True)
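The antenna loop converts a geometric phase 2*pi*(u*delta_l + v*delta_m) into TEC by inverting the dispersive relation phase = -8.44797245e9 * TEC / freq. A hedged round-trip check of that arithmetic:

import numpy as np

K = -8.44797245e9        # rad Hz per TECU, the dispersive constant used above
midfreq = 150e6          # Hz, hypothetical mid frequency

phase = 0.3                                   # some dispersive phase in radians
tec = phase / K * midfreq                     # conversion as in the loop above
assert np.isclose(K * tec / midfreq, phase)   # inverting recovers the phase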
Example #52
def check_phaseref_in_MS(mspath, phaseref, sep_threshold=1., frame='icrs', ack=False):
    """Check if a given phase-reference point is amongst the phase centre of an MS for any
    field and direction existing in that MS

    This function is needed as the phase centre referencing is NOT clear in the MS format
    for ASKAP observations.

    Parameters
    ==========
    mspath: str
        The input MS path

    phaseref: Astropy coordinate
        Astropy SkyCoord object with the same frame as the :param frame: parameter 

    sep_threshold: float
        Maximum allowed separation between the given phaseref and the phasecentres in the MS.
        The separation is defined in arcseconds. If the phaseref and a phasecentre are within
        this separation, it counts as a match

    frame: str, optional
        Reference frame used to calculate the Astropy phase centre. Default: 'icrs'

    ack: bool, optional
        Enabling messages of successful interaction with the MS
        e.g. successful opening of a table
    
    Returns
    =======
    IDs: list of lists
        If the given phase reference matches at least one of the
        phasecentres in the MS, the field index and direction index are returned as a list.
        Else an empty list is returned.

        The returned indices are the field followed by direction for each match

    """
    assert isinstance(phaseref, SkyCoord), 'Input phaseref is not an astropy SkyCoord object!'

    MS = ds.msutil.create_MS_object(mspath, ack=ack)

    fields_table = casatables.table(mspath + '/FIELD', ack=ack)    

    #Get the reference equinox from the table keywords
    equinox = fields_table.getcolkeyword('PHASE_DIR','MEASINFO')['Ref'] 

    #Only can convert from radians
    assert fields_table.getcolkeyword('PHASE_DIR','QuantumUnits')[0] == 'rad', 'Phase centre direction is not in radians!'

    IDs = []

    for d in range(0,np.shape(fields_table.getcol('PHASE_DIR'))[0]):
        for f in range(0,np.shape(fields_table.getcol('PHASE_DIR'))[1]):
            pc = fields_table.getcol('PHASE_DIR')[d,f, :]

            if phaseref.separation(SkyCoord(ra=pc[0] * u.rad, dec=pc[1] * u.rad, frame=frame, equinox=equinox)).arcsecond <= sep_threshold:
                IDs.append([f,d])

    MS.close()

    return IDs
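A hedged usage sketch (MS path and coordinates are hypothetical):

from astropy.coordinates import SkyCoord
import astropy.units as u

phaseref = SkyCoord(ra=187.5 * u.deg, dec=-45.0 * u.deg,
                    frame='icrs', equinox='J2000')
matches = check_phaseref_in_MS('example.ms', phaseref, sep_threshold=1.)
if matches:
    print('Matching [field, direction] pairs:', matches)
else:
    print('No phase centre within 1 arcsec of the reference point')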
Example #53
def copy_column_to_ms(ms,
                      inputcol,
                      outputcol,
                      ms_from=None,
                      use_compression=False):
    """
    Copies one column to another, within an MS file or between two MS files

    Parameters
    ----------
    ms : str
        MS file receiving copy
    inputcol : str
        Column name to copy from
    outputcol : str
        Column name to copy to
    ms_from : str, optional
        MS file to copy from. If None, the column is copied internally
    use_compression : bool, optional
        If True, create the output column with Dysco compression

    """
    t = pt.table(ms, readonly=False, ack=False)
    if ms_from is not None:
        tf = pt.table(ms_from, readonly=False, ack=False)
        data = tf.getcol(inputcol)
        desc = tf.getcoldesc(inputcol)
    else:
        data = t.getcol(inputcol)
        desc = t.getcoldesc(inputcol)

    # Add the output column if needed
    if outputcol not in t.colnames():
        if use_compression:
            # Set DyscoStMan to be storage manager for DATA and WEIGHT_SPECTRUM
            # We use a visibility bit rate of 16 and truncation of 1.5 sigma to keep the
            # compression noise below ~ 0.01 mJy, as estimated from Fig 4 of
            # Offringa (2016). For the weights, we use a bit rate of 12, as
            # recommended in Sec 4.4 of Offringa (2016)
            desc['name'] = outputcol
            dmi = {
                'SPEC': {
                    'dataBitCount': numpy.uint32(16),
                    'distribution': 'TruncatedGaussian',
                    'distributionTruncation': 1.5,
                    'normalization': 'RF',
                    'weightBitCount': numpy.uint32(12)
                },
                'NAME': '{}_dm'.format(outputcol),
                'SEQNR': 1,
                'TYPE': 'DyscoStMan'
            }
            desc['option'] = 1  # make a Direct column
            t.addcols(desc, dmi)
        else:
            desc['name'] = outputcol
            t.addcols(desc)

    if use_compression:
        # Replace flagged values with NaNs before compression
        flags = t.getcol('FLAG')
        flagged = numpy.where(flags)
        data[flagged] = numpy.nan

    t.putcol(outputcol, data)
    t.flush()
    t.close()
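Two hedged usage sketches with hypothetical paths: an in-place copy that creates a Dysco-compressed column, and a copy between two MS files with identical row layout:

# Duplicate DATA into a new compressed CORRECTED_DATA column:
copy_column_to_ms('example.ms', 'DATA', 'CORRECTED_DATA', use_compression=True)

# Pull DATA from a second measurement set into MODEL_DATA:
copy_column_to_ms('example.ms', 'DATA', 'MODEL_DATA', ms_from='model.ms')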
Example #54
def get_MS_phasecentre_all(mspath, frame='icrs', ack=False):
    """Get the list of the phase centres for each field and direction of the MS
    and return a list of astropy skycoord values

    Both field and direction IDs are expected to increment from zero, and the maximum
    ID can be the number of unique fields/dds. However, less than the maximum number of
    valid IDs can occurs and this code can handle that.

    e.g. one field and one direction ID, but in the PHASE_DIR table, 
    phase centre for two directions are existing, the code chooses the valid one

    Parameters
    ==========

    mspath: str
        The input MS path

    frame: str, optional
        Reference frame used to calculate the Astropy phase centre. Default: 'icrs'

    ack: bool, optional
        Enabling messages of successful interaction with the MS
        e.g. successful opening of a table
    
    Returns
    =======
    phasecentres: list of lists containing Astropy skycoords
        A list of the phasecentres for each field and direction in the MS as a list of lists
        i.e. each element is a list

    """
    MS = ds.msutil.create_MS_object(mspath, ack=ack)

    #Get the number of unique fields and data descriptions (e.g. footprints)
    fields = np.unique(MS.getcol('FIELD_ID'))
    dds = np.unique(MS.getcol('DATA_DESC_ID'))

    fields_table = casatables.table(mspath + '/FIELD', ack=ack)    

    phasecentres = []

    #Get the reference equinox from the table keywords
    equinox = fields_table.getcolkeyword('PHASE_DIR','MEASINFO')['Ref'] 

    #Only can convert from radians
    assert fields_table.getcolkeyword('PHASE_DIR','QuantumUnits')[0] == 'rad', 'Phase centre direction is not in radians!'

    i = 0
    j = 0
    for field in range(0,np.size(fields)):
        #The number and referencing of fields can be messy
        if np.shape(fields_table.getcol('PHASE_DIR'))[1] > np.size(fields):
            field_ID = fields[i]
        else:
            field_ID = field

        directions = []

        for dd in range(0,np.size(dds)):
            #Same for the DDs as the fields
            if np.shape(fields_table.getcol('PHASE_DIR'))[0] > np.size(dds):
                dd_ID = dds[i]
            else:
                dd_ID = dd

            pc = fields_table.getcol('PHASE_DIR')[dd_ID,field_ID, :]

            #Convert to astropy coordinates
            directions.append(SkyCoord(ra=pc[0] * u.rad, dec=pc[1] * u.rad, frame=frame, equinox=equinox))

            j += 1
    
        phasecentres.append(directions)

        i += 1

    MS.close()

    return phasecentres
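A hedged usage sketch (hypothetical path):

phasecentres = get_MS_phasecentre_all('example.ms')
for field_id, directions in enumerate(phasecentres):
    for dd_id, pc in enumerate(directions):
        print(field_id, dd_id, pc.to_string('hmsdms'))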
Example #55
def main(input_image, input_skymodel_pb, input_bright_skymodel_pb, output_root,
         vertices_file, threshisl=5.0, threshpix=7.5, rmsbox=(150, 50),
         rmsbox_bright=(35, 7), adaptive_rmsbox=True,
         use_adaptive_threshold=False, adaptive_thresh=75.0, beamMS=None,
         peel_bright=False):
    """
    Filter the input sky model so that its sources lie in islands in the image

    Parameters
    ----------
    input_image : str
        Filename of input image to use to detect sources for filtering. Ideally, this
        should be a flat-noise image (i.e., without primary-beam correction)
    input_skymodel_pb : str
        Filename of input makesourcedb sky model, with primary-beam correction
    input_bright_skymodel_pb : str
        Filename of input makesourcedb sky model of bright sources only, with primary-
        beam correction
    output_root : str
        Root of filename of output makesourcedb sky models. Output filenames will be
        output_root+'.apparent_sky.txt' and output_root+'.true_sky.txt'
    vertices_file : str
        Filename of file with vertices
    threshisl : float, optional
        Value of thresh_isl PyBDSF parameter
    threshpix : float, optional
        Value of thresh_pix PyBDSF parameter
    rmsbox : tuple of floats, optional
        Value of rms_box PyBDSF parameter
    rmsbox_bright : tuple of floats, optional
        Value of rms_box_bright PyBDSF parameter
    adaptive_rmsbox : tuple of floats, optional
        Value of adaptive_rms_box PyBDSF parameter
    use_adaptive_threshold : bool, optional
        If True, use an adaptive threshold estimated from the negative values in
        the image
    adaptive_thresh : float, optional
        If adaptive_rmsbox is True, this value sets the threshold above
        which a source will use the small rms box
    peel_bright : bool, optional
        If True, bright sources were peeled, so add them back before filtering
    """
    if rmsbox is not None and isinstance(rmsbox, str):
        rmsbox = eval(rmsbox)
    if isinstance(rmsbox_bright, str):
        rmsbox_bright = eval(rmsbox_bright)
    adaptive_rmsbox = misc.string2bool(adaptive_rmsbox)
    use_adaptive_threshold = misc.string2bool(use_adaptive_threshold)
    if isinstance(beamMS, str):
        beamMS = misc.string2list(beamMS)
    peel_bright = misc.string2bool(peel_bright)

    # Try to set the TMPDIR env var to a short path, to ensure we do not hit the length
    # limits for socket paths (used by the multiprocessing module). We try a number of
    # standard paths (the same ones used in the tempfile Python library)
    old_tmpdir = os.environ["TMPDIR"]
    for tmpdir in ['/tmp', '/var/tmp', '/usr/tmp']:
        if os.path.exists(tmpdir):
            os.environ["TMPDIR"] = tmpdir
            break

    # Run PyBDSF to make a mask for grouping
    if use_adaptive_threshold:
        # Get an estimate of the rms by running PyBDSF to make an rms map
        img = bdsf.process_image(input_image, mean_map='zero', rms_box=rmsbox,
                                 thresh_pix=threshpix, thresh_isl=threshisl,
                                 thresh='hard', adaptive_rms_box=adaptive_rmsbox,
                                 adaptive_thresh=adaptive_thresh, rms_box_bright=rmsbox_bright,
                                 rms_map=True, quiet=True, stop_at='isl')

        # Find min and max pixels
        max_neg_val = abs(np.min(img.ch0_arr))
        max_neg_pos = np.where(img.ch0_arr == np.min(img.ch0_arr))
        max_pos_val = abs(np.max(img.ch0_arr))
        max_pos_pos = np.where(img.ch0_arr == np.max(img.ch0_arr))

        # Estimate new thresh_isl from min pixel value's sigma, but don't let
        # it get higher than 1/2 of the peak's sigma
        threshisl_neg = 2.0 * max_neg_val / img.rms_arr[max_neg_pos][0]
        max_sigma = max_pos_val / img.rms_arr[max_pos_pos][0]
        if threshisl_neg > max_sigma / 2.0:
            threshisl_neg = max_sigma / 2.0

        # Use the new threshold only if it is larger than the user-specified one
        if threshisl_neg > threshisl:
            threshisl = threshisl_neg

    img = bdsf.process_image(input_image, mean_map='zero', rms_box=rmsbox,
                             thresh_pix=threshpix, thresh_isl=threshisl,
                             thresh='hard', adaptive_rms_box=adaptive_rmsbox,
                             adaptive_thresh=adaptive_thresh, rms_box_bright=rmsbox_bright,
                             atrous_do=True, atrous_jmax=3, rms_map=True, quiet=True)

    emptysky = False
    if img.nisl > 0:
        maskfile = input_image + '.mask'
        img.export_image(outfile=maskfile, clobber=True, img_type='island_mask')

        # Construct polygon needed to trim the mask to the sector
        header = pyfits.getheader(maskfile, 0)
        w = wcs.WCS(header)
        RAind = w.axis_type_names.index('RA')
        Decind = w.axis_type_names.index('DEC')
        vertices = misc.read_vertices(vertices_file)
        RAverts = vertices[0]
        Decverts = vertices[1]
        verts = []
        for RAvert, Decvert in zip(RAverts, Decverts):
            ra_dec = np.array([[0.0, 0.0, 0.0, 0.0]])
            ra_dec[0][RAind] = RAvert
            ra_dec[0][Decind] = Decvert
            verts.append((w.wcs_world2pix(ra_dec, 0)[0][RAind], w.wcs_world2pix(ra_dec, 0)[0][Decind]))

        hdu = pyfits.open(maskfile, memmap=False)
        data = hdu[0].data

        # Rasterize the poly
        data_rasterize = data[0, 0, :, :]
        data_rasterize = misc.rasterize(verts, data_rasterize)
        data[0, 0, :, :] = data_rasterize

        hdu[0].data = data
        hdu.writeto(maskfile, overwrite=True)

        # Now filter the sky model using the mask made above
        if len(beamMS) > 1:
            # Select the best MS for the beam attenuation
            ms_times = []
            for ms in beamMS:
                tab = pt.table(ms, ack=False)
                ms_times.append(np.mean(tab.getcol('TIME')))
                tab.close()
            ms_times_sorted = sorted(ms_times)
            mid_time = ms_times_sorted[int(len(ms_times)/2)]
            beam_ind = ms_times.index(mid_time)
        else:
            beam_ind = 0
        try:
            s = lsmtool.load(input_skymodel_pb, beamMS=beamMS[beam_ind])
        except astropy.io.ascii.InconsistentTableError:
            emptysky = True
        if peel_bright:
            try:
                # If bright sources were peeled before imaging, add them back
                s_bright = lsmtool.load(input_bright_skymodel_pb, beamMS=beamMS[beam_ind])

                # Rename the bright sources, removing the '_sector_*' added previously
                # (otherwise the '_sector_*' text will be added every iteration,
                # eventually making for very long source names)
                new_names = [name.split('_sector')[0] for name in s_bright.getColValues('Name')]
                s_bright.setColValues('Name', new_names)
                if not emptysky:
                    s.concatenate(s_bright)
                else:
                    s = s_bright
                    emptysky = False
            except astropy.io.ascii.InconsistentTableError:
                pass
        if not emptysky:
            s.select('{} == True'.format(maskfile))  # keep only those in PyBDSF masked regions
            if len(s) == 0:
                emptysky = True
            else:
                # Write out apparent and true-sky models
                del img  # helps reduce memory usage
                s.group(maskfile)  # group the sky model by mask islands
                s.write(output_root+'.true_sky.txt', clobber=True)
                s.write(output_root+'.apparent_sky.txt', clobber=True, applyBeam=True)
    else:
        emptysky = True

    if emptysky:
        # No sources cleaned/found in image, so just make a dummy sky model with single,
        # very faint source at center
        dummylines = ["Format = Name, Type, Patch, Ra, Dec, I, SpectralIndex, LogarithmicSI, "
                      "ReferenceFrequency='100000000.0', MajorAxis, MinorAxis, Orientation\n"]
        ra, dec = img.pix2sky((img.shape[-2]/2.0, img.shape[-1]/2.0))
        if ra < 0.0:
            ra += 360.0
        ra = misc.ra2hhmmss(ra)
        sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.6f" % (ra[2])).zfill(6)
        dec = misc.dec2ddmmss(dec)
        decsign = ('-' if dec[3] < 0 else '+')
        sdec = decsign+str(dec[0]).zfill(2)+'.'+str(dec[1]).zfill(2)+'.'+str("%.6f" % (dec[2])).zfill(6)
        dummylines.append(',,p1,{0},{1}\n'.format(sra, sdec))
        dummylines.append('s0c0,POINT,p1,{0},{1},0.00000001,'
                          '[0.0,0.0],false,100000000.0,,,\n'.format(sra, sdec))
        with open(output_root+'.apparent_sky.txt', 'w') as f:
            f.writelines(dummylines)
        with open(output_root+'.true_sky.txt', 'w') as f:
            f.writelines(dummylines)

    # Set the TMPDIR env var back to its original value (if there was one)
    if old_tmpdir is not None:
        os.environ["TMPDIR"] = old_tmpdir
Пример #56
0
def main(ms_input,
         input_colname,
         output_data_colname,
         output_weights_colname,
         baseline_file,
         delta_theta_deg,
         target_peak_reduction_factor=0.99):
    """
    Pre-average data using a sliding Gaussian kernel in frequency

    Parameters
    ----------
    ms_input : str
        MS filename
    input_colname : str
        Name of the column in the MS from which the data are read
    output_data_colname : str
        Name of the column in the MS into which the averaged data are written
    output_weights_colname : str
        Name of the column in the MS into which the averaged data weights are
        written
    baseline_file : str
        Filename of pickled baseline lengths
    delta_theta_deg : float
        Radius of calibration region in degrees
    target_peak_reduction_factor : float, optional
        Target reduction in peak flux density. Note: this reduction is in
        addition to any incurred by earlier averaging

    """
    if os.path.exists(baseline_file):
        # pickled data must be opened in binary mode
        with open(baseline_file, 'rb') as f:
            baseline_dict = pickle.load(f)
    else:
        print('Cannot find baseline_file. Exiting...')
        sys.exit(1)
    delta_theta_deg = float(delta_theta_deg)
    target_peak_reduction_factor = float(target_peak_reduction_factor)

    ms = pt.table(ms_input, readonly=False, ack=False)
    ant1_list = ms.getcol('ANTENNA1')
    ant2_list = ms.getcol('ANTENNA2')
    data_all = ms.getcol(input_colname)
    weights_all = ms.getcol('WEIGHT_SPECTRUM')
    flags = ms.getcol('FLAG')

    # Get lowest frequency of MS and channel width
    sw = pt.table(ms_input + '::SPECTRAL_WINDOW', ack=False)
    freq_hz = sw.col('CHAN_FREQ')[0][0]
    chan_width_hz = sw.col('CHAN_WIDTH')[0][0]
    sw.close()

    flags[np.isnan(data_all)] = True  # flag NaNs
    weights_all = weights_all * ~flags  # set weight of flagged data to 0

    # Check that all NaNs are flagged
    if np.count_nonzero(np.isnan(data_all[~flags])) > 0:
        logging.error('NaNs in unflagged data in {0}!'.format(ms_input))
        sys.exit(1)

    # Weight data and set bad data to 0 so nans do not propagate
    data_all = np.nan_to_num(data_all * weights_all)

    # Iteration on baseline combination
    for ant in itertools.product(set(ant1_list), set(ant2_list)):
        if ant[0] >= ant[1]:
            continue
        sel1 = np.where(ant1_list == ant[0])[0]
        sel2 = np.where(ant2_list == ant[1])[0]
        sel_list = sorted(list(frozenset(sel1).intersection(sel2)))

        data = data_all[sel_list, :, :]
        weights = weights_all[sel_list, :, :]

        # compute the Gaussian sigma from the max bandwidth over which we
        # can average and avoid significant bandwidth smearing but limited to
        # no more than 3 MHz (to avoid smoothing over the beam-induced effects)
        lambda_km = 299792.458 / freq_hz
        dist_km = baseline_dict['{0}-{1}'.format(ant[0], ant[1])]
        if dist_km > 0:
            resolution_deg = lambda_km / dist_km * 180.0 / np.pi
            stddev_hz = min(
                3e6,
                get_target_bandwidth(freq_hz, delta_theta_deg, resolution_deg,
                                     target_peak_reduction_factor) / 4.0)
            stddev_nchan = stddev_hz / chan_width_hz * np.sqrt(0.5 / dist_km)

            # smear weighted data and weights
            dataR = gfilter(np.real(data), stddev_nchan, axis=1)
            dataI = gfilter(np.imag(data), stddev_nchan, axis=1)
            weights = gfilter(weights, stddev_nchan, axis=1)

            # re-create data
            data = (dataR + 1j * dataI)
            data[(weights != 0)] /= weights[(weights != 0)]  # avoid divbyzero
            data_all[sel_list, :, :] = data
            weights_all[sel_list, :, :] = weights

    # Add the output columns if needed
    if output_data_colname not in ms.colnames():
        desc = ms.getcoldesc(input_colname)
        desc['name'] = output_data_colname
        ms.addcols(desc)
    if output_weights_colname not in ms.colnames():
        desc = ms.getcoldesc('WEIGHT_SPECTRUM')
        desc['name'] = output_weights_colname
        ms.addcols(desc)

    ms.putcol(output_data_colname, data_all)
    ms.putcol('FLAG', flags)  # this saves flags of nans, which is always good
    ms.putcol(output_weights_colname, weights_all)
    ms.close()
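
The core trick in the loop above is to smooth (data * weights) and the weights separately, then divide, so that flagged samples (weight 0) do not bias the average. A minimal standalone sketch of that normalization, assuming scipy is available (the shapes and sigma below are made up for illustration):

import numpy as np
from scipy.ndimage import gaussian_filter1d

rng = np.random.default_rng(0)
data = rng.normal(size=(10, 64)) + 1j * rng.normal(size=(10, 64))
weights = np.ones(data.shape)
weights[:, 20:24] = 0.0  # pretend these channels are flagged

# smooth the weighted data and the weights along the channel axis
num = (gaussian_filter1d(data.real * weights, 3.0, axis=1) +
       1j * gaussian_filter1d(data.imag * weights, 3.0, axis=1))
den = gaussian_filter1d(weights, 3.0, axis=1)

smoothed = num.copy()
smoothed[den != 0] /= den[den != 0]  # avoid division by zero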
Пример #57
0
import casacore.tables as tables

print('tmp-compressed.ms:')
t = tables.table('tmp-compressed.ms')
print(t.getdminfo())
t.close()

print('tmp-uncompressed.ms:')
t = tables.table('tmp-uncompressed.ms')
print(t.getdminfo())
t.close()

print('tmp-weightcompressed.ms:')
t = tables.table('tmp-weightcompressed.ms')
print(t.getdminfo())
t.close()
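
getdminfo() returns a dict describing the data manager behind each column, which is how the three tables can be compared: a Dysco-compressed DATA column reports a different storage manager than an uncompressed one. A hedged sketch of narrowing the output to a single column, assuming this python-casacore version supports the per-column call:

t = tables.table('tmp-compressed.ms')
dm = t.getdminfo('DATA')  # info for the manager handling DATA only
print(dm.get('TYPE'))     # e.g. 'DyscoStMan' for a Dysco-compressed column
t.close()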
Пример #58
0
def main(infile,
         clean_sig=6,
         map_size=512,
         pix_size=100,
         obs_length=900,
         datacolumn='CORRECTED_DATA',
         startmod=True,
         verbose=False,
         pols='I',
         catalogue=None):

    # current working directory
    current_dir = os.getcwd()
    ## make a working directory, move the data, and chdir
    # get filestem to make unique name
    infile = infile.rstrip('/')
    tmp = infile.split('/')
    filestem = tmp[-1].split('_')[0]
    # working directory
    work_dir = os.path.join(current_dir, 'difmap_{:s}'.format(filestem))
    os.mkdir(work_dir)
    os.system('cp -r {:s} {:s}'.format(infile, work_dir))
    os.chdir(work_dir)
    ## redefine infile
    infile = glob.glob(os.path.join(work_dir, tmp[-1]))[0]

    ## if a catalogue is specified, override the imaging parameters
    if catalogue is not None:
        print(
            'Catalogue is specified, reading information to set imaging parameters.'
        )
        ## get ra and dec
        [[[ra, dec]]] = ct.table(infile + '::FIELD',
                                 readonly=True).getcol('PHASE_DIR')
        # => shift for the negative angles before the conversion to deg (so that RA in [0;2pi])
        if ra < 0:
            ra = ra + 2 * np.pi
        # convert radians to degrees
        ra_deg = ra / np.pi * 180.
        dec_deg = dec / np.pi * 180.
        tgt_coords = SkyCoord(ra_deg, dec_deg, unit='deg')

        t = Table.read(catalogue, format='csv')
        ## more flexible RA/DEC column naming
        mycols = t.colnames
        ra_col = [val for val in mycols if val == 'RA']
        de_col = [val for val in mycols if val == 'DEC']
        if len(ra_col) == 1:
            ra_col = ra_col[0]
            de_col = de_col[0]
        else:
            ## pick LoTSS position
            ra_col = [val for val in mycols if val == 'RA_LOTSS'][0]
            de_col = [val for val in mycols if val == 'DEC_LOTSS'][0]
        coords = SkyCoord(t[ra_col], t[de_col], unit='deg')
        seps = coords.separation(tgt_coords).value
        src_idx = np.where(seps == np.min(seps))[0]
        src_tmp = t[src_idx]
        ## use LGZ_Size if available, otherwise use DC_Maj
        if 'LGZ_Size' in src_tmp.colnames:
            ## in arcsec
            size_asec = src_tmp['LGZ_Size'][0]
        else:
            size_asec = src_tmp['DC_Maj'][0] * 60. * 60.

        padding = 1.5
        possible_map_sizes = np.array([512, 1024, 2048, 4096, 8192])
        possible_map_asec = possible_map_sizes * float(
            pix_size) * 1e-3  ## convert to arcsec
        possible_idx = np.where(size_asec * padding <= possible_map_asec)[0]
        if len(possible_idx) >= 1:
            map_size = possible_map_sizes[np.min(possible_idx)]
            print(
                'Estimated source size {:s}, making image with {:s}x{:s} pixels ({:s}x{:s} arcsec)'
                .format(str(size_asec), str(map_size), str(map_size),
                        str(map_size * float(pix_size) * 1e-3),
                        str(map_size * float(pix_size) * 1e-3)))
        else:
            print(
                'Image size exceeds {:s} arcseconds! Are you sure you want to make this image?'
                .format(str(np.max(possible_map_asec))))
            print('Image size too large, aborting.')
            return

        total_flux = src_tmp['Total_flux'][0]

    if pols == 'I':
        ## use stokes I only for self-cal and imaging (best option)
        fitsfile = dif_script(infile,
                              pol=pols,
                              clean_sigma=clean_sig,
                              map_size=map_size,
                              pixel_size=pix_size,
                              obs_length=obs_length,
                              datacolumn=datacolumn,
                              startmod=startmod)
        corpltfile = glob.glob(os.path.join(work_dir, 'CORPLT'))[0]
        ampI, amperrI, phsI, phserrI, utI, stnI = corplt2array(corpltfile)
        corpltout = insert_into_filestem(
            corpltfile.replace('CORPLT', '_CORPLT_I'), filestem)
        os.system('mv {:s} {:s}'.format(corpltfile, corpltout))
    else:
        ## self-cal the polarisations separately
        ## create a difmap script
        fitsfile = dif_script(infile,
                              pol='XX',
                              clean_sigma=clean_sig,
                              map_size=map_size,
                              pixel_size=pix_size,
                              obs_length=obs_length,
                              datacolumn=datacolumn,
                              startmod=startmod)
        ## plot solutions and get values
        corpltfile = glob.glob(os.path.join(work_dir, 'CORPLT'))[0]
        ampXX, amperrXX, phsXX, phserrXX, utXX, stnXX = corplt2array(
            corpltfile)
        corpltout = insert_into_filestem(
            corpltfile.replace('CORPLT', '_CORPLT_XX'), filestem)
        os.system('mv {:s} {:s}'.format(corpltfile, corpltout))
        ## write a difmap script for the YY polarisation and run
        fitsfile = dif_script(infile,
                              pol='YY',
                              clean_sigma=clean_sig,
                              map_size=map_size,
                              pixel_size=pix_size,
                              obs_length=obs_length,
                              datacolumn=datacolumn,
                              startmod=startmod)
        ## plot solutions and get values
        corpltfile = glob.glob(os.path.join(work_dir, 'CORPLT'))[0]
        ampYY, amperrYY, phsYY, phserrYY, utYY, stnYY = corplt2array(
            corpltfile)
        corpltout = insert_into_filestem(
            corpltfile.replace('CORPLT', '_CORPLT_YY'), filestem)
        os.system('mv {:s} {:s}'.format(corpltfile, corpltout))

    if pols == 'I':
        ut = utI
        stn = stnI
    else:
        ut = utXX
        stn = stnXX

    ## convert time axis to lofar times
    myms = ct.table(infile)
    lof_times = myms.getcol('TIME')
    myms.close()
    utvec = ut - np.min(ut)
    time_ax = utvec + np.min(lof_times)

    ## get frequency axis
    myspw = ct.table(infile + '::SPECTRAL_WINDOW')
    freq_ax = np.squeeze(myspw.getcol('CHAN_FREQ'))
    myspw.close()

    if pols == 'I':
        ## split the stokes I correction across XX and YY
        tmp_amp = np.rollaxis(
            np.dstack((np.sqrt(ampI / 2.), np.sqrt(ampI / 2.))), 1, 0)
        tmp_phs = np.rollaxis(np.dstack((phsI, phsI)), 1, 0)
    else:
        ## combine XX and YY information and reformat axes
        tmp_amp = np.rollaxis(np.dstack((ampXX, ampYY)), 1, 0)
        tmp_phs = np.rollaxis(np.dstack((phsXX, phsYY)), 1, 0)
    ## expand to fill frequency axis
    tmp_amp2 = np.expand_dims(tmp_amp, axis=1)
    tmp_phs2 = np.expand_dims(tmp_phs, axis=1)
    amp = np.repeat(tmp_amp2, len(freq_ax), axis=1)
    phs = np.repeat(tmp_phs2, len(freq_ax), axis=1)
    phs = phs * -1.0  ## difmap has a different convention

    ## get antenna information
    new_ants = make_ant_table(stn)

    ## get pointing information
    ptg = ct.table(infile + '::FIELD')
    ptg_dir = ptg.getcol('PHASE_DIR')[0]
    ptg.close()
    new_dir = {}
    new_dir[filestem] = ptg_dir[0]

    ## write solutions to an h5parm
    h5parmfile = os.path.join(work_dir, filestem + '_sols.h5')
    out_h5 = h5parm(h5parmfile, readonly=False)
    out_solset = out_h5.makeSolset(solsetName='sol000')
    antenna_table = out_solset.obj._f_get_child('antenna')
    antenna_table.append(list(new_ants.items()))  # list() so this also works on Python 3
    out_solset.obj.source.append(list(new_dir.items()))
    out_solset.makeSoltab('amplitude',
                          axesNames=['time', 'freq', 'ant', 'pol'],
                          axesVals=[time_ax, freq_ax, stn, ['XX', 'YY']],
                          vals=amp,
                          weights=np.ones_like(amp))
    out_solset.makeSoltab('phase',
                          axesNames=['time', 'freq', 'ant', 'pol'],
                          axesVals=[time_ax, freq_ax, stn, ['XX', 'YY']],
                          vals=phs,
                          weights=np.ones_like(phs))
    out_h5.close()

    ## apply the solutions
    with open('applysols.parset', 'w') as f:
        f.write('msin={:s}\n'.format(infile))
        f.write('msin.datacolumn=DATA\n')
        f.write('msout=.\n')
        f.write('msout.datacolumn=CORRECTED_DATA\n')
        f.write('numthreads=6\n')
        f.write('steps=[applycal]\n')
        f.write('applycal.type=applycal\n')
        f.write('applycal.parmdb={:s}\n'.format(h5parmfile))
        f.write('applycal.steps=[applyphs,applyamp]\n')
        f.write('applycal.applyphs.correction=phase000\n')
        f.write('applycal.applyamp.correction=amplitude000\n')

    ss = 'NDPPP applysols.parset > applysols.log 2>&1'
    os.system(ss)

    ## rename files so they have unique names
    diflogs = glob.glob(os.path.join(work_dir, 'dif*'))
    for diflog in diflogs:
        diflogout = insert_into_filestem(diflog, filestem + '_')
        os.system('mv {:s} {:s}'.format(diflog, diflogout))

    ## Make a BBS format skymodel from the difmap XX model
    ## get reference frequency
    rf = ct.taql(
        'select REF_FREQUENCY from {:s}::SPECTRAL_WINDOW'.format(infile))
    ref_freq = rf.getcol('REF_FREQUENCY')[0]
    # find the model file
    modfiles = glob.glob(filestem + '*.mod')
    if pols == 'I':
        xx_file = modfiles[0]
    else:
        xx_file = [mf for mf in modfiles if 'XX' in mf][0]
    ## read the model
    xx_mod = read_mod(xx_file)
    ## find central point
    xx_cen = find_centre(xx_mod)
    ## and get the flux
    xx_flux = []
    for mykey in xx_mod.keys():
        xx_flux.append(xx_mod[mykey][0])
    ## write the model file
    outfile = os.path.join(work_dir, filestem + '_StokesI.mod')
    with open(outfile, 'w') as f:
        f.write(
            "# (Name, Type, Patch, Ra, Dec, I, Q, U, V, MajorAxis, MinorAxis, Orientation, ReferenceFrequency='{:s}', SpectralIndex='[]') = format\n\n"
            .format(str(ref_freq)))
        ## patch name
        patch_cen = lof_coords(
            SkyCoord(xx_cen[0], xx_cen[1], frame='icrs', unit='deg'))
        f.write(", , difmap_cc, {:s}\n".format(patch_cen))
        for mykey in xx_mod.keys():
            flux = xx_mod[mykey][0]
            coords = lof_coords(xx_mod[mykey][1])
            f.write(
                "{:s}, POINT, difmap_cc, {:s}, {:s}, 0.0, 0.0, 0.0, 0.00000e+00, 0.00000e+00, 0.00000e+00, {:s}, [-0.8]\n"
                .format(mykey, coords, str(flux), str(ref_freq)))

    ## convert to a sourcedb
    ss = 'makesourcedb in={:s} out={:s} format="<"'.format(
        outfile, outfile.replace('mod', 'skymodel'))
    os.system(ss)

    ## run wsclean
    #wsclean_name = filestem + '_wsclean'
    ## convert the im params to wsclean format
    #ss = 'wsclean -j 16 -mem 20 -v -reorder -update-model-required -weight uniform -mf-weighting -weighting-rank-filter 3 -name {:s} -size {:s} {:s} -padding 1.4 -scale {:s}asec -channels-out 6 -data-column CORRECTED_DATA -niter 10000 -auto-threshold 3 -auto-mask 5 -mgain 0.8 -join-channels -fit-spectral-pol 3 -fit-beam {:s}'.format( wsclean_name, str(map_size), str(map_size), str(float(pix_size)*0.001), infile )
    #os.system( ss )

    ## TO DO: move final files
    ## h5parm, images, log files, and skymodel
    image_files = glob.glob(os.path.join(work_dir, '*.ps'))
    log_files = glob.glob(os.path.join(work_dir, '*log'))
    #wsclean_ims = glob.glob( os.path.join( work_dir, '*wsclean*MFS*fits' ) )
    myh5parm = glob.glob(os.path.join(work_dir, '*h5'))
    skymodel = glob.glob(os.path.join(work_dir, '*skymodel'))
    #file_list = image_files + log_files + wsclean_ims + myh5parm + skymodel
    file_list = image_files + log_files + myh5parm + skymodel
    for myfile in file_list:
        ff = myfile.split('/')[-1]
        ss = 'mv {:s} {:s}'.format(myfile, os.path.join(current_dir, ff))
        os.system(ss)
    ## move the infile back and rename it
    tmp = infile.split('/')[-1]
    new_file = tmp + '.selfcal'
    selfcal_file = os.path.join(current_dir, new_file)
    os.system('cp -r {:s} {:s}'.format(infile, selfcal_file))

    print('done')
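
A quick sanity check that the solutions were written is to reopen the h5parm read-only and list its soltabs; a minimal sketch using the same losoto API as above (h5parmfile refers to the file created by this script):

from losoto.h5parm import h5parm

h5 = h5parm(h5parmfile, readonly=True)
solset = h5.getSolset('sol000')
print(solset.getSoltabNames())  # expect ['amplitude000', 'phase000']
h5.close()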
Пример #59
0
#!/usr/bin/python
# usage: plot_beamcorr.py <MS>

import os, sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import casacore.tables as pt

msname = sys.argv[1]
print "Plotting beam of " + msname

# get time
t = pt.table(msname)
times = t.getcol('TIME')[::600]  # every 600th sample (~10 min for 1 s integrations)
t.close()

# get direction
t = pt.table(msname + '/FIELD')
direction = t.getcol('PHASE_DIR')[0][0]
print "Direction:", direction
t.close()

# get stations
t = pt.table(msname + '/ANTENNA')
stations = t.getcol('NAME')
t.close()
print "Stations:", stations

import lofar.stationresponse as st
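
The script is cut off after this import; a hedged sketch of how it might continue, assuming the usual pystationresponse interface (the stationresponse constructor and evaluateStation call below are assumptions, not taken from the original):

# Assumed interface: stationresponse(msname) and evaluateStation(time, idx),
# the latter returning a per-channel 2x2 Jones matrix for one station
sr = st.stationresponse(msname)
beam_amp = []
for time in times:
    jones = sr.evaluateStation(time, 0)      # station index 0
    beam_amp.append(np.abs(jones[0, 0, 0]))  # XX element, first channel

plt.plot(times - times[0], beam_amp)
plt.xlabel('Time (s)')
plt.ylabel('|Beam XX|')
plt.savefig(msname + '_beam.png')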
Пример #60
0
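 # Unit-test snippet (casacore table wrapper): `unicode_string` and the helpers
 # (makescacoldesc, maketabdesc, table, join) come from the test module's
 # imports, which are not shown here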
 def test_getcol(self):
     c1 = makescacoldesc(unicode_string, 0)
     t = table(join(self.workdir, 'ascii'), maketabdesc([c1]), ack=False)
     t.getcol(unicode_string)